changeset 49627:7577b8644558

branching: merge stable into default
author Raphaël Gomès <rgomes@octobus.net>
date Sat, 19 Nov 2022 20:40:47 +0100
parents da636e7a6d63 (diff) 3324f39460e5 (current diff)
children eb383f093a01
diffstat 48 files changed, 1359 insertions(+), 357 deletions(-)
--- a/.gitlab/merge_request_templates/Default.md	Sat Nov 19 16:14:20 2022 +0100
+++ b/.gitlab/merge_request_templates/Default.md	Sat Nov 19 20:40:47 2022 +0100
@@ -1,5 +1,8 @@
 /assign_reviewer @mercurial.review
 
+
+<!--
+
 Welcome to the Mercurial Merge Request creation process:
 
 * Set a simple title for your MR,
@@ -11,3 +14,5 @@
 
 * https://www.mercurial-scm.org/wiki/ContributingChanges
 * https://www.mercurial-scm.org/wiki/Heptapod
+
+-->
--- a/contrib/check-code.py	Sat Nov 19 16:14:20 2022 +0100
+++ b/contrib/check-code.py	Sat Nov 19 20:40:47 2022 +0100
@@ -372,10 +372,6 @@
         ),
         (r'[^^+=*/!<>&| %-](\s=|=\s)[^= ]', "wrong whitespace around ="),
         (
-            r'\([^()]*( =[^=]|[^<>!=]= )',
-            "no whitespace around = for named parameters",
-        ),
-        (
             r'raise [^,(]+, (\([^\)]+\)|[^,\(\)]+)$',
             "don't use old-style two-argument raise, use Exception(message)",
         ),
--- a/hgdemandimport/demandimportpy3.py	Sat Nov 19 16:14:20 2022 +0100
+++ b/hgdemandimport/demandimportpy3.py	Sat Nov 19 20:40:47 2022 +0100
@@ -23,8 +23,6 @@
   enabled.
 """
 
-# This line is unnecessary, but it satisfies test-check-py3-compat.t.
-
 import contextlib
 import importlib.util
 import sys
@@ -43,6 +41,10 @@
         """Make the module load lazily."""
         with tracing.log('demandimport %s', module):
             if _deactivated or module.__name__ in ignores:
+                # Reset the loader on the module as super() does (issue6725)
+                module.__spec__.loader = self.loader
+                module.__loader__ = self.loader
+
                 self.loader.exec_module(module)
             else:
                 super().exec_module(module)
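
For context, a minimal standalone sketch (not part of the patch) of the stdlib pattern this hunk mirrors: importlib.util.LazyLoader also rebinds the module's loader before executing it, which is why demandimport now does the same for ignored modules (issue6725).

    import importlib.util
    import sys

    def lazy_import(name):
        # Standard LazyLoader recipe from the Python docs: the spec's
        # loader is wrapped, and exec_module() later restores the real
        # loader on the module object, as the hunk above emulates.
        spec = importlib.util.find_spec(name)
        loader = importlib.util.LazyLoader(spec.loader)
        spec.loader = loader
        module = importlib.util.module_from_spec(spec)
        sys.modules[name] = module
        loader.exec_module(module)
        return module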
--- a/hgext/fsmonitor/pywatchman/__init__.py	Sat Nov 19 16:14:20 2022 +0100
+++ b/hgext/fsmonitor/pywatchman/__init__.py	Sat Nov 19 20:40:47 2022 +0100
@@ -26,8 +26,6 @@
 # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-# no unicode literals
-
 import inspect
 import math
 import os
--- a/hgext/fsmonitor/pywatchman/capabilities.py	Sat Nov 19 16:14:20 2022 +0100
+++ b/hgext/fsmonitor/pywatchman/capabilities.py	Sat Nov 19 20:40:47 2022 +0100
@@ -26,8 +26,6 @@
 # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-# no unicode literals
-
 
 def parse_version(vstr):
     res = 0
--- a/hgext/fsmonitor/pywatchman/compat.py	Sat Nov 19 16:14:20 2022 +0100
+++ b/hgext/fsmonitor/pywatchman/compat.py	Sat Nov 19 20:40:47 2022 +0100
@@ -26,8 +26,6 @@
 # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-# no unicode literals
-
 import sys
 
 
--- a/hgext/fsmonitor/pywatchman/encoding.py	Sat Nov 19 16:14:20 2022 +0100
+++ b/hgext/fsmonitor/pywatchman/encoding.py	Sat Nov 19 20:40:47 2022 +0100
@@ -26,8 +26,6 @@
 # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-# no unicode literals
-
 import sys
 
 from . import compat
--- a/hgext/fsmonitor/pywatchman/load.py	Sat Nov 19 16:14:20 2022 +0100
+++ b/hgext/fsmonitor/pywatchman/load.py	Sat Nov 19 20:40:47 2022 +0100
@@ -26,8 +26,6 @@
 # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-# no unicode literals
-
 import ctypes
 
 
--- a/hgext/fsmonitor/pywatchman/pybser.py	Sat Nov 19 16:14:20 2022 +0100
+++ b/hgext/fsmonitor/pywatchman/pybser.py	Sat Nov 19 20:40:47 2022 +0100
@@ -26,8 +26,6 @@
 # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-# no unicode literals
-
 import binascii
 import collections
 import ctypes
--- a/hgext/lfs/blobstore.py	Sat Nov 19 16:14:20 2022 +0100
+++ b/hgext/lfs/blobstore.py	Sat Nov 19 20:40:47 2022 +0100
@@ -168,12 +168,16 @@
             # producing the response (but the server has no way of telling us
             # that), and we really don't need to try to write the response to
             # the localstore, because it's not going to match the expected.
+            # The server also uses this method to store data uploaded by the
+            # client, so if this happens on the server side, it's possible
+            # that the client crashed or an antivirus interfered with the
+            # upload.
             if content_length is not None and int(content_length) != size:
                 msg = (
                     b"Response length (%d) does not match Content-Length "
-                    b"header (%d): likely server-side crash"
+                    b"header (%d) for %s"
                 )
-                raise LfsRemoteError(_(msg) % (size, int(content_length)))
+                raise LfsRemoteError(_(msg) % (size, int(content_length), oid))
 
             realoid = hex(sha256.digest())
             if realoid != oid:
--- a/hgext/remotefilelog/remotefilelog.py	Sat Nov 19 16:14:20 2022 +0100
+++ b/hgext/remotefilelog/remotefilelog.py	Sat Nov 19 20:40:47 2022 +0100
@@ -299,6 +299,7 @@
         deltaprevious=False,
         deltamode=None,
         sidedata_helpers=None,
+        debug_info=None,
     ):
         # we don't use any of these parameters here
         del nodesorder, revisiondata, assumehaveparentrevisions, deltaprevious
--- a/hgext/sqlitestore.py	Sat Nov 19 16:14:20 2022 +0100
+++ b/hgext/sqlitestore.py	Sat Nov 19 20:40:47 2022 +0100
@@ -589,6 +589,7 @@
         assumehaveparentrevisions=False,
         deltamode=repository.CG_DELTAMODE_STD,
         sidedata_helpers=None,
+        debug_info=None,
     ):
         if nodesorder not in (b'nodes', b'storage', b'linear', None):
             raise error.ProgrammingError(
--- a/mercurial/cext/bdiff.pyi	Sat Nov 19 16:14:20 2022 +0100
+++ b/mercurial/cext/bdiff.pyi	Sat Nov 19 20:40:47 2022 +0100
@@ -5,7 +5,7 @@
 
 version: int
 
-def bdiff(a: bytes, b: bytes): bytes
+def bdiff(a: bytes, b: bytes) -> bytes: ...
 def blocks(a: bytes, b: bytes) -> List[Tuple[int, int, int, int]]: ...
 def fixws(s: bytes, allws: bool) -> bytes: ...
 def splitnewlines(text: bytes) -> List[bytes]: ...
--- a/mercurial/cext/osutil.pyi	Sat Nov 19 16:14:20 2022 +0100
+++ b/mercurial/cext/osutil.pyi	Sat Nov 19 20:40:47 2022 +0100
@@ -2,6 +2,7 @@
     AnyStr,
     IO,
     List,
+    Optional,
     Sequence,
 )
 
@@ -15,7 +16,7 @@
     st_mtime: int
     st_ctime: int
 
-def listdir(path: bytes, st: bool, skip: bool) -> List[stat]: ...
+def listdir(path: bytes, st: bool, skip: Optional[bool]) -> List[stat]: ...
 def posixfile(name: AnyStr, mode: bytes, buffering: int) -> IO: ...
 def statfiles(names: Sequence[bytes]) -> List[stat]: ...
 def setprocname(name: bytes) -> None: ...
--- a/mercurial/cffi/bdiff.py	Sat Nov 19 16:14:20 2022 +0100
+++ b/mercurial/cffi/bdiff.py	Sat Nov 19 20:40:47 2022 +0100
@@ -8,6 +8,11 @@
 
 import struct
 
+from typing import (
+    List,
+    Tuple,
+)
+
 from ..pure.bdiff import *
 from . import _bdiff  # pytype: disable=import-error
 
@@ -15,7 +20,7 @@
 lib = _bdiff.lib
 
 
-def blocks(sa, sb):
+def blocks(sa: bytes, sb: bytes) -> List[Tuple[int, int, int, int]]:
     a = ffi.new(b"struct bdiff_line**")
     b = ffi.new(b"struct bdiff_line**")
     ac = ffi.new(b"char[]", str(sa))
@@ -29,7 +34,7 @@
         count = lib.bdiff_diff(a[0], an, b[0], bn, l)
         if count < 0:
             raise MemoryError
-        rl = [None] * count
+        rl = [(0, 0, 0, 0)] * count
         h = l.next
         i = 0
         while h:
@@ -43,7 +48,7 @@
     return rl
 
 
-def bdiff(sa, sb):
+def bdiff(sa: bytes, sb: bytes) -> bytes:
     a = ffi.new(b"struct bdiff_line**")
     b = ffi.new(b"struct bdiff_line**")
     ac = ffi.new(b"char[]", str(sa))
--- a/mercurial/cffi/mpatch.py	Sat Nov 19 16:14:20 2022 +0100
+++ b/mercurial/cffi/mpatch.py	Sat Nov 19 20:40:47 2022 +0100
@@ -6,6 +6,8 @@
 # GNU General Public License version 2 or any later version.
 
 
+from typing import List
+
 from ..pure.mpatch import *
 from ..pure.mpatch import mpatchError  # silence pyflakes
 from . import _mpatch  # pytype: disable=import-error
@@ -26,7 +28,7 @@
     return container[0]
 
 
-def patches(text, bins):
+def patches(text: bytes, bins: List[bytes]) -> bytes:
     lgt = len(bins)
     all = []
     if not lgt:
--- a/mercurial/changegroup.py	Sat Nov 19 16:14:20 2022 +0100
+++ b/mercurial/changegroup.py	Sat Nov 19 20:40:47 2022 +0100
@@ -105,6 +105,164 @@
                 os.unlink(cleanup)
 
 
+def _dbg_ubdl_line(
+    ui,
+    indent,
+    key,
+    base_value=None,
+    percentage_base=None,
+    percentage_key=None,
+):
+    """Print one line of debug_unbundle_debug_info"""
+    line = b"DEBUG-UNBUNDLING: "
+    line += b' ' * (2 * indent)
+    key += b":"
+    padding = b''
+    if base_value is not None:
+        assert len(key) + 1 + (2 * indent) <= _KEY_PART_WIDTH
+        line += key.ljust(_KEY_PART_WIDTH - (2 * indent))
+        if isinstance(base_value, float):
+            line += b"%14.3f seconds" % base_value
+        else:
+            line += b"%10d" % base_value
+            padding = b'            '
+    else:
+        line += key
+
+    if percentage_base is not None:
+        line += padding
+        padding = b''
+        assert base_value is not None
+        percentage = base_value * 100 // percentage_base
+        if percentage_key is not None:
+            line += b" (%3d%% of %s)" % (
+                percentage,
+                percentage_key,
+            )
+        else:
+            line += b" (%3d%%)" % percentage
+
+    line += b'\n'
+    ui.write_err(line)
+
+
+def _sumf(items):
+    # python < 3.8 does not support a `start=0.0` argument to sum
+    # So we have to cheat a bit until we drop support for those version
+    if not items:
+        return 0.0
+    return sum(items)
+
+
+def display_unbundle_debug_info(ui, debug_info):
+    """display an unbundling report from debug information"""
+    cl_info = []
+    mn_info = []
+    fl_info = []
+    _dispatch = [
+        (b'CHANGELOG:', cl_info),
+        (b'MANIFESTLOG:', mn_info),
+        (b'FILELOG:', fl_info),
+    ]
+    for e in debug_info:
+        for prefix, info in _dispatch:
+            if e["target-revlog"].startswith(prefix):
+                info.append(e)
+                break
+        else:
+            assert False, 'unreachable'
+    each_info = [
+        (b'changelog', cl_info),
+        (b'manifests', mn_info),
+        (b'files', fl_info),
+    ]
+
+    # General Revision Counts
+    _dbg_ubdl_line(ui, 0, b'revisions', len(debug_info))
+    for key, info in each_info:
+        if not info:
+            continue
+        _dbg_ubdl_line(ui, 1, key, len(info), len(debug_info))
+
+    # General Time spent
+    all_durations = [e['duration'] for e in debug_info]
+    all_durations.sort()
+    total_duration = _sumf(all_durations)
+    _dbg_ubdl_line(ui, 0, b'total-time', total_duration)
+
+    for key, info in each_info:
+        if not info:
+            continue
+        durations = [e['duration'] for e in info]
+        durations.sort()
+        _dbg_ubdl_line(ui, 1, key, _sumf(durations), total_duration)
+
+    # Count and cache reuse per delta types
+    each_types = {}
+    for key, info in each_info:
+        each_types[key] = types = {
+            b'full': 0,
+            b'full-cached': 0,
+            b'snapshot': 0,
+            b'snapshot-cached': 0,
+            b'delta': 0,
+            b'delta-cached': 0,
+            b'unknown': 0,
+            b'unknown-cached': 0,
+        }
+        for e in info:
+            types[e['type']] += 1
+            if e['using-cached-base']:
+                types[e['type'] + b'-cached'] += 1
+
+    EXPECTED_TYPES = (b'full', b'snapshot', b'delta', b'unknown')
+    if debug_info:
+        _dbg_ubdl_line(ui, 0, b'type-count')
+    for key, info in each_info:
+        if info:
+            _dbg_ubdl_line(ui, 1, key)
+        t = each_types[key]
+        for tn in EXPECTED_TYPES:
+            if t[tn]:
+                tc = tn + b'-cached'
+                _dbg_ubdl_line(ui, 2, tn, t[tn])
+                _dbg_ubdl_line(ui, 3, b'cached', t[tc], t[tn])
+
+    # time perf delta types and reuse
+    each_type_time = {}
+    for key, info in each_info:
+        each_type_time[key] = t = {
+            b'full': [],
+            b'full-cached': [],
+            b'snapshot': [],
+            b'snapshot-cached': [],
+            b'delta': [],
+            b'delta-cached': [],
+            b'unknown': [],
+            b'unknown-cached': [],
+        }
+        for e in info:
+            t[e['type']].append(e['duration'])
+            if e['using-cached-base']:
+                t[e['type'] + b'-cached'].append(e['duration'])
+        for t_key, value in list(t.items()):
+            value.sort()
+            t[t_key] = _sumf(value)
+
+    if debug_info:
+        _dbg_ubdl_line(ui, 0, b'type-time')
+    for key, info in each_info:
+        if info:
+            _dbg_ubdl_line(ui, 1, key)
+        t = each_type_time[key]
+        td = total_duration  # to save space on the next lines
+        for tn in EXPECTED_TYPES:
+            if t[tn]:
+                tc = tn + b'-cached'
+                _dbg_ubdl_line(ui, 2, tn, t[tn], td, b"total")
+                _dbg_ubdl_line(ui, 3, b'cached', t[tc], td, b"total")
+
+
 class cg1unpacker:
     """Unpacker for cg1 changegroup streams.
 
@@ -254,7 +412,15 @@
                     pos = next
             yield closechunk()
 
-    def _unpackmanifests(self, repo, revmap, trp, prog, addrevisioncb=None):
+    def _unpackmanifests(
+        self,
+        repo,
+        revmap,
+        trp,
+        prog,
+        addrevisioncb=None,
+        debug_info=None,
+    ):
         self.callback = prog.increment
         # no need to check for empty manifest group here:
         # if the result of the merge of 1 and 2 is the same in 3 and 4,
@@ -263,7 +429,13 @@
         self.manifestheader()
         deltas = self.deltaiter()
         storage = repo.manifestlog.getstorage(b'')
-        storage.addgroup(deltas, revmap, trp, addrevisioncb=addrevisioncb)
+        storage.addgroup(
+            deltas,
+            revmap,
+            trp,
+            addrevisioncb=addrevisioncb,
+            debug_info=debug_info,
+        )
         prog.complete()
         self.callback = None
 
@@ -292,6 +464,10 @@
         """
         repo = repo.unfiltered()
 
+        debug_info = None
+        if repo.ui.configbool(b'debug', b'unbundling-stats'):
+            debug_info = []
+
         # Only useful if we're adding sidedata categories. If both peers have
         # the same categories, then we simply don't do anything.
         adding_sidedata = (
@@ -366,6 +542,7 @@
                 alwayscache=True,
                 addrevisioncb=onchangelog,
                 duplicaterevisioncb=ondupchangelog,
+                debug_info=debug_info,
             ):
                 repo.ui.develwarn(
                     b'applied empty changelog from changegroup',
@@ -413,6 +590,7 @@
                 trp,
                 progress,
                 addrevisioncb=on_manifest_rev,
+                debug_info=debug_info,
             )
 
             needfiles = {}
@@ -449,6 +627,7 @@
                 efiles,
                 needfiles,
                 addrevisioncb=on_filelog_rev,
+                debug_info=debug_info,
             )
 
             if sidedata_helpers:
@@ -567,6 +746,8 @@
                     b'changegroup-runhooks-%020i' % clstart,
                     lambda tr: repo._afterlock(runhooks),
                 )
+            if debug_info is not None:
+                display_unbundle_debug_info(repo.ui, debug_info)
         finally:
             repo.ui.flush()
         # never return 0 here:
@@ -626,9 +807,22 @@
         protocol_flags = 0
         return node, p1, p2, deltabase, cs, flags, protocol_flags
 
-    def _unpackmanifests(self, repo, revmap, trp, prog, addrevisioncb=None):
+    def _unpackmanifests(
+        self,
+        repo,
+        revmap,
+        trp,
+        prog,
+        addrevisioncb=None,
+        debug_info=None,
+    ):
         super(cg3unpacker, self)._unpackmanifests(
-            repo, revmap, trp, prog, addrevisioncb=addrevisioncb
+            repo,
+            revmap,
+            trp,
+            prog,
+            addrevisioncb=addrevisioncb,
+            debug_info=debug_info,
         )
         for chunkdata in iter(self.filelogheader, {}):
             # If we get here, there are directory manifests in the changegroup
@@ -636,7 +830,11 @@
             repo.ui.debug(b"adding %s revisions\n" % d)
             deltas = self.deltaiter()
             if not repo.manifestlog.getstorage(d).addgroup(
-                deltas, revmap, trp, addrevisioncb=addrevisioncb
+                deltas,
+                revmap,
+                trp,
+                addrevisioncb=addrevisioncb,
+                debug_info=debug_info,
             ):
                 raise error.Abort(_(b"received dir revlog group is empty"))
 
@@ -869,6 +1067,7 @@
     fullclnodes=None,
     precomputedellipsis=None,
     sidedata_helpers=None,
+    debug_info=None,
 ):
     """Calculate deltas for a set of revisions.
 
@@ -978,6 +1177,7 @@
         assumehaveparentrevisions=not ellipses,
         deltamode=deltamode,
         sidedata_helpers=sidedata_helpers,
+        debug_info=debug_info,
     )
 
     for i, revision in enumerate(revisions):
@@ -1003,6 +1203,187 @@
         progress.complete()
 
 
+def make_debug_info():
+    """ "build a "new" debug_info dictionnary
+
+    That dictionnary can be used to gather information about the bundle process
+    """
+    return {
+        'revision-total': 0,
+        'revision-changelog': 0,
+        'revision-manifest': 0,
+        'revision-files': 0,
+        'file-count': 0,
+        'merge-total': 0,
+        'available-delta': 0,
+        'available-full': 0,
+        'delta-against-prev': 0,
+        'delta-full': 0,
+        'delta-against-p1': 0,
+        'denied-delta-candeltafn': 0,
+        'denied-base-not-available': 0,
+        'reused-storage-delta': 0,
+        'computed-delta': 0,
+    }
+
+
+def merge_debug_info(base, other):
+    """merge the debug information from <other> into <base>
+
+    This function can be used to fold lower level information into a higher level one.
+    """
+    for key in (
+        'revision-total',
+        'revision-changelog',
+        'revision-manifest',
+        'revision-files',
+        'merge-total',
+        'available-delta',
+        'available-full',
+        'delta-against-prev',
+        'delta-full',
+        'delta-against-p1',
+        'denied-delta-candeltafn',
+        'denied-base-not-available',
+        'reused-storage-delta',
+        'computed-delta',
+    ):
+        base[key] += other[key]
+
+
+_KEY_PART_WIDTH = 17
+
+
+def _dbg_bdl_line(
+    ui,
+    indent,
+    key,
+    base_value=None,
+    percentage_base=None,
+    percentage_key=None,
+    percentage_ref=None,
+    extra=None,
+):
+    """Print one line of debug_bundle_debug_info"""
+    line = b"DEBUG-BUNDLING: "
+    line += b' ' * (2 * indent)
+    key += b":"
+    if base_value is not None:
+        assert len(key) + 1 + (2 * indent) <= _KEY_PART_WIDTH
+        line += key.ljust(_KEY_PART_WIDTH - (2 * indent))
+        line += b"%10d" % base_value
+    else:
+        line += key
+
+    if percentage_base is not None:
+        assert base_value is not None
+        percentage = base_value * 100 // percentage_base
+        if percentage_key is not None:
+            line += b" (%d%% of %s %d)" % (
+                percentage,
+                percentage_key,
+                percentage_ref,
+            )
+        else:
+            line += b" (%d%%)" % percentage
+
+    if extra:
+        line += b" "
+        line += extra
+
+    line += b'\n'
+    ui.write_err(line)
+
+
+def display_bundling_debug_info(
+    ui,
+    debug_info,
+    cl_debug_info,
+    mn_debug_info,
+    fl_debug_info,
+):
+    """display debug information gathered during a bundling through `ui`"""
+    d = debug_info
+    c = cl_debug_info
+    m = mn_debug_info
+    f = fl_debug_info
+    all_info = [
+        (b"changelog", b"cl", c),
+        (b"manifests", b"mn", m),
+        (b"files", b"fl", f),
+    ]
+    _dbg_bdl_line(ui, 0, b'revisions', d['revision-total'])
+    _dbg_bdl_line(ui, 1, b'changelog', d['revision-changelog'])
+    _dbg_bdl_line(ui, 1, b'manifest', d['revision-manifest'])
+    extra = b'(for %d revlogs)' % d['file-count']
+    _dbg_bdl_line(ui, 1, b'files', d['revision-files'], extra=extra)
+    if d['merge-total']:
+        _dbg_bdl_line(ui, 1, b'merge', d['merge-total'], d['revision-total'])
+    for k, __, v in all_info:
+        if v['merge-total']:
+            _dbg_bdl_line(ui, 2, k, v['merge-total'], v['revision-total'])
+
+    _dbg_bdl_line(ui, 0, b'deltas')
+    _dbg_bdl_line(
+        ui,
+        1,
+        b'from-storage',
+        d['reused-storage-delta'],
+        percentage_base=d['available-delta'],
+        percentage_key=b"available",
+        percentage_ref=d['available-delta'],
+    )
+
+    if d['denied-delta-candeltafn']:
+        _dbg_bdl_line(ui, 2, b'denied-fn', d['denied-delta-candeltafn'])
+    for __, k, v in all_info:
+        if v['denied-delta-candeltafn']:
+            _dbg_bdl_line(ui, 3, k, v['denied-delta-candeltafn'])
+
+    if d['denied-base-not-available']:
+        _dbg_bdl_line(ui, 2, b'denied-nb', d['denied-base-not-available'])
+    for k, __, v in all_info:
+        if v['denied-base-not-available']:
+            _dbg_bdl_line(ui, 3, k, v['denied-base-not-available'])
+
+    if d['computed-delta']:
+        _dbg_bdl_line(ui, 1, b'computed', d['computed-delta'])
+
+    if d['available-full']:
+        _dbg_bdl_line(
+            ui,
+            2,
+            b'full',
+            d['delta-full'],
+            percentage_base=d['available-full'],
+            percentage_key=b"native",
+            percentage_ref=d['available-full'],
+        )
+    for k, __, v in all_info:
+        if v['available-full']:
+            _dbg_bdl_line(
+                ui,
+                3,
+                k,
+                v['delta-full'],
+                percentage_base=v['available-full'],
+                percentage_key=b"native",
+                percentage_ref=v['available-full'],
+            )
+
+    if d['delta-against-prev']:
+        _dbg_bdl_line(ui, 2, b'previous', d['delta-against-prev'])
+    for k, __, v in all_info:
+        if v['delta-against-prev']:
+            _dbg_bdl_line(ui, 3, k, v['delta-against-prev'])
+
+    if d['delta-against-p1']:
+        _dbg_bdl_line(ui, 2, b'parent-1', d['delta-against-p1'])
+    for k, __, v in all_info:
+        if v['delta-against-p1']:
+            _dbg_bdl_line(ui, 3, k, v['delta-against-p1'])
+
+
 class cgpacker:
     def __init__(
         self,
@@ -1086,13 +1467,21 @@
             self._verbosenote = lambda s: None
 
     def generate(
-        self, commonrevs, clnodes, fastpathlinkrev, source, changelog=True
+        self,
+        commonrevs,
+        clnodes,
+        fastpathlinkrev,
+        source,
+        changelog=True,
     ):
         """Yield a sequence of changegroup byte chunks.
         If changelog is False, changelog data won't be added to changegroup
         """
 
+        debug_info = None
         repo = self._repo
+        if repo.ui.configbool(b'debug', b'bundling-stats'):
+            debug_info = make_debug_info()
         cl = repo.changelog
 
         self._verbosenote(_(b'uncompressed size of bundle content:\n'))
@@ -1107,14 +1496,19 @@
                 # correctly advertise its sidedata categories directly.
                 remote_sidedata = repo._wanted_sidedata
             sidedata_helpers = sidedatamod.get_sidedata_helpers(
-                repo, remote_sidedata
+                repo,
+                remote_sidedata,
             )
 
+        cl_debug_info = None
+        if debug_info is not None:
+            cl_debug_info = make_debug_info()
         clstate, deltas = self._generatechangelog(
             cl,
             clnodes,
             generate=changelog,
             sidedata_helpers=sidedata_helpers,
+            debug_info=cl_debug_info,
         )
         for delta in deltas:
             for chunk in _revisiondeltatochunks(
@@ -1126,6 +1520,9 @@
         close = closechunk()
         size += len(close)
         yield closechunk()
+        if debug_info is not None:
+            merge_debug_info(debug_info, cl_debug_info)
+            debug_info['revision-changelog'] = cl_debug_info['revision-total']
 
         self._verbosenote(_(b'%8.i (changelog)\n') % size)
 
@@ -1133,6 +1530,9 @@
         manifests = clstate[b'manifests']
         changedfiles = clstate[b'changedfiles']
 
+        if debug_info is not None:
+            debug_info['file-count'] = len(changedfiles)
+
         # We need to make sure that the linkrev in the changegroup refers to
         # the first changeset that introduced the manifest or file revision.
         # The fastpath is usually safer than the slowpath, because the filelogs
@@ -1156,6 +1556,9 @@
         fnodes = {}  # needed file nodes
 
         size = 0
+        mn_debug_info = None
+        if debug_info is not None:
+            mn_debug_info = make_debug_info()
         it = self.generatemanifests(
             commonrevs,
             clrevorder,
@@ -1165,6 +1568,7 @@
             source,
             clstate[b'clrevtomanifestrev'],
             sidedata_helpers=sidedata_helpers,
+            debug_info=mn_debug_info,
         )
 
         for tree, deltas in it:
@@ -1185,6 +1589,9 @@
             close = closechunk()
             size += len(close)
             yield close
+        if debug_info is not None:
+            merge_debug_info(debug_info, mn_debug_info)
+            debug_info['revision-manifest'] = mn_debug_info['revision-total']
 
         self._verbosenote(_(b'%8.i (manifests)\n') % size)
         yield self._manifestsend
@@ -1199,6 +1606,9 @@
         manifests.clear()
         clrevs = {cl.rev(x) for x in clnodes}
 
+        fl_debug_info = None
+        if debug_info is not None:
+            fl_debug_info = make_debug_info()
         it = self.generatefiles(
             changedfiles,
             commonrevs,
@@ -1208,6 +1618,7 @@
             fnodes,
             clrevs,
             sidedata_helpers=sidedata_helpers,
+            debug_info=fl_debug_info,
         )
 
         for path, deltas in it:
@@ -1230,12 +1641,29 @@
             self._verbosenote(_(b'%8.i  %s\n') % (size, path))
 
         yield closechunk()
+        if debug_info is not None:
+            merge_debug_info(debug_info, fl_debug_info)
+            debug_info['revision-files'] = fl_debug_info['revision-total']
+
+        if debug_info is not None:
+            display_bundling_debug_info(
+                repo.ui,
+                debug_info,
+                cl_debug_info,
+                mn_debug_info,
+                fl_debug_info,
+            )
 
         if clnodes:
             repo.hook(b'outgoing', node=hex(clnodes[0]), source=source)
 
     def _generatechangelog(
-        self, cl, nodes, generate=True, sidedata_helpers=None
+        self,
+        cl,
+        nodes,
+        generate=True,
+        sidedata_helpers=None,
+        debug_info=None,
     ):
         """Generate data for changelog chunks.
 
@@ -1332,6 +1760,7 @@
             fullclnodes=self._fullclnodes,
             precomputedellipsis=self._precomputedellipsis,
             sidedata_helpers=sidedata_helpers,
+            debug_info=debug_info,
         )
 
         return state, gen
@@ -1346,6 +1775,7 @@
         source,
         clrevtolocalrev,
         sidedata_helpers=None,
+        debug_info=None,
     ):
         """Returns an iterator of changegroup chunks containing manifests.
 
@@ -1444,6 +1874,7 @@
                 fullclnodes=self._fullclnodes,
                 precomputedellipsis=self._precomputedellipsis,
                 sidedata_helpers=sidedata_helpers,
+                debug_info=debug_info,
             )
 
             if not self._oldmatcher.visitdir(store.tree[:-1]):
@@ -1483,6 +1914,7 @@
         fnodes,
         clrevs,
         sidedata_helpers=None,
+        debug_info=None,
     ):
         changedfiles = [
             f
@@ -1578,6 +2010,7 @@
                 fullclnodes=self._fullclnodes,
                 precomputedellipsis=self._precomputedellipsis,
                 sidedata_helpers=sidedata_helpers,
+                debug_info=debug_info,
             )
 
             yield fname, deltas
@@ -1867,7 +2300,12 @@
 
 
 def makechangegroup(
-    repo, outgoing, version, source, fastpath=False, bundlecaps=None
+    repo,
+    outgoing,
+    version,
+    source,
+    fastpath=False,
+    bundlecaps=None,
 ):
     cgstream = makestream(
         repo,
@@ -1917,7 +2355,12 @@
 
     repo.hook(b'preoutgoing', throw=True, source=source)
     _changegroupinfo(repo, csets, source)
-    return bundler.generate(commonrevs, csets, fastpathlinkrev, source)
+    return bundler.generate(
+        commonrevs,
+        csets,
+        fastpathlinkrev,
+        source,
+    )
 
 
 def _addchangegroupfiles(
@@ -1928,6 +2371,7 @@
     expectedfiles,
     needfiles,
     addrevisioncb=None,
+    debug_info=None,
 ):
     revisions = 0
     files = 0
@@ -1948,6 +2392,7 @@
                 revmap,
                 trp,
                 addrevisioncb=addrevisioncb,
+                debug_info=debug_info,
             )
             if not added:
                 raise error.Abort(_(b"received file revlog group is empty"))
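
As a usage sketch (assuming the helpers defined in the hunks above and a single bundled changelog revision), this is roughly how per-revlog statistics are gathered and folded into the bundle-wide report:

    total = make_debug_info()
    cl_info = make_debug_info()
    cl_info['revision-total'] += 1        # one changelog revision bundled
    cl_info['reused-storage-delta'] += 1  # its delta was reused from storage
    merge_debug_info(total, cl_info)      # fold changelog stats into the total
    total['revision-changelog'] = cl_info['revision-total']
    # display_bundling_debug_info(ui, total, cl_info, mn_info, fl_info)
    # would then write the DEBUG-BUNDLING report to stderr.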
--- a/mercurial/configitems.py	Sat Nov 19 16:14:20 2022 +0100
+++ b/mercurial/configitems.py	Sat Nov 19 20:40:47 2022 +0100
@@ -588,6 +588,18 @@
     b'revlog.debug-delta',
     default=False,
 )
+# display extra information about the bundling process
+coreconfigitem(
+    b'debug',
+    b'bundling-stats',
+    default=False,
+)
+# display extra information about the unbundling process
+coreconfigitem(
+    b'debug',
+    b'unbundling-stats',
+    default=False,
+)
 coreconfigitem(
     b'defaults',
     b'.*',
@@ -2123,7 +2135,7 @@
 coreconfigitem(
     b'server',
     b'pullbundle',
-    default=False,
+    default=True,
 )
 coreconfigitem(
     b'server',
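
A hedged sketch of how these knobs are consumed (mirroring the changegroup.py hunks above); both default to False, so the stats machinery stays inert unless enabled, e.g. with --config debug.bundling-stats=yes on the command line:

    debug_info = None
    if repo.ui.configbool(b'debug', b'bundling-stats'):
        debug_info = make_debug_info()  # bundling: a counter dictionary
    unbundle_info = None
    if repo.ui.configbool(b'debug', b'unbundling-stats'):
        unbundle_info = []              # unbundling: a list of per-rev dicts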
--- a/mercurial/debugcommands.py	Sat Nov 19 16:14:20 2022 +0100
+++ b/mercurial/debugcommands.py	Sat Nov 19 20:40:47 2022 +0100
@@ -59,6 +59,7 @@
     localrepo,
     lock as lockmod,
     logcmdutil,
+    mdiff,
     mergestate as mergestatemod,
     metadata,
     obsolete,
@@ -990,17 +991,29 @@
 
 @command(
     b'debug-delta-find',
-    cmdutil.debugrevlogopts + cmdutil.formatteropts,
+    cmdutil.debugrevlogopts
+    + cmdutil.formatteropts
+    + [
+        (
+            b'',
+            b'source',
+            b'full',
+            _(b'input data fed to the process (full, storage, p1, p2, prev)'),
+        ),
+    ],
     _(b'-c|-m|FILE REV'),
     optionalrepo=True,
 )
-def debugdeltafind(ui, repo, arg_1, arg_2=None, **opts):
+def debugdeltafind(ui, repo, arg_1, arg_2=None, source=b'full', **opts):
     """display the computation to get to a valid delta for storing REV
 
     This command will replay the process used to find the "best" delta to store
     a revision and display information about all the steps used to get to that
     result.
 
+    By default, the process is fed with the full text of the revision. This
+    can be controlled with the --source flag.
+
     The revision uses the revision number of the target storage (not changelog
     revision number).
 
@@ -1028,11 +1041,31 @@
     p1r, p2r = revlog.parentrevs(rev)
     p1 = revlog.node(p1r)
     p2 = revlog.node(p2r)
-    btext = [revlog.revision(rev)]
+    full_text = revlog.revision(rev)
+    btext = [full_text]
     textlen = len(btext[0])
     cachedelta = None
     flags = revlog.flags(rev)
 
+    if source != b'full':
+        if source == b'storage':
+            base_rev = revlog.deltaparent(rev)
+        elif source == b'p1':
+            base_rev = p1r
+        elif source == b'p2':
+            base_rev = p2r
+        elif source == b'prev':
+            base_rev = rev - 1
+        else:
+            raise error.InputError(b"invalid --source value: %s" % source)
+
+        if base_rev != nullrev:
+            base_text = revlog.revision(base_rev)
+            delta = mdiff.textdiff(base_text, full_text)
+
+            cachedelta = (base_rev, delta)
+            btext = [None]
+
     revinfo = revlogutils.revisioninfo(
         node,
         p1,
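
For illustration, an assumed invocation (the revision number is hypothetical):

    $ hg debug-delta-find -m 1234 --source p1

replays the delta search for manifest revision 1234, feeding it a delta computed against p1 with mdiff.textdiff, as the cachedelta branch above does, instead of the full text.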
--- a/mercurial/filelog.py	Sat Nov 19 16:14:20 2022 +0100
+++ b/mercurial/filelog.py	Sat Nov 19 20:40:47 2022 +0100
@@ -111,6 +111,7 @@
         assumehaveparentrevisions=False,
         deltamode=repository.CG_DELTAMODE_STD,
         sidedata_helpers=None,
+        debug_info=None,
     ):
         return self._revlog.emitrevisions(
             nodes,
@@ -119,6 +120,7 @@
             assumehaveparentrevisions=assumehaveparentrevisions,
             deltamode=deltamode,
             sidedata_helpers=sidedata_helpers,
+            debug_info=debug_info,
         )
 
     def addrevision(
@@ -151,6 +153,7 @@
         addrevisioncb=None,
         duplicaterevisioncb=None,
         maybemissingparents=False,
+        debug_info=None,
     ):
         if maybemissingparents:
             raise error.Abort(
@@ -171,6 +174,7 @@
                 transaction,
                 addrevisioncb=addrevisioncb,
                 duplicaterevisioncb=duplicaterevisioncb,
+                debug_info=debug_info,
             )
 
     def getstrippoint(self, minlink):
--- a/mercurial/localrepo.py	Sat Nov 19 16:14:20 2022 +0100
+++ b/mercurial/localrepo.py	Sat Nov 19 20:40:47 2022 +0100
@@ -15,6 +15,10 @@
 import weakref
 
 from concurrent import futures
+from typing import (
+    Optional,
+)
+
 from .i18n import _
 from .node import (
     bin,
@@ -526,7 +530,7 @@
     return set(read(b'requires').splitlines())
 
 
-def makelocalrepository(baseui, path, intents=None):
+def makelocalrepository(baseui, path: bytes, intents=None):
     """Create a local repository object.
 
     Given arguments needed to construct a local repository, this function
@@ -845,7 +849,13 @@
     )
 
 
-def loadhgrc(ui, wdirvfs, hgvfs, requirements, sharedvfs=None):
+def loadhgrc(
+    ui,
+    wdirvfs: vfsmod.vfs,
+    hgvfs: vfsmod.vfs,
+    requirements,
+    sharedvfs: Optional[vfsmod.vfs] = None,
+):
     """Load hgrc files/content into a ui instance.
 
     This is called during repository opening to load any additional
@@ -1323,15 +1333,15 @@
         self,
         baseui,
         ui,
-        origroot,
-        wdirvfs,
-        hgvfs,
+        origroot: bytes,
+        wdirvfs: vfsmod.vfs,
+        hgvfs: vfsmod.vfs,
         requirements,
         supportedrequirements,
-        sharedpath,
+        sharedpath: bytes,
         store,
-        cachevfs,
-        wcachevfs,
+        cachevfs: vfsmod.vfs,
+        wcachevfs: vfsmod.vfs,
         features,
         intents=None,
     ):
@@ -1977,7 +1987,7 @@
     def __iter__(self):
         return iter(self.changelog)
 
-    def revs(self, expr, *args):
+    def revs(self, expr: bytes, *args):
         """Find revisions matching a revset.
 
         The revset is specified as a string ``expr`` that may contain
@@ -1993,7 +2003,7 @@
         tree = revsetlang.spectree(expr, *args)
         return revset.makematcher(tree)(self)
 
-    def set(self, expr, *args):
+    def set(self, expr: bytes, *args):
         """Find revisions matching a revset and emit changectx instances.
 
         This is a convenience wrapper around ``revs()`` that iterates the
@@ -2005,7 +2015,7 @@
         for r in self.revs(expr, *args):
             yield self[r]
 
-    def anyrevs(self, specs, user=False, localalias=None):
+    def anyrevs(self, specs: bytes, user=False, localalias=None):
         """Find revisions matching one of the given revsets.
 
         Revset aliases from the configuration are not expanded by default. To
@@ -2030,7 +2040,7 @@
             m = revset.matchany(None, specs, localalias=localalias)
         return m(self)
 
-    def url(self):
+    def url(self) -> bytes:
         return b'file:' + self.root
 
     def hook(self, name, throw=False, **args):
@@ -2229,7 +2239,7 @@
             return b'store'
         return None
 
-    def wjoin(self, f, *insidef):
+    def wjoin(self, f: bytes, *insidef: bytes) -> bytes:
         return self.vfs.reljoin(self.root, f, *insidef)
 
     def setparents(self, p1, p2=None):
@@ -2238,17 +2248,17 @@
         self[None].setparents(p1, p2)
         self._quick_access_changeid_invalidate()
 
-    def filectx(self, path, changeid=None, fileid=None, changectx=None):
+    def filectx(self, path: bytes, changeid=None, fileid=None, changectx=None):
         """changeid must be a changeset revision, if specified.
         fileid can be a file revision or node."""
         return context.filectx(
             self, path, changeid, fileid, changectx=changectx
         )
 
-    def getcwd(self):
+    def getcwd(self) -> bytes:
         return self.dirstate.getcwd()
 
-    def pathto(self, f, cwd=None):
+    def pathto(self, f: bytes, cwd: Optional[bytes] = None) -> bytes:
         return self.dirstate.pathto(f, cwd)
 
     def _loadfilter(self, filter):
@@ -2300,14 +2310,21 @@
     def adddatafilter(self, name, filter):
         self._datafilters[name] = filter
 
-    def wread(self, filename):
+    def wread(self, filename: bytes) -> bytes:
         if self.wvfs.islink(filename):
             data = self.wvfs.readlink(filename)
         else:
             data = self.wvfs.read(filename)
         return self._filter(self._encodefilterpats, filename, data)
 
-    def wwrite(self, filename, data, flags, backgroundclose=False, **kwargs):
+    def wwrite(
+        self,
+        filename: bytes,
+        data: bytes,
+        flags: bytes,
+        backgroundclose=False,
+        **kwargs
+    ) -> int:
         """write ``data`` into ``filename`` in the working directory
 
         This returns length of written (maybe decoded) data.
@@ -2325,7 +2342,7 @@
                 self.wvfs.setflags(filename, False, False)
         return len(data)
 
-    def wwritedata(self, filename, data):
+    def wwritedata(self, filename: bytes, data: bytes) -> bytes:
         return self._filter(self._decodefilterpats, filename, data)
 
     def currenttransaction(self):
@@ -3520,13 +3537,13 @@
     return a
 
 
-def undoname(fn):
+def undoname(fn: bytes) -> bytes:
     base, name = os.path.split(fn)
     assert name.startswith(b'journal')
     return os.path.join(base, name.replace(b'journal', b'undo', 1))
 
 
-def instance(ui, path, create, intents=None, createopts=None):
+def instance(ui, path: bytes, create, intents=None, createopts=None):
 
     # prevent cyclic import localrepo -> upgrade -> localrepo
     from . import upgrade
@@ -3543,7 +3560,7 @@
     return repo
 
 
-def islocal(path):
+def islocal(path: bytes) -> bool:
     return True
 
 
@@ -3803,7 +3820,7 @@
     return {k: v for k, v in createopts.items() if k not in known}
 
 
-def createrepository(ui, path, createopts=None, requirements=None):
+def createrepository(ui, path: bytes, createopts=None, requirements=None):
     """Create a new repository in a vfs.
 
     ``path`` path to the new repo's working directory.
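
The newly annotated query helpers take a bytes revset with printf-style placeholders; a hedged usage sketch (the revision number is made up):

    # %d, %s, %ld and friends are expanded by revsetlang.spectree,
    # as documented in the revs() docstring above
    for rev in repo.revs(b'ancestors(%d) and not public()', 42):
        print(rev)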
--- a/mercurial/manifest.py	Sat Nov 19 16:14:20 2022 +0100
+++ b/mercurial/manifest.py	Sat Nov 19 20:40:47 2022 +0100
@@ -1836,6 +1836,7 @@
         assumehaveparentrevisions=False,
         deltamode=repository.CG_DELTAMODE_STD,
         sidedata_helpers=None,
+        debug_info=None,
     ):
         return self._revlog.emitrevisions(
             nodes,
@@ -1844,6 +1845,7 @@
             assumehaveparentrevisions=assumehaveparentrevisions,
             deltamode=deltamode,
             sidedata_helpers=sidedata_helpers,
+            debug_info=debug_info,
         )
 
     def addgroup(
@@ -1854,6 +1856,7 @@
         alwayscache=False,
         addrevisioncb=None,
         duplicaterevisioncb=None,
+        debug_info=None,
     ):
         return self._revlog.addgroup(
             deltas,
@@ -1862,6 +1865,7 @@
             alwayscache=alwayscache,
             addrevisioncb=addrevisioncb,
             duplicaterevisioncb=duplicaterevisioncb,
+            debug_info=debug_info,
         )
 
     def rawsize(self, rev):
--- a/mercurial/pure/bdiff.py	Sat Nov 19 16:14:20 2022 +0100
+++ b/mercurial/pure/bdiff.py	Sat Nov 19 20:40:47 2022 +0100
@@ -10,8 +10,13 @@
 import re
 import struct
 
+from typing import (
+    List,
+    Tuple,
+)
 
-def splitnewlines(text):
+
+def splitnewlines(text: bytes) -> List[bytes]:
     '''like str.splitlines, but only split on newlines.'''
     lines = [l + b'\n' for l in text.split(b'\n')]
     if lines:
@@ -22,7 +27,9 @@
     return lines
 
 
-def _normalizeblocks(a, b, blocks):
+def _normalizeblocks(
+    a: List[bytes], b: List[bytes], blocks
+) -> List[Tuple[int, int, int]]:
     prev = None
     r = []
     for curr in blocks:
@@ -57,7 +64,7 @@
     return r
 
 
-def bdiff(a, b):
+def bdiff(a: bytes, b: bytes) -> bytes:
     a = bytes(a).splitlines(True)
     b = bytes(b).splitlines(True)
 
@@ -84,7 +91,7 @@
     return b"".join(bin)
 
 
-def blocks(a, b):
+def blocks(a: bytes, b: bytes) -> List[Tuple[int, int, int, int]]:
     an = splitnewlines(a)
     bn = splitnewlines(b)
     d = difflib.SequenceMatcher(None, an, bn).get_matching_blocks()
@@ -92,7 +99,7 @@
     return [(i, i + n, j, j + n) for (i, j, n) in d]
 
 
-def fixws(text, allws):
+def fixws(text: bytes, allws: bool) -> bytes:
     if allws:
         text = re.sub(b'[ \t\r]+', b'', text)
     else:
--- a/mercurial/pure/mpatch.py	Sat Nov 19 16:14:20 2022 +0100
+++ b/mercurial/pure/mpatch.py	Sat Nov 19 20:40:47 2022 +0100
@@ -9,6 +9,11 @@
 import io
 import struct
 
+from typing import (
+    List,
+    Tuple,
+)
+
 
 stringio = io.BytesIO
 
@@ -28,7 +33,9 @@
 # temporary string buffers.
 
 
-def _pull(dst, src, l):  # pull l bytes from src
+def _pull(
+    dst: List[Tuple[int, int]], src: List[Tuple[int, int]], l: int
+) -> None:  # pull l bytes from src
     while l:
         f = src.pop()
         if f[0] > l:  # do we need to split?
@@ -39,7 +46,7 @@
         l -= f[0]
 
 
-def _move(m, dest, src, count):
+def _move(m: stringio, dest: int, src: int, count: int) -> None:
     """move count bytes from src to dest
 
     The file pointer is left at the end of dest.
@@ -50,7 +57,9 @@
     m.write(buf)
 
 
-def _collect(m, buf, list):
+def _collect(
+    m: stringio, buf: int, list: List[Tuple[int, int]]
+) -> Tuple[int, int]:
     start = buf
     for l, p in reversed(list):
         _move(m, buf, p, l)
@@ -58,7 +67,7 @@
     return (buf - start, start)
 
 
-def patches(a, bins):
+def patches(a: bytes, bins: List[bytes]) -> bytes:
     if not bins:
         return a
 
@@ -111,7 +120,7 @@
     return m.read(t[0])
 
 
-def patchedsize(orig, delta):
+def patchedsize(orig: int, delta: bytes) -> int:
     outlen, last, bin = 0, 0, 0
     binend = len(delta)
     data = 12
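
As a quick check of the annotated API (a sketch; the byte inputs are made up), bdiff() from pure/bdiff.py produces a binary patch that patches() above can apply:

    from mercurial.pure.bdiff import bdiff
    from mercurial.pure.mpatch import patches

    old = b"a\nb\nc\n"
    new = b"a\nx\nc\n"
    delta = bdiff(old, new)              # (start, end, length) + data blocks
    assert patches(old, [delta]) == new  # applying the patch round-trips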
--- a/mercurial/revlog.py	Sat Nov 19 16:14:20 2022 +0100
+++ b/mercurial/revlog.py	Sat Nov 19 20:40:47 2022 +0100
@@ -44,6 +44,7 @@
     FLAG_INLINE_DATA,
     INDEX_HEADER,
     KIND_CHANGELOG,
+    KIND_FILELOG,
     RANK_UNKNOWN,
     REVLOGV0,
     REVLOGV1,
@@ -505,7 +506,6 @@
             self._docket = docket
             self._docket_file = entry_point
         else:
-            entry_data = b''
             self._initempty = True
             entry_data = self._get_data(entry_point, mmapindexthreshold)
             if len(entry_data) > 0:
@@ -653,9 +653,12 @@
     @util.propertycache
     def display_id(self):
         """The public facing "ID" of the revlog that we use in message"""
-        # Maybe we should build a user facing representation of
-        # revlog.target instead of using `self.radix`
-        return self.radix
+        if self.revlog_kind == KIND_FILELOG:
+            # Reference the file without the "data/" prefix, so it is familiar
+            # to the user.
+            return self.target[1]
+        else:
+            return self.radix
 
     def _get_decompressor(self, t):
         try:
@@ -2637,6 +2640,7 @@
         alwayscache=False,
         addrevisioncb=None,
         duplicaterevisioncb=None,
+        debug_info=None,
     ):
         """
         add a delta group
@@ -2662,6 +2666,7 @@
                 deltacomputer = deltautil.deltacomputer(
                     self,
                     write_debug=write_debug,
+                    debug_info=debug_info,
                 )
                 # loop through our set of deltas
                 for data in deltas:
@@ -2886,6 +2891,7 @@
         assumehaveparentrevisions=False,
         deltamode=repository.CG_DELTAMODE_STD,
         sidedata_helpers=None,
+        debug_info=None,
     ):
         if nodesorder not in (b'nodes', b'storage', b'linear', None):
             raise error.ProgrammingError(
@@ -2915,6 +2921,7 @@
             revisiondata=revisiondata,
             assumehaveparentrevisions=assumehaveparentrevisions,
             sidedata_helpers=sidedata_helpers,
+            debug_info=debug_info,
         )
 
     DELTAREUSEALWAYS = b'always'
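
A short sketch of the display_id change above (path assumed): for a filelog, target is (KIND_FILELOG, path), so user-facing messages now name the file itself rather than the storage radix.

    # assuming a filelog opened for b'foo.txt':
    # revlog.target == (KIND_FILELOG, b'foo.txt')
    # old display_id: self.radix     -> b'data/foo.txt'
    # new display_id: self.target[1] -> b'foo.txt'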
--- a/mercurial/revlogutils/deltas.py	Sat Nov 19 16:14:20 2022 +0100
+++ b/mercurial/revlogutils/deltas.py	Sat Nov 19 20:40:47 2022 +0100
@@ -655,7 +655,15 @@
 LIMIT_BASE2TEXT = 500
 
 
-def _candidategroups(revlog, textlen, p1, p2, cachedelta):
+def _candidategroups(
+    revlog,
+    textlen,
+    p1,
+    p2,
+    cachedelta,
+    excluded_bases=None,
+    target_rev=None,
+):
     """Provides group of revision to be tested as delta base
 
     This top level function focus on emitting groups with unique and worthwhile
@@ -674,7 +682,12 @@
     deltas_limit = textlen * LIMIT_DELTA2TEXT
 
     tested = {nullrev}
-    candidates = _refinedgroups(revlog, p1, p2, cachedelta)
+    candidates = _refinedgroups(
+        revlog,
+        p1,
+        p2,
+        cachedelta,
+    )
     while True:
         temptative = candidates.send(good)
         if temptative is None:
@@ -694,15 +707,27 @@
             # filter out revision we tested already
             if rev in tested:
                 continue
-            tested.add(rev)
+            # a higher authority deemed the base unworthy (e.g. censored)
+            if excluded_bases is not None and rev in excluded_bases:
+                tested.add(rev)
+                continue
+            # In some recomputation cases, that rev is too high in the
+            # revlog (at or above the target revision)
+            if target_rev is not None and rev >= target_rev:
+                tested.add(rev)
+                continue
             # filter out delta base that will never produce good delta
             if deltas_limit < revlog.length(rev):
+                tested.add(rev)
                 continue
             if sparse and revlog.rawsize(rev) < (textlen // LIMIT_BASE2TEXT):
+                tested.add(rev)
                 continue
             # no delta for rawtext-changing revs (see "candelta" for why)
             if revlog.flags(rev) & REVIDX_RAWTEXT_CHANGING_FLAGS:
+                tested.add(rev)
                 continue
+
             # If we reach here, we are about to build and test a delta.
             # The delta building process will compute the chaininfo in all
             # case, since that computation is cached, it is fine to access it
@@ -710,9 +735,11 @@
             chainlen, chainsize = revlog._chaininfo(rev)
             # if chain will be too long, skip base
             if revlog._maxchainlen and chainlen >= revlog._maxchainlen:
+                tested.add(rev)
                 continue
             # if chain already have too much data, skip base
             if deltas_limit < chainsize:
+                tested.add(rev)
                 continue
             if sparse and revlog.upperboundcomp is not None:
                 maxcomp = revlog.upperboundcomp
@@ -731,12 +758,14 @@
                     snapshotlimit = textlen >> snapshotdepth
                     if snapshotlimit < lowestrealisticdeltalen:
                         # delta lower bound is larger than accepted upper bound
+                        tested.add(rev)
                         continue
 
                     # check the relative constraint on the delta size
                     revlength = revlog.length(rev)
                     if revlength < lowestrealisticdeltalen:
                         # delta probable lower bound is larger than target base
+                        tested.add(rev)
                         continue
 
             group.append(rev)
@@ -744,6 +773,7 @@
             # XXX: in the sparse revlog case, group can become large,
             #      impacting performance. Some bounding or slicing mechanism
             #      would help to reduce this impact.
+            tested.update(group)
             good = yield tuple(group)
     yield None
 
@@ -768,15 +798,28 @@
     # This logic only applies to general delta repositories and can be disabled
     # through configuration. Disabling reuse source delta is useful when
     # we want to make sure we recomputed "optimal" deltas.
+    debug_info = None
     if cachedelta and revlog._generaldelta and revlog._lazydeltabase:
         # Assume what we received from the server is a good choice
         # build delta will reuse the cache
+        if debug_info is not None:
+            debug_info['cached-delta.tested'] += 1
         good = yield (cachedelta[0],)
         if good is not None:
+            if debug_info is not None:
+                debug_info['cached-delta.accepted'] += 1
             yield None
             return
+    # XXX cache me higher
     snapshots = collections.defaultdict(list)
-    for candidates in _rawgroups(revlog, p1, p2, cachedelta, snapshots):
+    groups = _rawgroups(
+        revlog,
+        p1,
+        p2,
+        cachedelta,
+        snapshots,
+    )
+    for candidates in groups:
         good = yield candidates
         if good is not None:
             break
@@ -805,7 +848,10 @@
             children = tuple(sorted(c for c in snapshots[good]))
             good = yield children
 
-    # we have found nothing
+    if debug_info is not None:
+        if good is None:
+            debug_info['no-solution'] += 1
+
     yield None
 
 
@@ -841,7 +887,7 @@
 
     if sparse and parents:
         if snapshots is None:
-            # map: base-rev: snapshot-rev
+            # map: base-rev: [snapshot-revs]
             snapshots = collections.defaultdict(list)
         # See if we can use an existing snapshot in the parent chains to use as
         # a base for a new intermediate-snapshot
@@ -879,14 +925,14 @@
             # chain.
             max_depth = max(parents_snaps.keys())
             chain = deltachain(other)
-            for idx, s in enumerate(chain):
+            for depth, s in enumerate(chain):
                 if s < snapfloor:
                     continue
-                if max_depth < idx:
+                if max_depth < depth:
                     break
                 if not revlog.issnapshot(s):
                     break
-                parents_snaps[idx].add(s)
+                parents_snaps[depth].add(s)
         # Test them as possible intermediate snapshot base
         # We test them from highest to lowest level. High level one are more
         # likely to result in small delta
@@ -931,10 +977,17 @@
 
 
 class deltacomputer:
-    def __init__(self, revlog, write_debug=None, debug_search=False):
+    def __init__(
+        self,
+        revlog,
+        write_debug=None,
+        debug_search=False,
+        debug_info=None,
+    ):
         self.revlog = revlog
         self._write_debug = write_debug
         self._debug_search = debug_search
+        self._debug_info = debug_info
 
     def buildtext(self, revinfo, fh):
         """Builds a fulltext version of a revision
@@ -1103,11 +1156,14 @@
         if revinfo.flags & REVIDX_RAWTEXT_CHANGING_FLAGS:
             return self._fullsnapshotinfo(fh, revinfo, target_rev)
 
-        if self._write_debug is not None:
+        gather_debug = (
+            self._write_debug is not None or self._debug_info is not None
+        )
+        debug_search = self._write_debug is not None and self._debug_search
+
+        if gather_debug:
             start = util.timer()
 
-        debug_search = self._write_debug is not None and self._debug_search
-
         # count the number of different delta we tried (for debug purpose)
         dbg_try_count = 0
         # count the number of "search round" we did. (for debug purpose)
@@ -1122,7 +1178,7 @@
         deltainfo = None
         p1r, p2r = revlog.rev(p1), revlog.rev(p2)
 
-        if self._write_debug is not None:
+        if gather_debug:
             if p1r != nullrev:
                 p1_chain_len = revlog._chaininfo(p1r)[0]
             else:
@@ -1137,7 +1193,13 @@
             self._write_debug(msg)
 
         groups = _candidategroups(
-            self.revlog, revinfo.textlen, p1r, p2r, cachedelta
+            self.revlog,
+            revinfo.textlen,
+            p1r,
+            p2r,
+            cachedelta,
+            excluded_bases,
+            target_rev,
         )
         candidaterevs = next(groups)
         while candidaterevs is not None:
@@ -1147,7 +1209,13 @@
                 if deltainfo is not None:
                     prev = deltainfo.base
 
-                if p1 in candidaterevs or p2 in candidaterevs:
+                if (
+                    cachedelta is not None
+                    and len(candidaterevs) == 1
+                    and cachedelta[0] in candidaterevs
+                ):
+                    round_type = b"cached-delta"
+                elif p1 in candidaterevs or p2 in candidaterevs:
                     round_type = b"parents"
                 elif prev is not None and all(c < prev for c in candidaterevs):
                     round_type = b"refine-down"
@@ -1195,16 +1263,7 @@
                     msg = b"DBG-DELTAS-SEARCH:     base=%d\n"
                     msg %= self.revlog.deltaparent(candidaterev)
                     self._write_debug(msg)
-                if candidaterev in excluded_bases:
-                    if debug_search:
-                        msg = b"DBG-DELTAS-SEARCH:     EXCLUDED\n"
-                        self._write_debug(msg)
-                    continue
-                if candidaterev >= target_rev:
-                    if debug_search:
-                        msg = b"DBG-DELTAS-SEARCH:     TOO-HIGH\n"
-                        self._write_debug(msg)
-                    continue
+
                 dbg_try_count += 1
 
                 if debug_search:
@@ -1244,12 +1303,20 @@
         else:
             dbg_type = b"delta"
 
-        if self._write_debug is not None:
+        if gather_debug:
             end = util.timer()
+            used_cached = (
+                cachedelta is not None
+                and dbg_try_rounds == 1
+                and dbg_try_count == 1
+                and deltainfo.base == cachedelta[0]
+            )
             dbg = {
                 'duration': end - start,
                 'revision': target_rev,
+                'delta-base': deltainfo.base,  # pytype: disable=attribute-error
                 'search_round_count': dbg_try_rounds,
+                'using-cached-base': used_cached,
                 'delta_try_count': dbg_try_count,
                 'type': dbg_type,
                 'p1-chain-len': p1_chain_len,
@@ -1279,31 +1346,39 @@
                     target_revlog += b'%s:' % target_key
             dbg['target-revlog'] = target_revlog
 
-            msg = (
-                b"DBG-DELTAS:"
-                b" %-12s"
-                b" rev=%d:"
-                b" search-rounds=%d"
-                b" try-count=%d"
-                b" - delta-type=%-6s"
-                b" snap-depth=%d"
-                b" - p1-chain-length=%d"
-                b" p2-chain-length=%d"
-                b" - duration=%f"
-                b"\n"
-            )
-            msg %= (
-                dbg["target-revlog"],
-                dbg["revision"],
-                dbg["search_round_count"],
-                dbg["delta_try_count"],
-                dbg["type"],
-                dbg["snapshot-depth"],
-                dbg["p1-chain-len"],
-                dbg["p2-chain-len"],
-                dbg["duration"],
-            )
-            self._write_debug(msg)
+            if self._debug_info is not None:
+                self._debug_info.append(dbg)
+
+            if self._write_debug is not None:
+                msg = (
+                    b"DBG-DELTAS:"
+                    b" %-12s"
+                    b" rev=%d:"
+                    b" delta-base=%d"
+                    b" is-cached=%d"
+                    b" - search-rounds=%d"
+                    b" try-count=%d"
+                    b" - delta-type=%-6s"
+                    b" snap-depth=%d"
+                    b" - p1-chain-length=%d"
+                    b" p2-chain-length=%d"
+                    b" - duration=%f"
+                    b"\n"
+                )
+                msg %= (
+                    dbg["target-revlog"],
+                    dbg["revision"],
+                    dbg["delta-base"],
+                    dbg["using-cached-base"],
+                    dbg["search_round_count"],
+                    dbg["delta_try_count"],
+                    dbg["type"],
+                    dbg["snapshot-depth"],
+                    dbg["p1-chain-len"],
+                    dbg["p2-chain-len"],
+                    dbg["duration"],
+                )
+                self._write_debug(msg)
         return deltainfo
 
 
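The hunks above split the debug handling in two: `gather_debug` decides whether
the per-revision `dbg` dictionary is built at all, while two independent sinks
consume it, a `_debug_info` list for structured consumers and `_write_debug`
for the human-readable `DBG-DELTAS:` line. A minimal sketch of that dual-sink
pattern, using hypothetical names (`DeltaSearchSketch` is not the real class):

    class DeltaSearchSketch:
        def __init__(self, debug_info=None, write_debug=None):
            # either sink (or both, or neither) may be active
            self._debug_info = debug_info
            self._write_debug = write_debug

        @property
        def gather_debug(self):
            return self._debug_info is not None or self._write_debug is not None

        def report(self, dbg):
            if self._debug_info is not None:
                # structured consumers keep the raw dictionary
                self._debug_info.append(dbg)
            if self._write_debug is not None:
                # humans get a one-line summary
                msg = b"DBG-DELTAS: rev=%d delta-base=%d is-cached=%d\n"
                msg %= (dbg['revision'], dbg['delta-base'],
                        dbg['using-cached-base'])
                self._write_debug(msg)
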
--- a/mercurial/shelve.py	Sat Nov 19 16:14:20 2022 +0100
+++ b/mercurial/shelve.py	Sat Nov 19 20:40:47 2022 +0100
@@ -247,6 +247,14 @@
         for ext in shelvefileextensions:
             self.vfs.tryunlink(self.name + b'.' + ext)
 
+    def changed_files(self, ui, repo):
+        try:
+            ctx = repo.unfiltered()[self.readinfo()[b'node']]
+            return ctx.files()
+        except (FileNotFoundError, error.RepoLookupError):
+            filename = self.vfs.join(self.name + b'.patch')
+            return patch.changedfiles(ui, repo, filename)
+
 
 def _optimized_match(repo, node):
     """
--- a/mercurial/util.py	Sat Nov 19 16:14:20 2022 +0100
+++ b/mercurial/util.py	Sat Nov 19 20:40:47 2022 +0100
@@ -2542,6 +2542,7 @@
         # delegated methods
         self.read = self._fp.read
         self.write = self._fp.write
+        self.writelines = self._fp.writelines
         self.seek = self._fp.seek
         self.tell = self._fp.tell
         self.fileno = self._fp.fileno
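
`writelines` joins the file-object methods this wrapper already forwards;
binding the bound methods once in the constructor avoids a `__getattr__`
round-trip on every call. The delegation style, reduced to a sketch (the
wrapper name is hypothetical):

    class delegatingfile:
        def __init__(self, fp):
            self._fp = fp
            # bind pass-through methods once rather than proxying per call
            self.read = fp.read
            self.write = fp.write
            self.writelines = fp.writelines
            self.seek = fp.seek
            self.tell = fp.tell
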
--- a/mercurial/utils/storageutil.py	Sat Nov 19 16:14:20 2022 +0100
+++ b/mercurial/utils/storageutil.py	Sat Nov 19 20:40:47 2022 +0100
@@ -305,6 +305,7 @@
     revisiondata=False,
     assumehaveparentrevisions=False,
     sidedata_helpers=None,
+    debug_info=None,
 ):
     """Generic implementation of ifiledata.emitrevisions().
 
@@ -370,6 +371,10 @@
     ``sidedata_helpers`` (optional)
         If not None, means that sidedata should be included.
         See `revlogutil.sidedata.get_sidedata_helpers`.
+
+    ``debug_info`` (optional)
+        An optional dictionary to gather information about the bundling
+        process (if present; see config: debug.bundling-stats).
     """
 
     fnode = store.node
@@ -395,23 +400,43 @@
         if rev == nullrev:
             continue
 
+        debug_delta_source = None
+        if debug_info is not None:
+            debug_info['revision-total'] += 1
+
         node = fnode(rev)
         p1rev, p2rev = store.parentrevs(rev)
 
+        if debug_info is not None:
+            if p1rev != p2rev and p1rev != nullrev and p2rev != nullrev:
+                debug_info['merge-total'] += 1
+
         if deltaparentfn:
             deltaparentrev = deltaparentfn(rev)
+            if debug_info is not None:
+                if deltaparentrev == nullrev:
+                    debug_info['available-full'] += 1
+                else:
+                    debug_info['available-delta'] += 1
+
         else:
             deltaparentrev = nullrev
 
         # Forced delta against previous mode.
         if deltamode == repository.CG_DELTAMODE_PREV:
+            if debug_info is not None:
+                debug_delta_source = "prev"
             baserev = prevrev
 
         # We're instructed to send fulltext. Honor that.
         elif deltamode == repository.CG_DELTAMODE_FULL:
+            if debug_info is not None:
+                debug_delta_source = "full"
             baserev = nullrev
         # We're instructed to use p1. Honor that
         elif deltamode == repository.CG_DELTAMODE_P1:
+            if debug_info is not None:
+                debug_delta_source = "p1"
             baserev = p1rev
 
         # There is a delta in storage. We try to use that because it
@@ -421,20 +446,29 @@
             # Base revision was already emitted in this group. We can
             # always safely use the delta.
             if deltaparentrev in available:
+                if debug_info is not None:
+                    debug_delta_source = "storage"
                 baserev = deltaparentrev
 
             # Base revision is a parent that hasn't been emitted already.
             # Use it if we can assume the receiver has the parent revision.
             elif assumehaveparentrevisions and deltaparentrev in (p1rev, p2rev):
+                if debug_info is not None:
+                    debug_delta_source = "storage"
                 baserev = deltaparentrev
-
             # No guarantee the receiver has the delta parent. Send delta
             # against last revision (if possible), which in the common case
             # should be similar enough to this revision that the delta is
             # reasonable.
             elif prevrev is not None:
+                if debug_info is not None:
+                    debug_info['denied-base-not-available'] += 1
+                    debug_delta_source = "prev"
                 baserev = prevrev
             else:
+                if debug_info is not None:
+                    debug_info['denied-base-not-available'] += 1
+                    debug_delta_source = "full"
                 baserev = nullrev
 
         # Storage has a fulltext revision.
@@ -442,13 +476,24 @@
         # Let's use the previous revision, which is as good a guess as any.
         # There is definitely room to improve this logic.
         elif prevrev is not None:
+            if debug_info is not None:
+                debug_delta_source = "prev"
             baserev = prevrev
         else:
+            if debug_info is not None:
+                debug_delta_source = "full"
             baserev = nullrev
 
         # But we can't actually use our chosen delta base for whatever
         # reason. Reset to fulltext.
-        if baserev != nullrev and (candeltafn and not candeltafn(baserev, rev)):
+        if (
+            baserev != nullrev
+            and candeltafn is not None
+            and not candeltafn(baserev, rev)
+        ):
+            if debug_info is not None:
+                debug_delta_source = "full"
+                debug_info['denied-delta-candeltafn'] += 1
             baserev = nullrev
 
         revision = None
@@ -460,6 +505,9 @@
                 try:
                     revision = store.rawdata(node)
                 except error.CensoredNodeError as e:
+                    if debug_info is not None:
+                        debug_delta_source = "full"
+                        debug_info['denied-delta-not-available'] += 1
                     revision = e.tombstone
 
                 if baserev != nullrev:
@@ -471,12 +519,46 @@
             elif (
                 baserev == nullrev and deltamode != repository.CG_DELTAMODE_PREV
             ):
+                if debug_info is not None:
+                    debug_info['computed-delta'] += 1  # close enough
+                    debug_info['delta-full'] += 1
                 revision = store.rawdata(node)
                 available.add(rev)
             else:
                 if revdifffn:
+                    if debug_info is not None:
+                        if debug_delta_source == "full":
+                            debug_info['computed-delta'] += 1
+                            debug_info['delta-full'] += 1
+                        elif debug_delta_source == "prev":
+                            debug_info['computed-delta'] += 1
+                            debug_info['delta-against-prev'] += 1
+                        elif debug_delta_source == "p1":
+                            debug_info['computed-delta'] += 1
+                            debug_info['delta-against-p1'] += 1
+                        elif debug_delta_source == "storage":
+                            debug_info['reused-storage-delta'] += 1
+                        else:
+                            assert False, 'unreachable'
+
                     delta = revdifffn(baserev, rev)
                 else:
+                    if debug_info is not None:
+                        if debug_delta_source == "full":
+                            debug_info['computed-delta'] += 1
+                            debug_info['delta-full'] += 1
+                        elif debug_delta_source == "prev":
+                            debug_info['computed-delta'] += 1
+                            debug_info['delta-against-prev'] += 1
+                        elif debug_delta_source == "p1":
+                            debug_info['computed-delta'] += 1
+                            debug_info['delta-against-p1'] += 1
+                        elif debug_delta_source == "storage":
+                            # seems quite unlikely to happen
+                            debug_info['computed-delta'] += 1
+                            debug_info['reused-storage-delta'] += 1
+                        else:
+                            assert False, 'unreachable'
                     delta = mdiff.textdiff(
                         store.rawdata(baserev), store.rawdata(rev)
                     )
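
All of the `debug_info[...] += 1` updates above assume the caller pre-seeds the
dictionary, since nothing here uses `setdefault`. A plausible initializer
covering exactly the keys this hunk touches (the function name is an
assumption, not part of the patch):

    def new_bundling_debug_info():
        # every counter emitrevisions() touches when debug_info is not None
        return {
            'revision-total': 0,
            'merge-total': 0,
            'available-full': 0,
            'available-delta': 0,
            'denied-base-not-available': 0,
            'denied-delta-candeltafn': 0,
            'denied-delta-not-available': 0,
            'computed-delta': 0,
            'reused-storage-delta': 0,
            'delta-full': 0,
            'delta-against-prev': 0,
            'delta-against-p1': 0,
        }
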
--- a/mercurial/utils/stringutil.py	Sat Nov 19 16:14:20 2022 +0100
+++ b/mercurial/utils/stringutil.py	Sat Nov 19 20:40:47 2022 +0100
@@ -14,6 +14,11 @@
 import textwrap
 import types
 
+from typing import (
+    Optional,
+    overload,
+)
+
 from ..i18n import _
 from ..thirdparty import attr
 
@@ -30,6 +35,16 @@
 regexbytesescapemap = {i: (b'\\' + i) for i in _respecial}
 
 
+@overload
+def reescape(pat: bytes) -> bytes:
+    ...
+
+
+@overload
+def reescape(pat: str) -> str:
+    ...
+
+
 def reescape(pat):
     """Drop-in replacement for re.escape."""
     # NOTE: it is intentional that this works on unicodes and not
@@ -45,12 +60,12 @@
     return pat.encode('latin1')
 
 
-def pprint(o, bprefix=False, indent=0, level=0):
+def pprint(o, bprefix: bool = False, indent: int = 0, level: int = 0) -> bytes:
     """Pretty print an object."""
     return b''.join(pprintgen(o, bprefix=bprefix, indent=indent, level=level))
 
 
-def pprintgen(o, bprefix=False, indent=0, level=0):
+def pprintgen(o, bprefix: bool = False, indent: int = 0, level: int = 0):
     """Pretty print an object to a generator of atoms.
 
     ``bprefix`` is a flag influencing whether bytestrings are preferred with
@@ -250,7 +265,7 @@
         yield pycompat.byterepr(o)
 
 
-def prettyrepr(o):
+def prettyrepr(o) -> bytes:
     """Pretty print a representation of a possibly-nested object"""
     lines = []
     rs = pycompat.byterepr(o)
@@ -281,7 +296,7 @@
     return b'\n'.join(b'  ' * l + s for l, s in lines)
 
 
-def buildrepr(r):
+def buildrepr(r) -> bytes:
     """Format an optional printable representation from unexpanded bits
 
     ========  =================================
@@ -305,12 +320,12 @@
         return pprint(r)
 
 
-def binary(s):
+def binary(s: bytes) -> bool:
     """return true if a string is binary data"""
     return bool(s and b'\0' in s)
 
 
-def _splitpattern(pattern):
+def _splitpattern(pattern: bytes):
     if pattern.startswith(b're:'):
         return b're', pattern[3:]
     elif pattern.startswith(b'literal:'):
@@ -318,7 +333,7 @@
     return b'literal', pattern
 
 
-def stringmatcher(pattern, casesensitive=True):
+def stringmatcher(pattern: bytes, casesensitive: bool = True):
     """
     accepts a string, possibly starting with 're:' or 'literal:' prefix.
     returns the matcher name, pattern, and matcher function.
@@ -379,7 +394,7 @@
     raise error.ProgrammingError(b'unhandled pattern kind: %s' % kind)
 
 
-def substringregexp(pattern, flags=0):
+def substringregexp(pattern: bytes, flags: int = 0):
     """Build a regexp object from a string pattern possibly starting with
     're:' or 'literal:' prefix.
 
@@ -431,7 +446,7 @@
     raise error.ProgrammingError(b'unhandled pattern kind: %s' % kind)
 
 
-def shortuser(user):
+def shortuser(user: bytes) -> bytes:
     """Return a short representation of a user name or email address."""
     f = user.find(b'@')
     if f >= 0:
@@ -448,7 +463,7 @@
     return user
 
 
-def emailuser(user):
+def emailuser(user: bytes) -> bytes:
     """Return the user portion of an email address."""
     f = user.find(b'@')
     if f >= 0:
@@ -459,7 +474,7 @@
     return user
 
 
-def email(author):
+def email(author: bytes) -> bytes:
     '''get email of author.'''
     r = author.find(b'>')
     if r == -1:
@@ -467,7 +482,7 @@
     return author[author.find(b'<') + 1 : r]
 
 
-def person(author):
+def person(author: bytes) -> bytes:
     """Returns the name before an email address,
     interpreting it as per RFC 5322
 
@@ -612,7 +627,7 @@
     return mailmap
 
 
-def mapname(mailmap, author):
+def mapname(mailmap, author: bytes) -> bytes:
     """Returns the author field according to the mailmap cache, or
     the original author field.
 
@@ -663,7 +678,7 @@
 _correctauthorformat = remod.compile(br'^[^<]+\s<[^<>]+@[^<>]+>$')
 
 
-def isauthorwellformed(author):
+def isauthorwellformed(author: bytes) -> bool:
     """Return True if the author field is well formed
     (ie "Contributor Name <contrib@email.dom>")
 
@@ -685,7 +700,7 @@
     return _correctauthorformat.match(author) is not None
 
 
-def firstline(text):
+def firstline(text: bytes) -> bytes:
     """Return the first line of the input"""
     # Try to avoid running splitlines() on the whole string
     i = text.find(b'\n')
@@ -697,12 +712,13 @@
         return b''
 
 
-def ellipsis(text, maxlength=400):
+def ellipsis(text: bytes, maxlength: int = 400) -> bytes:
     """Trim string to at most maxlength (default: 400) columns in display."""
     return encoding.trim(text, maxlength, ellipsis=b'...')
 
 
-def escapestr(s):
+def escapestr(s: bytes) -> bytes:
+    # "bytes" is also a typing shortcut for bytes, bytearray, and memoryview
     if isinstance(s, memoryview):
         s = bytes(s)
     # call underlying function of s.encode('string_escape') directly for
@@ -710,7 +726,7 @@
     return codecs.escape_encode(s)[0]  # pytype: disable=module-attr
 
 
-def unescapestr(s):
+def unescapestr(s: bytes) -> bytes:
     return codecs.escape_decode(s)[0]  # pytype: disable=module-attr
 
 
@@ -724,7 +740,7 @@
         return pycompat.bytestr(encoding.strtolocal(str(obj)))
 
 
-def uirepr(s):
+def uirepr(s: bytes) -> bytes:
     # Avoid double backslash in Windows path repr()
     return pycompat.byterepr(pycompat.bytestr(s)).replace(b'\\\\', b'\\')
 
@@ -838,7 +854,9 @@
     return tw(**kwargs)
 
 
-def wrap(line, width, initindent=b'', hangindent=b''):
+def wrap(
+    line: bytes, width: int, initindent: bytes = b'', hangindent: bytes = b''
+) -> bytes:
     maxindent = max(len(hangindent), len(initindent))
     if width <= maxindent:
         # adjust for weird terminal size
@@ -875,7 +893,7 @@
 }
 
 
-def parsebool(s):
+def parsebool(s: bytes) -> Optional[bool]:
     """Parse s into a boolean.
 
     If s is not a valid boolean, returns None.
@@ -883,7 +901,8 @@
     return _booleans.get(s.lower(), None)
 
 
-def parselist(value):
+# TODO: make arg mandatory (and fix code below?)
+def parselist(value: Optional[bytes]):
     """parse a configuration value as a list of comma/space separated strings
 
     >>> parselist(b'this,is "a small" ,test')
@@ -973,7 +992,7 @@
     return result or []
 
 
-def evalpythonliteral(s):
+def evalpythonliteral(s: bytes):
     """Evaluate a string containing a Python literal expression"""
     # We could backport our tokenizer hack to rewrite '' to u'' if we want
     return ast.literal_eval(s.decode('latin1'))
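
The `@overload` pair added for `reescape` gives type checkers a precise
bytes-to-bytes / str-to-str signature while the single runtime body stays
dynamic. The idiom in isolation, as a self-contained sketch:

    from typing import overload

    @overload
    def double(x: bytes) -> bytes:
        ...

    @overload
    def double(x: str) -> str:
        ...

    def double(x):
        # one runtime implementation backs both declared signatures
        return x + x

    assert double(b'ab') == b'abab'
    assert double('ab') == 'abab'
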
--- a/mercurial/vfs.py	Sat Nov 19 16:14:20 2022 +0100
+++ b/mercurial/vfs.py	Sat Nov 19 20:40:47 2022 +0100
@@ -11,6 +11,10 @@
 import stat
 import threading
 
+from typing import (
+    Optional,
+)
+
 from .i18n import _
 from .pycompat import (
     delattr,
@@ -26,7 +30,7 @@
 )
 
 
-def _avoidambig(path, oldstat):
+def _avoidambig(path: bytes, oldstat):
     """Avoid file stat ambiguity forcibly
 
     This function causes copying ``path`` file, if it is owned by
@@ -60,16 +64,17 @@
         '''Prevent instantiation; don't call this from subclasses.'''
         raise NotImplementedError('attempted instantiating ' + str(type(self)))
 
-    def __call__(self, path, mode=b'rb', **kwargs):
+    # TODO: type return, which is util.posixfile wrapped by a proxy
+    def __call__(self, path: bytes, mode: bytes = b'rb', **kwargs):
         raise NotImplementedError
 
-    def _auditpath(self, path, mode):
+    def _auditpath(self, path: bytes, mode: bytes):
         raise NotImplementedError
 
-    def join(self, path, *insidef):
+    def join(self, path: Optional[bytes], *insidef: bytes) -> bytes:
         raise NotImplementedError
 
-    def tryread(self, path):
+    def tryread(self, path: bytes) -> bytes:
         '''gracefully return an empty string for missing files'''
         try:
             return self.read(path)
@@ -77,7 +82,7 @@
             pass
         return b""
 
-    def tryreadlines(self, path, mode=b'rb'):
+    def tryreadlines(self, path: bytes, mode: bytes = b'rb'):
         '''gracefully return an empty array for missing files'''
         try:
             return self.readlines(path, mode=mode)
@@ -95,57 +100,61 @@
         """
         return self.__call__
 
-    def read(self, path):
+    def read(self, path: bytes) -> bytes:
         with self(path, b'rb') as fp:
             return fp.read()
 
-    def readlines(self, path, mode=b'rb'):
+    def readlines(self, path: bytes, mode: bytes = b'rb'):
         with self(path, mode=mode) as fp:
             return fp.readlines()
 
-    def write(self, path, data, backgroundclose=False, **kwargs):
+    def write(
+        self, path: bytes, data: bytes, backgroundclose=False, **kwargs
+    ) -> int:
         with self(path, b'wb', backgroundclose=backgroundclose, **kwargs) as fp:
             return fp.write(data)
 
-    def writelines(self, path, data, mode=b'wb', notindexed=False):
+    def writelines(
+        self, path: bytes, data: bytes, mode: bytes = b'wb', notindexed=False
+    ) -> None:
         with self(path, mode=mode, notindexed=notindexed) as fp:
             return fp.writelines(data)
 
-    def append(self, path, data):
+    def append(self, path: bytes, data: bytes) -> int:
         with self(path, b'ab') as fp:
             return fp.write(data)
 
-    def basename(self, path):
+    def basename(self, path: bytes) -> bytes:
         """return base element of a path (as os.path.basename would do)
 
         This exists to allow handling of strange encoding if needed."""
         return os.path.basename(path)
 
-    def chmod(self, path, mode):
+    def chmod(self, path: bytes, mode: int) -> None:
         return os.chmod(self.join(path), mode)
 
-    def dirname(self, path):
+    def dirname(self, path: bytes) -> bytes:
         """return dirname element of a path (as os.path.dirname would do)
 
         This exists to allow handling of strange encoding if needed."""
         return os.path.dirname(path)
 
-    def exists(self, path=None):
+    def exists(self, path: Optional[bytes] = None) -> bool:
         return os.path.exists(self.join(path))
 
     def fstat(self, fp):
         return util.fstat(fp)
 
-    def isdir(self, path=None):
+    def isdir(self, path: Optional[bytes] = None) -> bool:
         return os.path.isdir(self.join(path))
 
-    def isfile(self, path=None):
+    def isfile(self, path: Optional[bytes] = None) -> bool:
         return os.path.isfile(self.join(path))
 
-    def islink(self, path=None):
+    def islink(self, path: Optional[bytes] = None) -> bool:
         return os.path.islink(self.join(path))
 
-    def isfileorlink(self, path=None):
+    def isfileorlink(self, path: Optional[bytes] = None) -> bool:
         """return whether path is a regular file or a symlink
 
         Unlike isfile, this doesn't follow symlinks."""
@@ -156,7 +165,7 @@
         mode = st.st_mode
         return stat.S_ISREG(mode) or stat.S_ISLNK(mode)
 
-    def _join(self, *paths):
+    def _join(self, *paths: bytes) -> bytes:
         root_idx = 0
         for idx, p in enumerate(paths):
             if os.path.isabs(p) or p.startswith(self._dir_sep):
@@ -166,41 +175,48 @@
         paths = [p for p in paths if p]
         return self._dir_sep.join(paths)
 
-    def reljoin(self, *paths):
+    def reljoin(self, *paths: bytes) -> bytes:
         """join various elements of a path together (as os.path.join would do)
 
         The vfs base is not injected so that paths stay relative. This exists
         to allow handling of strange encoding if needed."""
         return self._join(*paths)
 
-    def split(self, path):
+    def split(self, path: bytes):
         """split top-most element of a path (as os.path.split would do)
 
         This exists to allow handling of strange encoding if needed."""
         return os.path.split(path)
 
-    def lexists(self, path=None):
+    def lexists(self, path: Optional[bytes] = None) -> bool:
         return os.path.lexists(self.join(path))
 
-    def lstat(self, path=None):
+    def lstat(self, path: Optional[bytes] = None):
         return os.lstat(self.join(path))
 
-    def listdir(self, path=None):
+    def listdir(self, path: Optional[bytes] = None):
         return os.listdir(self.join(path))
 
-    def makedir(self, path=None, notindexed=True):
+    def makedir(self, path: Optional[bytes] = None, notindexed=True):
         return util.makedir(self.join(path), notindexed)
 
-    def makedirs(self, path=None, mode=None):
+    def makedirs(
+        self, path: Optional[bytes] = None, mode: Optional[int] = None
+    ):
         return util.makedirs(self.join(path), mode)
 
-    def makelock(self, info, path):
+    def makelock(self, info, path: bytes):
         return util.makelock(info, self.join(path))
 
-    def mkdir(self, path=None):
+    def mkdir(self, path: Optional[bytes] = None):
         return os.mkdir(self.join(path))
 
-    def mkstemp(self, suffix=b'', prefix=b'tmp', dir=None):
+    def mkstemp(
+        self,
+        suffix: bytes = b'',
+        prefix: bytes = b'tmp',
+        dir: Optional[bytes] = None,
+    ):
         fd, name = pycompat.mkstemp(
             suffix=suffix, prefix=prefix, dir=self.join(dir)
         )
@@ -210,13 +226,13 @@
         else:
             return fd, fname
 
-    def readdir(self, path=None, stat=None, skip=None):
+    def readdir(self, path: Optional[bytes] = None, stat=None, skip=None):
         return util.listdir(self.join(path), stat, skip)
 
-    def readlock(self, path):
+    def readlock(self, path: bytes) -> bytes:
         return util.readlock(self.join(path))
 
-    def rename(self, src, dst, checkambig=False):
+    def rename(self, src: bytes, dst: bytes, checkambig=False):
         """Rename from src to dst
 
         checkambig argument is used with util.filestat, and is useful
@@ -238,18 +254,20 @@
             return ret
         return util.rename(srcpath, dstpath)
 
-    def readlink(self, path):
+    def readlink(self, path: bytes) -> bytes:
         return util.readlink(self.join(path))
 
-    def removedirs(self, path=None):
+    def removedirs(self, path: Optional[bytes] = None):
         """Remove a leaf directory and all empty intermediate ones"""
         return util.removedirs(self.join(path))
 
-    def rmdir(self, path=None):
+    def rmdir(self, path: Optional[bytes] = None):
         """Remove an empty directory."""
         return os.rmdir(self.join(path))
 
-    def rmtree(self, path=None, ignore_errors=False, forcibly=False):
+    def rmtree(
+        self, path: Optional[bytes] = None, ignore_errors=False, forcibly=False
+    ):
         """Remove a directory tree recursively
 
         If ``forcibly``, this tries to remove READ-ONLY files, too.
@@ -272,28 +290,30 @@
             self.join(path), ignore_errors=ignore_errors, onerror=onerror
         )
 
-    def setflags(self, path, l, x):
+    def setflags(self, path: bytes, l: bool, x: bool):
         return util.setflags(self.join(path), l, x)
 
-    def stat(self, path=None):
+    def stat(self, path: Optional[bytes] = None):
         return os.stat(self.join(path))
 
-    def unlink(self, path=None):
+    def unlink(self, path: Optional[bytes] = None):
         return util.unlink(self.join(path))
 
-    def tryunlink(self, path=None):
+    def tryunlink(self, path: Optional[bytes] = None):
         """Attempt to remove a file, ignoring missing file errors."""
         util.tryunlink(self.join(path))
 
-    def unlinkpath(self, path=None, ignoremissing=False, rmdir=True):
+    def unlinkpath(
+        self, path: Optional[bytes] = None, ignoremissing=False, rmdir=True
+    ):
         return util.unlinkpath(
             self.join(path), ignoremissing=ignoremissing, rmdir=rmdir
         )
 
-    def utime(self, path=None, t=None):
+    def utime(self, path: Optional[bytes] = None, t=None):
         return os.utime(self.join(path), t)
 
-    def walk(self, path=None, onerror=None):
+    def walk(self, path: Optional[bytes] = None, onerror=None):
         """Yield (dirpath, dirs, files) tuple for each directories under path
 
         ``dirpath`` is relative one from the root of this vfs. This
@@ -360,7 +380,7 @@
 
     def __init__(
         self,
-        base,
+        base: bytes,
         audit=True,
         cacheaudited=False,
         expandpath=False,
@@ -381,7 +401,7 @@
         self.options = {}
 
     @util.propertycache
-    def _cansymlink(self):
+    def _cansymlink(self) -> bool:
         return util.checklink(self.base)
 
     @util.propertycache
@@ -393,7 +413,7 @@
             return
         os.chmod(name, self.createmode & 0o666)
 
-    def _auditpath(self, path, mode):
+    def _auditpath(self, path, mode) -> None:
         if self._audit:
             if os.path.isabs(path) and path.startswith(self.base):
                 path = os.path.relpath(path, self.base)
@@ -404,8 +424,8 @@
 
     def __call__(
         self,
-        path,
-        mode=b"r",
+        path: bytes,
+        mode: bytes = b"rb",
         atomictemp=False,
         notindexed=False,
         backgroundclose=False,
@@ -518,7 +538,7 @@
 
         return fp
 
-    def symlink(self, src, dst):
+    def symlink(self, src: bytes, dst: bytes) -> None:
         self.audit(dst)
         linkname = self.join(dst)
         util.tryunlink(linkname)
@@ -538,7 +558,7 @@
         else:
             self.write(dst, src)
 
-    def join(self, path, *insidef):
+    def join(self, path: Optional[bytes], *insidef: bytes) -> bytes:
         if path:
             parts = [self.base, path]
             parts.extend(insidef)
@@ -551,7 +571,7 @@
 
 
 class proxyvfs(abstractvfs):
-    def __init__(self, vfs):
+    def __init__(self, vfs: "vfs"):
         self.vfs = vfs
 
     def _auditpath(self, path, mode):
@@ -569,14 +589,14 @@
 class filtervfs(proxyvfs, abstractvfs):
     '''Wrapper vfs for filtering filenames with a function.'''
 
-    def __init__(self, vfs, filter):
+    def __init__(self, vfs: "vfs", filter):
         proxyvfs.__init__(self, vfs)
         self._filter = filter
 
-    def __call__(self, path, *args, **kwargs):
+    def __call__(self, path: bytes, *args, **kwargs):
         return self.vfs(self._filter(path), *args, **kwargs)
 
-    def join(self, path, *insidef):
+    def join(self, path: Optional[bytes], *insidef: bytes) -> bytes:
         if path:
             return self.vfs.join(self._filter(self.vfs.reljoin(path, *insidef)))
         else:
@@ -589,15 +609,15 @@
 class readonlyvfs(proxyvfs):
     '''Wrapper vfs preventing any writing.'''
 
-    def __init__(self, vfs):
+    def __init__(self, vfs: "vfs"):
         proxyvfs.__init__(self, vfs)
 
-    def __call__(self, path, mode=b'r', *args, **kw):
+    def __call__(self, path: bytes, mode: bytes = b'rb', *args, **kw):
         if mode not in (b'r', b'rb'):
             raise error.Abort(_(b'this vfs is read only'))
         return self.vfs(path, mode, *args, **kw)
 
-    def join(self, path, *insidef):
+    def join(self, path: Optional[bytes], *insidef: bytes) -> bytes:
         return self.vfs.join(path, *insidef)
 
 
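A pattern worth noting in these annotations: `path` is `Optional[bytes]`
exactly on the methods that funnel through `join()`, where a missing path
means "the vfs base itself". A toy model of that convention (the class and
paths are hypothetical):

    from typing import Optional

    class toyvfs:
        def __init__(self, base: bytes):
            self.base = base

        def join(self, path: Optional[bytes], *insidef: bytes) -> bytes:
            if path:
                return b'/'.join((self.base, path) + insidef)
            # no path given: operate on the vfs root itself
            return self.base

    assert toyvfs(b'/repo').join(None) == b'/repo'
    assert toyvfs(b'/repo').join(b'.hg', b'store') == b'/repo/.hg/store'
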
--- a/relnotes/next	Sat Nov 19 16:14:20 2022 +0100
+++ b/relnotes/next	Sat Nov 19 20:40:47 2022 +0100
@@ -16,3 +16,7 @@
 == Internal API Changes ==
 
 == Miscellaneous ==
+
+ * pullbundle support no longer requires setting a server-side option;
+   providing a .hg/pullbundles.manifest according to the syntax specified in
+   'hg help -e clonebundles' is enough.
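
For illustration only, such a server-side manifest could be as small as the
single line below; the URL and bundle spec are invented, and 'hg help -e
clonebundles' remains the authority on the accepted attributes:

    https://hg.example.com/bundles/partial.hg BUNDLESPEC=gzip-v2
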
--- a/rust/hg-core/src/dirstate_tree/status.rs	Sat Nov 19 16:14:20 2022 +0100
+++ b/rust/hg-core/src/dirstate_tree/status.rs	Sat Nov 19 20:40:47 2022 +0100
@@ -15,7 +15,6 @@
 use crate::utils::hg_path::HgPath;
 use crate::BadMatch;
 use crate::DirstateStatus;
-use crate::HgPathBuf;
 use crate::HgPathCow;
 use crate::PatternFileWarning;
 use crate::StatusError;
@@ -25,6 +24,8 @@
 use rayon::prelude::*;
 use sha1::{Digest, Sha1};
 use std::borrow::Cow;
+use std::convert::TryFrom;
+use std::convert::TryInto;
 use std::io;
 use std::path::Path;
 use std::path::PathBuf;
@@ -147,7 +148,6 @@
     let hg_path = &BorrowedPath::OnDisk(HgPath::new(""));
     let has_ignored_ancestor = HasIgnoredAncestor::create(None, hg_path);
     let root_cached_mtime = None;
-    let root_dir_metadata = None;
     // If the path we have for the repository root is a symlink, do follow it.
     // (As opposed to symlinks within the working directory which are not
     // followed, using `std::fs::symlink_metadata`.)
@@ -155,8 +155,12 @@
         &has_ignored_ancestor,
         dmap.root.as_ref(),
         hg_path,
-        &root_dir,
-        root_dir_metadata,
+        &DirEntry {
+            hg_path: Cow::Borrowed(HgPath::new(b"")),
+            fs_path: Cow::Borrowed(&root_dir),
+            symlink_metadata: None,
+            file_type: FakeFileType::Directory,
+        },
         root_cached_mtime,
         is_at_repo_root,
     )?;
@@ -340,7 +344,7 @@
     /// need to call `read_dir`.
     fn can_skip_fs_readdir(
         &self,
-        directory_metadata: Option<&std::fs::Metadata>,
+        directory_entry: &DirEntry,
         cached_directory_mtime: Option<TruncatedTimestamp>,
     ) -> bool {
         if !self.options.list_unknown && !self.options.list_ignored {
@@ -356,9 +360,9 @@
                 // The dirstate contains a cached mtime for this directory, set
                 // by a previous run of the `status` algorithm which found this
                 // directory eligible for `read_dir` caching.
-                if let Some(meta) = directory_metadata {
+                if let Ok(meta) = directory_entry.symlink_metadata() {
                     if cached_mtime
-                        .likely_equal_to_mtime_of(meta)
+                        .likely_equal_to_mtime_of(&meta)
                         .unwrap_or(false)
                     {
                         // The mtime of that directory has not changed
@@ -379,26 +383,40 @@
         has_ignored_ancestor: &'ancestor HasIgnoredAncestor<'ancestor>,
         dirstate_nodes: ChildNodesRef<'tree, 'on_disk>,
         directory_hg_path: &BorrowedPath<'tree, 'on_disk>,
-        directory_fs_path: &Path,
-        directory_metadata: Option<&std::fs::Metadata>,
+        directory_entry: &DirEntry,
         cached_directory_mtime: Option<TruncatedTimestamp>,
         is_at_repo_root: bool,
     ) -> Result<bool, DirstateV2ParseError> {
-        if self.can_skip_fs_readdir(directory_metadata, cached_directory_mtime)
-        {
+        if self.can_skip_fs_readdir(directory_entry, cached_directory_mtime) {
             dirstate_nodes
                 .par_iter()
                 .map(|dirstate_node| {
-                    let fs_path = directory_fs_path.join(get_path_from_bytes(
+                    let fs_path = &directory_entry.fs_path;
+                    let fs_path = fs_path.join(get_path_from_bytes(
                         dirstate_node.base_name(self.dmap.on_disk)?.as_bytes(),
                     ));
                     match std::fs::symlink_metadata(&fs_path) {
-                        Ok(fs_metadata) => self.traverse_fs_and_dirstate(
-                            &fs_path,
-                            &fs_metadata,
-                            dirstate_node,
-                            has_ignored_ancestor,
-                        ),
+                        Ok(fs_metadata) => {
+                            let file_type =
+                                match fs_metadata.file_type().try_into() {
+                                    Ok(file_type) => file_type,
+                                    Err(_) => return Ok(()),
+                                };
+                            let entry = DirEntry {
+                                hg_path: Cow::Borrowed(
+                                    dirstate_node
+                                        .full_path(&self.dmap.on_disk)?,
+                                ),
+                                fs_path: Cow::Borrowed(&fs_path),
+                                symlink_metadata: Some(fs_metadata),
+                                file_type,
+                            };
+                            self.traverse_fs_and_dirstate(
+                                &entry,
+                                dirstate_node,
+                                has_ignored_ancestor,
+                            )
+                        }
                         Err(e) if e.kind() == std::io::ErrorKind::NotFound => {
                             self.traverse_dirstate_only(dirstate_node)
                         }
@@ -419,7 +437,7 @@
 
         let mut fs_entries = if let Ok(entries) = self.read_dir(
             directory_hg_path,
-            directory_fs_path,
+            &directory_entry.fs_path,
             is_at_repo_root,
         ) {
             entries
@@ -435,7 +453,7 @@
         let dirstate_nodes = dirstate_nodes.sorted();
         // `sort_unstable_by_key` doesn’t allow keys borrowing from the value:
         // https://github.com/rust-lang/rust/issues/34162
-        fs_entries.sort_unstable_by(|e1, e2| e1.base_name.cmp(&e2.base_name));
+        fs_entries.sort_unstable_by(|e1, e2| e1.hg_path.cmp(&e2.hg_path));
 
         // Propagate here any error that would happen inside the comparison
         // callback below
@@ -451,7 +469,7 @@
                 dirstate_node
                     .base_name(self.dmap.on_disk)
                     .unwrap()
-                    .cmp(&fs_entry.base_name)
+                    .cmp(&fs_entry.hg_path)
             },
         )
         .par_bridge()
@@ -461,8 +479,7 @@
             match pair {
                 Both(dirstate_node, fs_entry) => {
                     self.traverse_fs_and_dirstate(
-                        &fs_entry.full_path,
-                        &fs_entry.metadata,
+                        &fs_entry,
                         dirstate_node,
                         has_ignored_ancestor,
                     )?;
@@ -487,23 +504,21 @@
 
     fn traverse_fs_and_dirstate<'ancestor>(
         &self,
-        fs_path: &Path,
-        fs_metadata: &std::fs::Metadata,
+        fs_entry: &DirEntry,
         dirstate_node: NodeRef<'tree, 'on_disk>,
         has_ignored_ancestor: &'ancestor HasIgnoredAncestor<'ancestor>,
     ) -> Result<(), DirstateV2ParseError> {
         let outdated_dircache =
             self.check_for_outdated_directory_cache(&dirstate_node)?;
         let hg_path = &dirstate_node.full_path_borrowed(self.dmap.on_disk)?;
-        let file_type = fs_metadata.file_type();
-        let file_or_symlink = file_type.is_file() || file_type.is_symlink();
+        let file_or_symlink = fs_entry.is_file() || fs_entry.is_symlink();
         if !file_or_symlink {
             // If we previously had a file here, it was removed (with
             // `hg rm` or similar) or deleted before it could be
             // replaced by a directory or something else.
             self.mark_removed_or_deleted_if_file(&dirstate_node)?;
         }
-        if file_type.is_dir() {
+        if fs_entry.is_dir() {
             if self.options.collect_traversed_dirs {
                 self.outcome
                     .lock()
@@ -521,14 +536,13 @@
                     &is_ignored,
                     dirstate_node.children(self.dmap.on_disk)?,
                     hg_path,
-                    fs_path,
-                    Some(fs_metadata),
+                    fs_entry,
                     dirstate_node.cached_directory_mtime()?,
                     is_at_repo_root,
                 )?;
             self.maybe_save_directory_mtime(
                 children_all_have_dirstate_node_or_are_ignored,
-                fs_metadata,
+                fs_entry,
                 dirstate_node,
                 outdated_dircache,
             )?
@@ -550,7 +564,7 @@
                     } else if entry.modified() {
                         self.push_outcome(Outcome::Modified, &dirstate_node)?;
                     } else {
-                        self.handle_normal_file(&dirstate_node, fs_metadata)?;
+                        self.handle_normal_file(&dirstate_node, fs_entry)?;
                     }
                 } else {
                     // `node.entry.is_none()` indicates a "directory"
@@ -578,7 +592,7 @@
     fn maybe_save_directory_mtime(
         &self,
         children_all_have_dirstate_node_or_are_ignored: bool,
-        directory_metadata: &std::fs::Metadata,
+        directory_entry: &DirEntry,
         dirstate_node: NodeRef<'tree, 'on_disk>,
         outdated_directory_cache: bool,
     ) -> Result<(), DirstateV2ParseError> {
@@ -605,11 +619,13 @@
         // resolution based on the filesystem (for example ext3
         // only stores integer seconds), kernel (see
         // https://stackoverflow.com/a/14393315/1162888), etc.
+        let metadata = match directory_entry.symlink_metadata() {
+            Ok(meta) => meta,
+            Err(_) => return Ok(()),
+        };
         let directory_mtime = if let Ok(option) =
-            TruncatedTimestamp::for_reliable_mtime_of(
-                directory_metadata,
-                status_start,
-            ) {
+            TruncatedTimestamp::for_reliable_mtime_of(&metadata, status_start)
+        {
             if let Some(directory_mtime) = option {
                 directory_mtime
             } else {
@@ -671,18 +687,23 @@
     fn handle_normal_file(
         &self,
         dirstate_node: &NodeRef<'tree, 'on_disk>,
-        fs_metadata: &std::fs::Metadata,
+        fs_entry: &DirEntry,
     ) -> Result<(), DirstateV2ParseError> {
         // Keep the low 31 bits
         fn truncate_u64(value: u64) -> i32 {
             (value & 0x7FFF_FFFF) as i32
         }
 
+        let fs_metadata = match fs_entry.symlink_metadata() {
+            Ok(meta) => meta,
+            Err(_) => return Ok(()),
+        };
+
         let entry = dirstate_node
             .entry()?
             .expect("handle_normal_file called with entry-less node");
         let mode_changed =
-            || self.options.check_exec && entry.mode_changed(fs_metadata);
+            || self.options.check_exec && entry.mode_changed(&fs_metadata);
         let size = entry.size();
         let size_changed = size != truncate_u64(fs_metadata.len());
         if size >= 0 && size_changed && fs_metadata.file_type().is_symlink() {
@@ -697,7 +718,7 @@
         } else {
             let mtime_looks_clean;
             if let Some(dirstate_mtime) = entry.truncated_mtime() {
-                let fs_mtime = TruncatedTimestamp::for_mtime_of(fs_metadata)
+                let fs_mtime = TruncatedTimestamp::for_mtime_of(&fs_metadata)
                     .expect("OS/libc does not support mtime?");
                 // There might be a change in the future if for example the
                 // internal clock become off while process run, but this is a
@@ -767,10 +788,9 @@
         directory_hg_path: &HgPath,
         fs_entry: &DirEntry,
     ) -> bool {
-        let hg_path = directory_hg_path.join(&fs_entry.base_name);
-        let file_type = fs_entry.metadata.file_type();
-        let file_or_symlink = file_type.is_file() || file_type.is_symlink();
-        if file_type.is_dir() {
+        let hg_path = directory_hg_path.join(&fs_entry.hg_path);
+        let file_or_symlink = fs_entry.is_file() || fs_entry.is_symlink();
+        if fs_entry.is_dir() {
             let is_ignored =
                 has_ignored_ancestor || (self.ignore_fn)(&hg_path);
             let traverse_children = if is_ignored {
@@ -783,11 +803,9 @@
             };
             if traverse_children {
                 let is_at_repo_root = false;
-                if let Ok(children_fs_entries) = self.read_dir(
-                    &hg_path,
-                    &fs_entry.full_path,
-                    is_at_repo_root,
-                ) {
+                if let Ok(children_fs_entries) =
+                    self.read_dir(&hg_path, &fs_entry.fs_path, is_at_repo_root)
+                {
                     children_fs_entries.par_iter().for_each(|child_fs_entry| {
                         self.traverse_fs_only(
                             is_ignored,
@@ -850,15 +868,46 @@
     }
 }
 
-struct DirEntry {
-    base_name: HgPathBuf,
-    full_path: PathBuf,
-    metadata: std::fs::Metadata,
+/// Since [`std::fs::FileType`] cannot be built directly, we emulate what we
+/// care about.
+#[derive(Copy, Clone, Debug, PartialEq, Eq)]
+enum FakeFileType {
+    File,
+    Directory,
+    Symlink,
 }
 
-impl DirEntry {
-    /// Returns **unsorted** entries in the given directory, with name and
-    /// metadata.
+impl TryFrom<std::fs::FileType> for FakeFileType {
+    type Error = ();
+
+    fn try_from(f: std::fs::FileType) -> Result<Self, Self::Error> {
+        if f.is_dir() {
+            Ok(Self::Directory)
+        } else if f.is_file() {
+            Ok(Self::File)
+        } else if f.is_symlink() {
+            Ok(Self::Symlink)
+        } else {
+            // Things like FIFO etc.
+            Err(())
+        }
+    }
+}
+
+struct DirEntry<'a> {
+    /// Path as stored in the dirstate, or just the filename for optimization.
+    hg_path: HgPathCow<'a>,
+    /// Filesystem path
+    fs_path: Cow<'a, Path>,
+    /// Lazily computed
+    symlink_metadata: Option<std::fs::Metadata>,
+    /// Already computed for ergonomics.
+    file_type: FakeFileType,
+}
+
+impl<'a> DirEntry<'a> {
+    /// Returns **unsorted** entries in the given directory, with name,
+    /// metadata and file type.
     ///
     /// If a `.hg` sub-directory is encountered:
     ///
@@ -872,7 +921,7 @@
         let mut results = Vec::new();
         for entry in read_dir_path.read_dir()? {
             let entry = entry?;
-            let metadata = match entry.metadata() {
+            let file_type = match entry.file_type() {
                 Ok(v) => v,
                 Err(e) => {
                     // race with file deletion?
@@ -889,7 +938,7 @@
                 if is_at_repo_root {
                     // Skip the repo’s own .hg (might be a symlink)
                     continue;
-                } else if metadata.is_dir() {
+                } else if file_type.is_dir() {
                     // A .hg sub-directory at another location means a subrepo,
                     // skip it entirely.
                     return Ok(Vec::new());
@@ -900,15 +949,40 @@
             } else {
                 entry.path()
             };
-            let base_name = get_bytes_from_os_string(file_name).into();
+            let filename =
+                Cow::Owned(get_bytes_from_os_string(file_name).into());
+            let file_type = match FakeFileType::try_from(file_type) {
+                Ok(file_type) => file_type,
+                Err(_) => continue,
+            };
             results.push(DirEntry {
-                base_name,
-                full_path,
-                metadata,
+                hg_path: filename,
+                fs_path: Cow::Owned(full_path.to_path_buf()),
+                symlink_metadata: None,
+                file_type,
             })
         }
         Ok(results)
     }
+
+    fn symlink_metadata(&self) -> Result<std::fs::Metadata, std::io::Error> {
+        match &self.symlink_metadata {
+            Some(meta) => Ok(meta.clone()),
+            None => std::fs::symlink_metadata(&self.fs_path),
+        }
+    }
+
+    fn is_dir(&self) -> bool {
+        self.file_type == FakeFileType::Directory
+    }
+
+    fn is_file(&self) -> bool {
+        self.file_type == FakeFileType::File
+    }
+
+    fn is_symlink(&self) -> bool {
+        self.file_type == FakeFileType::Symlink
+    }
 }
 
 /// Return the `mtime` of a temporary file newly-created in the `.hg` directory
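
The refactor above trades an eagerly-fetched `std::fs::Metadata` for a
`DirEntry` that records the cheap file type from `read_dir` up front and only
stats the file via `symlink_metadata()` when full metadata is actually needed.
The same laziness, sketched in Python for brevity (names are hypothetical, and
like the Rust code this re-stats when no metadata was captured up front):

    import os

    class LazyDirEntry:
        def __init__(self, fs_path, file_type, metadata=None):
            self.fs_path = fs_path
            self.file_type = file_type  # cheap: known from the readdir entry
            self._metadata = metadata   # expensive: may never be needed

        def symlink_metadata(self):
            if self._metadata is not None:
                return self._metadata
            return os.lstat(self.fs_path)  # fetched on demand

        def is_dir(self):
            return self.file_type == 'dir'
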
--- a/tests/test-bundle.t	Sat Nov 19 16:14:20 2022 +0100
+++ b/tests/test-bundle.t	Sat Nov 19 20:40:47 2022 +0100
@@ -1039,6 +1039,24 @@
   $ hg bundle -a --config devel.bundle.delta=full ./full.hg
   3 changesets found
 
+
+Test the debug statistics when building a bundle
+-----------------------------------------------
+
+  $ hg bundle -a ./default.hg --config debug.bundling-stats=yes
+  3 changesets found
+  DEBUG-BUNDLING: revisions:                9
+  DEBUG-BUNDLING:   changelog:              3
+  DEBUG-BUNDLING:   manifest:               3
+  DEBUG-BUNDLING:   files:                  3 (for 3 revlogs)
+  DEBUG-BUNDLING: deltas:
+  DEBUG-BUNDLING:   from-storage:           2 (100% of available 2)
+  DEBUG-BUNDLING:   computed:               7
+  DEBUG-BUNDLING:     full:                 7 (100% of native 7)
+  DEBUG-BUNDLING:       changelog:          3 (100% of native 3)
+  DEBUG-BUNDLING:       manifests:          1 (100% of native 1)
+  DEBUG-BUNDLING:       files:              3 (100% of native 3)
+
 Test the debug output when applying delta
 -----------------------------------------
 
@@ -1048,18 +1066,62 @@
   > --config storage.revlog.reuse-external-delta=no \
   > --config storage.revlog.reuse-external-delta-parent=no
   adding changesets
-  DBG-DELTAS: CHANGELOG:   rev=0: search-rounds=0 try-count=0 - delta-type=full   snap-depth=0 - p1-chain-length=-1 p2-chain-length=-1 - duration=* (glob)
-  DBG-DELTAS: CHANGELOG:   rev=1: search-rounds=0 try-count=0 - delta-type=full   snap-depth=0 - p1-chain-length=0 p2-chain-length=-1 - duration=* (glob)
-  DBG-DELTAS: CHANGELOG:   rev=2: search-rounds=0 try-count=0 - delta-type=full   snap-depth=0 - p1-chain-length=0 p2-chain-length=-1 - duration=* (glob)
+  DBG-DELTAS: CHANGELOG:   rev=0: delta-base=0 is-cached=0 - search-rounds=0 try-count=0 - delta-type=full   snap-depth=0 - p1-chain-length=-1 p2-chain-length=-1 - duration=* (glob)
+  DBG-DELTAS: CHANGELOG:   rev=1: delta-base=1 is-cached=0 - search-rounds=0 try-count=0 - delta-type=full   snap-depth=0 - p1-chain-length=0 p2-chain-length=-1 - duration=* (glob)
+  DBG-DELTAS: CHANGELOG:   rev=2: delta-base=2 is-cached=0 - search-rounds=0 try-count=0 - delta-type=full   snap-depth=0 - p1-chain-length=0 p2-chain-length=-1 - duration=* (glob)
   adding manifests
-  DBG-DELTAS: MANIFESTLOG: rev=0: search-rounds=0 try-count=0 - delta-type=full   snap-depth=0 - p1-chain-length=-1 p2-chain-length=-1 - duration=* (glob)
-  DBG-DELTAS: MANIFESTLOG: rev=1: search-rounds=1 try-count=1 - delta-type=delta  snap-depth=0 - p1-chain-length=0 p2-chain-length=-1 - duration=* (glob)
-  DBG-DELTAS: MANIFESTLOG: rev=2: search-rounds=1 try-count=1 - delta-type=delta  snap-depth=0 - p1-chain-length=1 p2-chain-length=-1 - duration=* (glob)
+  DBG-DELTAS: MANIFESTLOG: rev=0: delta-base=0 is-cached=0 - search-rounds=0 try-count=0 - delta-type=full   snap-depth=0 - p1-chain-length=-1 p2-chain-length=-1 - duration=* (glob)
+  DBG-DELTAS: MANIFESTLOG: rev=1: delta-base=0 is-cached=1 - search-rounds=1 try-count=1 - delta-type=delta  snap-depth=0 - p1-chain-length=0 p2-chain-length=-1 - duration=* (glob)
+  DBG-DELTAS: MANIFESTLOG: rev=2: delta-base=1 is-cached=1 - search-rounds=1 try-count=1 - delta-type=delta  snap-depth=0 - p1-chain-length=1 p2-chain-length=-1 - duration=* (glob)
   adding file changes
-  DBG-DELTAS: FILELOG:a:   rev=0: search-rounds=0 try-count=0 - delta-type=full   snap-depth=0 - p1-chain-length=-1 p2-chain-length=-1 - duration=* (glob)
-  DBG-DELTAS: FILELOG:b:   rev=0: search-rounds=0 try-count=0 - delta-type=full   snap-depth=0 - p1-chain-length=-1 p2-chain-length=-1 - duration=* (glob)
-  DBG-DELTAS: FILELOG:c:   rev=0: search-rounds=0 try-count=0 - delta-type=full   snap-depth=0 - p1-chain-length=-1 p2-chain-length=-1 - duration=* (glob)
+  DBG-DELTAS: FILELOG:a:   rev=0: delta-base=0 is-cached=0 - search-rounds=0 try-count=0 - delta-type=full   snap-depth=0 - p1-chain-length=-1 p2-chain-length=-1 - duration=* (glob)
+  DBG-DELTAS: FILELOG:b:   rev=0: delta-base=0 is-cached=0 - search-rounds=0 try-count=0 - delta-type=full   snap-depth=0 - p1-chain-length=-1 p2-chain-length=-1 - duration=* (glob)
+  DBG-DELTAS: FILELOG:c:   rev=0: delta-base=0 is-cached=0 - search-rounds=0 try-count=0 - delta-type=full   snap-depth=0 - p1-chain-length=-1 p2-chain-length=-1 - duration=* (glob)
   added 3 changesets with 3 changes to 3 files
   new changesets 4fe08cd4693e:4652c276ac4f (3 drafts)
   (run 'hg update' to get a working copy)
 
+
+Test the debug statistics when applying a bundle
+-----------------------------------------------
+
+  $ hg init bar
+  $ hg -R bar unbundle ./default.hg  --config debug.unbundling-stats=yes
+  adding changesets
+  adding manifests
+  adding file changes
+  DEBUG-UNBUNDLING: revisions:                9
+  DEBUG-UNBUNDLING:   changelog:              3             ( 33%)
+  DEBUG-UNBUNDLING:   manifests:              3             ( 33%)
+  DEBUG-UNBUNDLING:   files:                  3             ( 33%)
+  DEBUG-UNBUNDLING: total-time:      ?????????????? seconds (glob)
+  DEBUG-UNBUNDLING:   changelog:     ?????????????? seconds (???%) (glob)
+  DEBUG-UNBUNDLING:   manifests:     ?????????????? seconds (???%) (glob)
+  DEBUG-UNBUNDLING:   files:         ?????????????? seconds (???%) (glob)
+  DEBUG-UNBUNDLING: type-count:
+  DEBUG-UNBUNDLING:   changelog:
+  DEBUG-UNBUNDLING:     full:                 3
+  DEBUG-UNBUNDLING:       cached:             0             (  0%)
+  DEBUG-UNBUNDLING:   manifests:
+  DEBUG-UNBUNDLING:     full:                 1
+  DEBUG-UNBUNDLING:       cached:             0             (  0%)
+  DEBUG-UNBUNDLING:     delta:                2
+  DEBUG-UNBUNDLING:       cached:             2             (100%)
+  DEBUG-UNBUNDLING:   files:
+  DEBUG-UNBUNDLING:     full:                 3
+  DEBUG-UNBUNDLING:       cached:             0             (  0%)
+  DEBUG-UNBUNDLING: type-time:
+  DEBUG-UNBUNDLING:   changelog:
+  DEBUG-UNBUNDLING:     full:        ?????????????? seconds (???% of total) (glob)
+  DEBUG-UNBUNDLING:       cached:    ?????????????? seconds (???% of total) (glob)
+  DEBUG-UNBUNDLING:   manifests:
+  DEBUG-UNBUNDLING:     full:        ?????????????? seconds (???% of total) (glob)
+  DEBUG-UNBUNDLING:       cached:    ?????????????? seconds (???% of total) (glob)
+  DEBUG-UNBUNDLING:     delta:       ?????????????? seconds (???% of total) (glob)
+  DEBUG-UNBUNDLING:       cached:    ?????????????? seconds (???% of total) (glob)
+  DEBUG-UNBUNDLING:   files:
+  DEBUG-UNBUNDLING:     full:        ?????????????? seconds (???% of total) (glob)
+  DEBUG-UNBUNDLING:       cached:    ?????????????? seconds (???% of total) (glob)
+  added 3 changesets with 3 changes to 3 files
+  new changesets 4fe08cd4693e:4652c276ac4f (3 drafts)
+  (run 'hg update' to get a working copy)
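
The `--config` flags exercised by these tests have the usual hgrc equivalent;
a minimal sketch, using the section and option names shown above:

    [debug]
    bundling-stats = yes
    unbundling-stats = yes
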
--- a/tests/test-bundle2-exchange.t	Sat Nov 19 16:14:20 2022 +0100
+++ b/tests/test-bundle2-exchange.t	Sat Nov 19 20:40:47 2022 +0100
@@ -739,12 +739,10 @@
   $ hg -R main push ssh://user@dummy/other -r e7ec4e813ba6
   pushing to ssh://user@dummy/other
   searching for changes
-  remote: Fail early! (no-py3 chg !)
   remote: adding changesets
   remote: adding manifests
   remote: adding file changes
   remote: Fail early! (py3 !)
-  remote: Fail early! (no-py3 no-chg !)
   remote: transaction abort!
   remote: Cleaning up the mess...
   remote: rollback completed
--- a/tests/test-completion.t	Sat Nov 19 16:14:20 2022 +0100
+++ b/tests/test-completion.t	Sat Nov 19 20:40:47 2022 +0100
@@ -268,7 +268,7 @@
   config: untrusted, exp-all-known, edit, local, source, shared, non-shared, global, template
   continue: dry-run
   copy: forget, after, at-rev, force, include, exclude, dry-run
-  debug-delta-find: changelog, manifest, dir, template
+  debug-delta-find: changelog, manifest, dir, template, source
   debug-repair-issue6528: to-report, from-report, paranoid, dry-run
   debug-revlog-index: changelog, manifest, dir, template
   debugancestor: 
--- a/tests/test-convert-filemap.t	Sat Nov 19 16:14:20 2022 +0100
+++ b/tests/test-convert-filemap.t	Sat Nov 19 20:40:47 2022 +0100
@@ -292,12 +292,12 @@
   $ rm -rf source/.hg/store/data/dir/file4
 #endif
   $ hg -q convert --filemap renames.fmap --datesort source dummydest
-  abort: data/dir/file3@e96dce0bc6a217656a3a410e5e6bec2c4f42bf7c: no match found (reporevlogstore !)
+  abort: dir/file3@e96dce0bc6a217656a3a410e5e6bec2c4f42bf7c: no match found (reporevlogstore !)
   abort: data/dir/file3/index@e96dce0bc6a2: no node (reposimplestore !)
   [50]
   $ hg -q convert --filemap renames.fmap --datesort --config convert.hg.ignoreerrors=1 source renames.repo
-  ignoring: data/dir/file3@e96dce0bc6a217656a3a410e5e6bec2c4f42bf7c: no match found (reporevlogstore !)
-  ignoring: data/dir/file4@6edd55f559cdce67132b12ca09e09cee08b60442: no match found (reporevlogstore !)
+  ignoring: dir/file3@e96dce0bc6a217656a3a410e5e6bec2c4f42bf7c: no match found (reporevlogstore !)
+  ignoring: dir/file4@6edd55f559cdce67132b12ca09e09cee08b60442: no match found (reporevlogstore !)
   ignoring: data/dir/file3/index@e96dce0bc6a2: no node (reposimplestore !)
   ignoring: data/dir/file4/index@6edd55f559cd: no node (reposimplestore !)
   $ hg up -q -R renames.repo
--- a/tests/test-convert-hg-source.t	Sat Nov 19 16:14:20 2022 +0100
+++ b/tests/test-convert-hg-source.t	Sat Nov 19 20:40:47 2022 +0100
@@ -182,7 +182,7 @@
   sorting...
   converting...
   4 init
-  ignoring: data/b@1e88685f5ddec574a34c70af492f95b6debc8741: no match found (reporevlogstore !)
+  ignoring: b@1e88685f5ddec574a34c70af492f95b6debc8741: no match found (reporevlogstore !)
   ignoring: data/b/index@1e88685f5dde: no node (reposimplestore !)
   3 changeall
   2 changebagain
--- a/tests/test-demandimport.py	Sat Nov 19 16:14:20 2022 +0100
+++ b/tests/test-demandimport.py	Sat Nov 19 20:40:47 2022 +0100
@@ -234,3 +234,11 @@
 zipfileimp = __import__('ftplib', globals(), locals(), ['unknownattr'])
 assert f(zipfileimp) == "<module 'ftplib' from '?'>", f(zipfileimp)
 assert not util.safehasattr(zipfileimp, 'unknownattr')
+
+
+# test deactivation for issue6725
+del sys.modules['telnetlib']
+with demandimport.deactivated():
+    import telnetlib
+assert telnetlib.__loader__ == telnetlib.__spec__.loader
+assert telnetlib.__loader__.get_resource_reader
--- a/tests/test-install.t	Sat Nov 19 16:14:20 2022 +0100
+++ b/tests/test-install.t	Sat Nov 19 20:40:47 2022 +0100
@@ -233,42 +233,3 @@
   checking username (test)
   no problems detected
 #endif
-
-#if virtualenv no-py3 network-io no-pyoxidizer
-
-Note: --no-site-packages is the default for all versions enabled by hghave
-
-  $ "$PYTHON" -m virtualenv installenv >> pip.log
-  DEPRECATION: Python 2.7 will reach the end of its life on January 1st, 2020. Please upgrade your Python as Python 2.7 won't be maintained after that date. A future version of pip will drop support for Python 2.7. (?)
-  DEPRECATION: Python 2.7 will reach the end of its life on January 1st, 2020. Please upgrade your Python as Python 2.7 won't be maintained after that date. A future version of pip will drop support for Python 2.7. More details about Python 2 support in pip, can be found at https://pip.pypa.io/en/latest/development/release-process/#python-2-support (?)
-
-Note: we use this weird path to run pip and hg to avoid platform differences,
-since it's bin on most platforms but Scripts on Windows.
-  $ ./installenv/*/pip install $TESTDIR/.. >> pip.log
-  DEPRECATION: Python 2.7 will reach the end of its life on January 1st, 2020. Please upgrade your Python as Python 2.7 won't be maintained after that date. A future version of pip will drop support for Python 2.7. (?)
-  DEPRECATION: Python 2.7 will reach the end of its life on January 1st, 2020. Please upgrade your Python as Python 2.7 won't be maintained after that date. A future version of pip will drop support for Python 2.7. More details about Python 2 support in pip, can be found at https://pip.pypa.io/en/latest/development/release-process/#python-2-support (?)
-  DEPRECATION: Python 2.7 reached the end of its life on January 1st, 2020. Please upgrade your Python as Python 2.7 is no longer maintained. pip 21.0 will drop support for Python 2.7 in January 2021. More details about Python 2 support in pip can be found at https://pip.pypa.io/en/latest/development/release-process/#python-2-support pip 21.0 will remove support for this functionality. (?)
-  $ ./installenv/*/hg debuginstall || cat pip.log
-  checking encoding (ascii)...
-  checking Python executable (*) (glob)
-  checking Python implementation (*) (glob)
-  checking Python version (2.*) (glob)
-  checking Python lib (*)... (glob)
-  checking Python security support (*) (glob)
-    TLS 1.2 not supported by Python install; network connections lack modern security (?)
-    SNI not supported by Python install; may have connectivity issues with some servers (?)
-  checking Rust extensions \((installed|missing)\) (re)
-  checking Mercurial version (*) (glob)
-  checking Mercurial custom build (*) (glob)
-  checking module policy (*) (glob)
-  checking installed modules (*/mercurial)... (glob)
-  checking registered compression engines (*) (glob)
-  checking available compression engines (*) (glob)
-  checking available compression engines for wire protocol (*) (glob)
-  checking "re2" regexp engine \((available|missing)\) (re)
-  checking templates ($TESTTMP/installenv/*/site-packages/mercurial/templates)... (glob)
-  checking default template ($TESTTMP/installenv/*/site-packages/mercurial/templates/map-cmdline.default) (glob)
-  checking commit editor... (*) (glob)
-  checking username (test)
-  no problems detected
-#endif
--- a/tests/test-lfs.t	Sat Nov 19 16:14:20 2022 +0100
+++ b/tests/test-lfs.t	Sat Nov 19 20:40:47 2022 +0100
@@ -787,8 +787,8 @@
   checking manifests
   crosschecking files in changesets and manifests
   checking files
-   l@1: unpacking 46a2f24864bc: integrity check failed on data/l:0
-   large@0: unpacking 2c531e0992ff: integrity check failed on data/large:0
+   l@1: unpacking 46a2f24864bc: integrity check failed on l:0
+   large@0: unpacking 2c531e0992ff: integrity check failed on large:0
   checked 5 changesets with 10 changes to 4 files
   2 integrity errors encountered!
   (first damaged changeset appears to be 0)
@@ -897,9 +897,9 @@
   checking manifests
   crosschecking files in changesets and manifests
   checking files
-   l@1: unpacking 46a2f24864bc: integrity check failed on data/l:0
+   l@1: unpacking 46a2f24864bc: integrity check failed on l:0
   lfs: found 22f66a3fc0b9bf3f012c814303995ec07099b3a9ce02a7af84b5970811074a3b in the local lfs store
-   large@0: unpacking 2c531e0992ff: integrity check failed on data/large:0
+   large@0: unpacking 2c531e0992ff: integrity check failed on large:0
   lfs: found 89b6070915a3d573ff3599d1cda305bc5e38549b15c4847ab034169da66e1ca8 in the local lfs store
   lfs: found b1a6ea88da0017a0e77db139a54618986e9a2489bee24af9fe596de9daac498c in the local lfs store
   checked 5 changesets with 10 changes to 4 files
@@ -941,8 +941,8 @@
   checking manifests
   crosschecking files in changesets and manifests
   checking files
-   l@1: unpacking 46a2f24864bc: integrity check failed on data/l:0
-   large@0: unpacking 2c531e0992ff: integrity check failed on data/large:0
+   l@1: unpacking 46a2f24864bc: integrity check failed on l:0
+   large@0: unpacking 2c531e0992ff: integrity check failed on large:0
   checked 5 changesets with 10 changes to 4 files
   2 integrity errors encountered!
   (first damaged changeset appears to be 0)
@@ -967,9 +967,9 @@
   checking manifests
   crosschecking files in changesets and manifests
   checking files
-   l@1: unpacking 46a2f24864bc: integrity check failed on data/l:0
+   l@1: unpacking 46a2f24864bc: integrity check failed on l:0
   lfs: found 22f66a3fc0b9bf3f012c814303995ec07099b3a9ce02a7af84b5970811074a3b in the local lfs store
-   large@0: unpacking 2c531e0992ff: integrity check failed on data/large:0
+   large@0: unpacking 2c531e0992ff: integrity check failed on large:0
   lfs: found 89b6070915a3d573ff3599d1cda305bc5e38549b15c4847ab034169da66e1ca8 in the local lfs store
   lfs: found b1a6ea88da0017a0e77db139a54618986e9a2489bee24af9fe596de9daac498c in the local lfs store
   checked 5 changesets with 10 changes to 4 files
@@ -987,7 +987,7 @@
 Accessing a corrupt file will complain
 
   $ hg --cwd fromcorrupt2 cat -r 0 large
-  abort: integrity check failed on data/large:0
+  abort: integrity check failed on large:0
   [50]
 
 lfs -> normal -> lfs round trip conversions are possible.  The 'none()'
--- a/tests/test-narrow-exchange.t	Sat Nov 19 16:14:20 2022 +0100
+++ b/tests/test-narrow-exchange.t	Sat Nov 19 20:40:47 2022 +0100
@@ -218,8 +218,8 @@
   remote: adding manifests
   remote: adding file changes
   remote: added 1 changesets with 0 changes to 0 files (no-lfs-on !)
-  remote: error: pretxnchangegroup.lfs hook raised an exception: data/inside2/f@f59b4e0218355383d2789196f1092abcf2262b0c: no match found (lfs-on !)
+  remote: error: pretxnchangegroup.lfs hook raised an exception: inside2/f@f59b4e0218355383d2789196f1092abcf2262b0c: no match found (lfs-on !)
   remote: transaction abort! (lfs-on !)
   remote: rollback completed (lfs-on !)
-  remote: abort: data/inside2/f@f59b4e0218355383d2789196f1092abcf2262b0c: no match found (lfs-on !)
+  remote: abort: inside2/f@f59b4e0218355383d2789196f1092abcf2262b0c: no match found (lfs-on !)
   abort: stream ended unexpectedly (got 0 bytes, expected 4) (lfs-on !)
--- a/tests/test-pull-bundle.t	Sat Nov 19 16:14:20 2022 +0100
+++ b/tests/test-pull-bundle.t	Sat Nov 19 20:40:47 2022 +0100
@@ -33,8 +33,6 @@
 
   $ cd repo
   $ cat <<EOF > .hg/hgrc
-  > [server]
-  > pullbundle = True
   > [experimental]
   > evolution = True
   > [extensions]
--- a/tests/test-shelve.t	Sat Nov 19 16:14:20 2022 +0100
+++ b/tests/test-shelve.t	Sat Nov 19 20:40:47 2022 +0100
@@ -1600,6 +1600,7 @@
   $ rm -r .hg/shelve*
 
 #if phasebased
+  $ cp $HGRCPATH $TESTTMP/hgrc-saved
   $ cat <<EOF >> $HGRCPATH
   > [shelve]
   > store = strip
@@ -1628,3 +1629,32 @@
 #if stripbased
   $ hg log --hidden --template '{user}\n'
 #endif
+
+clean up
+
+#if phasebased
+  $ mv $TESTTMP/hgrc-saved $HGRCPATH
+#endif
+
+changed files should be reachable in all shelves
+
+create an extension that emits changed files
+
+  $ cat > shelve-changed-files.py << EOF
+  > """Command to emit changed files for a shelf"""
+  > 
+  > from mercurial import registrar, shelve
+  > 
+  > cmdtable = {}
+  > command = registrar.command(cmdtable)
+  > 
+  > 
+  > @command(b'shelve-changed-files')
+  > def shelve_changed_files(ui, repo, name):
+  >     shelf = shelve.ShelfDir(repo).get(name)
+  >     for file in shelf.changed_files(ui, repo):
+  >         ui.write(file + b'\n')
+  > EOF
+
+  $ hg --config extensions.shelve-changed-files=shelve-changed-files.py shelve-changed-files default
+  somefile.py
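
For readers who want to poke at the same API outside a test, a hypothetical standalone sketch using the `ShelfDir`/`changed_files` interface the extension above exercises; the repository path and shelf name are placeholders, Mercurial's internal API takes bytes throughout and comes with no stability guarantee:

  from mercurial import hg, shelve
  from mercurial import ui as uimod

  def print_changed_files(repopath, name):
      # Same calls as the shelve-changed-files extension, minus registrar.
      ui = uimod.ui.load()
      repo = hg.repository(ui, repopath)
      shelf = shelve.ShelfDir(repo).get(name)
      for f in shelf.changed_files(ui, repo):
          ui.write(f + b'\n')

  print_changed_files(b'/path/to/repo', b'default')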
--- a/tests/test-sparse-revlog.t	Sat Nov 19 16:14:20 2022 +0100
+++ b/tests/test-sparse-revlog.t	Sat Nov 19 20:40:47 2022 +0100
@@ -159,7 +159,7 @@
      4971    4970      -1       3        5     4930    snap      19179     346472     427596   1.23414  15994877  15567281   36.40652     427596     179288   1.00000        5
   $ hg debug-delta-find SPARSE-REVLOG-TEST-FILE 4971
   DBG-DELTAS-SEARCH: SEARCH rev=4971
-  DBG-DELTAS-SEARCH: ROUND #1 - 2 candidates - search-down
+  DBG-DELTAS-SEARCH: ROUND #1 - 1 candidates - search-down
   DBG-DELTAS-SEARCH:   CANDIDATE: rev=4962
   DBG-DELTAS-SEARCH:     type=snapshot-4
   DBG-DELTAS-SEARCH:     size=18296
@@ -167,11 +167,43 @@
   DBG-DELTAS-SEARCH:     uncompressed-delta-size=30377
   DBG-DELTAS-SEARCH:     delta-search-time=* (glob)
   DBG-DELTAS-SEARCH:     DELTA: length=16872 (BAD)
-  DBG-DELTAS-SEARCH:   CANDIDATE: rev=4971
+  DBG-DELTAS-SEARCH: ROUND #2 - 1 candidates - search-down
+  DBG-DELTAS-SEARCH:   CANDIDATE: rev=4930
+  DBG-DELTAS-SEARCH:     type=snapshot-3
+  DBG-DELTAS-SEARCH:     size=39228
+  DBG-DELTAS-SEARCH:     base=4799
+  DBG-DELTAS-SEARCH:     uncompressed-delta-size=33050
+  DBG-DELTAS-SEARCH:     delta-search-time=* (glob)
+  DBG-DELTAS-SEARCH:     DELTA: length=19179 (GOOD)
+  DBG-DELTAS-SEARCH: ROUND #3 - 1 candidates - refine-down
+  DBG-DELTAS-SEARCH:   CONTENDER: rev=4930 - length=19179
+  DBG-DELTAS-SEARCH:   CANDIDATE: rev=4799
+  DBG-DELTAS-SEARCH:     type=snapshot-2
+  DBG-DELTAS-SEARCH:     size=50213
+  DBG-DELTAS-SEARCH:     base=4623
+  DBG-DELTAS-SEARCH:     uncompressed-delta-size=82661
+  DBG-DELTAS-SEARCH:     delta-search-time=* (glob)
+  DBG-DELTAS-SEARCH:     DELTA: length=49132 (BAD)
+  DBG-DELTAS: FILELOG:SPARSE-REVLOG-TEST-FILE: rev=4971: delta-base=4930 is-cached=0 - search-rounds=3 try-count=3 - delta-type=snapshot snap-depth=4 - p1-chain-length=15 p2-chain-length=-1 - duration=* (glob)
+
+  $ cat << EOF >>.hg/hgrc
+  > [storage]
+  > revlog.optimize-delta-parent-choice = no
+  > revlog.reuse-external-delta = yes
+  > EOF
+
+  $ hg debug-delta-find SPARSE-REVLOG-TEST-FILE 4971 --quiet
+  DBG-DELTAS: FILELOG:SPARSE-REVLOG-TEST-FILE: rev=4971: delta-base=4930 is-cached=0 - search-rounds=3 try-count=3 - delta-type=snapshot snap-depth=4 - p1-chain-length=15 p2-chain-length=-1 - duration=* (glob)
+  $ hg debug-delta-find SPARSE-REVLOG-TEST-FILE 4971 --source full
+  DBG-DELTAS-SEARCH: SEARCH rev=4971
+  DBG-DELTAS-SEARCH: ROUND #1 - 1 candidates - search-down
+  DBG-DELTAS-SEARCH:   CANDIDATE: rev=4962
   DBG-DELTAS-SEARCH:     type=snapshot-4
-  DBG-DELTAS-SEARCH:     size=19179
+  DBG-DELTAS-SEARCH:     size=18296
   DBG-DELTAS-SEARCH:     base=4930
-  DBG-DELTAS-SEARCH:     TOO-HIGH
+  DBG-DELTAS-SEARCH:     uncompressed-delta-size=30377
+  DBG-DELTAS-SEARCH:     delta-search-time=* (glob)
+  DBG-DELTAS-SEARCH:     DELTA: length=16872 (BAD)
   DBG-DELTAS-SEARCH: ROUND #2 - 1 candidates - search-down
   DBG-DELTAS-SEARCH:   CANDIDATE: rev=4930
   DBG-DELTAS-SEARCH:     type=snapshot-3
@@ -189,6 +221,101 @@
   DBG-DELTAS-SEARCH:     uncompressed-delta-size=82661
   DBG-DELTAS-SEARCH:     delta-search-time=* (glob)
   DBG-DELTAS-SEARCH:     DELTA: length=49132 (BAD)
-  DBG-DELTAS: FILELOG:SPARSE-REVLOG-TEST-FILE: rev=4971: search-rounds=3 try-count=3 - delta-type=snapshot snap-depth=4 - p1-chain-length=15 p2-chain-length=-1 - duration=* (glob)
+  DBG-DELTAS: FILELOG:SPARSE-REVLOG-TEST-FILE: rev=4971: delta-base=4930 is-cached=0 - search-rounds=3 try-count=3 - delta-type=snapshot snap-depth=4 - p1-chain-length=15 p2-chain-length=-1 - duration=* (glob)
+  $ hg debug-delta-find SPARSE-REVLOG-TEST-FILE 4971 --source storage
+  DBG-DELTAS-SEARCH: SEARCH rev=4971
+  DBG-DELTAS-SEARCH: ROUND #1 - 1 candidates - cached-delta
+  DBG-DELTAS-SEARCH:   CANDIDATE: rev=4930
+  DBG-DELTAS-SEARCH:     type=snapshot-3
+  DBG-DELTAS-SEARCH:     size=39228
+  DBG-DELTAS-SEARCH:     base=4799
+  DBG-DELTAS-SEARCH:     uncompressed-delta-size=33050
+  DBG-DELTAS-SEARCH:     delta-search-time=* (glob)
+  DBG-DELTAS-SEARCH:     DELTA: length=19179 (GOOD)
+  DBG-DELTAS: FILELOG:SPARSE-REVLOG-TEST-FILE: rev=4971: delta-base=4930 is-cached=1 - search-rounds=1 try-count=1 - delta-type=snapshot snap-depth=4 - p1-chain-length=15 p2-chain-length=-1 - duration=* (glob)
+  $ hg debug-delta-find SPARSE-REVLOG-TEST-FILE 4971 --source p1
+  DBG-DELTAS-SEARCH: SEARCH rev=4971
+  DBG-DELTAS-SEARCH: ROUND #1 - 1 candidates - search-down
+  DBG-DELTAS-SEARCH:   CANDIDATE: rev=4962
+  DBG-DELTAS-SEARCH:     type=snapshot-4
+  DBG-DELTAS-SEARCH:     size=18296
+  DBG-DELTAS-SEARCH:     base=4930
+  DBG-DELTAS-SEARCH:     uncompressed-delta-size=30377
+  DBG-DELTAS-SEARCH:     delta-search-time=* (glob)
+  DBG-DELTAS-SEARCH:     DELTA: length=16872 (BAD)
+  DBG-DELTAS-SEARCH: ROUND #2 - 1 candidates - search-down
+  DBG-DELTAS-SEARCH:   CANDIDATE: rev=4930
+  DBG-DELTAS-SEARCH:     type=snapshot-3
+  DBG-DELTAS-SEARCH:     size=39228
+  DBG-DELTAS-SEARCH:     base=4799
+  DBG-DELTAS-SEARCH:     uncompressed-delta-size=33050
+  DBG-DELTAS-SEARCH:     delta-search-time=* (glob)
+  DBG-DELTAS-SEARCH:     DELTA: length=19179 (GOOD)
+  DBG-DELTAS-SEARCH: ROUND #3 - 1 candidates - refine-down
+  DBG-DELTAS-SEARCH:   CONTENDER: rev=4930 - length=19179
+  DBG-DELTAS-SEARCH:   CANDIDATE: rev=4799
+  DBG-DELTAS-SEARCH:     type=snapshot-2
+  DBG-DELTAS-SEARCH:     size=50213
+  DBG-DELTAS-SEARCH:     base=4623
+  DBG-DELTAS-SEARCH:     uncompressed-delta-size=82661
+  DBG-DELTAS-SEARCH:     delta-search-time=* (glob)
+  DBG-DELTAS-SEARCH:     DELTA: length=49132 (BAD)
+  DBG-DELTAS: FILELOG:SPARSE-REVLOG-TEST-FILE: rev=4971: delta-base=4930 is-cached=0 - search-rounds=3 try-count=3 - delta-type=snapshot snap-depth=4 - p1-chain-length=15 p2-chain-length=-1 - duration=* (glob)
+  $ hg debug-delta-find SPARSE-REVLOG-TEST-FILE 4971 --source p2
+  DBG-DELTAS-SEARCH: SEARCH rev=4971
+  DBG-DELTAS-SEARCH: ROUND #1 - 1 candidates - search-down
+  DBG-DELTAS-SEARCH:   CANDIDATE: rev=4962
+  DBG-DELTAS-SEARCH:     type=snapshot-4
+  DBG-DELTAS-SEARCH:     size=18296
+  DBG-DELTAS-SEARCH:     base=4930
+  DBG-DELTAS-SEARCH:     uncompressed-delta-size=30377
+  DBG-DELTAS-SEARCH:     delta-search-time=* (glob)
+  DBG-DELTAS-SEARCH:     DELTA: length=16872 (BAD)
+  DBG-DELTAS-SEARCH: ROUND #2 - 1 candidates - search-down
+  DBG-DELTAS-SEARCH:   CANDIDATE: rev=4930
+  DBG-DELTAS-SEARCH:     type=snapshot-3
+  DBG-DELTAS-SEARCH:     size=39228
+  DBG-DELTAS-SEARCH:     base=4799
+  DBG-DELTAS-SEARCH:     uncompressed-delta-size=33050
+  DBG-DELTAS-SEARCH:     delta-search-time=* (glob)
+  DBG-DELTAS-SEARCH:     DELTA: length=19179 (GOOD)
+  DBG-DELTAS-SEARCH: ROUND #3 - 1 candidates - refine-down
+  DBG-DELTAS-SEARCH:   CONTENDER: rev=4930 - length=19179
+  DBG-DELTAS-SEARCH:   CANDIDATE: rev=4799
+  DBG-DELTAS-SEARCH:     type=snapshot-2
+  DBG-DELTAS-SEARCH:     size=50213
+  DBG-DELTAS-SEARCH:     base=4623
+  DBG-DELTAS-SEARCH:     uncompressed-delta-size=82661
+  DBG-DELTAS-SEARCH:     delta-search-time=* (glob)
+  DBG-DELTAS-SEARCH:     DELTA: length=49132 (BAD)
+  DBG-DELTAS: FILELOG:SPARSE-REVLOG-TEST-FILE: rev=4971: delta-base=4930 is-cached=0 - search-rounds=3 try-count=3 - delta-type=snapshot snap-depth=4 - p1-chain-length=15 p2-chain-length=-1 - duration=* (glob)
+  $ hg debug-delta-find SPARSE-REVLOG-TEST-FILE 4971 --source prev
+  DBG-DELTAS-SEARCH: SEARCH rev=4971
+  DBG-DELTAS-SEARCH: ROUND #1 - 1 candidates - search-down
+  DBG-DELTAS-SEARCH:   CANDIDATE: rev=4962
+  DBG-DELTAS-SEARCH:     type=snapshot-4
+  DBG-DELTAS-SEARCH:     size=18296
+  DBG-DELTAS-SEARCH:     base=4930
+  DBG-DELTAS-SEARCH:     uncompressed-delta-size=30377
+  DBG-DELTAS-SEARCH:     delta-search-time=* (glob)
+  DBG-DELTAS-SEARCH:     DELTA: length=16872 (BAD)
+  DBG-DELTAS-SEARCH: ROUND #2 - 1 candidates - search-down
+  DBG-DELTAS-SEARCH:   CANDIDATE: rev=4930
+  DBG-DELTAS-SEARCH:     type=snapshot-3
+  DBG-DELTAS-SEARCH:     size=39228
+  DBG-DELTAS-SEARCH:     base=4799
+  DBG-DELTAS-SEARCH:     uncompressed-delta-size=33050
+  DBG-DELTAS-SEARCH:     delta-search-time=* (glob)
+  DBG-DELTAS-SEARCH:     DELTA: length=19179 (GOOD)
+  DBG-DELTAS-SEARCH: ROUND #3 - 1 candidates - refine-down
+  DBG-DELTAS-SEARCH:   CONTENDER: rev=4930 - length=19179
+  DBG-DELTAS-SEARCH:   CANDIDATE: rev=4799
+  DBG-DELTAS-SEARCH:     type=snapshot-2
+  DBG-DELTAS-SEARCH:     size=50213
+  DBG-DELTAS-SEARCH:     base=4623
+  DBG-DELTAS-SEARCH:     uncompressed-delta-size=82661
+  DBG-DELTAS-SEARCH:     delta-search-time=* (glob)
+  DBG-DELTAS-SEARCH:     DELTA: length=49132 (BAD)
+  DBG-DELTAS: FILELOG:SPARSE-REVLOG-TEST-FILE: rev=4971: delta-base=4930 is-cached=0 - search-rounds=3 try-count=3 - delta-type=snapshot snap-depth=4 - p1-chain-length=15 p2-chain-length=-1 - duration=* (glob)
 
   $ cd ..
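
Taken together, the five `--source` runs above show the point of the new flag: `storage` starts from the delta already recorded in the revlog (`is-cached=1`, a single `cached-delta` round), while `full`, `p1`, `p2`, and `prev` all rerun the same three-round search and converge on the same base (rev 4930). That behaviour hinges on the two options the test appends to `.hg/hgrc` earlier; for reference, the stanza is:

  [storage]
  revlog.optimize-delta-parent-choice = no
  revlog.reuse-external-delta = yes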
--- a/tests/test-ssh-bundle1.t	Sat Nov 19 16:14:20 2022 +0100
+++ b/tests/test-ssh-bundle1.t	Sat Nov 19 20:40:47 2022 +0100
@@ -293,9 +293,7 @@
   remote: adding manifests
   remote: adding file changes
   remote: added 1 changesets with 1 changes to 1 files (py3 !)
-  remote: added 1 changesets with 1 changes to 1 files (no-py3 no-chg !)
   remote: KABOOM
-  remote: added 1 changesets with 1 changes to 1 files (no-py3 chg !)
   $ hg -R ../remote heads
   changeset:   5:1383141674ec
   tag:         tip
@@ -463,9 +461,7 @@
   remote: adding manifests
   remote: adding file changes
   remote: added 1 changesets with 1 changes to 1 files (py3 !)
-  remote: added 1 changesets with 1 changes to 1 files (no-py3 no-chg !)
   remote: KABOOM
-  remote: added 1 changesets with 1 changes to 1 files (no-py3 chg !)
   local stdout
 
 debug output
--- a/tests/test-ssh.t	Sat Nov 19 16:14:20 2022 +0100
+++ b/tests/test-ssh.t	Sat Nov 19 20:40:47 2022 +0100
@@ -290,10 +290,8 @@
   remote: adding manifests
   remote: adding file changes
   remote: added 1 changesets with 1 changes to 1 files (py3 !)
-  remote: added 1 changesets with 1 changes to 1 files (no-py3 no-chg !)
   remote: KABOOM
   remote: KABOOM IN PROCESS
-  remote: added 1 changesets with 1 changes to 1 files (no-py3 chg !)
   $ hg -R ../remote heads
   changeset:   5:1383141674ec
   tag:         tip
@@ -515,10 +513,8 @@
   remote: adding manifests
   remote: adding file changes
   remote: added 1 changesets with 1 changes to 1 files (py3 !)
-  remote: added 1 changesets with 1 changes to 1 files (no-py3 no-chg !)
   remote: KABOOM
   remote: KABOOM IN PROCESS
-  remote: added 1 changesets with 1 changes to 1 files (no-py3 chg !)
   local stdout
 
 debug output
--- a/tests/test-verify.t	Sat Nov 19 16:14:20 2022 +0100
+++ b/tests/test-verify.t	Sat Nov 19 20:40:47 2022 +0100
@@ -297,7 +297,7 @@
   checking manifests
   crosschecking files in changesets and manifests
   checking files
-   a@1: broken revlog! (index data/a is corrupted)
+   a@1: broken revlog! (index a is corrupted)
   warning: orphan data file 'data/a.i'
   checked 2 changesets with 0 changes to 1 files
   1 warnings encountered!
@@ -350,7 +350,7 @@
   checking manifests
   crosschecking files in changesets and manifests
   checking files
-   base64@0: unpacking 794cee7777cb: integrity check failed on data/base64:0
+   base64@0: unpacking 794cee7777cb: integrity check failed on base64:0
   checked 1 changesets with 1 changes to 1 files
   1 integrity errors encountered!
   (first damaged changeset appears to be 0)