changeset 50694:a41eeb877d07

branching: merge with stable
author Pierre-Yves David <pierre-yves.david@octobus.net>
date Sun, 18 Jun 2023 00:09:39 +0200
parents b9a355763e76 e7a3611181aa
children 1c31b343e514
files hgext/clonebundles.py mercurial/bundlecaches.py mercurial/dirstatemap.py mercurial/revlog.py mercurial/revlogutils/deltas.py mercurial/transaction.py mercurial/wireprotov1server.py tests/test-clonebundles.t tests/test-contrib-perf.t tests/test-dirstate-version-fallback.t tests/test-transaction-rollback-on-revlog-split.t tests/test-upgrade-repo.t
diffstat 20 files changed, 651 insertions(+), 264 deletions(-)
--- a/.hgsigs	Tue Jun 20 02:36:52 2023 +0200
+++ b/.hgsigs	Sun Jun 18 00:09:39 2023 +0200
@@ -244,3 +244,4 @@
 83ea6ce48b4fd09fb79c4e34cc5750c805699a53 0 iQHNBAABCgA3FiEEH2b4zfZU6QXBHaBhoR4BzQ4F2VYFAmQ3860ZHGFscGhhcmVAcmFwaGFlbGdvbWVzLmRldgAKCRChHgHNDgXZVk3gDACIIcQxKfis/r5UNj7SqyFhQxUCo8Njp7zdLFv3CSWFdFiOpQONI7Byt9KjwedUkUK9tqdb03V7W32ZSBTrNLM11uHY9E5Aknjoza4m+aIGbamEVRWIIHXjUZEMKS9QcY8ElbDvvPu/xdZjyTEjNNiuByUpPUcJXVzpKrHm8Wy3GWDliYBuu68mzFIX3JnZKscdK4EjCAfDysSwwfLeBMpd0Rk+SgwjDwyPWAAyU3yDPNmlUn8qTGHjXxU3vsHCXpoJWkfKmQ9n++23WEpM9vC8zx2TIy70+gFUvKG77+Ucv+djQxHRv0L6L5qUSBJukD3R3nml1xu6pUeioBHepRmTUWgPbHa/gQ+J2Pw+rPCK51x0EeT0SJjxUR2mmMLbk8N2efM35lEjF/sNxotTq17Sv9bjwXhue6BURxpQDEyOuSaS0IlF56ndXtE/4FX3H6zgU1+3jw5iBWajr1E04QjPlSOJO7nIKYM9Jq3VpHR7MiFwfT46pJEfw9pNgZX2b8o=
 f952be90b0514a576dcc8bbe758ce3847faba9bb 0 iQHNBAABCgA3FiEEH2b4zfZU6QXBHaBhoR4BzQ4F2VYFAmQ+ZaoZHGFscGhhcmVAcmFwaGFlbGdvbWVzLmRldgAKCRChHgHNDgXZVuDOC/90SQ3UjXmByAaT5qr4bd3sVGt12lXlaKdyDxY0JMSKyHMUnb4YltHzNFxiUku10aRsRvJt5denTGeaOvAYbbXE7nbZJuyLD9rvfFTCe6EVx7kymCBwSbobKMzD79QHAFU7xu036gs7rmwyc++F4JF4IOrT4bjSYY5/8g0uLAHUexnn49QfQ5OYr325qShDFLjUZ7aH0yxA/gEr2MfXQmbIEc0eJJQXD1EhDkpSJFNIKzwWMOT1AhFk8kTlDqqbPnW7sDxTW+v/gGjAFYLHi8GMLEyrBQdEqytN7Pl9XOPXt/8RaDfIzYfl0OHxh2l1Y1MuH/PHrWO4PBPsr82QI2mxufYKuujpFMPr4PxXXl2g31OKhI8jJj+bHr62kGIOJCxZ8EPPGKXPGyoOuIVa0MeHmXxjb9kkj0SALjlaUvZrSENzRTsQXDNHQa+iDaITKLmItvLsaTEz9DJzGmI20shtJYcx4lqHsTgtMZfOtR5tmUknAFUUBZfUwvwULD4LmNI=
 fc445f8abcf90b33db7c463816a1b3560681767f 0 iQHNBAABCgA3FiEEH2b4zfZU6QXBHaBhoR4BzQ4F2VYFAmRTok8ZHGFscGhhcmVAcmFwaGFlbGdvbWVzLmRldgAKCRChHgHNDgXZVpZ5DACBv33k//ovzSbyH5/q+Xhk3TqNRY8IDOjoEhvDyu0bJHsvygOGXLUtHpQPth1RA4/c+AVNJrUeFvT02sLqqP2d9oSA9HEAYpOuzwgr1A+1o+Q2GyfD4cElP6KfiEe8oyFVOB0rfBgWNei1C0nnrhChQr5dOPR63uAFhHzkEsgsTFS7ONxZ1DHbe7gRV8OMMf1MatAtRzRexQJCqyNv7WodQdrKtjHqPKtlWl20dbwTHhzeiZbtjiTe0CVXVsOqnA1DQkO/IaiKQrn3zWdGY5ABbqQ1K0ceLcej4NFOeLo9ZrShndU3BuFUa9Dq9bnPYOI9wMqGoDh/GdTZkZEzBy5PTokY3AJHblbub49pi8YTenFcPdtd/v71AaNi3TKa45ZNhYVkPmRETYweHkLs3CIrSyeiBwU4RGuQZVD/GujAQB5yhk0w+LPMzBsHruD4vsgXwIraCzQIIJTjgyxKuAJGdGNUFYyxEpUkgz5G6MFrBKe8HO69y3Pm/qDNZ2maV8k=
+da372c745e0f053bb7a64e74cccd15810d96341d 0 iQHNBAABCgA3FiEEH2b4zfZU6QXBHaBhoR4BzQ4F2VYFAmSB7WkZHGFscGhhcmVAcmFwaGFlbGdvbWVzLmRldgAKCRChHgHNDgXZVoy+C/4zwO+Wxc3wr0aEzjVqAss7FuGS5e66H+0T3WzVgKIRMqiiOmUmmiNf+XloXlX4TOwoh9j9GNEpoZfV6TSwFSqV0LALaVIRRwrkJBDhnqw4eNBZbK5aBWNa2/21dkHecxF4KG3ai9kLwy2mtHxkDIy8T2LPvdx8pfNcYT4PZ19x2itqZLouBJqiZYehsqeMLNF2vRqkq+rQ+D2sFGLljgPo0JlpkOZ4IL7S/cqTOBG1sQ6KJK+hAE1kF1lhvK796VhKKXVnWVgqJLyg7ZI6168gxeFv5cyCtb+FUXJJ/5SOkxaCKJf3mg3DIYi3G7xjwB5CfUGW8A2qexgEjXeV42Mu7/Mkmn/aeTdL0UcRK3oBVHJwqt/fJlGFqVWt4/9g9KW5mJvTDQYBo/zjLyvKFEbnSLzhEP+9SvthCrtX0UYkKxOGi2M2Z7e9wgBB0gY8a36kA739lkNu6r3vH/FVh0aPTMWukLToELS90WgfViNr16lDnCeDjMgg97OKxWdOW6U=
--- a/.hgtags	Tue Jun 20 02:36:52 2023 +0200
+++ b/.hgtags	Sun Jun 18 00:09:39 2023 +0200
@@ -260,3 +260,4 @@
 83ea6ce48b4fd09fb79c4e34cc5750c805699a53 6.4.1
 f952be90b0514a576dcc8bbe758ce3847faba9bb 6.4.2
 fc445f8abcf90b33db7c463816a1b3560681767f 6.4.3
+da372c745e0f053bb7a64e74cccd15810d96341d 6.4.4
--- a/hgext/clonebundles.py	Tue Jun 20 02:36:52 2023 +0200
+++ b/hgext/clonebundles.py	Sun Jun 18 00:09:39 2023 +0200
@@ -79,8 +79,10 @@
 Bundle files can be generated with the :hg:`bundle` command. Typically
 :hg:`bundle --all` is used to produce a bundle of the entire repository.
 
-:hg:`debugcreatestreamclonebundle` can be used to produce a special
-*streaming clonebundle*. These are bundle files that are extremely efficient
+The bundlespec option `stream` (see :hg:`help bundlespec`)
+can be used to produce a special *streaming clonebundle*, typically using
+:hg:`bundle --all --type="none-streamv2"`.
+These are bundle files that are extremely efficient
 to produce and consume (read: fast). However, they are larger than
 traditional bundle formats and require that clients support the exact set
 of repository data store formats in use by the repository that created them.
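
A minimal sketch of the workflow this help text describes, driving the ``hg`` executable from Python; the repository location ``repo`` and the output file name ``full.hg`` are illustrative assumptions:

    import subprocess

    # Produce a streaming clonebundle of the entire repository, per the
    # help text above; assumes an `hg` binary on PATH and a repo at ./repo.
    subprocess.run(
        ["hg", "--cwd", "repo", "bundle", "--all",
         "--type", "none-streamv2", "../full.hg"],
        check=True,
    )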
--- a/mercurial/bundlecaches.py	Tue Jun 20 02:36:52 2023 +0200
+++ b/mercurial/bundlecaches.py	Sun Jun 18 00:09:39 2023 +0200
@@ -52,6 +52,14 @@
     return url
 
 
+SUPPORTED_CLONEBUNDLE_SCHEMES = [
+    b"http://",
+    b"https://",
+    b"largefile://",
+    CLONEBUNDLESCHEME,
+]
+
+
 @attr.s
 class bundlespec:
     compression = attr.ib()
@@ -384,7 +392,9 @@
     return False
 
 
-def filterclonebundleentries(repo, entries, streamclonerequested=False):
+def filterclonebundleentries(
+    repo, entries, streamclonerequested=False, pullbundles=False
+):
     """Remove incompatible clone bundle manifest entries.
 
     Accepts a list of entries parsed with ``parseclonebundlesmanifest``
@@ -396,6 +406,16 @@
     """
     newentries = []
     for entry in entries:
+        url = entry.get(b'URL')
+        if not pullbundles and not any(
+            [url.startswith(scheme) for scheme in SUPPORTED_CLONEBUNDLE_SCHEMES]
+        ):
+            repo.ui.debug(
+                b'filtering %s because not a supported clonebundle scheme\n'
+                % url
+            )
+            continue
+
         spec = entry.get(b'BUNDLESPEC')
         if spec:
             try:
@@ -405,8 +425,7 @@
                 # entries.
                 if streamclonerequested and not isstreamclonespec(bundlespec):
                     repo.ui.debug(
-                        b'filtering %s because not a stream clone\n'
-                        % entry[b'URL']
+                        b'filtering %s because not a stream clone\n' % url
                     )
                     continue
 
@@ -416,7 +435,7 @@
             except error.UnsupportedBundleSpecification as e:
                 repo.ui.debug(
                     b'filtering %s because unsupported bundle '
-                    b'spec: %s\n' % (entry[b'URL'], stringutil.forcebytestr(e))
+                    b'spec: %s\n' % (url, stringutil.forcebytestr(e))
                 )
                 continue
         # If we don't have a spec and requested a stream clone, we don't know
@@ -424,14 +443,12 @@
         elif streamclonerequested:
             repo.ui.debug(
                 b'filtering %s because cannot determine if a stream '
-                b'clone bundle\n' % entry[b'URL']
+                b'clone bundle\n' % url
             )
             continue
 
         if b'REQUIRESNI' in entry and not sslutil.hassni:
-            repo.ui.debug(
-                b'filtering %s because SNI not supported\n' % entry[b'URL']
-            )
+            repo.ui.debug(b'filtering %s because SNI not supported\n' % url)
             continue
 
         if b'REQUIREDRAM' in entry:
@@ -439,15 +456,14 @@
                 requiredram = util.sizetoint(entry[b'REQUIREDRAM'])
             except error.ParseError:
                 repo.ui.debug(
-                    b'filtering %s due to a bad REQUIREDRAM attribute\n'
-                    % entry[b'URL']
+                    b'filtering %s due to a bad REQUIREDRAM attribute\n' % url
                 )
                 continue
             actualram = repo.ui.estimatememory()
             if actualram is not None and actualram * 0.66 < requiredram:
                 repo.ui.debug(
                     b'filtering %s as it needs more than 2/3 of system memory\n'
-                    % entry[b'URL']
+                    % url
                 )
                 continue
 
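
A condensed sketch (not Mercurial's actual API) of the scheme check introduced above: clone bundle entries whose URL does not start with a supported scheme are filtered out, while pull bundle manifests bypass the check (the wireprotov1server call site below passes ``pullbundles=True``):

    SUPPORTED_CLONEBUNDLE_SCHEMES = [b"http://", b"https://", b"largefile://"]

    def keep_entry(url, pullbundles=False):
        # pull bundle entries are exempt, matching the pullbundles=True call site
        if pullbundles:
            return True
        return any(url.startswith(s) for s in SUPPORTED_CLONEBUNDLE_SCHEMES)

    assert keep_entry(b"https://example.com/bundle.hg")
    assert not keep_entry(b"weirdscheme://does.not.exist/bundle.hg")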
--- a/mercurial/dirstatemap.py	Tue Jun 20 02:36:52 2023 +0200
+++ b/mercurial/dirstatemap.py	Sun Jun 18 00:09:39 2023 +0200
@@ -4,6 +4,7 @@
 # GNU General Public License version 2 or any later version.
 
 
+import struct
 from .i18n import _
 
 from . import (
@@ -151,9 +152,15 @@
                     b'dirstate only has a docket in v2 format'
                 )
             self._set_identity()
-            self._docket = docketmod.DirstateDocket.parse(
-                self._readdirstatefile(), self._nodeconstants
-            )
+            try:
+                self._docket = docketmod.DirstateDocket.parse(
+                    self._readdirstatefile(), self._nodeconstants
+                )
+            except struct.error:
+                self._ui.debug(b"failed to read dirstate-v2 data")
+                raise error.CorruptedDirstate(
+                    b"failed to read dirstate-v2 data"
+                )
         return self._docket
 
     def _read_v2_data(self):
@@ -176,11 +183,23 @@
         return self._opener.read(self.docket.data_filename())
 
     def write_v2_no_append(self, tr, st, meta, packed):
-        old_docket = self.docket
+        try:
+            old_docket = self.docket
+        except error.CorruptedDirstate:
+            # This means we've identified a dirstate-v1 file on-disk when we
+            # were expecting a dirstate-v2 docket. We've managed to recover
+            # from that unexpected situation, and now we want to write back a
+            # dirstate-v2 file to make the on-disk situation right again.
+            #
+            # This shouldn't be triggered since `self.docket` is cached and
+            # we would have called parents() or read() first, but it's here
+            # just in case.
+            old_docket = None
+
         new_docket = docketmod.DirstateDocket.with_new_uuid(
             self.parents(), len(packed), meta
         )
-        if old_docket.uuid == new_docket.uuid:
+        if old_docket is not None and old_docket.uuid == new_docket.uuid:
             raise error.ProgrammingError(b'dirstate docket name collision')
         data_filename = new_docket.data_filename()
         self._opener.write(data_filename, packed)
@@ -194,7 +213,7 @@
         st.close()
         # Remove the old data file after the new docket pointing to
         # the new data file was written.
-        if old_docket.uuid:
+        if old_docket is not None and old_docket.uuid:
             data_filename = old_docket.data_filename()
             if tr is not None:
                 tr.addbackup(data_filename, location=b'plain')
@@ -211,28 +230,40 @@
     def parents(self):
         if not self._parents:
             if self._use_dirstate_v2:
-                self._parents = self.docket.parents
+                try:
+                    self.docket
+                except error.CorruptedDirstate as e:
+                    # fall back to dirstate-v1 if we fail to read v2
+                    self._v1_parents(e)
+                else:
+                    self._parents = self.docket.parents
             else:
-                read_len = self._nodelen * 2
-                st = self._readdirstatefile(read_len)
-                l = len(st)
-                if l == read_len:
-                    self._parents = (
-                        st[: self._nodelen],
-                        st[self._nodelen : 2 * self._nodelen],
-                    )
-                elif l == 0:
-                    self._parents = (
-                        self._nodeconstants.nullid,
-                        self._nodeconstants.nullid,
-                    )
-                else:
-                    raise error.Abort(
-                        _(b'working directory state appears damaged!')
-                    )
+                self._v1_parents()
 
         return self._parents
 
+    def _v1_parents(self, from_v2_exception=None):
+        read_len = self._nodelen * 2
+        st = self._readdirstatefile(read_len)
+        l = len(st)
+        if l == read_len:
+            self._parents = (
+                st[: self._nodelen],
+                st[self._nodelen : 2 * self._nodelen],
+            )
+        elif l == 0:
+            self._parents = (
+                self._nodeconstants.nullid,
+                self._nodeconstants.nullid,
+            )
+        else:
+            hint = None
+            if from_v2_exception is not None:
+                hint = _(b"falling back to dirstate-v1 from v2 also failed")
+            raise error.Abort(
+                _(b'working directory state appears damaged!'), hint
+            )
+
 
 class dirstatemap(_dirstatemapcommon):
     """Map encapsulating the dirstate's contents.
@@ -330,11 +361,17 @@
     def read(self):
         testing.wait_on_cfg(self._ui, b'dirstate.pre-read-file')
         if self._use_dirstate_v2:
-
-            if not self.docket.uuid:
-                return
-            testing.wait_on_cfg(self._ui, b'dirstate.post-docket-read-file')
-            st = self._read_v2_data()
+            try:
+                self.docket
+            except error.CorruptedDirstate:
+                # fall back to dirstate-v1 if we fail to read v2
+                self._set_identity()
+                st = self._readdirstatefile()
+            else:
+                if not self.docket.uuid:
+                    return
+                testing.wait_on_cfg(self._ui, b'dirstate.post-docket-read-file')
+                st = self._read_v2_data()
         else:
             self._set_identity()
             st = self._readdirstatefile()
@@ -365,10 +402,17 @@
         #
         # (we cannot decorate the function directly since it is in a C module)
         if self._use_dirstate_v2:
-            p = self.docket.parents
-            meta = self.docket.tree_metadata
-            parse_dirstate = util.nogc(v2.parse_dirstate)
-            parse_dirstate(self._map, self.copymap, st, meta)
+            try:
+                self.docket
+            except error.CorruptedDirstate:
+                # fall back to dirstate-v1 if we fail to parse v2
+                parse_dirstate = util.nogc(parsers.parse_dirstate)
+                p = parse_dirstate(self._map, self.copymap, st)
+            else:
+                p = self.docket.parents
+                meta = self.docket.tree_metadata
+                parse_dirstate = util.nogc(v2.parse_dirstate)
+                parse_dirstate(self._map, self.copymap, st, meta)
         else:
             parse_dirstate = util.nogc(parsers.parse_dirstate)
             p = parse_dirstate(self._map, self.copymap, st)
@@ -597,38 +641,37 @@
 
             testing.wait_on_cfg(self._ui, b'dirstate.pre-read-file')
             if self._use_dirstate_v2:
-                self.docket  # load the data if needed
-                inode = (
-                    self.identity.stat.st_ino
-                    if self.identity is not None
-                    and self.identity.stat is not None
-                    else None
-                )
-                testing.wait_on_cfg(self._ui, b'dirstate.post-docket-read-file')
-                if not self.docket.uuid:
-                    data = b''
-                    self._map = rustmod.DirstateMap.new_empty()
+                try:
+                    self.docket
+                except error.CorruptedDirstate as e:
+                    # fall back to dirstate-v1 if we fail to read v2
+                    parents = self._v1_map(e)
                 else:
-                    data = self._read_v2_data()
-                    self._map = rustmod.DirstateMap.new_v2(
-                        data,
-                        self.docket.data_size,
-                        self.docket.tree_metadata,
-                        self.docket.uuid,
-                        inode,
+                    parents = self.docket.parents
+                    inode = (
+                        self.identity.stat.st_ino
+                        if self.identity is not None
+                        and self.identity.stat is not None
+                        else None
+                    )
+                    testing.wait_on_cfg(
+                        self._ui, b'dirstate.post-docket-read-file'
                     )
-                parents = self.docket.parents
+                    if not self.docket.uuid:
+                        data = b''
+                        self._map = rustmod.DirstateMap.new_empty()
+                    else:
+                        data = self._read_v2_data()
+                        self._map = rustmod.DirstateMap.new_v2(
+                            data,
+                            self.docket.data_size,
+                            self.docket.tree_metadata,
+                            self.docket.uuid,
+                            inode,
+                        )
+                    parents = self.docket.parents
             else:
-                self._set_identity()
-                inode = (
-                    self.identity.stat.st_ino
-                    if self.identity is not None
-                    and self.identity.stat is not None
-                    else None
-                )
-                self._map, parents = rustmod.DirstateMap.new_v1(
-                    self._readdirstatefile(), inode
-                )
+                parents = self._v1_map()
 
             if parents and not self._dirtyparents:
                 self.setparents(*parents)
@@ -638,6 +681,23 @@
             self.get = self._map.get
             return self._map
 
+        def _v1_map(self, from_v2_exception=None):
+            self._set_identity()
+            inode = (
+                self.identity.stat.st_ino
+                if self.identity is not None and self.identity.stat is not None
+                else None
+            )
+            try:
+                self._map, parents = rustmod.DirstateMap.new_v1(
+                    self._readdirstatefile(), inode
+                )
+            except OSError as e:
+                if from_v2_exception is not None:
+                    raise e from from_v2_exception
+                raise
+            return parents
+
         @property
         def copymap(self):
             return self._map.copymap()
@@ -696,9 +756,15 @@
                 self._dirtyparents = False
                 return
 
+            write_mode = self._write_mode
+            try:
+                docket = self.docket
+            except error.CorruptedDirstate:
+                # fall back to dirstate-v1 if we fail to parse v2
+                docket = None
+
             # We can only append to an existing data file if there is one
-            write_mode = self._write_mode
-            if self.docket.uuid is None:
+            if docket is None or docket.uuid is None:
                 write_mode = WRITE_MODE_FORCE_NEW
             packed, meta, append = self._map.write_v2(write_mode)
             if append:
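
A self-contained sketch of the fallback pattern these handlers share: parsing the dirstate-v2 docket may raise CorruptedDirstate (for instance when an interrupted upgrade left a v1 file on disk), and the caller then retries with the v1 reader instead of aborting. The magic-header check is a hypothetical stand-in for DirstateDocket.parse():

    class CorruptedDirstate(Exception):
        pass

    def parse_v2_docket(data):
        # hypothetical magic check standing in for DirstateDocket.parse()
        if not data.startswith(b"dirstate-v2\n"):
            raise CorruptedDirstate(b"failed to read dirstate-v2 data")
        return data

    def read_dirstate(data):
        try:
            return ("v2", parse_v2_docket(data))
        except CorruptedDirstate:
            return ("v1", data)  # fall back to dirstate-v1 parsing

    assert read_dirstate(b"not a docket")[0] == "v1"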
--- a/mercurial/error.py	Tue Jun 20 02:36:52 2023 +0200
+++ b/mercurial/error.py	Sun Jun 18 00:09:39 2023 +0200
@@ -650,6 +650,13 @@
     __bytes__ = _tobytes
 
 
+class CorruptedDirstate(Exception):
+    """error raised the dirstate appears corrupted on-disk. It may be due to
+    a dirstate version mismatch (i.e. expecting v2 and finding v1 on disk)."""
+
+    __bytes__ = _tobytes
+
+
 class PeerTransportError(Abort):
     """Transport-level I/O error when communicating with a peer repo."""
 
--- a/mercurial/helptext/bundlespec.txt	Tue Jun 20 02:36:52 2023 +0200
+++ b/mercurial/helptext/bundlespec.txt	Sun Jun 18 00:09:39 2023 +0200
@@ -67,6 +67,10 @@
 
 .. bundlecompressionmarker
 
+The compression engines can be combined with the ``stream`` bundle types
+(as in ``none-streamv2``) to create a streaming bundle. These are bundles
+that are extremely efficient to produce and consume, but do not have
+guaranteed compatibility with older clients.
+
 Available Options
 =================
 
@@ -89,7 +93,6 @@
 revbranchcache
     Include the "tags-fnodes" cache inside the bundle.
 
-
 tagsfnodescache
     Include the "tags-fnodes" cache inside the bundle.
 
@@ -109,3 +112,10 @@
 
 ``zstd-v1``
     This errors because ``zstd`` is not supported for ``v1`` types.
+
+``none-streamv2``
+    Produce a ``v2`` streaming bundle with no compression.
+
+``zstd-v2;obsolescence=true;phases=true``
+    Produce a ``v2`` bundle with zstandard compression which includes
+    obsolescence markers and phases.
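
A toy parser, not Mercurial's, for the ``<compression>-<type>;<param>=<value>`` shape used by the examples above:

    def parse_bundlespec(spec):
        # split off ";"-separated parameters, then compression from type
        base, _, rest = spec.partition(";")
        compression, _, version = base.partition("-")
        params = dict(p.split("=", 1) for p in rest.split(";") if p)
        return compression, version, params

    assert parse_bundlespec("none-streamv2") == ("none", "streamv2", {})
    assert parse_bundlespec("zstd-v2;obsolescence=true;phases=true") == (
        "zstd", "v2", {"obsolescence": "true", "phases": "true"}
    )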
--- a/mercurial/revlog.py	Tue Jun 20 02:36:52 2023 +0200
+++ b/mercurial/revlog.py	Sun Jun 18 00:09:39 2023 +0200
@@ -615,8 +615,8 @@
             entry_point = b'%s.i.%s' % (self.radix, self.postfix)
         elif self._trypending and self.opener.exists(b'%s.i.a' % self.radix):
             entry_point = b'%s.i.a' % self.radix
-        elif self._try_split and self.opener.exists(b'%s.i.s' % self.radix):
-            entry_point = b'%s.i.s' % self.radix
+        elif self._try_split and self.opener.exists(self._split_index_file):
+            entry_point = self._split_index_file
         else:
             entry_point = b'%s.i' % self.radix
 
@@ -2125,6 +2125,22 @@
                 raise error.CensoredNodeError(self.display_id, node, text)
             raise
 
+    @property
+    def _split_index_file(self):
+        """the path where to expect the index of an ongoing splitting operation
+
+        The file will only exist if a splitting operation is in progress, but
+        it is always expected at the same location."""
+        parts = os.path.split(self.radix)
+        if parts[0]:
+            # adds a '-s' suffix to the ``data/`` or ``meta/`` base directory
+            head = parts[0] + b'-s'
+            return os.path.join(head, *parts[1:])
+        else:
+            # the revlog is stored at the root of the store (changelog or
+            # manifest), no risk of collision.
+            return self.radix + b'.i.s'
+
     def _enforceinlinesize(self, tr, side_write=True):
         """Check if the revlog is too big for inline and convert if so.
 
@@ -2161,7 +2177,7 @@
             # this code
         if side_write:
             old_index_file_path = self._indexfile
-            new_index_file_path = self._indexfile + b'.s'
+            new_index_file_path = self._split_index_file
             opener = self.opener
             weak_self = weakref.ref(self)
 
--- a/mercurial/revlogutils/deltas.py	Tue Jun 20 02:36:52 2023 +0200
+++ b/mercurial/revlogutils/deltas.py	Sun Jun 18 00:09:39 2023 +0200
@@ -1087,10 +1087,17 @@
     ):
         self.revlog = revlog
         self._write_debug = write_debug
-        self._debug_search = debug_search
+        if write_debug is None:
+            self._debug_search = False
+        else:
+            self._debug_search = debug_search
         self._debug_info = debug_info
         self._snapshot_cache = SnapshotCache()
 
+    @property
+    def _gather_debug(self):
+        return self._write_debug is not None or self._debug_info is not None
+
     def buildtext(self, revinfo, fh):
         """Builds a fulltext version of a revision
 
@@ -1136,7 +1143,6 @@
     def _builddeltainfo(self, revinfo, base, fh, target_rev=None):
         # can we use the cached delta?
         revlog = self.revlog
-        debug_search = self._write_debug is not None and self._debug_search
         chainbase = revlog.chainbase(base)
         if revlog._generaldelta:
             deltabase = base
@@ -1173,7 +1179,7 @@
                 delta = revinfo.cachedelta[1]
         if delta is None:
             delta = self._builddeltadiff(base, revinfo, fh)
-        if debug_search:
+        if self._debug_search:
             msg = b"DBG-DELTAS-SEARCH:     uncompressed-delta-size=%d\n"
             msg %= len(delta)
             self._write_debug(msg)
@@ -1181,17 +1187,17 @@
         if revlog.upperboundcomp is not None and snapshotdepth:
             lowestrealisticdeltalen = len(delta) // revlog.upperboundcomp
             snapshotlimit = revinfo.textlen >> snapshotdepth
-            if debug_search:
+            if self._debug_search:
                 msg = b"DBG-DELTAS-SEARCH:     projected-lower-size=%d\n"
                 msg %= lowestrealisticdeltalen
                 self._write_debug(msg)
             if snapshotlimit < lowestrealisticdeltalen:
-                if debug_search:
+                if self._debug_search:
                     msg = b"DBG-DELTAS-SEARCH:     DISCARDED (snapshot limit)\n"
                     self._write_debug(msg)
                 return None
             if revlog.length(base) < lowestrealisticdeltalen:
-                if debug_search:
+                if self._debug_search:
                     msg = b"DBG-DELTAS-SEARCH:     DISCARDED (prev size)\n"
                     self._write_debug(msg)
                 return None
@@ -1253,41 +1259,34 @@
         if target_rev is None:
             target_rev = len(self.revlog)
 
-        if not revinfo.textlen:
-            return self._fullsnapshotinfo(fh, revinfo, target_rev)
+        gather_debug = self._gather_debug
+        cachedelta = revinfo.cachedelta
+        revlog = self.revlog
+        p1r = p2r = None
 
         if excluded_bases is None:
             excluded_bases = set()
 
-        # no delta for flag processor revision (see "candelta" for why)
-        # not calling candelta since only one revision needs test, also to
-        # avoid overhead fetching flags again.
-        if revinfo.flags & REVIDX_RAWTEXT_CHANGING_FLAGS:
-            return self._fullsnapshotinfo(fh, revinfo, target_rev)
-
-        gather_debug = (
-            self._write_debug is not None or self._debug_info is not None
-        )
-        debug_search = self._write_debug is not None and self._debug_search
-
         if gather_debug:
             start = util.timer()
-
-        # count the number of different delta we tried (for debug purpose)
-        dbg_try_count = 0
-        # count the number of "search round" we did. (for debug purpose)
-        dbg_try_rounds = 0
-        dbg_type = b'unknown'
-
-        cachedelta = revinfo.cachedelta
-        p1 = revinfo.p1
-        p2 = revinfo.p2
-        revlog = self.revlog
-
-        deltainfo = None
-        p1r, p2r = revlog.rev(p1), revlog.rev(p2)
-
-        if gather_debug:
+            dbg = self._one_dbg_data()
+            dbg['revision'] = target_rev
+            target_revlog = b"UNKNOWN"
+            target_type = self.revlog.target[0]
+            target_key = self.revlog.target[1]
+            if target_type == KIND_CHANGELOG:
+                target_revlog = b'CHANGELOG:'
+            elif target_type == KIND_MANIFESTLOG:
+                target_revlog = b'MANIFESTLOG:'
+                if target_key:
+                    target_revlog += b'%s:' % target_key
+            elif target_type == KIND_FILELOG:
+                target_revlog = b'FILELOG:'
+                if target_key:
+                    target_revlog += b'%s:' % target_key
+            dbg['target-revlog'] = target_revlog
+            p1r = revlog.rev(revinfo.p1)
+            p2r = revlog.rev(revinfo.p2)
             if p1r != nullrev:
                 p1_chain_len = revlog._chaininfo(p1r)[0]
             else:
@@ -1296,7 +1295,109 @@
                 p2_chain_len = revlog._chaininfo(p2r)[0]
             else:
                 p2_chain_len = -1
-        if debug_search:
+            dbg['p1-chain-len'] = p1_chain_len
+            dbg['p2-chain-len'] = p2_chain_len
+
+        # 1) if the revision is empty, no amount of delta can beat it
+        #
+        # 2) no delta for flag processor revision (see "candelta" for why)
+        # not calling candelta since only one revision needs test, also to
+        # avoid overhead fetching flags again.
+        if not revinfo.textlen or revinfo.flags & REVIDX_RAWTEXT_CHANGING_FLAGS:
+            deltainfo = self._fullsnapshotinfo(fh, revinfo, target_rev)
+            if gather_debug:
+                end = util.timer()
+                dbg['duration'] = end - start
+                dbg[
+                    'delta-base'
+                ] = deltainfo.base  # pytype: disable=attribute-error
+                dbg['search_round_count'] = 0
+                dbg['using-cached-base'] = False
+                dbg['delta_try_count'] = 0
+                dbg['type'] = b"full"
+                dbg['snapshot-depth'] = 0
+                self._dbg_process_data(dbg)
+            return deltainfo
+
+        deltainfo = None
+
+        # If this source delta is to be forcibly reused, let us comply early.
+        if (
+            revlog._generaldelta
+            and revinfo.cachedelta is not None
+            and revinfo.cachedelta[2] == DELTA_BASE_REUSE_FORCE
+        ):
+            base = revinfo.cachedelta[0]
+            if base == nullrev:
+                dbg_type = b"full"
+                deltainfo = self._fullsnapshotinfo(fh, revinfo, target_rev)
+                if gather_debug:
+                    snapshotdepth = 0
+            elif base not in excluded_bases:
+                delta = revinfo.cachedelta[1]
+                header, data = revlog.compress(delta)
+                deltalen = len(header) + len(data)
+                if gather_debug:
+                    offset = revlog.end(len(revlog) - 1)
+                    chainbase = revlog.chainbase(base)
+                    distance = deltalen + offset - revlog.start(chainbase)
+                    chainlen, compresseddeltalen = revlog._chaininfo(base)
+                    chainlen += 1
+                    compresseddeltalen += deltalen
+                    if base == p1r or base == p2r:
+                        dbg_type = b"delta"
+                        snapshotdepth = None
+                    elif not revlog.issnapshot(base):
+                        snapshotdepth = None
+                    else:
+                        dbg_type = b"snapshot"
+                        snapshotdepth = revlog.snapshotdepth(base) + 1
+                else:
+                    distance = None
+                    chainbase = None
+                    chainlen = None
+                    compresseddeltalen = None
+                    snapshotdepth = None
+                deltainfo = _deltainfo(
+                    distance=distance,
+                    deltalen=deltalen,
+                    data=(header, data),
+                    base=base,
+                    chainbase=chainbase,
+                    chainlen=chainlen,
+                    compresseddeltalen=compresseddeltalen,
+                    snapshotdepth=snapshotdepth,
+                )
+
+            if deltainfo is not None:
+                if gather_debug:
+                    end = util.timer()
+                    dbg['duration'] = end - start
+                    dbg[
+                        'delta-base'
+                    ] = deltainfo.base  # pytype: disable=attribute-error
+                    dbg['search_round_count'] = 0
+                    dbg['using-cached-base'] = True
+                    dbg['delta_try_count'] = 0
+                    dbg['type'] = b"full"
+                    if snapshotdepth is None:
+                        dbg['snapshot-depth'] = 0
+                    else:
+                        dbg['snapshot-depth'] = snapshotdepth
+                    self._dbg_process_data(dbg)
+                return deltainfo
+
+        # count the number of different delta we tried (for debug purpose)
+        dbg_try_count = 0
+        # count the number of "search round" we did. (for debug purpose)
+        dbg_try_rounds = 0
+        dbg_type = b'unknown'
+
+        if p1r is None:
+            p1r = revlog.rev(revinfo.p1)
+            p2r = revlog.rev(revinfo.p2)
+
+        if self._debug_search:
             msg = b"DBG-DELTAS-SEARCH: SEARCH rev=%d\n"
             msg %= target_rev
             self._write_debug(msg)
@@ -1314,7 +1415,7 @@
         candidaterevs = next(groups)
         while candidaterevs is not None:
             dbg_try_rounds += 1
-            if debug_search:
+            if self._debug_search:
                 prev = None
                 if deltainfo is not None:
                     prev = deltainfo.base
@@ -1325,7 +1426,7 @@
                     and cachedelta[0] in candidaterevs
                 ):
                     round_type = b"cached-delta"
-                elif p1 in candidaterevs or p2 in candidaterevs:
+                elif p1r in candidaterevs or p2r in candidaterevs:
                     round_type = b"parents"
                 elif prev is not None and all(c < prev for c in candidaterevs):
                     round_type = b"refine-down"
@@ -1338,7 +1439,7 @@
                 self._write_debug(msg)
             nominateddeltas = []
             if deltainfo is not None:
-                if debug_search:
+                if self._debug_search:
                     msg = (
                         b"DBG-DELTAS-SEARCH:   CONTENDER: rev=%d - length=%d\n"
                     )
@@ -1348,14 +1449,14 @@
                 # challenge it against refined candidates
                 nominateddeltas.append(deltainfo)
             for candidaterev in candidaterevs:
-                if debug_search:
+                if self._debug_search:
                     msg = b"DBG-DELTAS-SEARCH:   CANDIDATE: rev=%d\n"
                     msg %= candidaterev
                     self._write_debug(msg)
                     candidate_type = None
-                    if candidaterev == p1:
+                    if candidaterev == p1r:
                         candidate_type = b"p1"
-                    elif candidaterev == p2:
+                    elif candidaterev == p2r:
                         candidate_type = b"p2"
                     elif self.revlog.issnapshot(candidaterev):
                         candidate_type = b"snapshot-%d"
@@ -1376,7 +1477,7 @@
 
                 dbg_try_count += 1
 
-                if debug_search:
+                if self._debug_search:
                     delta_start = util.timer()
                 candidatedelta = self._builddeltainfo(
                     revinfo,
@@ -1384,23 +1485,23 @@
                     fh,
                     target_rev=target_rev,
                 )
-                if debug_search:
+                if self._debug_search:
                     delta_end = util.timer()
                     msg = b"DBG-DELTAS-SEARCH:     delta-search-time=%f\n"
                     msg %= delta_end - delta_start
                     self._write_debug(msg)
                 if candidatedelta is not None:
                     if is_good_delta_info(self.revlog, candidatedelta, revinfo):
-                        if debug_search:
+                        if self._debug_search:
                             msg = b"DBG-DELTAS-SEARCH:     DELTA: length=%d (GOOD)\n"
                             msg %= candidatedelta.deltalen
                             self._write_debug(msg)
                         nominateddeltas.append(candidatedelta)
-                    elif debug_search:
+                    elif self._debug_search:
                         msg = b"DBG-DELTAS-SEARCH:     DELTA: length=%d (BAD)\n"
                         msg %= candidatedelta.deltalen
                         self._write_debug(msg)
-                elif debug_search:
+                elif self._debug_search:
                     msg = b"DBG-DELTAS-SEARCH:     NO-DELTA\n"
                     self._write_debug(msg)
             if nominateddeltas:
@@ -1434,17 +1535,14 @@
                     and dbg_try_count == 1
                     and deltainfo.base == cachedelta[0]
                 )
-            dbg = {
-                'duration': end - start,
-                'revision': target_rev,
-                'delta-base': deltainfo.base,  # pytype: disable=attribute-error
-                'search_round_count': dbg_try_rounds,
-                'using-cached-base': used_cached,
-                'delta_try_count': dbg_try_count,
-                'type': dbg_type,
-                'p1-chain-len': p1_chain_len,
-                'p2-chain-len': p2_chain_len,
-            }
+            dbg['duration'] = end - start
+            dbg[
+                'delta-base'
+            ] = deltainfo.base  # pytype: disable=attribute-error
+            dbg['search_round_count'] = dbg_try_rounds
+            dbg['using-cached-base'] = used_cached
+            dbg['delta_try_count'] = dbg_try_count
+            dbg['type'] = dbg_type
             if (
                 deltainfo.snapshotdepth  # pytype: disable=attribute-error
                 is not None
@@ -1454,55 +1552,58 @@
                 ] = deltainfo.snapshotdepth  # pytype: disable=attribute-error
             else:
                 dbg['snapshot-depth'] = 0
-            target_revlog = b"UNKNOWN"
-            target_type = self.revlog.target[0]
-            target_key = self.revlog.target[1]
-            if target_type == KIND_CHANGELOG:
-                target_revlog = b'CHANGELOG:'
-            elif target_type == KIND_MANIFESTLOG:
-                target_revlog = b'MANIFESTLOG:'
-                if target_key:
-                    target_revlog += b'%s:' % target_key
-            elif target_type == KIND_FILELOG:
-                target_revlog = b'FILELOG:'
-                if target_key:
-                    target_revlog += b'%s:' % target_key
-            dbg['target-revlog'] = target_revlog
+            self._dbg_process_data(dbg)
+        return deltainfo
 
-            if self._debug_info is not None:
-                self._debug_info.append(dbg)
+    def _one_dbg_data(self):
+        return {
+            'duration': None,
+            'revision': None,
+            'delta-base': None,
+            'search_round_count': None,
+            'using-cached-base': None,
+            'delta_try_count': None,
+            'type': None,
+            'p1-chain-len': None,
+            'p2-chain-len': None,
+            'snapshot-depth': None,
+            'target-revlog': None,
+        }
+
+    def _dbg_process_data(self, dbg):
+        if self._debug_info is not None:
+            self._debug_info.append(dbg)
 
-            if self._write_debug is not None:
-                msg = (
-                    b"DBG-DELTAS:"
-                    b" %-12s"
-                    b" rev=%d:"
-                    b" delta-base=%d"
-                    b" is-cached=%d"
-                    b" - search-rounds=%d"
-                    b" try-count=%d"
-                    b" - delta-type=%-6s"
-                    b" snap-depth=%d"
-                    b" - p1-chain-length=%d"
-                    b" p2-chain-length=%d"
-                    b" - duration=%f"
-                    b"\n"
-                )
-                msg %= (
-                    dbg["target-revlog"],
-                    dbg["revision"],
-                    dbg["delta-base"],
-                    dbg["using-cached-base"],
-                    dbg["search_round_count"],
-                    dbg["delta_try_count"],
-                    dbg["type"],
-                    dbg["snapshot-depth"],
-                    dbg["p1-chain-len"],
-                    dbg["p2-chain-len"],
-                    dbg["duration"],
-                )
-                self._write_debug(msg)
-        return deltainfo
+        if self._write_debug is not None:
+            msg = (
+                b"DBG-DELTAS:"
+                b" %-12s"
+                b" rev=%d:"
+                b" delta-base=%d"
+                b" is-cached=%d"
+                b" - search-rounds=%d"
+                b" try-count=%d"
+                b" - delta-type=%-6s"
+                b" snap-depth=%d"
+                b" - p1-chain-length=%d"
+                b" p2-chain-length=%d"
+                b" - duration=%f"
+                b"\n"
+            )
+            msg %= (
+                dbg["target-revlog"],
+                dbg["revision"],
+                dbg["delta-base"],
+                dbg["using-cached-base"],
+                dbg["search_round_count"],
+                dbg["delta_try_count"],
+                dbg["type"],
+                dbg["snapshot-depth"],
+                dbg["p1-chain-len"],
+                dbg["p2-chain-len"],
+                dbg["duration"],
+            )
+            self._write_debug(msg)
 
 
 def delta_compression(default_compression_header, deltainfo):
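
A minimal decision sketch (not the real _search_candidates signature) of the ordering established above: empty and flag-processor revisions always get a full snapshot, then a cached delta marked as forced is complied with early, and only otherwise does the candidate search run. The constant's value here is an assumption; the diff only names it:

    DELTA_BASE_REUSE_FORCE = 2  # hypothetical value; only the name is from the diff

    def choose_strategy(textlen, raw_flags, generaldelta, cachedelta, excluded):
        # 1) no delta can beat an empty or flag-processor revision
        if not textlen or raw_flags:
            return "full-snapshot"
        # 2) comply early with a forcibly-reused incoming delta
        if (
            generaldelta
            and cachedelta is not None
            and cachedelta[2] == DELTA_BASE_REUSE_FORCE
            and cachedelta[0] not in excluded
        ):
            return "reuse-cached-delta"
        # 3) otherwise, run the normal candidate search rounds
        return "search"

    assert choose_strategy(0, 0, True, None, set()) == "full-snapshot"
    assert choose_strategy(10, 0, True, (5, b"d", 2), set()) == "reuse-cached-delta"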
--- a/mercurial/templatefuncs.py	Tue Jun 20 02:36:52 2023 +0200
+++ b/mercurial/templatefuncs.py	Sun Jun 18 00:09:39 2023 +0200
@@ -50,8 +50,8 @@
 
 @templatefunc(b'date(date[, fmt])')
 def date(context, mapping, args):
-    """Format a date. See :hg:`help dates` for formatting
-    strings. The default is a Unix date format, including the timezone:
+    """Format a date. The format string uses the Python strftime format.
+    The default is a Unix date format, including the timezone:
     "Mon Sep 04 15:13:13 2006 0700"."""
     if not (1 <= len(args) <= 2):
         # i18n: "date" is a keyword
--- a/mercurial/transaction.py	Tue Jun 20 02:36:52 2023 +0200
+++ b/mercurial/transaction.py	Sun Jun 18 00:09:39 2023 +0200
@@ -290,6 +290,8 @@
         self._backupjournal = b"%s.backupfiles" % self._journal
         self._backupsfile = opener.open(self._backupjournal, b'w')
         self._backupsfile.write(b'%d\n' % version)
+        # the set of temporary files
+        self._tmp_files = set()
 
         if createmode is not None:
             opener.chmod(self._journal, createmode & 0o666)
@@ -354,6 +356,7 @@
             file in self._newfiles
             or file in self._offsetmap
             or file in self._backupmap
+            or file in self._tmp_files
         ):
             return
         if self._queue:
@@ -368,6 +371,7 @@
             file in self._newfiles
             or file in self._offsetmap
             or file in self._backupmap
+            or file in self._tmp_files
         ):
             return
         if offset:
@@ -439,6 +443,7 @@
         Such files will be deleted when the transaction exits (on both
         failure and success).
         """
+        self._tmp_files.add(tmpfile)
         self._addbackupentry((location, b'', tmpfile, False))
 
     @active
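
A reduced sketch of the bookkeeping added above: temporary files are recorded in a set so that later add()/addbackup() calls recognize and skip them, since the transaction deletes them on exit either way:

    class TransactionSketch:
        def __init__(self):
            self._tmp_files = set()
            self._backups = []

        def registertmp(self, tmpfile):
            self._tmp_files.add(tmpfile)

        def addbackup(self, file):
            if file in self._tmp_files:
                return  # already tracked as temporary, nothing to back up
            self._backups.append(file)

    tr = TransactionSketch()
    tr.registertmp(b"data-s/file")
    tr.addbackup(b"data-s/file")
    assert tr._backups == []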
--- a/mercurial/wireprotov1server.py	Tue Jun 20 02:36:52 2023 +0200
+++ b/mercurial/wireprotov1server.py	Sun Jun 18 00:09:39 2023 +0200
@@ -437,7 +437,7 @@
     if not manifest:
         return None
     res = bundlecaches.parseclonebundlesmanifest(repo, manifest)
-    res = bundlecaches.filterclonebundleentries(repo, res)
+    res = bundlecaches.filterclonebundleentries(repo, res, pullbundles=True)
     if not res:
         return None
     cl = repo.unfiltered().changelog
--- a/relnotes/6.4	Tue Jun 20 02:36:52 2023 +0200
+++ b/relnotes/6.4	Sun Jun 18 00:09:39 2023 +0200
@@ -1,3 +1,14 @@
+= Mercurial 6.4.4 =
+
+ * clonebundles: filter out invalid schemes instead of failing on them
+ * doc: format argument for date uses strftime format string (issue6818)
+ * test: make test-contrib-perf.t more robust
+ * revlog: fix a bug in revlog splitting
+ * bundles: clarify streaming v2 bundle usage
+ * delta-find: fix pulled-delta-reuse-policy=forced behavior
+ * dirstate: fall back to v1 if reading v2 failed
+ * revlog: avoid possible collision between directory and temporary index
+
 = Mercurial 6.4.3 =
 
  * chg: declare environ (issue6812)
--- a/rust/hg-core/src/repo.rs	Tue Jun 20 02:36:52 2023 +0200
+++ b/rust/hg-core/src/repo.rs	Sun Jun 18 00:09:39 2023 +0200
@@ -232,7 +232,17 @@
         try_with_lock_no_wait(self.hg_vfs(), "wlock", f)
     }
 
-    pub fn has_dirstate_v2(&self) -> bool {
+    /// Whether this repo should use dirstate-v2.
+    /// The presence of `dirstate-v2` in the requirements does not mean that
+    /// the on-disk dirstate is necessarily in version 2. In most cases,
+    /// a dirstate-v2 file will indeed be found, but in rare cases (like the
+    /// upgrade mechanism being cut short), the on-disk version will be a
+    /// v1 file.
+    /// Semantically, having a requirement only means that a client cannot
+    /// properly understand or properly update the repo if it lacks the support
+    /// for the required feature, but not that that feature is actually used
+    on every occasion.
+    pub fn use_dirstate_v2(&self) -> bool {
         self.requirements
             .contains(requirements::DIRSTATE_V2_REQUIREMENT)
     }
@@ -277,10 +287,21 @@
         let dirstate = self.dirstate_file_contents()?;
         let parents = if dirstate.is_empty() {
             DirstateParents::NULL
-        } else if self.has_dirstate_v2() {
-            let docket =
-                crate::dirstate_tree::on_disk::read_docket(&dirstate)?;
-            docket.parents()
+        } else if self.use_dirstate_v2() {
+            let docket_res =
+                crate::dirstate_tree::on_disk::read_docket(&dirstate);
+            match docket_res {
+                Ok(docket) => docket.parents(),
+                Err(_) => {
+                    log::info!(
+                        "Parsing dirstate docket failed, \
+                        falling back to dirstate-v1"
+                    );
+                    *crate::dirstate::parsers::parse_dirstate_parents(
+                        &dirstate,
+                    )?
+                }
+            }
         } else {
             *crate::dirstate::parsers::parse_dirstate_parents(&dirstate)?
         };
@@ -296,7 +317,7 @@
         &self,
     ) -> Result<DirstateMapIdentity, HgError> {
         assert!(
-            self.has_dirstate_v2(),
+            self.use_dirstate_v2(),
             "accessing dirstate data file ID without dirstate-v2"
         );
         // Get the identity before the contents since we could have a race
@@ -308,15 +329,35 @@
             self.dirstate_parents.set(DirstateParents::NULL);
             Ok((identity, None, 0))
         } else {
-            let docket =
-                crate::dirstate_tree::on_disk::read_docket(&dirstate)?;
-            self.dirstate_parents.set(docket.parents());
-            Ok((identity, Some(docket.uuid.to_owned()), docket.data_size()))
+            let docket_res =
+                crate::dirstate_tree::on_disk::read_docket(&dirstate);
+            match docket_res {
+                Ok(docket) => {
+                    self.dirstate_parents.set(docket.parents());
+                    Ok((
+                        identity,
+                        Some(docket.uuid.to_owned()),
+                        docket.data_size(),
+                    ))
+                }
+                Err(_) => {
+                    log::info!(
+                        "Parsing dirstate docket failed, \
+                        falling back to dirstate-v1"
+                    );
+                    let parents =
+                        *crate::dirstate::parsers::parse_dirstate_parents(
+                            &dirstate,
+                        )?;
+                    self.dirstate_parents.set(parents);
+                    Ok((identity, None, 0))
+                }
+            }
         }
     }
 
     fn new_dirstate_map(&self) -> Result<OwningDirstateMap, DirstateError> {
-        if self.has_dirstate_v2() {
+        if self.use_dirstate_v2() {
             // The v2 dirstate is split into a docket and a data file.
             // Since we don't always take the `wlock` to read it
             // (like in `hg status`), it is susceptible to races.
@@ -343,7 +384,13 @@
                             );
                             continue;
                         }
-                        _ => return Err(e),
+                        _ => {
+                            log::info!(
+                                "Reading dirstate v2 failed, \
+                                falling back to v1"
+                            );
+                            return self.new_dirstate_map_v1();
+                        }
                     },
                 }
             }
@@ -354,23 +401,22 @@
             );
             Err(DirstateError::Common(error))
         } else {
-            debug_wait_for_file_or_print(
-                self.config(),
-                "dirstate.pre-read-file",
-            );
-            let identity = self.dirstate_identity()?;
-            let dirstate_file_contents = self.dirstate_file_contents()?;
-            if dirstate_file_contents.is_empty() {
-                self.dirstate_parents.set(DirstateParents::NULL);
-                Ok(OwningDirstateMap::new_empty(Vec::new()))
-            } else {
-                let (map, parents) = OwningDirstateMap::new_v1(
-                    dirstate_file_contents,
-                    identity,
-                )?;
-                self.dirstate_parents.set(parents);
-                Ok(map)
-            }
+            self.new_dirstate_map_v1()
+        }
+    }
+
+    fn new_dirstate_map_v1(&self) -> Result<OwningDirstateMap, DirstateError> {
+        debug_wait_for_file_or_print(self.config(), "dirstate.pre-read-file");
+        let identity = self.dirstate_identity()?;
+        let dirstate_file_contents = self.dirstate_file_contents()?;
+        if dirstate_file_contents.is_empty() {
+            self.dirstate_parents.set(DirstateParents::NULL);
+            Ok(OwningDirstateMap::new_empty(Vec::new()))
+        } else {
+            let (map, parents) =
+                OwningDirstateMap::new_v1(dirstate_file_contents, identity)?;
+            self.dirstate_parents.set(parents);
+            Ok(map)
         }
     }
 
@@ -550,7 +596,7 @@
         // TODO: Maintain a `DirstateMap::dirty` flag, and return early here if
         // it’s unset
         let parents = self.dirstate_parents()?;
-        let (packed_dirstate, old_uuid_to_remove) = if self.has_dirstate_v2() {
+        let (packed_dirstate, old_uuid_to_remove) = if self.use_dirstate_v2() {
             let (identity, uuid, data_size) =
                 self.get_dirstate_data_file_integrity()?;
             let identity_changed = identity != map.old_identity();
--- a/tests/test-clonebundles.t	Tue Jun 20 02:36:52 2023 +0200
+++ b/tests/test-clonebundles.t	Sun Jun 18 00:09:39 2023 +0200
@@ -59,6 +59,20 @@
   (if this error persists, consider contacting the server operator or disable clone bundles via "--config ui.clonebundles=false")
   [255]
 
+A manifest entry whose URL has an unknown scheme is skipped
+  $ echo 'weirdscheme://does.not.exist/bundle.hg' > server/.hg/clonebundles.manifest
+  $ hg clone http://localhost:$HGPORT unknown-scheme
+  no compatible clone bundles available on server; falling back to regular clone
+  (you may want to report this to the server operator)
+  requesting all changes
+  adding changesets
+  adding manifests
+  adding file changes
+  added 2 changesets with 2 changes to 2 files
+  new changesets 53245c60e682:aaff8d2ffbbf
+  updating to branch default
+  2 files updated, 0 files merged, 0 files removed, 0 files unresolved
+
 Server is not running aborts
 
   $ echo "http://localhost:$HGPORT1/bundle.hg" > server/.hg/clonebundles.manifest
--- a/tests/test-contrib-perf.t	Tue Jun 20 02:36:52 2023 +0200
+++ b/tests/test-contrib-perf.t	Sun Jun 18 00:09:39 2023 +0200
@@ -304,20 +304,20 @@
 
 Multiple entries
 
-  $ hg perfparents --config perf.stub=no --config perf.run-limits='500000-1, 0.000000001-5'
-  ! wall * comb * user * sys * (best of 5) (glob)
+  $ hg perfparents --config perf.stub=no --config perf.run-limits='500000-1, 0.000000001-50'
+  ! wall * comb * user * sys * (best of 50) (glob)
 
 error cases are ignored
 
-  $ hg perfparents --config perf.stub=no --config perf.run-limits='500, 0.000000001-5'
+  $ hg perfparents --config perf.stub=no --config perf.run-limits='500, 0.000000001-50'
   malformatted run limit entry, missing "-": 500
-  ! wall * comb * user * sys * (best of 5) (glob)
-  $ hg perfparents --config perf.stub=no --config perf.run-limits='aaa-12, 0.000000001-5'
-  malformatted run limit entry, could not convert string to float: 'aaa': aaa-12
-  ! wall * comb * user * sys * (best of 5) (glob)
-  $ hg perfparents --config perf.stub=no --config perf.run-limits='12-aaaaaa, 0.000000001-5'
-  malformatted run limit entry, invalid literal for int() with base 10: 'aaaaaa': 12-aaaaaa
-  ! wall * comb * user * sys * (best of 5) (glob)
+  ! wall * comb * user * sys * (best of 50) (glob)
+  $ hg perfparents --config perf.stub=no --config perf.run-limits='aaa-120, 0.000000001-50'
+  malformatted run limit entry, could not convert string to float: 'aaa': aaa-120
+  ! wall * comb * user * sys * (best of 50) (glob)
+  $ hg perfparents --config perf.stub=no --config perf.run-limits='120-aaaaaa, 0.000000001-50'
+  malformatted run limit entry, invalid literal for int() with base 10: 'aaaaaa': 120-aaaaaa
+  ! wall * comb * user * sys * (best of 50) (glob)
 
 test actual output
 ------------------
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/tests/test-dirstate-version-fallback.t	Sun Jun 18 00:09:39 2023 +0200
@@ -0,0 +1,51 @@
+  $ cat >> $HGRCPATH << EOF
+  > [storage]
+  > dirstate-v2.slow-path=allow
+  > [format]
+  > use-dirstate-v2=no
+  > EOF
+
+Set up a v1 repo
+
+  $ hg init repo
+  $ cd repo
+  $ echo a > a
+  $ hg add a
+  $ hg commit -m a
+  $ hg debugrequires | grep dirstate
+  [1]
+  $ ls -1 .hg/dirstate*
+  .hg/dirstate
+
+Copy v1 dirstate
+  $ cp .hg/dirstate $TESTTMP/dirstate-v1-backup
+
+Upgrade it to v2
+
+  $ hg debugupgraderepo -q --config format.use-dirstate-v2=1 --run | egrep 'added:|removed:'
+     added: dirstate-v2
+  $ hg debugrequires | grep dirstate
+  dirstate-v2
+  $ ls -1 .hg/dirstate*
+  .hg/dirstate
+  .hg/dirstate.* (glob)
+
+Manually reset to dirstate v1 to simulate an incomplete dirstate-v2 upgrade
+
+  $ rm .hg/dirstate*
+  $ cp $TESTTMP/dirstate-v1-backup .hg/dirstate
+
+There should be no errors, but a v2 dirstate should be written back to disk
+  $ hg st
+  $ ls -1 .hg/dirstate*
+  .hg/dirstate
+  .hg/dirstate.* (glob)
+
+Corrupt the dirstate to see how the errors show up to the user
+  $ echo "I ate your data" > .hg/dirstate
+
+  $ hg st
+  abort: working directory state appears damaged! (no-rhg !)
+  (falling back to dirstate-v1 from v2 also failed) (no-rhg !)
+  abort: Too little data for dirstate: 16 bytes. (rhg !)
+  [255]
--- a/tests/test-revlog-delta-find.t	Tue Jun 20 02:36:52 2023 +0200
+++ b/tests/test-revlog-delta-find.t	Sun Jun 18 00:09:39 2023 +0200
@@ -329,5 +329,34 @@
   DBG-DELTAS: CHANGELOG: * (glob)
   DBG-DELTAS: MANIFESTLOG: * (glob)
   DBG-DELTAS: MANIFESTLOG: * (glob)
-  DBG-DELTAS: FILELOG:my-file.txt: rev=3: delta-base=2 * (glob)
-  DBG-DELTAS: FILELOG:my-file.txt: rev=4: delta-base=3 * (glob)
+  DBG-DELTAS: FILELOG:my-file.txt: rev=3: delta-base=2 is-cached=1 *search-rounds=0 try-count=0* (glob)
+  DBG-DELTAS: FILELOG:my-file.txt: rev=4: delta-base=3 is-cached=1 *search-rounds=0 try-count=0* (glob)
+
+Check that running "forced" on a non-general delta repository does not corrupt it
+---------------------------------------------------------------------------------
+
+Even if requested, some of the deltas in the revlog cannot be stored on a non-general-delta repository. We check that the bundle application was correct.
+
+  $ hg init \
+  >    --config format.usegeneraldelta=no \
+  >    --config format.sparse-revlog=no \
+  >    local-forced-full-p1-no-gd
+  $ hg debugformat -R local-forced-full-p1-no-gd | grep generaldelta
+  generaldelta:        no
+  $ hg -R local-forced-full-p1-no-gd pull --quiet local-pre-pull-full \
+  >    --config debug.revlog.debug-delta=no
+  $ hg -R local-forced-full-p1-no-gd pull --quiet \
+  > --config 'paths.*:pulled-delta-reuse-policy=forced' all-p1.hg
+  DBG-DELTAS: CHANGELOG: * (glob)
+  DBG-DELTAS: CHANGELOG: * (glob)
+  DBG-DELTAS: MANIFESTLOG: * (glob)
+  DBG-DELTAS: MANIFESTLOG: * (glob)
+  DBG-DELTAS: FILELOG:my-file.txt: rev=3: delta-base=0 * - search-rounds=1 try-count=1 * (glob)
+  DBG-DELTAS: FILELOG:my-file.txt: rev=4: delta-base=4 * - search-rounds=1 try-count=1 * (glob)
+  $ hg -R local-forced-full-p1-no-gd verify
+  checking changesets
+  checking manifests
+  crosschecking files in changesets and manifests
+  checking files
+  checking dirstate
+  checked 5 changesets with 5 changes to 1 files
--- a/tests/test-transaction-rollback-on-revlog-split.t	Tue Jun 20 02:36:52 2023 +0200
+++ b/tests/test-transaction-rollback-on-revlog-split.t	Sun Jun 18 00:09:39 2023 +0200
@@ -84,6 +84,8 @@
   > Directory_With,Special%Char/Complex_File.babar
   > foo/bar/babar_celeste/foo
   > 1234567890/1234567890/1234567890/1234567890/1234567890/1234567890/1234567890/1234567890/1234567890/1234567890/f
+  > some_dir/sub_dir/foo_bar
+  > some_dir/sub_dir/foo_bar.i.s/tutu
   > "
   $ for f in $files; do
   >     mkdir -p `dirname $f`
@@ -104,13 +106,17 @@
   >     dd if=/dev/zero of=$f bs=1k count=128 > /dev/null 2>&1
   > done
   $ hg commit -AqmD --traceback
+  $ for f in $files; do
+  >     dd if=/dev/zero of=$f bs=1k count=132 > /dev/null 2>&1
+  > done
+  $ hg commit -AqmD --traceback
 
 Reference size:
   $ f -s file
-  file: size=131072
-  $ f -s .hg/store/data/file*
-  .hg/store/data/file.d: size=132139
-  .hg/store/data/file.i: size=256
+  file: size=135168
+  $ f -s .hg/store/data*/file*
+  .hg/store/data/file.d: size=267307
+  .hg/store/data/file.i: size=320
 
   $ cd ..
 
@@ -134,16 +140,16 @@
   adding changesets
   adding manifests
   adding file changes
-  added 2 changesets with 8 changes to 4 files
-  new changesets 16a630ece54e:8437c461d70a
+  added 3 changesets with 18 changes to 6 files
+  new changesets c99a94cae9b1:64874a3b0160
   (run 'hg update' to get a working copy)
 
 
 The inline revlog has been replaced
 
   $ f -s .hg/store/data/file*
-  .hg/store/data/file.d: size=132139
-  .hg/store/data/file.i: size=256
+  .hg/store/data/file.d: size=267307
+  .hg/store/data/file.i: size=320
 
 
   $ hg verify -q
@@ -171,7 +177,7 @@
 Reference size:
   $ f -s file
   file: size=1024
-  $ f -s .hg/store/data/file*
+  $ f -s .hg/store/data*/file*
   .hg/store/data/file.i: size=1174
 
   $ cat > .hg/hgrc <<EOF
@@ -192,10 +198,13 @@
 
 The inline revlog still exists, but a split version exists next to it
 
-  $ f -s .hg/store/data/file*
-  .hg/store/data/file.d: size=132139
+  $ cat .hg/store/journal | tr '\0' ' ' | grep '\.s'
+  data/some_dir/sub_dir/foo_bar.i.s/tutu.i 1174
+  data/some_dir/sub_dir/foo_bar.i.s/tutu.d 0
+  $ f -s .hg/store/data*/file*
+  .hg/store/data-s/file: size=320
+  .hg/store/data/file.d: size=267307
   .hg/store/data/file.i: size=132395
-  .hg/store/data/file.i.s: size=256
 
 
 The first file.i entry should match the "Reference size" above.
@@ -206,19 +215,19 @@
   $ cat .hg/store/journal | tr -s '\000' ' ' | grep data/file
   data/file.i 1174
   data/file.d 0
-  $ cat .hg/store/journal.backupfiles | tr -s '\000' ' ' | tr -s '\00' ' '| grep data/file
+  $ cat .hg/store/journal.backupfiles | tr -s '\000' ' ' | tr -s '\00' ' '| grep 'data.*/file'
    data/file.i data/journal.backup.file.i.bck 0
-   data/file.i.s 0
+   data-s/file 0
 
 recover rolls the split back; the fncache is still valid
 
   $ hg recover
   rolling back interrupted transaction
   (verify step skipped, run `hg verify` to check your repository content)
-  $ f -s .hg/store/data/file*
+  $ f -s .hg/store/data*/file*
   .hg/store/data/file.i: size=1174
   $ hg tip
-  changeset:   1:cc8dfb126534
+  changeset:   1:64b04c8dc267
   tag:         tip
   user:        test
   date:        Thu Jan 01 00:00:00 1970 +0000
@@ -243,7 +252,7 @@
 Reference size:
   $ f -s file
   file: size=1024
-  $ f -s .hg/store/data/file*
+  $ f -s .hg/store/data*/file*
   .hg/store/data/file.i: size=1174
 
   $ cat > .hg/hgrc <<EOF
@@ -271,12 +280,12 @@
 
 The inline revlog still exists, but a split version exists next to it
 
-  $ f -s .hg/store/data/file*
-  .hg/store/data/file.d: size=132139
+  $ f -s .hg/store/data*/file*
+  .hg/store/data-s/file: size=320
+  .hg/store/data/file.d: size=267307
   .hg/store/data/file.i: size=132395
-  .hg/store/data/file.i.s: size=256
 
-  $ cat .hg/store/journal | tr -s '\000' ' ' | grep data/file
+  $ cat .hg/store/journal | tr -s '\000' ' ' | grep 'data.*/file'
   data/file.i 1174
   data/file.d 0
 
@@ -285,10 +294,10 @@
   $ hg recover
   rolling back interrupted transaction
   (verify step skipped, run `hg verify` to check your repository content)
-  $ f -s .hg/store/data/file*
+  $ f -s .hg/store/data*/file*
   .hg/store/data/file.i: size=1174
   $ hg tip
-  changeset:   1:cc8dfb126534
+  changeset:   1:64b04c8dc267
   tag:         tip
   user:        test
   date:        Thu Jan 01 00:00:00 1970 +0000
@@ -308,7 +317,7 @@
 Reference size:
   $ f -s file
   file: size=1024
-  $ f -s .hg/store/data/file*
+  $ f -s .hg/store/data*/file*
   .hg/store/data/file.i: size=1174
 
   $ cat > .hg/hgrc <<EOF
@@ -336,11 +345,11 @@
 
 The inline revlog was overwritten on disk
 
-  $ f -s .hg/store/data/file*
-  .hg/store/data/file.d: size=132139
-  .hg/store/data/file.i: size=256
+  $ f -s .hg/store/data*/file*
+  .hg/store/data/file.d: size=267307
+  .hg/store/data/file.i: size=320
 
-  $ cat .hg/store/journal | tr -s '\000' ' ' | grep data/file
+  $ cat .hg/store/journal | tr -s '\000' ' ' | grep 'data.*/file'
   data/file.i 1174
   data/file.d 0
 
@@ -349,10 +358,10 @@
   $ hg recover
   rolling back interrupted transaction
   (verify step skipped, run `hg verify` to check your repository content)
-  $ f -s .hg/store/data/file*
+  $ f -s .hg/store/data*/file*
   .hg/store/data/file.i: size=1174
   $ hg tip
-  changeset:   1:cc8dfb126534
+  changeset:   1:64b04c8dc267
   tag:         tip
   user:        test
   date:        Thu Jan 01 00:00:00 1970 +0000
@@ -386,13 +395,13 @@
 
 The split was rolled back
 
-  $ f -s .hg/store/data/file*
+  $ f -s .hg/store/data*/file*
   .hg/store/data/file.d: size=0
   .hg/store/data/file.i: size=1174
 
 
   $ hg tip
-  changeset:   1:cc8dfb126534
+  changeset:   1:64b04c8dc267
   tag:         tip
   user:        test
   date:        Thu Jan 01 00:00:00 1970 +0000
@@ -472,7 +481,7 @@
   adding changesets
   adding manifests
   adding file changes
-  size=131072
+  size=135168
   transaction abort!
   rollback completed
   abort: pretxnclose.03-abort hook exited with status 1
--- a/tests/test-upgrade-repo.t	Tue Jun 20 02:36:52 2023 +0200
+++ b/tests/test-upgrade-repo.t	Sun Jun 18 00:09:39 2023 +0200
@@ -839,6 +839,7 @@
   00changelog.i
   00manifest.i
   data
+  data-s
   fncache
   phaseroots
   requires
@@ -862,6 +863,7 @@
   00changelog.i
   00manifest.i
   data
+  data-s
   fncache
   phaseroots
   requires