changeset 51515:16d93adddce7

branching: merge stable into default
author Raphaël Gomès <rgomes@octobus.net>
date Thu, 21 Mar 2024 12:26:46 +0100
parents 0239ebdd0740 (diff) 394ea4428163 (current diff)
children c7e81615b5c4
diffstat 23 files changed, 693 insertions(+), 1103 deletions(-)
--- a/contrib/perf.py	Thu Mar 21 12:24:42 2024 +0100
+++ b/contrib/perf.py	Thu Mar 21 12:26:46 2024 +0100
@@ -4303,6 +4303,19 @@
         baserepo = repo.filtered(b'__perf_branchmap_update_base')
         targetrepo = repo.filtered(b'__perf_branchmap_update_target')
 
+        bcache = repo.branchmap()
+        copy_method = 'copy'
+
+        copy_base_kwargs = copy_target_kwargs = {}
+        if hasattr(bcache, 'copy'):
+            if 'repo' in getargspec(bcache.copy).args:
+                copy_base_kwargs = {"repo": baserepo}
+                copy_target_kwargs = {"repo": targetrepo}
+        else:
+            copy_method = 'inherit_for'
+            copy_base_kwargs = {"repo": baserepo}
+            copy_target_kwargs = {"repo": targetrepo}
+
         # try to find an existing branchmap to reuse
         subsettable = getbranchmapsubsettable()
         candidatefilter = subsettable.get(None)
@@ -4311,7 +4324,7 @@
             if candidatebm.validfor(baserepo):
                 filtered = repoview.filterrevs(repo, candidatefilter)
                 missing = [r for r in allbaserevs if r in filtered]
-                base = candidatebm.copy()
+                base = getattr(candidatebm, copy_method)(**copy_base_kwargs)
                 base.update(baserepo, missing)
                 break
             candidatefilter = subsettable.get(candidatefilter)
@@ -4321,7 +4334,7 @@
             base.update(baserepo, allbaserevs)
 
         def setup():
-            x[0] = base.copy()
+            x[0] = getattr(base, copy_method)(**copy_target_kwargs)
             if clearcaches:
                 unfi._revbranchcache = None
                 clearchangelog(repo)
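
The probing above lets perf.py run against several Mercurial versions: it prefers the historical argument-less `copy()`, detects whether `copy` grew a `repo` argument, and otherwise falls back to the new `inherit_for(repo)`. A standalone sketch of the same dispatch pattern, with hypothetical stand-in classes (perf.py itself uses its own `getargspec` shim rather than `inspect`):

    import inspect

    class OldCache:
        def copy(self):
            return OldCache()

    class NewCache:
        def inherit_for(self, repo):
            return NewCache()

    def branchmap_copy(bcache, repo):
        # prefer `copy`, fall back to `inherit_for`, and only pass `repo`
        # when the chosen method's signature accepts it
        method = 'copy'
        kwargs = {}
        if hasattr(bcache, 'copy'):
            if 'repo' in inspect.getfullargspec(bcache.copy).args:
                kwargs = {'repo': repo}
        else:
            method = 'inherit_for'
            kwargs = {'repo': repo}
        return getattr(bcache, method)(**kwargs)

    print(type(branchmap_copy(OldCache(), repo=None)).__name__)  # OldCache
    print(type(branchmap_copy(NewCache(), repo=None)).__name__)  # NewCache
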
--- a/mercurial/branchmap.py	Thu Mar 21 12:24:42 2024 +0100
+++ b/mercurial/branchmap.py	Thu Mar 21 12:26:46 2024 +0100
@@ -15,6 +15,7 @@
 )
 
 from typing import (
+    Any,
     Callable,
     Dict,
     Iterable,
@@ -59,7 +60,34 @@
 
     def __getitem__(self, repo):
         self.updatecache(repo)
-        return self._per_filter[repo.filtername]
+        bcache = self._per_filter[repo.filtername]
+        assert bcache._filtername == repo.filtername, (
+            bcache._filtername,
+            repo.filtername,
+        )
+        return bcache
+
+    def update_disk(self, repo):
+        """ensure and up-to-date cache is (or will be) written on disk
+
+        The cache for this repository view is updated  if needed and written on
+        disk.
+
+        If a transaction is in progress, the writing is schedule to transaction
+        close. See the `BranchMapCache.write_dirty` method.
+
+        This method exist independently of __getitem__ as it is sometime useful
+        to signal that we have no intend to use the data in memory yet.
+        """
+        self.updatecache(repo)
+        bcache = self._per_filter[repo.filtername]
+        assert bcache._filtername == repo.filtername, (
+            bcache._filtername,
+            repo.filtername,
+        )
+        tr = repo.currenttransaction()
+        if getattr(tr, 'finalized', True):
+            bcache.sync_disk(repo)
 
     def updatecache(self, repo):
         """Update the cache for the given filtered view on a repository"""
@@ -82,7 +110,8 @@
             subsetname = subsettable.get(filtername)
             if subsetname is not None:
                 subset = repo.filtered(subsetname)
-                bcache = self[subset].copy()
+                self.updatecache(subset)
+                bcache = self._per_filter[subset.filtername].inherit_for(repo)
                 extrarevs = subset.changelog.filteredrevs - cl.filteredrevs
                 revs.extend(r for r in extrarevs if r <= bcache.tiprev)
             else:
@@ -131,19 +160,26 @@
             for candidate in (b'base', b'immutable', b'served'):
                 rview = repo.filtered(candidate)
                 if cache.validfor(rview):
+                    cache._filtername = candidate
                     self._per_filter[candidate] = cache
+                    cache._state = STATE_DIRTY
                     cache.write(rview)
                     return
 
     def clear(self):
         self._per_filter.clear()
 
-    def write_delayed(self, repo):
+    def write_dirty(self, repo):
         unfi = repo.unfiltered()
-        for filtername, cache in self._per_filter.items():
-            if cache._delayed:
+        for filtername in repoviewutil.get_ordered_subset():
+            cache = self._per_filter.get(filtername)
+            if cache is None:
+                continue
+            if filtername is None:
+                repo = unfi
+            else:
                 repo = unfi.filtered(filtername)
-                cache.write(repo)
+            cache.sync_disk(repo)
 
 
 def _unknownnode(node):
@@ -158,7 +194,7 @@
         return b'branch cache'
 
 
-class branchcache:
+class _BaseBranchCache:
     """A dict like object that hold branches heads cache.
 
     This cache is used to avoid costly computations to determine all the
@@ -186,64 +222,18 @@
         entries: Union[
             Dict[bytes, List[bytes]], Iterable[Tuple[bytes, List[bytes]]]
         ] = (),
-        tipnode: Optional[bytes] = None,
-        tiprev: Optional[int] = nullrev,
-        filteredhash: Optional[bytes] = None,
-        closednodes: Optional[Set[bytes]] = None,
-        hasnode: Optional[Callable[[bytes], bool]] = None,
+        closed_nodes: Optional[Set[bytes]] = None,
     ) -> None:
         """hasnode is a function which can be used to verify whether changelog
         has a given node or not. If it's not provided, we assume that every node
         we have exists in changelog"""
-        self._repo = repo
-        self._delayed = False
-        if tipnode is None:
-            self.tipnode = repo.nullid
-        else:
-            self.tipnode = tipnode
-        self.tiprev = tiprev
-        self.filteredhash = filteredhash
         # closednodes is a set of nodes that close their branch. If the branch
         # cache has been updated, it may contain nodes that are no longer
         # heads.
-        if closednodes is None:
-            self._closednodes = set()
-        else:
-            self._closednodes = closednodes
+        if closed_nodes is None:
+            closed_nodes = set()
+        self._closednodes = set(closed_nodes)
         self._entries = dict(entries)
-        # whether closed nodes are verified or not
-        self._closedverified = False
-        # branches for which nodes are verified
-        self._verifiedbranches = set()
-        self._hasnode = hasnode
-        if self._hasnode is None:
-            self._hasnode = lambda x: True
-
-    def _verifyclosed(self):
-        """verify the closed nodes we have"""
-        if self._closedverified:
-            return
-        for node in self._closednodes:
-            if not self._hasnode(node):
-                _unknownnode(node)
-
-        self._closedverified = True
-
-    def _verifybranch(self, branch):
-        """verify head nodes for the given branch."""
-        if branch not in self._entries or branch in self._verifiedbranches:
-            return
-        for n in self._entries[branch]:
-            if not self._hasnode(n):
-                _unknownnode(n)
-
-        self._verifiedbranches.add(branch)
-
-    def _verifyall(self):
-        """verifies nodes of all the branches"""
-        needverification = set(self._entries.keys()) - self._verifiedbranches
-        for b in needverification:
-            self._verifybranch(b)
 
     def __iter__(self):
         return iter(self._entries)
@@ -252,115 +242,20 @@
         self._entries[key] = value
 
     def __getitem__(self, key):
-        self._verifybranch(key)
         return self._entries[key]
 
     def __contains__(self, key):
-        self._verifybranch(key)
         return key in self._entries
 
     def iteritems(self):
-        for k, v in self._entries.items():
-            self._verifybranch(k)
-            yield k, v
+        return self._entries.items()
 
     items = iteritems
 
     def hasbranch(self, label):
         """checks whether a branch of this name exists or not"""
-        self._verifybranch(label)
         return label in self._entries
 
-    @classmethod
-    def fromfile(cls, repo):
-        f = None
-        try:
-            f = repo.cachevfs(cls._filename(repo))
-            lineiter = iter(f)
-            cachekey = next(lineiter).rstrip(b'\n').split(b" ", 2)
-            last, lrev = cachekey[:2]
-            last, lrev = bin(last), int(lrev)
-            filteredhash = None
-            hasnode = repo.changelog.hasnode
-            if len(cachekey) > 2:
-                filteredhash = bin(cachekey[2])
-            bcache = cls(
-                repo,
-                tipnode=last,
-                tiprev=lrev,
-                filteredhash=filteredhash,
-                hasnode=hasnode,
-            )
-            if not bcache.validfor(repo):
-                # invalidate the cache
-                raise ValueError('tip differs')
-            bcache.load(repo, lineiter)
-        except (IOError, OSError):
-            return None
-
-        except Exception as inst:
-            if repo.ui.debugflag:
-                msg = b'invalid %s: %s\n'
-                repo.ui.debug(
-                    msg
-                    % (
-                        _branchcachedesc(repo),
-                        stringutil.forcebytestr(inst),
-                    )
-                )
-            bcache = None
-
-        finally:
-            if f:
-                f.close()
-
-        return bcache
-
-    def load(self, repo, lineiter):
-        """fully loads the branchcache by reading from the file using the line
-        iterator passed"""
-        for line in lineiter:
-            line = line.rstrip(b'\n')
-            if not line:
-                continue
-            node, state, label = line.split(b" ", 2)
-            if state not in b'oc':
-                raise ValueError('invalid branch state')
-            label = encoding.tolocal(label.strip())
-            node = bin(node)
-            self._entries.setdefault(label, []).append(node)
-            if state == b'c':
-                self._closednodes.add(node)
-
-    @staticmethod
-    def _filename(repo):
-        """name of a branchcache file for a given repo or repoview"""
-        filename = b"branch2"
-        if repo.filtername:
-            filename = b'%s-%s' % (filename, repo.filtername)
-        return filename
-
-    def validfor(self, repo):
-        """check that cache contents are valid for (a subset of) this repo
-
-        - False when the order of changesets changed or if we detect a strip.
-        - True when cache is up-to-date for the current repo or its subset."""
-        try:
-            node = repo.changelog.node(self.tiprev)
-        except IndexError:
-            # changesets were stripped and now we don't even have enough to
-            # find tiprev
-            return False
-        if self.tipnode != node:
-            # tiprev doesn't correspond to tipnode: repo was stripped, or this
-            # repo has a different order of changesets
-            return False
-        tiphash = scmutil.filteredhash(repo, self.tiprev, needobsolete=True)
-        # hashes don't match if this repo view has a different set of filtered
-        # revisions (e.g. due to phase changes) or obsolete revisions (e.g.
-        # history was rewritten)
-        return self.filteredhash == tiphash
-
     def _branchtip(self, heads):
         """Return tuple with last open head in heads and false,
         otherwise return last closed head and true."""
@@ -383,7 +278,6 @@
         return (n for n in nodes if n not in self._closednodes)
 
     def branchheads(self, branch, closed=False):
-        self._verifybranch(branch)
         heads = self._entries[branch]
         if not closed:
             heads = list(self.iteropen(heads))
@@ -395,60 +289,8 @@
 
     def iterheads(self):
         """returns all the heads"""
-        self._verifyall()
         return self._entries.values()
 
-    def copy(self):
-        """return an deep copy of the branchcache object"""
-        return type(self)(
-            self._repo,
-            self._entries,
-            self.tipnode,
-            self.tiprev,
-            self.filteredhash,
-            self._closednodes,
-        )
-
-    def write(self, repo):
-        tr = repo.currenttransaction()
-        if not getattr(tr, 'finalized', True):
-            # Avoid premature writing.
-            #
-            # (The cache warming setup by localrepo will update the file later.)
-            self._delayed = True
-            return
-        try:
-            filename = self._filename(repo)
-            with repo.cachevfs(filename, b"w", atomictemp=True) as f:
-                cachekey = [hex(self.tipnode), b'%d' % self.tiprev]
-                if self.filteredhash is not None:
-                    cachekey.append(hex(self.filteredhash))
-                f.write(b" ".join(cachekey) + b'\n')
-                nodecount = 0
-                for label, nodes in sorted(self._entries.items()):
-                    label = encoding.fromlocal(label)
-                    for node in nodes:
-                        nodecount += 1
-                        if node in self._closednodes:
-                            state = b'c'
-                        else:
-                            state = b'o'
-                        f.write(b"%s %s %s\n" % (hex(node), state, label))
-            repo.ui.log(
-                b'branchcache',
-                b'wrote %s with %d labels and %d nodes\n',
-                _branchcachedesc(repo),
-                len(self._entries),
-                nodecount,
-            )
-            self._delayed = False
-        except (IOError, OSError, error.Abort) as inst:
-            # Abort may be raised by read only opener, so log and continue
-            repo.ui.debug(
-                b"couldn't write branch cache: %s\n"
-                % stringutil.forcebytestr(inst)
-            )
-
     def update(self, repo, revgen):
         """Given a branchhead cache, self, that may have extra nodes or be
         missing heads, and a generator of nodes that are strictly a superset of
@@ -459,15 +301,16 @@
         # collect new branch entries
         newbranches = {}
         getbranchinfo = repo.revbranchcache().branchinfo
+        max_rev = -1
         for r in revgen:
             branch, closesbranch = getbranchinfo(r)
             newbranches.setdefault(branch, []).append(r)
             if closesbranch:
                 self._closednodes.add(cl.node(r))
-
-        # new tip revision which we found after iterating items from new
-        # branches
-        ntiprev = self.tiprev
+            max_rev = max(max_rev, r)
+        if max_rev < 0:
+            msg = "running branchcache.update without revision to update"
+            raise error.ProgrammingError(msg)
 
         # Delay fetching the topological heads until they are needed.
         # A repository without non-continuous branches can skip this part.
@@ -561,13 +404,314 @@
                         bheadset -= ancestors
             if bheadset:
                 self[branch] = [cl.node(rev) for rev in sorted(bheadset)]
-            tiprev = max(newheadrevs)
-            if tiprev > ntiprev:
-                ntiprev = tiprev
+
+        duration = util.timer() - starttime
+        repo.ui.log(
+            b'branchcache',
+            b'updated %s in %.4f seconds\n',
+            _branchcachedesc(repo),
+            duration,
+        )
+        return max_rev
+
+
+STATE_CLEAN = 1
+STATE_INHERITED = 2
+STATE_DIRTY = 3
+
+
+class branchcache(_BaseBranchCache):
+    """Branchmap info for a local repo or repoview"""
+
+    _base_filename = b"branch2"
+
+    def __init__(
+        self,
+        repo: "localrepo.localrepository",
+        entries: Union[
+            Dict[bytes, List[bytes]], Iterable[Tuple[bytes, List[bytes]]]
+        ] = (),
+        tipnode: Optional[bytes] = None,
+        tiprev: Optional[int] = nullrev,
+        filteredhash: Optional[bytes] = None,
+        closednodes: Optional[Set[bytes]] = None,
+        hasnode: Optional[Callable[[bytes], bool]] = None,
+        verify_node: bool = False,
+        inherited: bool = False,
+    ) -> None:
+        """hasnode is a function which can be used to verify whether changelog
+        has a given node or not. If it's not provided, we assume that every node
+        we have exists in changelog"""
+        self._filtername = repo.filtername
+        if tipnode is None:
+            self.tipnode = repo.nullid
+        else:
+            self.tipnode = tipnode
+        self.tiprev = tiprev
+        self.filteredhash = filteredhash
+        self._state = STATE_CLEAN
+        if inherited:
+            self._state = STATE_INHERITED
+
+        super().__init__(repo=repo, entries=entries, closed_nodes=closednodes)
+        # closednodes is a set of nodes that close their branch. If the branch
+        # cache has been updated, it may contain nodes that are no longer
+        # heads.
+
+        # Do we need to verify branches at all?
+        self._verify_node = verify_node
+        # branches for which nodes are verified
+        self._verifiedbranches = set()
+        self._hasnode = None
+        if self._verify_node:
+            self._hasnode = repo.changelog.hasnode
+
+    def validfor(self, repo):
+        """check that cache contents are valid for (a subset of) this repo
+
+        - False when the order of changesets changed or if we detect a strip.
+        - True when cache is up-to-date for the current repo or its subset."""
+        try:
+            node = repo.changelog.node(self.tiprev)
+        except IndexError:
+            # changesets were stripped and now we don't even have enough to
+            # find tiprev
+            return False
+        if self.tipnode != node:
+            # tiprev doesn't correspond to tipnode: repo was stripped, or this
+            # repo has a different order of changesets
+            return False
+        tiphash = scmutil.filteredhash(repo, self.tiprev, needobsolete=True)
+        # hashes don't match if this repo view has a different set of filtered
+        # revisions (e.g. due to phase changes) or obsolete revisions (e.g.
+        # history was rewritten)
+        return self.filteredhash == tiphash
+
+    @classmethod
+    def fromfile(cls, repo):
+        f = None
+        try:
+            f = repo.cachevfs(cls._filename(repo))
+            lineiter = iter(f)
+            init_kwargs = cls._load_header(repo, lineiter)
+            bcache = cls(
+                repo,
+                verify_node=True,
+                **init_kwargs,
+            )
+            if not bcache.validfor(repo):
+                # invalidate the cache
+                raise ValueError('tip differs')
+            bcache._load_heads(repo, lineiter)
+        except (IOError, OSError):
+            return None
+
+        except Exception as inst:
+            if repo.ui.debugflag:
+                msg = b'invalid %s: %s\n'
+                msg %= (
+                    _branchcachedesc(repo),
+                    stringutil.forcebytestr(inst),
+                )
+                repo.ui.debug(msg)
+            bcache = None
+
+        finally:
+            if f:
+                f.close()
+
+        return bcache
+
+    @classmethod
+    def _load_header(cls, repo, lineiter) -> "dict[str, Any]":
+        """parse the head of a branchmap file
+
+        return parameters to pass to a newly created class instance.
+        """
+        cachekey = next(lineiter).rstrip(b'\n').split(b" ", 2)
+        last, lrev = cachekey[:2]
+        last, lrev = bin(last), int(lrev)
+        filteredhash = None
+        if len(cachekey) > 2:
+            filteredhash = bin(cachekey[2])
+        return {
+            "tipnode": last,
+            "tiprev": lrev,
+            "filteredhash": filteredhash,
+        }
+
+    def _load_heads(self, repo, lineiter):
+        """fully loads the branchcache by reading from the file using the line
+        iterator passed"""
+        for line in lineiter:
+            line = line.rstrip(b'\n')
+            if not line:
+                continue
+            node, state, label = line.split(b" ", 2)
+            if state not in b'oc':
+                raise ValueError('invalid branch state')
+            label = encoding.tolocal(label.strip())
+            node = bin(node)
+            self._entries.setdefault(label, []).append(node)
+            if state == b'c':
+                self._closednodes.add(node)
 
-        if ntiprev > self.tiprev:
-            self.tiprev = ntiprev
-            self.tipnode = cl.node(ntiprev)
+    @classmethod
+    def _filename(cls, repo):
+        """name of a branchcache file for a given repo or repoview"""
+        filename = cls._base_filename
+        if repo.filtername:
+            filename = b'%s-%s' % (filename, repo.filtername)
+        return filename
+
+    def inherit_for(self, repo):
+        """return a deep copy of the branchcache object"""
+        assert repo.filtername != self._filtername
+        other = type(self)(
+            repo=repo,
+            # We always do a shallow copy of self._entries, and the values
+            # are always replaced, so there is no need to deepcopy as long
+            # as the above remains true.
+            entries=self._entries,
+            tipnode=self.tipnode,
+            tiprev=self.tiprev,
+            filteredhash=self.filteredhash,
+            closednodes=set(self._closednodes),
+            verify_node=self._verify_node,
+            inherited=True,
+        )
+        # also copy information about the current verification state
+        other._verifiedbranches = set(self._verifiedbranches)
+        return other
+
+    def sync_disk(self, repo):
+        """synchronise the on disk file with the cache state
+
+        If new values specific to this filter level need to be written, the
+        file is updated; if the state of the branchcache is inherited from a
+        subset, any stale on-disk file is deleted.
+
+        This method does nothing if there is nothing to do.
+        """
+        if self._state == STATE_DIRTY:
+            self.write(repo)
+        elif self._state == STATE_INHERITED:
+            filename = self._filename(repo)
+            repo.cachevfs.tryunlink(filename)
+
+    def write(self, repo):
+        assert self._filtername == repo.filtername, (
+            self._filtername,
+            repo.filtername,
+        )
+        assert self._state == STATE_DIRTY, self._state
+        # This method should not be called during an open transaction
+        tr = repo.currenttransaction()
+        if not getattr(tr, 'finalized', True):
+            msg = "writing branchcache in the middle of a transaction"
+            raise error.ProgrammingError(msg)
+        try:
+            filename = self._filename(repo)
+            with repo.cachevfs(filename, b"w", atomictemp=True) as f:
+                self._write_header(f)
+                nodecount = self._write_heads(f)
+            repo.ui.log(
+                b'branchcache',
+                b'wrote %s with %d labels and %d nodes\n',
+                _branchcachedesc(repo),
+                len(self._entries),
+                nodecount,
+            )
+            self._state = STATE_CLEAN
+        except (IOError, OSError, error.Abort) as inst:
+            # Abort may be raised by read only opener, so log and continue
+            repo.ui.debug(
+                b"couldn't write branch cache: %s\n"
+                % stringutil.forcebytestr(inst)
+            )
+
+    def _write_header(self, fp) -> None:
+        """write the branch cache header to a file"""
+        cachekey = [hex(self.tipnode), b'%d' % self.tiprev]
+        if self.filteredhash is not None:
+            cachekey.append(hex(self.filteredhash))
+        fp.write(b" ".join(cachekey) + b'\n')
+
+    def _write_heads(self, fp) -> int:
+        """write list of heads to a file
+
+        Return the number of heads written."""
+        nodecount = 0
+        for label, nodes in sorted(self._entries.items()):
+            label = encoding.fromlocal(label)
+            for node in nodes:
+                nodecount += 1
+                if node in self._closednodes:
+                    state = b'c'
+                else:
+                    state = b'o'
+                fp.write(b"%s %s %s\n" % (hex(node), state, label))
+        return nodecount
+
+    def _verifybranch(self, branch):
+        """verify head nodes for the given branch."""
+        if not self._verify_node:
+            return
+        if branch not in self._entries or branch in self._verifiedbranches:
+            return
+        assert self._hasnode is not None
+        for n in self._entries[branch]:
+            if not self._hasnode(n):
+                _unknownnode(n)
+
+        self._verifiedbranches.add(branch)
+
+    def _verifyall(self):
+        """verifies nodes of all the branches"""
+        for b in self._entries.keys():
+            if b not in self._verifiedbranches:
+                self._verifybranch(b)
+
+    def __getitem__(self, key):
+        self._verifybranch(key)
+        return super().__getitem__(key)
+
+    def __contains__(self, key):
+        self._verifybranch(key)
+        return super().__contains__(key)
+
+    def iteritems(self):
+        self._verifyall()
+        return super().iteritems()
+
+    items = iteritems
+
+    def iterheads(self):
+        """returns all the heads"""
+        self._verifyall()
+        return super().iterheads()
+
+    def hasbranch(self, label):
+        """checks whether a branch of this name exists or not"""
+        self._verifybranch(label)
+        return super().hasbranch(label)
+
+    def branchheads(self, branch, closed=False):
+        self._verifybranch(branch)
+        return super().branchheads(branch, closed=closed)
+
+    def update(self, repo, revgen):
+        assert self._filtername == repo.filtername, (
+            self._filtername,
+            repo.filtername,
+        )
+        cl = repo.changelog
+        max_rev = super().update(repo, revgen)
+        # new tip revision which we found after iterating items from new
+        # branches
+        if max_rev is not None and max_rev > self.tiprev:
+            self.tiprev = max_rev
+            self.tipnode = cl.node(max_rev)
 
         if not self.validfor(repo):
             # old cache key is now invalid for the repo, but we've just updated
@@ -588,23 +732,27 @@
         self.filteredhash = scmutil.filteredhash(
             repo, self.tiprev, needobsolete=True
         )
-
-        duration = util.timer() - starttime
-        repo.ui.log(
-            b'branchcache',
-            b'updated %s in %.4f seconds\n',
-            _branchcachedesc(repo),
-            duration,
-        )
-
-        self.write(repo)
+        self._state = STATE_DIRTY
+        tr = repo.currenttransaction()
+        if getattr(tr, 'finalized', True):
+            # No transaction is in progress: write immediately.
+            #
+            # (Otherwise the cache warming set up by localrepo flushes it later.)
+            self.write(repo)
 
 
-class remotebranchcache(branchcache):
+class remotebranchcache(_BaseBranchCache):
     """Branchmap info for a remote connection, should not write locally"""
 
-    def write(self, repo):
-        pass
+    def __init__(
+        self,
+        repo: "localrepo.localrepository",
+        entries: Union[
+            Dict[bytes, List[bytes]], Iterable[Tuple[bytes, List[bytes]]]
+        ] = (),
+        closednodes: Optional[Set[bytes]] = None,
+    ) -> None:
+        super().__init__(repo=repo, entries=entries, closed_nodes=closednodes)
 
 
 # Revision branch info cache
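
Taken together, the rewrite replaces the old `_delayed` boolean with a three-state lifecycle: a cache loaded from (or just written to) its own file is `STATE_CLEAN`, one derived from a subset via `inherit_for` is `STATE_INHERITED`, and one modified by `update` is `STATE_DIRTY`. `sync_disk` then writes dirty caches and unlinks the stale on-disk file of inherited ones. A toy model of that contract (hypothetical names, not the real classes):

    STATE_CLEAN, STATE_INHERITED, STATE_DIRTY = 1, 2, 3

    class ToyBranchCache:
        def __init__(self, state=STATE_CLEAN):
            self._state = state
            self.disk_log = []

        def update(self):
            # any recomputation leaves the cache dirty until synced
            self._state = STATE_DIRTY

        def sync_disk(self):
            if self._state == STATE_DIRTY:
                self.disk_log.append('write')
                self._state = STATE_CLEAN
            elif self._state == STATE_INHERITED:
                # content came from a subset: our own file is stale
                self.disk_log.append('unlink')

    cache = ToyBranchCache(STATE_INHERITED)
    cache.sync_disk()
    assert cache.disk_log == ['unlink']
    cache.update()
    cache.sync_disk()
    assert cache.disk_log == ['unlink', 'write']
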
--- a/mercurial/changelog.py	Thu Mar 21 12:24:42 2024 +0100
+++ b/mercurial/changelog.py	Thu Mar 21 12:26:46 2024 +0100
@@ -327,6 +327,9 @@
         self._filteredrevs_hashcache = {}
         self._copiesstorage = opener.options.get(b'copies-storage')
 
+    def __contains__(self, rev):
+        return (0 <= rev < len(self)) and rev not in self._filteredrevs
+
     @property
     def filteredrevs(self):
         return self._filteredrevs
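
The new `__contains__` gives `rev in changelog` a precise meaning for a repoview: the revision number is in range and not filtered out. A minimal stand-in illustrating the same predicate:

    class ToyChangelog:
        def __init__(self, length, filteredrevs):
            self._len = length
            self._filteredrevs = set(filteredrevs)

        def __len__(self):
            return self._len

        def __contains__(self, rev):
            return (0 <= rev < len(self)) and rev not in self._filteredrevs

    cl = ToyChangelog(5, filteredrevs={3})
    assert 2 in cl
    assert 3 not in cl  # filtered
    assert 5 not in cl  # out of range
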
--- a/mercurial/localrepo.py	Thu Mar 21 12:24:42 2024 +0100
+++ b/mercurial/localrepo.py	Thu Mar 21 12:26:46 2024 +0100
@@ -2923,12 +2923,9 @@
 
         if repository.CACHE_BRANCHMAP_SERVED in caches:
             if tr is None or tr.changes[b'origrepolen'] < len(self):
-                # accessing the 'served' branchmap should refresh all the others,
                 self.ui.debug(b'updating the branch cache\n')
-                self.filtered(b'served').branchmap()
-                self.filtered(b'served.hidden').branchmap()
-                # flush all possibly delayed write.
-                self._branchcaches.write_delayed(self)
+                self._branchcaches.update_disk(self.filtered(b'served'))
+                self._branchcaches.update_disk(self.filtered(b'served.hidden'))
 
         if repository.CACHE_CHANGELOG_CACHE in caches:
             self.changelog.update_caches(transaction=tr)
@@ -2973,7 +2970,10 @@
             # they're a subset of another kind of cache that *has* been used).
             for filt in repoview.filtertable.keys():
                 filtered = self.filtered(filt)
-                filtered.branchmap().write(filtered)
+                self._branchcaches.update_disk(filtered)
+
+        # flush all possibly delayed write.
+        self._branchcaches.write_dirty(self)
 
     def invalidatecaches(self):
         if '_tagscache' in vars(self):
--- a/mercurial/repoview.py	Thu Mar 21 12:24:42 2024 +0100
+++ b/mercurial/repoview.py	Thu Mar 21 12:26:46 2024 +0100
@@ -397,6 +397,9 @@
     """
 
     def __init__(self, repo, filtername, visibilityexceptions=None):
+        if filtername is None:
+            msg = "repoview should have a non-None filtername"
+            raise error.ProgrammingError(msg)
         object.__setattr__(self, '_unfilteredrepo', repo)
         object.__setattr__(self, 'filtername', filtername)
         object.__setattr__(self, '_clcachekey', None)
--- a/mercurial/utils/repoviewutil.py	Thu Mar 21 12:24:42 2024 +0100
+++ b/mercurial/utils/repoviewutil.py	Thu Mar 21 12:26:46 2024 +0100
@@ -6,6 +6,7 @@
 # This software may be used and distributed according to the terms of the
 # GNU General Public License version 2 or any later version.
 
+from .. import error
 
 ### Nearest subset relation
 # Nearest subset of filter X is a filter Y so that:
@@ -21,3 +22,30 @@
     b'served': b'immutable',
     b'immutable': b'base',
 }
+
+
+def get_ordered_subset():
+    """return a list of subset name from dependencies to dependents"""
+    _unfinalized = set(subsettable.values())
+    ordered = []
+
+    # the subset table is expected to be small so we do the stupid N² version
+    # of the algorithm
+    while _unfinalized:
+        this_level = []
+        for candidate in _unfinalized:
+            dependency = subsettable.get(candidate)
+            if dependency not in _unfinalized:
+                this_level.append(candidate)
+
+        if not this_level:
+            msg = "cyclic dependencies in repoview subset %r"
+            msg %= subsettable
+            raise error.ProgrammingError(msg)
+
+        this_level.sort(key=lambda x: x if x is not None else '')
+
+        ordered.extend(this_level)
+        _unfinalized.difference_update(this_level)
+
+    return ordered
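
On the full subsettable in this file (only part of it appears in the diff context), this yields `[b'base', b'immutable', b'served', b'visible']`: each pass peels off the names whose dependency is already finalized. The algorithm can be re-run standalone (with `RuntimeError` standing in for `ProgrammingError`):

    subsettable = {
        None: b'visible',
        b'visible-hidden': b'visible',
        b'visible': b'served',
        b'served.hidden': b'served',
        b'served': b'immutable',
        b'immutable': b'base',
    }

    def get_ordered_subset():
        unfinalized = set(subsettable.values())
        ordered = []
        while unfinalized:
            this_level = [c for c in unfinalized
                          if subsettable.get(c) not in unfinalized]
            if not this_level:
                raise RuntimeError('cyclic dependencies in %r' % subsettable)
            this_level.sort(key=lambda x: x if x is not None else b'')
            ordered.extend(this_level)
            unfinalized.difference_update(this_level)
        return ordered

    print(get_ordered_subset())  # [b'base', b'immutable', b'served', b'visible']
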
--- a/rust/hg-core/src/revlog/index.rs	Thu Mar 21 12:24:42 2024 +0100
+++ b/rust/hg-core/src/revlog/index.rs	Thu Mar 21 12:26:46 2024 +0100
@@ -18,11 +18,12 @@
 };
 
 pub const INDEX_ENTRY_SIZE: usize = 64;
+pub const INDEX_HEADER_SIZE: usize = 4;
 pub const COMPRESSION_MODE_INLINE: u8 = 2;
 
 #[derive(Debug)]
 pub struct IndexHeader {
-    pub(super) header_bytes: [u8; 4],
+    pub(super) header_bytes: [u8; INDEX_HEADER_SIZE],
 }
 
 #[derive(Copy, Clone)]
@@ -92,14 +93,21 @@
     truncation: Option<usize>,
     /// Bytes that were added after reading the index
     added: Vec<u8>,
+    first_entry: [u8; INDEX_ENTRY_SIZE],
 }
 
 impl IndexData {
     pub fn new(bytes: Box<dyn Deref<Target = [u8]> + Send + Sync>) -> Self {
+        let mut first_entry = [0; INDEX_ENTRY_SIZE];
+        if bytes.len() >= INDEX_ENTRY_SIZE {
+            first_entry[INDEX_HEADER_SIZE..]
+                .copy_from_slice(&bytes[INDEX_HEADER_SIZE..INDEX_ENTRY_SIZE])
+        }
         Self {
             bytes,
             truncation: None,
             added: vec![],
+            first_entry,
         }
     }
 
@@ -356,7 +364,6 @@
                 let end = offset + INDEX_ENTRY_SIZE;
                 let entry = IndexEntry {
                     bytes: &bytes[offset..end],
-                    offset_override: None,
                 };
 
                 offset += INDEX_ENTRY_SIZE + entry.compressed_len() as usize;
@@ -449,11 +456,17 @@
         if rev == NULL_REVISION {
             return None;
         }
-        Some(if self.is_inline() {
-            self.get_entry_inline(rev)
+        if rev.0 == 0 {
+            Some(IndexEntry {
+                bytes: &self.bytes.first_entry[..],
+            })
         } else {
-            self.get_entry_separated(rev)
-        })
+            Some(if self.is_inline() {
+                self.get_entry_inline(rev)
+            } else {
+                self.get_entry_separated(rev)
+            })
+        }
     }
 
     /// Return the binary content of the index entry for the given revision
@@ -512,13 +525,7 @@
         let end = start + INDEX_ENTRY_SIZE;
         let bytes = &self.bytes[start..end];
 
-        // See IndexEntry for an explanation of this override.
-        let offset_override = Some(end);
-
-        IndexEntry {
-            bytes,
-            offset_override,
-        }
+        IndexEntry { bytes }
     }
 
     fn get_entry_separated(&self, rev: Revision) -> IndexEntry {
@@ -526,20 +533,12 @@
         let end = start + INDEX_ENTRY_SIZE;
         let bytes = &self.bytes[start..end];
 
-        // Override the offset of the first revision as its bytes are used
-        // for the index's metadata (saving space because it is always 0)
-        let offset_override = if rev == Revision(0) { Some(0) } else { None };
-
-        IndexEntry {
-            bytes,
-            offset_override,
-        }
+        IndexEntry { bytes }
     }
 
     fn null_entry(&self) -> IndexEntry {
         IndexEntry {
             bytes: &[0; INDEX_ENTRY_SIZE],
-            offset_override: Some(0),
         }
     }
 
@@ -755,13 +754,20 @@
         revision_data: RevisionDataParams,
     ) -> Result<(), RevlogError> {
         revision_data.validate()?;
+        let entry_v1 = revision_data.into_v1();
+        let entry_bytes = entry_v1.as_bytes();
+        if self.bytes.len() == 0 {
+            self.bytes.first_entry[INDEX_HEADER_SIZE..].copy_from_slice(
+                &entry_bytes[INDEX_HEADER_SIZE..INDEX_ENTRY_SIZE],
+            )
+        }
         if self.is_inline() {
             let new_offset = self.bytes.len();
             if let Some(offsets) = &mut *self.get_offsets_mut() {
                 offsets.push(new_offset)
             }
         }
-        self.bytes.added.extend(revision_data.into_v1().as_bytes());
+        self.bytes.added.extend(entry_bytes);
         self.clear_head_revs();
         Ok(())
     }
@@ -1654,7 +1660,6 @@
         let end = offset + INDEX_ENTRY_SIZE;
         let entry = IndexEntry {
             bytes: &bytes[offset..end],
-            offset_override: None,
         };
 
         offset += INDEX_ENTRY_SIZE + entry.compressed_len() as usize;
@@ -1678,29 +1683,14 @@
 #[derive(Debug)]
 pub struct IndexEntry<'a> {
     bytes: &'a [u8],
-    /// Allows to override the offset value of the entry.
-    ///
-    /// For interleaved index and data, the offset stored in the index
-    /// corresponds to the separated data offset.
-    /// It has to be overridden with the actual offset in the interleaved
-    /// index which is just after the index block.
-    ///
-    /// For separated index and data, the offset stored in the first index
-    /// entry is mixed with the index headers.
-    /// It has to be overridden with 0.
-    offset_override: Option<usize>,
 }
 
 impl<'a> IndexEntry<'a> {
     /// Return the offset of the data.
     pub fn offset(&self) -> usize {
-        if let Some(offset_override) = self.offset_override {
-            offset_override
-        } else {
-            let mut bytes = [0; 8];
-            bytes[2..8].copy_from_slice(&self.bytes[0..=5]);
-            BigEndian::read_u64(&bytes[..]) as usize
-        }
+        let mut bytes = [0; 8];
+        bytes[2..8].copy_from_slice(&self.bytes[0..=5]);
+        BigEndian::read_u64(&bytes[..]) as usize
     }
     pub fn raw_offset(&self) -> u64 {
         BigEndian::read_u64(&self.bytes[0..8])
@@ -1956,32 +1946,15 @@
     #[test]
     fn test_offset() {
         let bytes = IndexEntryBuilder::new().with_offset(1).build();
-        let entry = IndexEntry {
-            bytes: &bytes,
-            offset_override: None,
-        };
+        let entry = IndexEntry { bytes: &bytes };
 
         assert_eq!(entry.offset(), 1)
     }
 
     #[test]
-    fn test_with_overridden_offset() {
-        let bytes = IndexEntryBuilder::new().with_offset(1).build();
-        let entry = IndexEntry {
-            bytes: &bytes,
-            offset_override: Some(2),
-        };
-
-        assert_eq!(entry.offset(), 2)
-    }
-
-    #[test]
     fn test_compressed_len() {
         let bytes = IndexEntryBuilder::new().with_compressed_len(1).build();
-        let entry = IndexEntry {
-            bytes: &bytes,
-            offset_override: None,
-        };
+        let entry = IndexEntry { bytes: &bytes };
 
         assert_eq!(entry.compressed_len(), 1)
     }
@@ -1989,10 +1962,7 @@
     #[test]
     fn test_uncompressed_len() {
         let bytes = IndexEntryBuilder::new().with_uncompressed_len(1).build();
-        let entry = IndexEntry {
-            bytes: &bytes,
-            offset_override: None,
-        };
+        let entry = IndexEntry { bytes: &bytes };
 
         assert_eq!(entry.uncompressed_len(), 1)
     }
@@ -2002,10 +1972,7 @@
         let bytes = IndexEntryBuilder::new()
             .with_base_revision_or_base_of_delta_chain(Revision(1))
             .build();
-        let entry = IndexEntry {
-            bytes: &bytes,
-            offset_override: None,
-        };
+        let entry = IndexEntry { bytes: &bytes };
 
         assert_eq!(entry.base_revision_or_base_of_delta_chain(), 1.into())
     }
@@ -2016,10 +1983,7 @@
             .with_link_revision(Revision(123))
             .build();
 
-        let entry = IndexEntry {
-            bytes: &bytes,
-            offset_override: None,
-        };
+        let entry = IndexEntry { bytes: &bytes };
 
         assert_eq!(entry.link_revision(), 123.into());
     }
@@ -2028,10 +1992,7 @@
     fn p1_test() {
         let bytes = IndexEntryBuilder::new().with_p1(Revision(123)).build();
 
-        let entry = IndexEntry {
-            bytes: &bytes,
-            offset_override: None,
-        };
+        let entry = IndexEntry { bytes: &bytes };
 
         assert_eq!(entry.p1(), 123.into());
     }
@@ -2040,10 +2001,7 @@
     fn p2_test() {
         let bytes = IndexEntryBuilder::new().with_p2(Revision(123)).build();
 
-        let entry = IndexEntry {
-            bytes: &bytes,
-            offset_override: None,
-        };
+        let entry = IndexEntry { bytes: &bytes };
 
         assert_eq!(entry.p2(), 123.into());
     }
@@ -2054,10 +2012,7 @@
             .unwrap();
         let bytes = IndexEntryBuilder::new().with_node(node).build();
 
-        let entry = IndexEntry {
-            bytes: &bytes,
-            offset_override: None,
-        };
+        let entry = IndexEntry { bytes: &bytes };
 
         assert_eq!(*entry.hash(), node);
     }
--- a/rust/hg-core/src/revlog/mod.rs	Thu Mar 21 12:24:42 2024 +0100
+++ b/rust/hg-core/src/revlog/mod.rs	Thu Mar 21 12:26:46 2024 +0100
@@ -29,6 +29,7 @@
 use self::node::{NODE_BYTES_LENGTH, NULL_NODE};
 use self::nodemap_docket::NodeMapDocket;
 use super::index::Index;
+use super::index::INDEX_ENTRY_SIZE;
 use super::nodemap::{NodeMap, NodeMapError};
 use crate::errors::HgError;
 use crate::vfs::Vfs;
@@ -531,7 +532,12 @@
             .index
             .get_entry(rev)
             .ok_or(RevlogError::InvalidRevision)?;
-        let start = index_entry.offset();
+        let offset = index_entry.offset();
+        let start = if self.index.is_inline() {
+            offset + ((rev.0 as usize + 1) * INDEX_ENTRY_SIZE)
+        } else {
+            offset
+        };
         let end = start + index_entry.compressed_len() as usize;
         let data = if self.index.is_inline() {
             self.index.data(start, end)
@@ -859,7 +865,7 @@
 #[cfg(test)]
 mod tests {
     use super::*;
-    use crate::index::{IndexEntryBuilder, INDEX_ENTRY_SIZE};
+    use crate::index::IndexEntryBuilder;
     use itertools::Itertools;
 
     #[test]
@@ -897,15 +903,10 @@
             .is_first(true)
             .with_version(1)
             .with_inline(true)
-            .with_offset(INDEX_ENTRY_SIZE)
             .with_node(node0)
             .build();
-        let entry1_bytes = IndexEntryBuilder::new()
-            .with_offset(INDEX_ENTRY_SIZE)
-            .with_node(node1)
-            .build();
+        let entry1_bytes = IndexEntryBuilder::new().with_node(node1).build();
         let entry2_bytes = IndexEntryBuilder::new()
-            .with_offset(INDEX_ENTRY_SIZE)
             .with_p1(Revision(0))
             .with_p2(Revision(1))
             .with_node(node2)
@@ -971,13 +972,9 @@
             .is_first(true)
             .with_version(1)
             .with_inline(true)
-            .with_offset(INDEX_ENTRY_SIZE)
             .with_node(node0)
             .build();
-        let entry1_bytes = IndexEntryBuilder::new()
-            .with_offset(INDEX_ENTRY_SIZE)
-            .with_node(node1)
-            .build();
+        let entry1_bytes = IndexEntryBuilder::new().with_node(node1).build();
         let contents = vec![entry0_bytes, entry1_bytes]
             .into_iter()
             .flatten()
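
For an inline revlog, index entries and data chunks interleave in a single file but the stored offset counts only data bytes, so the file position of revision r's data is the stored offset plus (r + 1) index entries, exactly the adjustment added above. A quick arithmetic check (function name hypothetical):

    INDEX_ENTRY_SIZE = 64

    def inline_data_start(rev, stored_offset):
        # skip this revision's own entry and every earlier one
        return stored_offset + (rev + 1) * INDEX_ENTRY_SIZE

    # rev 0's data sits right after its own 64-byte entry:
    assert inline_data_start(0, 0) == 64
    # if rev 0 stored 10 data bytes, rev 1's chunk starts after two
    # entries plus those 10 bytes:
    assert inline_data_start(1, 10) == 138
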
--- a/tests/common-pattern.py	Thu Mar 21 12:24:42 2024 +0100
+++ b/tests/common-pattern.py	Thu Mar 21 12:26:46 2024 +0100
@@ -114,14 +114,6 @@
         br'(.*file:/)/?(/\$TESTTMP.*)',
         lambda m: m.group(1) + b'*' + m.group(2) + b' (glob)',
     ),
-    # `hg clone --stream` output
-    (
-        br'transferred (\S+?) KB in \S+? seconds \(.+?/sec\)(?: \(glob\))?(.*)',
-        lambda m: (
-            br'transferred %s KB in * seconds (* */sec) (glob)%s'
-            % (m.group(1), m.group(2))
-        ),
-    ),
     # `discovery debug output
     (
         br'\b(\d+) total queries in \d.\d\d\d\ds\b',
--- a/tests/test-acl.t	Thu Mar 21 12:24:42 2024 +0100
+++ b/tests/test-acl.t	Thu Mar 21 12:26:46 2024 +0100
@@ -167,7 +167,6 @@
   listing keys for "phases"
   checking for updated bookmarks
   listing keys for "bookmarks"
-  invalid branch cache (served): tip differs
   listing keys for "bookmarks"
   3 changesets found
   list of changesets:
@@ -187,7 +186,6 @@
   bundle2-input-part: total payload size * (glob)
   bundle2-input-part: "check:updated-heads" supported
   bundle2-input-part: total payload size * (glob)
-  invalid branch cache (served): tip differs
   bundle2-input-part: "changegroup" (params: 1 mandatory) supported
   adding changesets
   add changeset ef1ea85a6374
@@ -237,7 +235,6 @@
   listing keys for "phases"
   checking for updated bookmarks
   listing keys for "bookmarks"
-  invalid branch cache (served): tip differs
   listing keys for "bookmarks"
   3 changesets found
   list of changesets:
@@ -257,7 +254,6 @@
   bundle2-input-part: total payload size * (glob)
   bundle2-input-part: "check:updated-heads" supported
   bundle2-input-part: total payload size * (glob)
-  invalid branch cache (served): tip differs
   bundle2-input-part: "changegroup" (params: 1 mandatory) supported
   adding changesets
   add changeset ef1ea85a6374
@@ -317,7 +313,6 @@
   listing keys for "phases"
   checking for updated bookmarks
   listing keys for "bookmarks"
-  invalid branch cache (served): tip differs
   listing keys for "bookmarks"
   3 changesets found
   list of changesets:
@@ -337,7 +332,6 @@
   bundle2-input-part: total payload size * (glob)
   bundle2-input-part: "check:updated-heads" supported
   bundle2-input-part: total payload size * (glob)
-  invalid branch cache (served): tip differs
   bundle2-input-part: "changegroup" (params: 1 mandatory) supported
   adding changesets
   add changeset ef1ea85a6374
@@ -388,7 +382,6 @@
   listing keys for "phases"
   checking for updated bookmarks
   listing keys for "bookmarks"
-  invalid branch cache (served): tip differs
   listing keys for "bookmarks"
   3 changesets found
   list of changesets:
@@ -408,7 +401,6 @@
   bundle2-input-part: total payload size * (glob)
   bundle2-input-part: "check:updated-heads" supported
   bundle2-input-part: total payload size * (glob)
-  invalid branch cache (served): tip differs
   bundle2-input-part: "changegroup" (params: 1 mandatory) supported
   adding changesets
   add changeset ef1ea85a6374
@@ -463,7 +455,6 @@
   listing keys for "phases"
   checking for updated bookmarks
   listing keys for "bookmarks"
-  invalid branch cache (served): tip differs
   listing keys for "bookmarks"
   3 changesets found
   list of changesets:
@@ -483,7 +474,6 @@
   bundle2-input-part: total payload size * (glob)
   bundle2-input-part: "check:updated-heads" supported
   bundle2-input-part: total payload size * (glob)
-  invalid branch cache (served): tip differs
   bundle2-input-part: "changegroup" (params: 1 mandatory) supported
   adding changesets
   add changeset ef1ea85a6374
@@ -535,7 +525,6 @@
   listing keys for "phases"
   checking for updated bookmarks
   listing keys for "bookmarks"
-  invalid branch cache (served): tip differs
   listing keys for "bookmarks"
   3 changesets found
   list of changesets:
@@ -555,7 +544,6 @@
   bundle2-input-part: total payload size * (glob)
   bundle2-input-part: "check:updated-heads" supported
   bundle2-input-part: total payload size * (glob)
-  invalid branch cache (served): tip differs
   bundle2-input-part: "changegroup" (params: 1 mandatory) supported
   adding changesets
   add changeset ef1ea85a6374
@@ -612,7 +600,6 @@
   listing keys for "phases"
   checking for updated bookmarks
   listing keys for "bookmarks"
-  invalid branch cache (served): tip differs
   listing keys for "bookmarks"
   3 changesets found
   list of changesets:
@@ -632,7 +619,6 @@
   bundle2-input-part: total payload size * (glob)
   bundle2-input-part: "check:updated-heads" supported
   bundle2-input-part: total payload size * (glob)
-  invalid branch cache (served): tip differs
   bundle2-input-part: "changegroup" (params: 1 mandatory) supported
   adding changesets
   add changeset ef1ea85a6374
@@ -686,7 +672,6 @@
   listing keys for "phases"
   checking for updated bookmarks
   listing keys for "bookmarks"
-  invalid branch cache (served): tip differs
   listing keys for "bookmarks"
   3 changesets found
   list of changesets:
@@ -706,7 +691,6 @@
   bundle2-input-part: total payload size * (glob)
   bundle2-input-part: "check:updated-heads" supported
   bundle2-input-part: total payload size * (glob)
-  invalid branch cache (served): tip differs
   bundle2-input-part: "changegroup" (params: 1 mandatory) supported
   adding changesets
   add changeset ef1ea85a6374
@@ -761,7 +745,6 @@
   listing keys for "phases"
   checking for updated bookmarks
   listing keys for "bookmarks"
-  invalid branch cache (served): tip differs
   listing keys for "bookmarks"
   1 changesets found
   list of changesets:
@@ -783,7 +766,6 @@
   bundle2-input-part: total payload size * (glob)
   bundle2-input-part: "check:updated-heads" supported
   bundle2-input-part: total payload size * (glob)
-  invalid branch cache (served): tip differs
   bundle2-input-part: "changegroup" (params: 1 mandatory) supported
   adding changesets
   add changeset ef1ea85a6374
@@ -810,7 +792,6 @@
   acl: bookmark access granted: "ef1ea85a6374b77d6da9dcda9541f498f2d17df7" on bookmark "moving-bookmark"
   bundle2-input-bundle: 7 parts total
   updating the branch cache
-  invalid branch cache (served.hidden): tip differs
   added 1 changesets with 1 changes to 1 files
   bundle2-output-bundle: "HG20", 1 parts total
   bundle2-output-part: "reply:changegroup" (advisory) (params: 0 advisory) empty payload
@@ -850,7 +831,6 @@
   listing keys for "phases"
   checking for updated bookmarks
   listing keys for "bookmarks"
-  invalid branch cache (served): tip differs
   listing keys for "bookmarks"
   1 changesets found
   list of changesets:
@@ -872,7 +852,6 @@
   bundle2-input-part: total payload size * (glob)
   bundle2-input-part: "check:updated-heads" supported
   bundle2-input-part: total payload size * (glob)
-  invalid branch cache (served): tip differs
   bundle2-input-part: "changegroup" (params: 1 mandatory) supported
   adding changesets
   add changeset ef1ea85a6374
@@ -939,7 +918,6 @@
   listing keys for "phases"
   checking for updated bookmarks
   listing keys for "bookmarks"
-  invalid branch cache (served): tip differs
   listing keys for "bookmarks"
   3 changesets found
   list of changesets:
@@ -959,7 +937,6 @@
   bundle2-input-part: total payload size * (glob)
   bundle2-input-part: "check:updated-heads" supported
   bundle2-input-part: total payload size * (glob)
-  invalid branch cache (served): tip differs
   bundle2-input-part: "changegroup" (params: 1 mandatory) supported
   adding changesets
   add changeset ef1ea85a6374
@@ -1025,7 +1002,6 @@
   listing keys for "phases"
   checking for updated bookmarks
   listing keys for "bookmarks"
-  invalid branch cache (served): tip differs
   listing keys for "bookmarks"
   3 changesets found
   list of changesets:
@@ -1045,7 +1021,6 @@
   bundle2-input-part: total payload size * (glob)
   bundle2-input-part: "check:updated-heads" supported
   bundle2-input-part: total payload size * (glob)
-  invalid branch cache (served): tip differs
   bundle2-input-part: "changegroup" (params: 1 mandatory) supported
   adding changesets
   add changeset ef1ea85a6374
@@ -1109,7 +1084,6 @@
   listing keys for "phases"
   checking for updated bookmarks
   listing keys for "bookmarks"
-  invalid branch cache (served): tip differs
   listing keys for "bookmarks"
   3 changesets found
   list of changesets:
@@ -1129,7 +1103,6 @@
   bundle2-input-part: total payload size * (glob)
   bundle2-input-part: "check:updated-heads" supported
   bundle2-input-part: total payload size * (glob)
-  invalid branch cache (served): tip differs
   bundle2-input-part: "changegroup" (params: 1 mandatory) supported
   adding changesets
   add changeset ef1ea85a6374
@@ -1187,7 +1160,6 @@
   listing keys for "phases"
   checking for updated bookmarks
   listing keys for "bookmarks"
-  invalid branch cache (served): tip differs
   listing keys for "bookmarks"
   3 changesets found
   list of changesets:
@@ -1207,7 +1179,6 @@
   bundle2-input-part: total payload size * (glob)
   bundle2-input-part: "check:updated-heads" supported
   bundle2-input-part: total payload size * (glob)
-  invalid branch cache (served): tip differs
   bundle2-input-part: "changegroup" (params: 1 mandatory) supported
   adding changesets
   add changeset ef1ea85a6374
@@ -1276,7 +1247,6 @@
   listing keys for "phases"
   checking for updated bookmarks
   listing keys for "bookmarks"
-  invalid branch cache (served): tip differs
   listing keys for "bookmarks"
   3 changesets found
   list of changesets:
@@ -1296,7 +1266,6 @@
   bundle2-input-part: total payload size * (glob)
   bundle2-input-part: "check:updated-heads" supported
   bundle2-input-part: total payload size * (glob)
-  invalid branch cache (served): tip differs
   bundle2-input-part: "changegroup" (params: 1 mandatory) supported
   adding changesets
   add changeset ef1ea85a6374
@@ -1366,7 +1335,6 @@
   listing keys for "phases"
   checking for updated bookmarks
   listing keys for "bookmarks"
-  invalid branch cache (served): tip differs
   listing keys for "bookmarks"
   3 changesets found
   list of changesets:
@@ -1386,7 +1354,6 @@
   bundle2-input-part: total payload size * (glob)
   bundle2-input-part: "check:updated-heads" supported
   bundle2-input-part: total payload size * (glob)
-  invalid branch cache (served): tip differs
   bundle2-input-part: "changegroup" (params: 1 mandatory) supported
   adding changesets
   add changeset ef1ea85a6374
@@ -1453,7 +1420,6 @@
   listing keys for "phases"
   checking for updated bookmarks
   listing keys for "bookmarks"
-  invalid branch cache (served): tip differs
   listing keys for "bookmarks"
   3 changesets found
   list of changesets:
@@ -1473,7 +1439,6 @@
   bundle2-input-part: total payload size * (glob)
   bundle2-input-part: "check:updated-heads" supported
   bundle2-input-part: total payload size * (glob)
-  invalid branch cache (served): tip differs
   bundle2-input-part: "changegroup" (params: 1 mandatory) supported
   adding changesets
   add changeset ef1ea85a6374
@@ -1536,7 +1501,6 @@
   listing keys for "phases"
   checking for updated bookmarks
   listing keys for "bookmarks"
-  invalid branch cache (served): tip differs
   listing keys for "bookmarks"
   3 changesets found
   list of changesets:
@@ -1556,7 +1520,6 @@
   bundle2-input-part: total payload size * (glob)
   bundle2-input-part: "check:updated-heads" supported
   bundle2-input-part: total payload size * (glob)
-  invalid branch cache (served): tip differs
   bundle2-input-part: "changegroup" (params: 1 mandatory) supported
   adding changesets
   add changeset ef1ea85a6374
@@ -1623,7 +1586,6 @@
   listing keys for "phases"
   checking for updated bookmarks
   listing keys for "bookmarks"
-  invalid branch cache (served): tip differs
   listing keys for "bookmarks"
   3 changesets found
   list of changesets:
@@ -1643,7 +1605,6 @@
   bundle2-input-part: total payload size * (glob)
   bundle2-input-part: "check:updated-heads" supported
   bundle2-input-part: total payload size * (glob)
-  invalid branch cache (served): tip differs
   bundle2-input-part: "changegroup" (params: 1 mandatory) supported
   adding changesets
   add changeset ef1ea85a6374
@@ -1797,7 +1758,6 @@
   bundle2-input-part: total payload size * (glob)
   bundle2-input-bundle: 5 parts total
   updating the branch cache
-  invalid branch cache (served.hidden): tip differs
   added 4 changesets with 4 changes to 4 files (+1 heads)
   bundle2-output-bundle: "HG20", 1 parts total
   bundle2-output-part: "reply:changegroup" (advisory) (params: 0 advisory) empty payload
@@ -2104,7 +2064,6 @@
   bundle2-input-part: total payload size * (glob)
   bundle2-input-bundle: 5 parts total
   updating the branch cache
-  invalid branch cache (served.hidden): tip differs
   added 4 changesets with 4 changes to 4 files (+1 heads)
   bundle2-output-bundle: "HG20", 1 parts total
   bundle2-output-part: "reply:changegroup" (advisory) (params: 0 advisory) empty payload
@@ -2196,7 +2155,6 @@
   bundle2-input-part: total payload size * (glob)
   bundle2-input-bundle: 5 parts total
   updating the branch cache
-  invalid branch cache (served.hidden): tip differs
   added 4 changesets with 4 changes to 4 files (+1 heads)
   bundle2-output-bundle: "HG20", 1 parts total
   bundle2-output-part: "reply:changegroup" (advisory) (params: 0 advisory) empty payload
@@ -2360,7 +2318,6 @@
   bundle2-input-part: total payload size * (glob)
   bundle2-input-bundle: 5 parts total
   updating the branch cache
-  invalid branch cache (served.hidden): tip differs
   added 4 changesets with 4 changes to 4 files (+1 heads)
   bundle2-output-bundle: "HG20", 1 parts total
   bundle2-output-part: "reply:changegroup" (advisory) (params: 0 advisory) empty payload
--- a/tests/test-blackbox.t	Thu Mar 21 12:24:42 2024 +0100
+++ b/tests/test-blackbox.t	Thu Mar 21 12:26:46 2024 +0100
@@ -127,13 +127,11 @@
   added 1 changesets with 1 changes to 1 files
   new changesets d02f48003e62
   (run 'hg update' to get a working copy)
-  $ hg blackbox -l 6
+  $ hg blackbox -l 4
   1970-01-01 00:00:00.000 bob @6563da9dcf87b1949716e38ff3e3dfaa3198eb06 (5000)> wrote branch cache (served) with 1 labels and 2 nodes
-  1970-01-01 00:00:00.000 bob @6563da9dcf87b1949716e38ff3e3dfaa3198eb06 (5000)> updated branch cache (served.hidden) in * seconds (glob)
-  1970-01-01 00:00:00.000 bob @6563da9dcf87b1949716e38ff3e3dfaa3198eb06 (5000)> wrote branch cache (served.hidden) with 1 labels and 2 nodes
   1970-01-01 00:00:00.000 bob @6563da9dcf87b1949716e38ff3e3dfaa3198eb06 (5000)> 1 incoming changes - new heads: d02f48003e62
   1970-01-01 00:00:00.000 bob @6563da9dcf87b1949716e38ff3e3dfaa3198eb06 (5000)> pull exited 0 after * seconds (glob)
-  1970-01-01 00:00:00.000 bob @6563da9dcf87b1949716e38ff3e3dfaa3198eb06 (5000)> blackbox -l 6
+  1970-01-01 00:00:00.000 bob @6563da9dcf87b1949716e38ff3e3dfaa3198eb06 (5000)> blackbox -l 4
 
 we must not cause a failure if we cannot write to the log
 
@@ -190,13 +188,11 @@
   $ hg strip tip
   0 files updated, 0 files merged, 1 files removed, 0 files unresolved
   saved backup bundle to $TESTTMP/blackboxtest2/.hg/strip-backup/*-backup.hg (glob)
-  $ hg blackbox -l 6
+  $ hg blackbox -l 4
   1970-01-01 00:00:00.000 bob @73f6ee326b27d820b0472f1a825e3a50f3dc489b (5000)> strip tip
   1970-01-01 00:00:00.000 bob @6563da9dcf87b1949716e38ff3e3dfaa3198eb06 (5000)> saved backup bundle to $TESTTMP/blackboxtest2/.hg/strip-backup/73f6ee326b27-7612e004-backup.hg
-  1970-01-01 00:00:00.000 bob @6563da9dcf87b1949716e38ff3e3dfaa3198eb06 (5000)> updated branch cache (immutable) in * seconds (glob)
-  1970-01-01 00:00:00.000 bob @6563da9dcf87b1949716e38ff3e3dfaa3198eb06 (5000)> wrote branch cache (immutable) with 1 labels and 2 nodes
   1970-01-01 00:00:00.000 bob @6563da9dcf87b1949716e38ff3e3dfaa3198eb06 (5000)> strip tip exited 0 after * seconds (glob)
-  1970-01-01 00:00:00.000 bob @6563da9dcf87b1949716e38ff3e3dfaa3198eb06 (5000)> blackbox -l 6
+  1970-01-01 00:00:00.000 bob @6563da9dcf87b1949716e38ff3e3dfaa3198eb06 (5000)> blackbox -l 4
 
 extension and python hooks - use the eol extension for a pythonhook
 
--- a/tests/test-branches.t	Thu Mar 21 12:24:42 2024 +0100
+++ b/tests/test-branches.t	Thu Mar 21 12:26:46 2024 +0100
@@ -1316,7 +1316,7 @@
   new changesets 2ab8003a1750:99ba08759bc7
   updating to branch A
   0 files updated, 0 files merged, 0 files removed, 0 files unresolved
-  $ cat branchmap-update-01/.hg/cache/branch2-served
+  $ cat branchmap-update-01/.hg/cache/branch2-base
   99ba08759bc7f6fdbe5304e83d0387f35c082479 1
   99ba08759bc7f6fdbe5304e83d0387f35c082479 o A
   $ hg -R branchmap-update-01 unbundle bundle.hg
@@ -1350,7 +1350,7 @@
   updating to branch A
   0 files updated, 0 files merged, 0 files removed, 0 files unresolved
 
-  $ cat branchmap-update-02/.hg/cache/branch2-served
+  $ cat branchmap-update-02/.hg/cache/branch2-base
   99ba08759bc7f6fdbe5304e83d0387f35c082479 1
   99ba08759bc7f6fdbe5304e83d0387f35c082479 o A
   $ hg -R branchmap-update-02 unbundle bundle.hg --config "hooks.pretxnclose=python:$TESTTMP/simplehook.py:hook"
@@ -1361,6 +1361,6 @@
   rollback completed
   abort: pretxnclose hook failed
   [40]
-  $ cat branchmap-update-02/.hg/cache/branch2-served
+  $ cat branchmap-update-02/.hg/cache/branch2-base
   99ba08759bc7f6fdbe5304e83d0387f35c082479 1
   99ba08759bc7f6fdbe5304e83d0387f35c082479 o A
--- a/tests/test-clone-stream.t	Thu Mar 21 12:24:42 2024 +0100
+++ b/tests/test-clone-stream.t	Thu Mar 21 12:26:46 2024 +0100
@@ -109,150 +109,18 @@
 Check uncompressed
 ==================
 
-Cannot stream clone when server.uncompressed is set
+Cannot stream clone when server.uncompressed is set to false
+------------------------------------------------------------
+
+When `server.uncompressed` is disabled, the client should fall back to a
+bundle-based clone with a warning.
+
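+For reference, the server-side switch involved is `server.uncompressed`, as an
+hgrc snippet (a sketch only; the test configures the server elsewhere):
+
+[server]
+uncompressed = false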
 
   $ get-with-headers.py $LOCALIP:$HGPORT '?cmd=stream_out'
   200 Script output follows
   
   1
 
-#if stream-legacy
-  $ hg debugcapabilities http://localhost:$HGPORT
-  Main capabilities:
-    batch
-    branchmap
-    $USUAL_BUNDLE2_CAPS_SERVER$
-    changegroupsubset
-    compression=$BUNDLE2_COMPRESSIONS$
-    getbundle
-    httpheader=1024
-    httpmediatype=0.1rx,0.1tx,0.2tx
-    known
-    lookup
-    pushkey
-    unbundle=HG10GZ,HG10BZ,HG10UN
-    unbundlehash
-  Bundle2 capabilities:
-    HG20
-    bookmarks
-    changegroup
-      01
-      02
-      03
-    checkheads
-      related
-    digests
-      md5
-      sha1
-      sha512
-    error
-      abort
-      unsupportedcontent
-      pushraced
-      pushkey
-    hgtagsfnodes
-    listkeys
-    phases
-      heads
-    pushkey
-    remote-changegroup
-      http
-      https
-
-  $ hg clone --stream -U http://localhost:$HGPORT server-disabled
-  warning: stream clone requested but server has them disabled
-  requesting all changes
-  adding changesets
-  adding manifests
-  adding file changes
-  added 3 changesets with 1088 changes to 1088 files
-  new changesets 96ee1d7354c4:5223b5e3265f
-
-  $ get-with-headers.py $LOCALIP:$HGPORT '?cmd=getbundle' content-type --bodyfile body --hgproto 0.2 --requestheader "x-hgarg-1=bundlecaps=HG20%2Cbundle2%3DHG20%250Abookmarks%250Achangegroup%253D01%252C02%252C03%250Adigests%253Dmd5%252Csha1%252Csha512%250Aerror%253Dabort%252Cunsupportedcontent%252Cpushraced%252Cpushkey%250Ahgtagsfnodes%250Alistkeys%250Aphases%253Dheads%250Apushkey%250Aremote-changegroup%253Dhttp%252Chttps&cg=0&common=0000000000000000000000000000000000000000&heads=c17445101a72edac06facd130d14808dfbd5c7c2&stream=1"
-  200 Script output follows
-  content-type: application/mercurial-0.2
-  
-
-  $ f --size body --hexdump --bytes 100
-  body: size=140
-  0000: 04 6e 6f 6e 65 48 47 32 30 00 00 00 00 00 00 00 |.noneHG20.......|
-  0010: 73 0b 45 52 52 4f 52 3a 41 42 4f 52 54 00 00 00 |s.ERROR:ABORT...|
-  0020: 00 01 01 07 3c 04 16 6d 65 73 73 61 67 65 73 74 |....<..messagest|
-  0030: 72 65 61 6d 20 64 61 74 61 20 72 65 71 75 65 73 |ream data reques|
-  0040: 74 65 64 20 62 75 74 20 73 65 72 76 65 72 20 64 |ted but server d|
-  0050: 6f 65 73 20 6e 6f 74 20 61 6c 6c 6f 77 20 74 68 |oes not allow th|
-  0060: 69 73 20 66                                     |is f|
-
-#endif
-#if stream-bundle2-v2
-  $ hg debugcapabilities http://localhost:$HGPORT
-  Main capabilities:
-    batch
-    branchmap
-    $USUAL_BUNDLE2_CAPS_SERVER$
-    changegroupsubset
-    compression=$BUNDLE2_COMPRESSIONS$
-    getbundle
-    httpheader=1024
-    httpmediatype=0.1rx,0.1tx,0.2tx
-    known
-    lookup
-    pushkey
-    unbundle=HG10GZ,HG10BZ,HG10UN
-    unbundlehash
-  Bundle2 capabilities:
-    HG20
-    bookmarks
-    changegroup
-      01
-      02
-      03
-    checkheads
-      related
-    digests
-      md5
-      sha1
-      sha512
-    error
-      abort
-      unsupportedcontent
-      pushraced
-      pushkey
-    hgtagsfnodes
-    listkeys
-    phases
-      heads
-    pushkey
-    remote-changegroup
-      http
-      https
-
-  $ hg clone --stream -U http://localhost:$HGPORT server-disabled
-  warning: stream clone requested but server has them disabled
-  requesting all changes
-  adding changesets
-  adding manifests
-  adding file changes
-  added 3 changesets with 1088 changes to 1088 files
-  new changesets 96ee1d7354c4:5223b5e3265f
-
-  $ get-with-headers.py $LOCALIP:$HGPORT '?cmd=getbundle' content-type --bodyfile body --hgproto 0.2 --requestheader "x-hgarg-1=bundlecaps=HG20%2Cbundle2%3DHG20%250Abookmarks%250Achangegroup%253D01%252C02%252C03%250Adigests%253Dmd5%252Csha1%252Csha512%250Aerror%253Dabort%252Cunsupportedcontent%252Cpushraced%252Cpushkey%250Ahgtagsfnodes%250Alistkeys%250Aphases%253Dheads%250Apushkey%250Aremote-changegroup%253Dhttp%252Chttps&cg=0&common=0000000000000000000000000000000000000000&heads=c17445101a72edac06facd130d14808dfbd5c7c2&stream=1"
-  200 Script output follows
-  content-type: application/mercurial-0.2
-  
-
-  $ f --size body --hexdump --bytes 100
-  body: size=140
-  0000: 04 6e 6f 6e 65 48 47 32 30 00 00 00 00 00 00 00 |.noneHG20.......|
-  0010: 73 0b 45 52 52 4f 52 3a 41 42 4f 52 54 00 00 00 |s.ERROR:ABORT...|
-  0020: 00 01 01 07 3c 04 16 6d 65 73 73 61 67 65 73 74 |....<..messagest|
-  0030: 72 65 61 6d 20 64 61 74 61 20 72 65 71 75 65 73 |ream data reques|
-  0040: 74 65 64 20 62 75 74 20 73 65 72 76 65 72 20 64 |ted but server d|
-  0050: 6f 65 73 20 6e 6f 74 20 61 6c 6c 6f 77 20 74 68 |oes not allow th|
-  0060: 69 73 20 66                                     |is f|
-
-#endif
-#if stream-bundle2-v3
   $ hg debugcapabilities http://localhost:$HGPORT
   Main capabilities:
     batch
@@ -304,23 +172,6 @@
   added 3 changesets with 1088 changes to 1088 files
   new changesets 96ee1d7354c4:5223b5e3265f
 
-  $ get-with-headers.py $LOCALIP:$HGPORT '?cmd=getbundle' content-type --bodyfile body --hgproto 0.2 --requestheader "x-hgarg-1=bundlecaps=HG20%2Cbundle2%3DHG20%250Abookmarks%250Achangegroup%253D01%252C02%252C03%250Adigests%253Dmd5%252Csha1%252Csha512%250Aerror%253Dabort%252Cunsupportedcontent%252Cpushraced%252Cpushkey%250Ahgtagsfnodes%250Alistkeys%250Aphases%253Dheads%250Apushkey%250Aremote-changegroup%253Dhttp%252Chttps&cg=0&common=0000000000000000000000000000000000000000&heads=c17445101a72edac06facd130d14808dfbd5c7c2&stream=1"
-  200 Script output follows
-  content-type: application/mercurial-0.2
-  
-
-  $ f --size body --hexdump --bytes 100
-  body: size=140
-  0000: 04 6e 6f 6e 65 48 47 32 30 00 00 00 00 00 00 00 |.noneHG20.......|
-  0010: 73 0b 45 52 52 4f 52 3a 41 42 4f 52 54 00 00 00 |s.ERROR:ABORT...|
-  0020: 00 01 01 07 3c 04 16 6d 65 73 73 61 67 65 73 74 |....<..messagest|
-  0030: 72 65 61 6d 20 64 61 74 61 20 72 65 71 75 65 73 |ream data reques|
-  0040: 74 65 64 20 62 75 74 20 73 65 72 76 65 72 20 64 |ted but server d|
-  0050: 6f 65 73 20 6e 6f 74 20 61 6c 6c 6f 77 20 74 68 |oes not allow th|
-  0060: 69 73 20 66                                     |is f|
-
-#endif
-
   $ killdaemons.py
   $ cd server
   $ hg serve -p $HGPORT -d --pid-file=hg.pid --error errors.txt
@@ -328,6 +179,13 @@
   $ cd ..
 
 Basic clone
+-----------
+
+Check that --stream triggers a stream clone and results in a valid repository.
+
+We check the associated output for exact byte and file counts, as changes in
+these values imply changes in the transferred data and can reveal unintended
+changes in the process.
 
 #if stream-legacy
   $ hg clone --stream -U http://localhost:$HGPORT clone1
@@ -338,7 +196,6 @@
   transferred 98.8 KB in * seconds (* */sec) (glob) (zstd !)
   searching for changes
   no changes found
-  $ cat server/errors.txt
 #endif
 #if stream-bundle2-v2
   $ hg clone --stream -U http://localhost:$HGPORT clone1
@@ -349,20 +206,8 @@
   transferred 98.9 KB in * seconds (* */sec) (glob) (zstd no-rust !)
   1096 files to transfer, 99.0 KB of data (zstd rust !)
   transferred 99.0 KB in * seconds (* */sec) (glob) (zstd rust !)
+#endif
 
-  $ ls -1 clone1/.hg/cache
-  branch2-base
-  branch2-immutable
-  branch2-served
-  branch2-served.hidden
-  branch2-visible
-  branch2-visible-hidden
-  rbc-names-v1
-  rbc-revs-v1
-  tags2
-  tags2-served
-  $ cat server/errors.txt
-#endif
 #if stream-bundle2-v3
   $ hg clone --stream -U http://localhost:$HGPORT clone1
   streaming all changes
@@ -370,244 +215,68 @@
   transferred 102 KB in * seconds (* */sec) (glob) (no-zstd !)
   transferred 98.9 KB in * seconds (* */sec) (glob) (zstd no-rust !)
   transferred 99.0 KB in * seconds (* */sec) (glob) (zstd rust !)
+#endif
 
+#if no-stream-legacy
   $ ls -1 clone1/.hg/cache
   branch2-base
-  branch2-immutable
   branch2-served
-  branch2-served.hidden
-  branch2-visible
-  branch2-visible-hidden
   rbc-names-v1
   rbc-revs-v1
   tags2
   tags2-served
-  $ cat server/errors.txt
 #endif
 
+  $ hg -R clone1 verify --quiet
+  $ cat server/errors.txt
+
 getbundle requests with stream=1 are uncompressed
+-------------------------------------------------
+
+We check that `getbundle` will return a stream bundle when requested.
+
+XXX manually building the --requestheader is fragile and will drift away from actual usage
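+
+(The x-hgarg-1 header carries the URL-encoded getbundle arguments; the HTTP
+wire protocol splits long argument strings across numbered x-hgarg-* headers,
+which is why hand-built values drift easily.)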
 
   $ get-with-headers.py $LOCALIP:$HGPORT '?cmd=getbundle' content-type --bodyfile body --hgproto '0.1 0.2 comp=zlib,none' --requestheader "x-hgarg-1=bundlecaps=HG20%2Cbundle2%3DHG20%250Abookmarks%250Achangegroup%253D01%252C02%252C03%250Adigests%253Dmd5%252Csha1%252Csha512%250Aerror%253Dabort%252Cunsupportedcontent%252Cpushraced%252Cpushkey%250Ahgtagsfnodes%250Alistkeys%250Aphases%253Dheads%250Apushkey%250Aremote-changegroup%253Dhttp%252Chttps%250Astream%253Dv2&cg=0&common=0000000000000000000000000000000000000000&heads=c17445101a72edac06facd130d14808dfbd5c7c2&stream=1"
   200 Script output follows
   content-type: application/mercurial-0.2
   
 
-#if no-zstd no-rust
-  $ f --size --hex --bytes 256 body
-  body: size=119140
-  0000: 04 6e 6f 6e 65 48 47 32 30 00 00 00 00 00 00 00 |.noneHG20.......|
-  0010: 62 07 53 54 52 45 41 4d 32 00 00 00 00 03 00 09 |b.STREAM2.......|
-  0020: 06 09 04 0c 26 62 79 74 65 63 6f 75 6e 74 31 30 |....&bytecount10|
-  0030: 34 31 31 35 66 69 6c 65 63 6f 75 6e 74 31 30 39 |4115filecount109|
-  0040: 34 72 65 71 75 69 72 65 6d 65 6e 74 73 67 65 6e |4requirementsgen|
-  0050: 65 72 61 6c 64 65 6c 74 61 25 32 43 72 65 76 6c |eraldelta%2Crevl|
-  0060: 6f 67 76 31 25 32 43 73 70 61 72 73 65 72 65 76 |ogv1%2Csparserev|
-  0070: 6c 6f 67 00 00 80 00 73 08 42 64 61 74 61 2f 30 |log....s.Bdata/0|
-  0080: 2e 69 00 03 00 01 00 00 00 00 00 00 00 02 00 00 |.i..............|
-  0090: 00 01 00 00 00 00 00 00 00 01 ff ff ff ff ff ff |................|
-  00a0: ff ff 80 29 63 a0 49 d3 23 87 bf ce fe 56 67 92 |...)c.I.#....Vg.|
-  00b0: 67 2c 69 d1 ec 39 00 00 00 00 00 00 00 00 00 00 |g,i..9..........|
-  00c0: 00 00 75 30 73 26 45 64 61 74 61 2f 30 30 63 68 |..u0s&Edata/00ch|
-  00d0: 61 6e 67 65 6c 6f 67 2d 61 62 33 34 39 31 38 30 |angelog-ab349180|
-  00e0: 61 30 34 30 35 30 31 30 2e 6e 64 2e 69 00 03 00 |a0405010.nd.i...|
-  00f0: 01 00 00 00 00 00 00 00 05 00 00 00 04 00 00 00 |................|
-#endif
-#if zstd no-rust
-  $ f --size --hex --bytes 256 body
-  body: size=116327 (no-bigendian !)
-  body: size=116322 (bigendian !)
+  $ f --size --hex --bytes 48 body
+  body: size=* (glob)
   0000: 04 6e 6f 6e 65 48 47 32 30 00 00 00 00 00 00 00 |.noneHG20.......|
-  0010: 7c 07 53 54 52 45 41 4d 32 00 00 00 00 03 00 09 ||.STREAM2.......|
-  0020: 06 09 04 0c 40 62 79 74 65 63 6f 75 6e 74 31 30 |....@bytecount10|
-  0030: 31 32 37 36 66 69 6c 65 63 6f 75 6e 74 31 30 39 |1276filecount109| (no-bigendian !)
-  0030: 31 32 37 31 66 69 6c 65 63 6f 75 6e 74 31 30 39 |1271filecount109| (bigendian !)
-  0040: 34 72 65 71 75 69 72 65 6d 65 6e 74 73 67 65 6e |4requirementsgen|
-  0050: 65 72 61 6c 64 65 6c 74 61 25 32 43 72 65 76 6c |eraldelta%2Crevl|
-  0060: 6f 67 2d 63 6f 6d 70 72 65 73 73 69 6f 6e 2d 7a |og-compression-z|
-  0070: 73 74 64 25 32 43 72 65 76 6c 6f 67 76 31 25 32 |std%2Crevlogv1%2|
-  0080: 43 73 70 61 72 73 65 72 65 76 6c 6f 67 00 00 80 |Csparserevlog...|
-  0090: 00 73 08 42 64 61 74 61 2f 30 2e 69 00 03 00 01 |.s.Bdata/0.i....|
-  00a0: 00 00 00 00 00 00 00 02 00 00 00 01 00 00 00 00 |................|
-  00b0: 00 00 00 01 ff ff ff ff ff ff ff ff 80 29 63 a0 |.............)c.|
-  00c0: 49 d3 23 87 bf ce fe 56 67 92 67 2c 69 d1 ec 39 |I.#....Vg.g,i..9|
-  00d0: 00 00 00 00 00 00 00 00 00 00 00 00 75 30 73 26 |............u0s&|
-  00e0: 45 64 61 74 61 2f 30 30 63 68 61 6e 67 65 6c 6f |Edata/00changelo|
-  00f0: 67 2d 61 62 33 34 39 31 38 30 61 30 34 30 35 30 |g-ab349180a04050|
-#endif
-#if zstd rust no-dirstate-v2
-  $ f --size --hex --bytes 256 body
-  body: size=116310 (no-rust !)
-  body: size=116495 (rust no-stream-legacy no-bigendian !)
-  body: size=116490 (rust no-stream-legacy bigendian !)
-  body: size=116327 (rust stream-legacy no-bigendian !)
-  body: size=116322 (rust stream-legacy bigendian !)
-  0000: 04 6e 6f 6e 65 48 47 32 30 00 00 00 00 00 00 00 |.noneHG20.......|
-  0010: 7c 07 53 54 52 45 41 4d 32 00 00 00 00 03 00 09 ||.STREAM2.......|
-  0020: 06 09 04 0c 40 62 79 74 65 63 6f 75 6e 74 31 30 |....@bytecount10|
-  0030: 31 32 37 36 66 69 6c 65 63 6f 75 6e 74 31 30 39 |1276filecount109| (no-rust !)
-  0040: 33 72 65 71 75 69 72 65 6d 65 6e 74 73 67 65 6e |3requirementsgen| (no-rust !)
-  0030: 31 34 30 32 66 69 6c 65 63 6f 75 6e 74 31 30 39 |1402filecount109| (rust no-stream-legacy no-bigendian !)
-  0030: 31 33 39 37 66 69 6c 65 63 6f 75 6e 74 31 30 39 |1397filecount109| (rust no-stream-legacy bigendian !)
-  0040: 36 72 65 71 75 69 72 65 6d 65 6e 74 73 67 65 6e |6requirementsgen| (rust no-stream-legacy !)
-  0030: 31 32 37 36 66 69 6c 65 63 6f 75 6e 74 31 30 39 |1276filecount109| (rust stream-legacy no-bigendian !)
-  0030: 31 32 37 31 66 69 6c 65 63 6f 75 6e 74 31 30 39 |1271filecount109| (rust stream-legacy bigendian !)
-  0040: 34 72 65 71 75 69 72 65 6d 65 6e 74 73 67 65 6e |4requirementsgen| (rust stream-legacy !)
-  0050: 65 72 61 6c 64 65 6c 74 61 25 32 43 72 65 76 6c |eraldelta%2Crevl|
-  0060: 6f 67 2d 63 6f 6d 70 72 65 73 73 69 6f 6e 2d 7a |og-compression-z|
-  0070: 73 74 64 25 32 43 72 65 76 6c 6f 67 76 31 25 32 |std%2Crevlogv1%2|
-  0080: 43 73 70 61 72 73 65 72 65 76 6c 6f 67 00 00 80 |Csparserevlog...|
-  0090: 00 73 08 42 64 61 74 61 2f 30 2e 69 00 03 00 01 |.s.Bdata/0.i....|
-  00a0: 00 00 00 00 00 00 00 02 00 00 00 01 00 00 00 00 |................|
-  00b0: 00 00 00 01 ff ff ff ff ff ff ff ff 80 29 63 a0 |.............)c.|
-  00c0: 49 d3 23 87 bf ce fe 56 67 92 67 2c 69 d1 ec 39 |I.#....Vg.g,i..9|
-  00d0: 00 00 00 00 00 00 00 00 00 00 00 00 75 30 73 26 |............u0s&|
-  00e0: 45 64 61 74 61 2f 30 30 63 68 61 6e 67 65 6c 6f |Edata/00changelo|
-  00f0: 67 2d 61 62 33 34 39 31 38 30 61 30 34 30 35 30 |g-ab349180a04050|
-#endif
-#if zstd dirstate-v2
-  $ f --size --hex --bytes 256 body
-  body: size=109549
-  0000: 04 6e 6f 6e 65 48 47 32 30 00 00 00 00 00 00 00 |.noneHG20.......|
-  0010: c0 07 53 54 52 45 41 4d 32 00 00 00 00 03 00 09 |..STREAM2.......|
-  0020: 05 09 04 0c 85 62 79 74 65 63 6f 75 6e 74 39 35 |.....bytecount95|
-  0030: 38 39 37 66 69 6c 65 63 6f 75 6e 74 31 30 33 30 |897filecount1030|
-  0040: 72 65 71 75 69 72 65 6d 65 6e 74 73 64 6f 74 65 |requirementsdote|
-  0050: 6e 63 6f 64 65 25 32 43 65 78 70 2d 64 69 72 73 |ncode%2Cexp-dirs|
-  0060: 74 61 74 65 2d 76 32 25 32 43 66 6e 63 61 63 68 |tate-v2%2Cfncach|
-  0070: 65 25 32 43 67 65 6e 65 72 61 6c 64 65 6c 74 61 |e%2Cgeneraldelta|
-  0080: 25 32 43 70 65 72 73 69 73 74 65 6e 74 2d 6e 6f |%2Cpersistent-no|
-  0090: 64 65 6d 61 70 25 32 43 72 65 76 6c 6f 67 2d 63 |demap%2Crevlog-c|
-  00a0: 6f 6d 70 72 65 73 73 69 6f 6e 2d 7a 73 74 64 25 |ompression-zstd%|
-  00b0: 32 43 72 65 76 6c 6f 67 76 31 25 32 43 73 70 61 |2Crevlogv1%2Cspa|
-  00c0: 72 73 65 72 65 76 6c 6f 67 25 32 43 73 74 6f 72 |rserevlog%2Cstor|
-  00d0: 65 00 00 80 00 73 08 42 64 61 74 61 2f 30 2e 69 |e....s.Bdata/0.i|
-  00e0: 00 03 00 01 00 00 00 00 00 00 00 02 00 00 00 01 |................|
-  00f0: 00 00 00 00 00 00 00 01 ff ff ff ff ff ff ff ff |................|
-#endif
+  0010: ?? 07 53 54 52 45 41 4d 32 00 00 00 00 03 00 09 |?.STREAM2.......| (glob)
+  0020: 06 09 04 0c ?? 62 79 74 65 63 6f 75 6e 74 31 30 |....?bytecount10| (glob)
 
 --uncompressed is an alias to --stream
+---------------------------------------
 
-#if stream-legacy
-  $ hg clone --uncompressed -U http://localhost:$HGPORT clone1-uncompressed
-  streaming all changes
-  1091 files to transfer, 102 KB of data (no-zstd !)
-  transferred 102 KB in * seconds (* */sec) (glob) (no-zstd !)
-  1091 files to transfer, 98.8 KB of data (zstd !)
-  transferred 98.8 KB in * seconds (* */sec) (glob) (zstd !)
-  searching for changes
-  no changes found
-#endif
-#if stream-bundle2-v2
+The alias flag should trigger a stream clone too.
+
   $ hg clone --uncompressed -U http://localhost:$HGPORT clone1-uncompressed
   streaming all changes
-  1094 files to transfer, 102 KB of data (no-zstd !)
-  transferred 102 KB in * seconds (* */sec) (glob) (no-zstd !)
-  1094 files to transfer, 98.9 KB of data (zstd no-rust !)
-  transferred 98.9 KB in * seconds (* */sec) (glob) (zstd no-rust !)
-  1096 files to transfer, 99.0 KB of data (zstd rust !)
-  transferred 99.0 KB in * seconds (* */sec) (glob) (zstd rust !)
-#endif
-#if stream-bundle2-v3
-  $ hg clone --uncompressed -U http://localhost:$HGPORT clone1-uncompressed
-  streaming all changes
-  1093 entries to transfer
-  transferred 102 KB in * seconds (* */sec) (glob) (no-zstd !)
-  transferred 98.9 KB in * seconds (* */sec) (glob) (zstd no-rust !)
-  transferred 99.0 KB in * seconds (* */sec) (glob) (zstd rust !)
-#endif
+  * files to transfer* (glob) (no-stream-bundle2-v3 !)
+  * entries to transfer (glob) (stream-bundle2-v3 !)
+  transferred * KB in * seconds (* */sec) (glob)
+  searching for changes (stream-legacy !)
+  no changes found (stream-legacy !)
 
 Clone with background file closing enabled
+-------------------------------------------
 
-#if stream-legacy
-  $ hg --debug --config worker.backgroundclose=true --config worker.backgroundcloseminfilecount=1 clone --stream -U http://localhost:$HGPORT clone-background | grep -v adding
-  using http://localhost:$HGPORT/
-  sending capabilities command
-  sending branchmap command
-  streaming all changes
-  sending stream_out command
-  1091 files to transfer, 102 KB of data (no-zstd !)
-  1091 files to transfer, 98.8 KB of data (zstd !)
-  starting 4 threads for background file closing
-  updating the branch cache
-  transferred 102 KB in * seconds (* */sec) (glob) (no-zstd !)
-  transferred 98.8 KB in * seconds (* */sec) (glob) (zstd !)
-  query 1; heads
-  sending batch command
-  searching for changes
-  all remote heads known locally
-  no changes found
-  sending getbundle command
-  bundle2-input-bundle: with-transaction
-  bundle2-input-part: "listkeys" (params: 1 mandatory) supported
-  bundle2-input-part: "phase-heads" supported
-  bundle2-input-part: total payload size 24
-  bundle2-input-bundle: 2 parts total
-  checking for updated bookmarks
-  updating the branch cache
-  (sent 5 HTTP requests and * bytes; received * bytes in responses) (glob)
-#endif
-#if stream-bundle2-v2
-  $ hg --debug --config worker.backgroundclose=true --config worker.backgroundcloseminfilecount=1 clone --stream -U http://localhost:$HGPORT clone-background | grep -v adding
-  using http://localhost:$HGPORT/
-  sending capabilities command
-  query 1; heads
-  sending batch command
-  streaming all changes
-  sending getbundle command
-  bundle2-input-bundle: with-transaction
-  bundle2-input-part: "stream2" (params: 3 mandatory) supported
-  applying stream bundle
-  1094 files to transfer, 102 KB of data (no-zstd !)
-  1094 files to transfer, 98.9 KB of data (zstd no-rust !)
-  1096 files to transfer, 99.0 KB of data (zstd rust !)
-  starting 4 threads for background file closing
+The background file closing logic should trigger when configured to do so, and
+the result should be a valid repository.
+
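+As an hgrc snippet, the knobs exercised below would read (a sketch mirroring
+the --config flags on the command line):
+
+[worker]
+backgroundclose = true
+backgroundcloseminfilecount = 1
+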
+  $ hg --debug --config worker.backgroundclose=true --config worker.backgroundcloseminfilecount=1 clone --stream -U http://localhost:$HGPORT clone-background | grep "background file closing"
   starting 4 threads for background file closing
-  updating the branch cache
-  transferred 102 KB in * seconds (* */sec) (glob) (no-zstd !)
-  bundle2-input-part: total payload size 119001 (no-zstd !)
-  transferred 98.9 KB in * seconds (* */sec) (glob) (zstd no-rust !)
-  transferred 99.0 KB in * seconds (* */sec) (glob) (zstd rust !)
-  bundle2-input-part: total payload size 116162 (zstd no-bigendian no-rust !)
-  bundle2-input-part: total payload size 116330 (zstd no-bigendian rust !)
-  bundle2-input-part: total payload size 116157 (zstd bigendian no-rust !)
-  bundle2-input-part: total payload size 116325 (zstd bigendian rust !)
-  bundle2-input-part: "listkeys" (params: 1 mandatory) supported
-  bundle2-input-bundle: 2 parts total
-  checking for updated bookmarks
-  updating the branch cache
-  (sent 3 HTTP requests and * bytes; received * bytes in responses) (glob)
-#endif
-#if stream-bundle2-v3
-  $ hg --debug --config worker.backgroundclose=true --config worker.backgroundcloseminfilecount=1 clone --stream -U http://localhost:$HGPORT clone-background | grep -v adding
-  using http://localhost:$HGPORT/
-  sending capabilities command
-  query 1; heads
-  sending batch command
-  streaming all changes
-  sending getbundle command
-  bundle2-input-bundle: with-transaction
-  bundle2-input-part: "stream3-exp" (params: 1 mandatory) supported
-  applying stream bundle
-  1093 entries to transfer
-  starting 4 threads for background file closing
-  starting 4 threads for background file closing
-  updating the branch cache
-  transferred 102 KB in * seconds (* */sec) (glob) (no-zstd !)
-  bundle2-input-part: total payload size 120096 (no-zstd !)
-  transferred 98.9 KB in * seconds (* */sec) (glob) (zstd no-rust !)
-  transferred 99.0 KB in * seconds (* */sec) (glob) (zstd rust !)
-  bundle2-input-part: total payload size 117257 (zstd no-rust no-bigendian !)
-  bundle2-input-part: total payload size 117425 (zstd rust no-bigendian !)
-  bundle2-input-part: total payload size 117252 (zstd bigendian no-rust !)
-  bundle2-input-part: total payload size 117420 (zstd bigendian rust !)
-  bundle2-input-part: "listkeys" (params: 1 mandatory) supported
-  bundle2-input-bundle: 2 parts total
-  checking for updated bookmarks
-  updating the branch cache
-  (sent 3 HTTP requests and * bytes; received * bytes in responses) (glob)
-#endif
+  starting 4 threads for background file closing (no-stream-legacy !)
+  $ hg verify -R clone-background --quiet
 
 Cannot stream clone when there are secret changesets
+----------------------------------------------------
+
+If secret changesets are present, they should not be cloned (by default) and
+the clone falls back to a bundle-based clone.
 
   $ hg -R server phase --force --secret -r tip
   $ hg clone --stream -U http://localhost:$HGPORT secret-denied
@@ -622,44 +291,30 @@
   $ killdaemons.py
 
 Streaming of secrets can be overridden by server config
+-------------------------------------------------------
+
+Secret changesets can still be streamed if the server is configured to do so.
 
   $ cd server
   $ hg serve --config server.uncompressedallowsecret=true -p $HGPORT -d --pid-file=hg.pid
   $ cat hg.pid > $DAEMON_PIDS
   $ cd ..
 
-#if stream-legacy
-  $ hg clone --stream -U http://localhost:$HGPORT secret-allowed
-  streaming all changes
-  1091 files to transfer, 102 KB of data (no-zstd !)
-  transferred 102 KB in * seconds (* */sec) (glob) (no-zstd !)
-  1091 files to transfer, 98.8 KB of data (zstd !)
-  transferred 98.8 KB in * seconds (* */sec) (glob) (zstd !)
-  searching for changes
-  no changes found
-#endif
-#if stream-bundle2-v2
   $ hg clone --stream -U http://localhost:$HGPORT secret-allowed
   streaming all changes
-  1094 files to transfer, 102 KB of data (no-zstd !)
-  transferred 102 KB in * seconds (* */sec) (glob) (no-zstd !)
-  1094 files to transfer, 98.9 KB of data (zstd no-rust !)
-  transferred 98.9 KB in * seconds (* */sec) (glob) (zstd no-rust !)
-  1096 files to transfer, 99.0 KB of data (zstd rust !)
-  transferred 99.0 KB in * seconds (* */sec) (glob) (zstd rust !)
-#endif
-#if stream-bundle2-v3
-  $ hg clone --stream -U http://localhost:$HGPORT secret-allowed
-  streaming all changes
-  1093 entries to transfer
-  transferred 102 KB in * seconds (* */sec) (glob) (no-zstd !)
-  transferred 98.9 KB in * seconds (* */sec) (glob) (zstd no-rust !)
-  transferred 99.0 KB in * seconds (* */sec) (glob) (zstd rust !)
-#endif
+  * files to transfer* (glob) (no-stream-bundle2-v3 !)
+  * entries to transfer (glob) (stream-bundle2-v3 !)
+  transferred * KB in * seconds (* */sec) (glob)
+  searching for changes (stream-legacy !)
+  no changes found (stream-legacy !)
 
   $ killdaemons.py
 
 Verify interaction between preferuncompressed and secret presence
+-----------------------------------------------------------------
+
+The presence of secret changesets still makes the clone fall back to a normal
+bundle even if the server prefers stream clones.
 
   $ cd server
   $ hg serve --config server.preferuncompressed=true -p $HGPORT -d --pid-file=hg.pid
@@ -677,6 +332,9 @@
   $ killdaemons.py
 
 Clone not allowed when full bundles disabled and can't serve secrets
+--------------------------------------------------------------------
+
+The clone should fail, since no valid option is available.
 
   $ cd server
   $ hg serve --config server.disablefullbundle=true -p $HGPORT -d --pid-file=hg.pid
@@ -692,6 +350,8 @@
   [100]
 
 Local stream clone with secrets involved
+----------------------------------------
+
 (This is just a test over behavior: if you have access to the repo's files,
 there is no security so it isn't important to prevent a clone here.)
 
@@ -704,12 +364,20 @@
   added 2 changesets with 1025 changes to 1025 files
   new changesets 96ee1d7354c4:c17445101a72
 
+(revert introduction of secret changeset)
+
+  $ hg -R server phase --draft 'secret()'
+
 Stream clone while repo is changing:
+------------------------------------
+
+We should send a repository in a valid state, ignoring the ongoing transaction.
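+
+(The synchronisation below relies on the wait-on-file test helper: sync files
+pause the server mid-transaction while the clone proceeds.)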
 
   $ mkdir changing
   $ cd changing
 
 prepare repo with small and big file to cover both code paths in emitrevlogdata
+(inline and non-inline revlogs).
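+(A small revlog keeps its data inline in its ".i" index file; past a size
+threshold the data moves to a separate ".d" file, hence the two code paths.)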
 
   $ hg init repo
   $ touch repo/f1
@@ -740,15 +408,14 @@
   $ $RUNTESTDIR/testlib/wait-on-file 10 $HG_TEST_STREAM_WALKED_FILE_3
   $ hg -R clone id
   000000000000
+  $ hg -R clone verify --quiet
   $ cat errors.log
   $ cd ..
 
 Stream repository with bookmarks
 --------------------------------
 
-(revert introduction of secret changeset)
-
-  $ hg -R server phase --draft 'secret()'
+The bookmark file should be sent over in the stream bundle.
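+(bookmarks live in the .hg/bookmarks file)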
 
 add a bookmark
 
@@ -756,40 +423,17 @@
 
 clone it
 
-#if stream-legacy
-  $ hg clone --stream http://localhost:$HGPORT with-bookmarks
-  streaming all changes
-  1091 files to transfer, 102 KB of data (no-zstd !)
-  transferred 102 KB in * seconds (* */sec) (glob) (no-zstd !)
-  1091 files to transfer, 98.8 KB of data (zstd !)
-  transferred 98.8 KB in * seconds (* */sec) (glob) (zstd !)
-  searching for changes
-  no changes found
-  updating to branch default
-  1088 files updated, 0 files merged, 0 files removed, 0 files unresolved
-#endif
-#if stream-bundle2-v2
   $ hg clone --stream http://localhost:$HGPORT with-bookmarks
   streaming all changes
-  1097 files to transfer, 102 KB of data (no-zstd !)
-  transferred 102 KB in * seconds (* */sec) (glob) (no-zstd !)
-  1097 files to transfer, 99.1 KB of data (zstd no-rust !)
-  transferred 99.1 KB in * seconds (* */sec) (glob) (zstd no-rust !)
-  1099 files to transfer, 99.2 KB of data (zstd rust !)
-  transferred 99.2 KB in * seconds (* */sec) (glob) (zstd rust !)
+  1091 files to transfer, * KB of data (glob) (stream-legacy !)
+  1097 files to transfer, * KB of data (glob) (stream-bundle2-v2 no-rust !)
+  1099 files to transfer, * KB of data (glob) (stream-bundle2-v2 rust !)
+  1096 entries to transfer (stream-bundle2-v3 !)
+  transferred * KB in * seconds (* */sec) (glob)
+  searching for changes (stream-legacy !)
+  no changes found (stream-legacy !)
   updating to branch default
   1088 files updated, 0 files merged, 0 files removed, 0 files unresolved
-#endif
-#if stream-bundle2-v3
-  $ hg clone --stream http://localhost:$HGPORT with-bookmarks
-  streaming all changes
-  1096 entries to transfer
-  transferred 102 KB in * seconds (* */sec) (glob) (no-zstd !)
-  transferred 99.1 KB in * seconds (* */sec) (glob) (zstd no-rust !)
-  transferred 99.2 KB in * seconds (* */sec) (glob) (zstd rust !)
-  updating to branch default
-  1088 files updated, 0 files merged, 0 files removed, 0 files unresolved
-#endif
   $ hg verify -R with-bookmarks -q
   $ hg -R with-bookmarks bookmarks
      some-bookmark             2:5223b5e3265f
@@ -797,6 +441,9 @@
 Stream repository with phases
 -----------------------------
 
+The file storing phase information (e.g. phaseroots) should be sent as part of
+the stream bundle.
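+(on disk this is .hg/store/phaseroots)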
+
 Clone as publishing
 
   $ hg -R server phase -r 'all()'
@@ -804,40 +451,17 @@
   1: draft
   2: draft
 
-#if stream-legacy
-  $ hg clone --stream http://localhost:$HGPORT phase-publish
-  streaming all changes
-  1091 files to transfer, 102 KB of data (no-zstd !)
-  transferred 102 KB in * seconds (* */sec) (glob) (no-zstd !)
-  1091 files to transfer, 98.8 KB of data (zstd !)
-  transferred 98.8 KB in * seconds (* */sec) (glob) (zstd !)
-  searching for changes
-  no changes found
-  updating to branch default
-  1088 files updated, 0 files merged, 0 files removed, 0 files unresolved
-#endif
-#if stream-bundle2-v2
   $ hg clone --stream http://localhost:$HGPORT phase-publish
   streaming all changes
-  1097 files to transfer, 102 KB of data (no-zstd !)
-  transferred 102 KB in * seconds (* */sec) (glob) (no-zstd !)
-  1097 files to transfer, 99.1 KB of data (zstd no-rust !)
-  transferred 99.1 KB in * seconds (* */sec) (glob) (zstd  no-rust !)
-  1099 files to transfer, 99.2 KB of data (zstd rust !)
-  transferred 99.2 KB in * seconds (* */sec) (glob) (zstd rust !)
+  1091 files to transfer, * KB of data (glob) (stream-legacy !)
+  1097 files to transfer, * KB of data (glob) (stream-bundle2-v2 no-rust !)
+  1099 files to transfer, * KB of data (glob) (stream-bundle2-v2 rust !)
+  1096 entries to transfer (stream-bundle2-v3 !)
+  transferred * KB in * seconds (* */sec) (glob)
+  searching for changes (stream-legacy !)
+  no changes found (stream-legacy !)
   updating to branch default
   1088 files updated, 0 files merged, 0 files removed, 0 files unresolved
-#endif
-#if stream-bundle2-v3
-  $ hg clone --stream http://localhost:$HGPORT phase-publish
-  streaming all changes
-  1096 entries to transfer
-  transferred 102 KB in * seconds (* */sec) (glob) (no-zstd !)
-  transferred 99.1 KB in * seconds (* */sec) (glob) (zstd no-rust !)
-  transferred 99.2 KB in * seconds (* */sec) (glob) (zstd rust !)
-  updating to branch default
-  1088 files updated, 0 files merged, 0 files removed, 0 files unresolved
-#endif
   $ hg verify -R phase-publish -q
   $ hg -R phase-publish phase -r 'all()'
   0: public
@@ -854,73 +478,47 @@
   $ hg -R server serve -p $HGPORT -d --pid-file=hg.pid
   $ cat hg.pid > $DAEMON_PIDS
 
-#if stream-legacy
-
-With v1 of the stream protocol, changeset are always cloned as public. It make
-stream v1 unsuitable for non-publishing repository.
-
-  $ hg clone --stream http://localhost:$HGPORT phase-no-publish
-  streaming all changes
-  1091 files to transfer, 102 KB of data (no-zstd !)
-  transferred 102 KB in * seconds (* */sec) (glob) (no-zstd !)
-  1091 files to transfer, 98.8 KB of data (zstd !)
-  transferred 98.8 KB in * seconds (* */sec) (glob) (zstd !)
-  searching for changes
-  no changes found
-  updating to branch default
-  1088 files updated, 0 files merged, 0 files removed, 0 files unresolved
-  $ hg -R phase-no-publish phase -r 'all()'
-  0: public
-  1: public
-  2: public
-#endif
-#if stream-bundle2-v2
   $ hg clone --stream http://localhost:$HGPORT phase-no-publish
   streaming all changes
-  1098 files to transfer, 102 KB of data (no-zstd !)
-  transferred 102 KB in * seconds (* */sec) (glob) (no-zstd !)
-  1098 files to transfer, 99.1 KB of data (zstd no-rust !)
-  transferred 99.1 KB in * seconds (* */sec) (glob) (zstd no-rust !)
-  1100 files to transfer, 99.2 KB of data (zstd rust !)
-  transferred 99.2 KB in * seconds (* */sec) (glob) (zstd rust !)
+  1091 files to transfer, * KB of data (glob) (stream-legacy !)
+  1098 files to transfer, * KB of data (glob) (stream-bundle2-v2 no-rust !)
+  1100 files to transfer, * KB of data (glob) (stream-bundle2-v2 rust !)
+  1097 entries to transfer (stream-bundle2-v3 !)
+  transferred * KB in * seconds (* */sec) (glob)
+  searching for changes (stream-legacy !)
+  no changes found (stream-legacy !)
   updating to branch default
   1088 files updated, 0 files merged, 0 files removed, 0 files unresolved
+
+Note: With v1 of the stream protocol, changesets are always cloned as public.
+This makes stream v1 unsuitable for non-publishing repositories.
+
   $ hg -R phase-no-publish phase -r 'all()'
-  0: draft
-  1: draft
-  2: draft
-#endif
-#if stream-bundle2-v3
-  $ hg clone --stream http://localhost:$HGPORT phase-no-publish
-  streaming all changes
-  1097 entries to transfer
-  transferred 102 KB in * seconds (* */sec) (glob) (no-zstd !)
-  transferred 99.1 KB in * seconds (* */sec) (glob) (zstd no-rust !)
-  transferred 99.2 KB in * seconds (* */sec) (glob) (zstd rust !)
-  updating to branch default
-  1088 files updated, 0 files merged, 0 files removed, 0 files unresolved
-  $ hg -R phase-no-publish phase -r 'all()'
-  0: draft
-  1: draft
-  2: draft
-#endif
+  0: public (stream-legacy !)
+  1: public (stream-legacy !)
+  2: public (stream-legacy !)
+  0: draft (no-stream-legacy !)
+  1: draft (no-stream-legacy !)
+  2: draft (no-stream-legacy !)
   $ hg verify -R phase-no-publish -q
 
   $ killdaemons.py
 
+
+Stream repository with obsolescence
+-----------------------------------
+
 #if stream-legacy
 
 With v1 of the stream protocol, changesets are always cloned as public. There
 is no obsolescence marker exchange in stream v1.
 
-#endif
-#if stream-bundle2-v2
-
-Stream repository with obsolescence
------------------------------------
+#else
 
 Clone non-publishing with obsolescence
 
+The obsstore file should be sent as part of the stream bundle.
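+(on disk, markers live in .hg/store/obsstore)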
+
   $ cat >> $HGRCPATH << EOF
   > [experimental]
   > evolution=all
@@ -943,62 +541,10 @@
 
   $ hg clone -U --stream http://localhost:$HGPORT with-obsolescence
   streaming all changes
-  1099 files to transfer, 102 KB of data (no-zstd !)
-  transferred 102 KB in * seconds (* */sec) (glob) (no-zstd !)
-  1099 files to transfer, 99.5 KB of data (zstd no-rust !)
-  transferred 99.5 KB in * seconds (* */sec) (glob) (zstd no-rust !)
-  1101 files to transfer, 99.6 KB of data (zstd rust !)
-  transferred 99.6 KB in * seconds (* */sec) (glob) (zstd rust !)
-  $ hg -R with-obsolescence log -T '{rev}: {phase}\n'
-  2: draft
-  1: draft
-  0: draft
-  $ hg debugobsolete -R with-obsolescence
-  8c206a663911c1f97f2f9d7382e417ae55872cfa 0 {5223b5e3265f0df40bb743da62249413d74ac70f} (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
-  $ hg verify -R with-obsolescence -q
-
-  $ hg clone -U --stream --config experimental.evolution=0 http://localhost:$HGPORT with-obsolescence-no-evolution
-  streaming all changes
-  remote: abort: server has obsolescence markers, but client cannot receive them via stream clone
-  abort: pull failed on remote
-  [100]
-
-  $ killdaemons.py
-
-#endif
-#if stream-bundle2-v3
-
-Stream repository with obsolescence
------------------------------------
-
-Clone non-publishing with obsolescence
-
-  $ cat >> $HGRCPATH << EOF
-  > [experimental]
-  > evolution=all
-  > EOF
-
-  $ cd server
-  $ echo foo > foo
-  $ hg -q commit -m 'about to be pruned'
-  $ hg debugobsolete `hg log -r . -T '{node}'` -d '0 0' -u test --record-parents
-  1 new obsolescence markers
-  obsoleted 1 changesets
-  $ hg up null -q
-  $ hg log -T '{rev}: {phase}\n'
-  2: draft
-  1: draft
-  0: draft
-  $ hg serve -p $HGPORT -d --pid-file=hg.pid
-  $ cat hg.pid > $DAEMON_PIDS
-  $ cd ..
-
-  $ hg clone -U --stream http://localhost:$HGPORT with-obsolescence
-  streaming all changes
-  1098 entries to transfer
-  transferred 102 KB in * seconds (* */sec) (glob) (no-zstd !)
-  transferred 99.5 KB in * seconds (* */sec) (glob) (zstd no-rust !)
-  transferred 99.6 KB in * seconds (* */sec) (glob) (zstd rust !)
+  1099 files to transfer, * KB of data (glob) (stream-bundle2-v2 no-rust !)
+  1101 files to transfer, * KB of data (glob) (stream-bundle2-v2 rust !)
+  1098 entries to transfer (no-stream-bundle2-v2 !)
+  transferred * KB in * seconds (* */sec) (glob)
   $ hg -R with-obsolescence log -T '{rev}: {phase}\n'
   2: draft
   1: draft
@@ -1018,19 +564,16 @@
 #endif
 
 Cloning a repo with no requirements doesn't give some obscure error
+-------------------------------------------------------------------
 
   $ mkdir -p empty-repo/.hg
   $ hg clone -q --stream ssh://user@dummy/empty-repo empty-repo2
   $ hg --cwd empty-repo2 verify -q
 
 Cloning a repo with an empty manifestlog doesn't give some weird error
+----------------------------------------------------------------------
 
   $ rm -r empty-repo; hg init empty-repo
   $ (cd empty-repo; touch x; hg commit -Am empty; hg debugstrip -r 0) > /dev/null
   $ hg clone -q --stream ssh://user@dummy/empty-repo empty-repo3
-  $ hg --cwd empty-repo3 verify -q 2>&1 | grep -v warning
-  [1]
-
-The warnings filtered out here are talking about zero-length 'orphan' data files.
-Those are harmless, so that's fine.
-
+  $ hg --cwd empty-repo3 verify -q
--- a/tests/test-clone.t	Thu Mar 21 12:24:42 2024 +0100
+++ b/tests/test-clone.t	Thu Mar 21 12:26:46 2024 +0100
@@ -47,11 +47,7 @@
 
   $ ls .hg/cache
   branch2-base
-  branch2-immutable
   branch2-served
-  branch2-served.hidden
-  branch2-visible
-  branch2-visible-hidden
   rbc-names-v1
   rbc-revs-v1
   tags2
@@ -71,42 +67,34 @@
 
 #if hardlink
   $ hg --debug clone -U . ../c --config progress.debug=true
-  linking: 1/16 files (6.25%) (no-rust !)
-  linking: 2/16 files (12.50%) (no-rust !)
-  linking: 3/16 files (18.75%) (no-rust !)
-  linking: 4/16 files (25.00%) (no-rust !)
-  linking: 5/16 files (31.25%) (no-rust !)
-  linking: 6/16 files (37.50%) (no-rust !)
-  linking: 7/16 files (43.75%) (no-rust !)
-  linking: 8/16 files (50.00%) (no-rust !)
-  linking: 9/16 files (56.25%) (no-rust !)
-  linking: 10/16 files (62.50%) (no-rust !)
-  linking: 11/16 files (68.75%) (no-rust !)
-  linking: 12/16 files (75.00%) (no-rust !)
-  linking: 13/16 files (81.25%) (no-rust !)
-  linking: 14/16 files (87.50%) (no-rust !)
-  linking: 15/16 files (93.75%) (no-rust !)
-  linking: 16/16 files (100.00%) (no-rust !)
-  linked 16 files (no-rust !)
-  linking: 1/18 files (5.56%) (rust !)
-  linking: 2/18 files (11.11%) (rust !)
-  linking: 3/18 files (16.67%) (rust !)
-  linking: 4/18 files (22.22%) (rust !)
-  linking: 5/18 files (27.78%) (rust !)
-  linking: 6/18 files (33.33%) (rust !)
-  linking: 7/18 files (38.89%) (rust !)
-  linking: 8/18 files (44.44%) (rust !)
-  linking: 9/18 files (50.00%) (rust !)
-  linking: 10/18 files (55.56%) (rust !)
-  linking: 11/18 files (61.11%) (rust !)
-  linking: 12/18 files (66.67%) (rust !)
-  linking: 13/18 files (72.22%) (rust !)
-  linking: 14/18 files (77.78%) (rust !)
-  linking: 15/18 files (83.33%) (rust !)
-  linking: 16/18 files (88.89%) (rust !)
-  linking: 17/18 files (94.44%) (rust !)
-  linking: 18/18 files (100.00%) (rust !)
-  linked 18 files (rust !)
+  linking: 1/12 files (8.33%) (no-rust !)
+  linking: 2/12 files (16.67%) (no-rust !)
+  linking: 3/12 files (25.00%) (no-rust !)
+  linking: 4/12 files (33.33%) (no-rust !)
+  linking: 5/12 files (41.67%) (no-rust !)
+  linking: 6/12 files (50.00%) (no-rust !)
+  linking: 7/12 files (58.33%) (no-rust !)
+  linking: 8/12 files (66.67%) (no-rust !)
+  linking: 9/12 files (75.00%) (no-rust !)
+  linking: 10/12 files (83.33%) (no-rust !)
+  linking: 11/12 files (91.67%) (no-rust !)
+  linking: 12/12 files (100.00%) (no-rust !)
+  linked 12 files (no-rust !)
+  linking: 1/14 files (7.14%) (rust !)
+  linking: 2/14 files (14.29%) (rust !)
+  linking: 3/14 files (21.43%) (rust !)
+  linking: 4/14 files (28.57%) (rust !)
+  linking: 5/14 files (35.71%) (rust !)
+  linking: 6/14 files (42.86%) (rust !)
+  linking: 7/14 files (50.00%) (rust !)
+  linking: 8/14 files (57.14%) (rust !)
+  linking: 9/14 files (64.29%) (rust !)
+  linking: 10/14 files (71.43%) (rust !)
+  linking: 11/14 files (78.57%) (rust !)
+  linking: 12/14 files (85.71%) (rust !)
+  linking: 13/14 files (92.86%) (rust !)
+  linking: 14/14 files (100.00%) (rust !)
+  linked 14 files (rust !)
   updating the branch cache
 #else
   $ hg --debug clone -U . ../c --config progress.debug=true
@@ -125,11 +113,7 @@
 
   $ ls .hg/cache
   branch2-base
-  branch2-immutable
   branch2-served
-  branch2-served.hidden
-  branch2-visible
-  branch2-visible-hidden
   rbc-names-v1
   rbc-revs-v1
   tags2
--- a/tests/test-clonebundles.t	Thu Mar 21 12:24:42 2024 +0100
+++ b/tests/test-clonebundles.t	Thu Mar 21 12:26:46 2024 +0100
@@ -394,9 +394,9 @@
   $ hg clone -U http://localhost:$HGPORT stream-clone-no-spec
   applying clone bundle from http://localhost:$HGPORT1/packed.hg
   5 files to transfer, 613 bytes of data (no-rust !)
-  transferred 613 bytes in *.* seconds (*) (glob) (no-rust !)
+  transferred 613 bytes in * seconds (* */sec) (glob) (no-rust !)
   7 files to transfer, 739 bytes of data (rust !)
-  transferred 739 bytes in *.* seconds (*) (glob) (rust !)
+  transferred 739 bytes in * seconds (* */sec) (glob) (rust !)
   finished applying clone bundle
   searching for changes
   no changes found
@@ -409,10 +409,8 @@
 
   $ hg clone -U http://localhost:$HGPORT stream-clone-vanilla-spec
   applying clone bundle from http://localhost:$HGPORT1/packed.hg
-  5 files to transfer, 613 bytes of data (no-rust !)
-  transferred 613 bytes in *.* seconds (*) (glob) (no-rust !)
-  7 files to transfer, 739 bytes of data (rust !)
-  transferred 739 bytes in *.* seconds (*) (glob) (rust !)
+  * files to transfer, * bytes of data (glob)
+  transferred * bytes in * seconds (* */sec) (glob)
   finished applying clone bundle
   searching for changes
   no changes found
@@ -425,10 +423,8 @@
 
   $ hg clone -U http://localhost:$HGPORT stream-clone-supported-requirements
   applying clone bundle from http://localhost:$HGPORT1/packed.hg
-  5 files to transfer, 613 bytes of data (no-rust !)
-  transferred 613 bytes in *.* seconds (*) (glob) (no-rust !)
-  7 files to transfer, 739 bytes of data (rust !)
-  transferred 739 bytes in *.* seconds (*) (glob) (rust !)
+  * files to transfer, * bytes of data (glob)
+  transferred * bytes in * seconds (* */sec) (glob)
   finished applying clone bundle
   searching for changes
   no changes found
@@ -574,10 +570,8 @@
   no compatible clone bundles available on server; falling back to regular clone
   (you may want to report this to the server operator)
   streaming all changes
-  10 files to transfer, 816 bytes of data (no-rust !)
-  transferred 816 bytes in * seconds (*) (glob) (no-rust !)
-  12 files to transfer, 942 bytes of data (rust !)
-  transferred 942 bytes in *.* seconds (*) (glob) (rust !)
+  * files to transfer, * bytes of data (glob)
+  transferred * bytes in * seconds (* */sec) (glob)
 
 A manifest with a stream clone but no BUNDLESPEC
 
@@ -589,10 +583,8 @@
   no compatible clone bundles available on server; falling back to regular clone
   (you may want to report this to the server operator)
   streaming all changes
-  10 files to transfer, 816 bytes of data (no-rust !)
-  transferred 816 bytes in * seconds (*) (glob) (no-rust !)
-  12 files to transfer, 942 bytes of data (rust !)
-  transferred 942 bytes in *.* seconds (*) (glob) (rust !)
+  * files to transfer, * bytes of data (glob)
+  transferred * bytes in * seconds (* */sec) (glob)
 
 A manifest with a gzip bundle and a stream clone
 
@@ -603,10 +595,8 @@
 
   $ hg clone -U --stream http://localhost:$HGPORT uncompressed-gzip-packed
   applying clone bundle from http://localhost:$HGPORT1/packed.hg
-  5 files to transfer, 613 bytes of data (no-rust !)
-  transferred 613 bytes in *.* seconds (*) (glob) (no-rust !)
-  7 files to transfer, 739 bytes of data (rust !)
-  transferred 739 bytes in *.* seconds (*) (glob) (rust !)
+  * files to transfer, * bytes of data (glob)
+  transferred * bytes in * seconds (* */sec) (glob)
   finished applying clone bundle
   searching for changes
   no changes found
@@ -620,10 +610,8 @@
 
   $ hg clone -U --stream http://localhost:$HGPORT uncompressed-gzip-packed-requirements
   applying clone bundle from http://localhost:$HGPORT1/packed.hg
-  5 files to transfer, 613 bytes of data (no-rust !)
-  transferred 613 bytes in *.* seconds (*) (glob) (no-rust !)
-  7 files to transfer, 739 bytes of data (rust !)
-  transferred 739 bytes in *.* seconds (*) (glob) (rust !)
+  * files to transfer, * bytes of data (glob)
+  transferred * bytes in * seconds (* */sec) (glob)
   finished applying clone bundle
   searching for changes
   no changes found
@@ -639,10 +627,8 @@
   no compatible clone bundles available on server; falling back to regular clone
   (you may want to report this to the server operator)
   streaming all changes
-  10 files to transfer, 816 bytes of data (no-rust !)
-  transferred 816 bytes in * seconds (*) (glob) (no-rust !)
-  12 files to transfer, 942 bytes of data (rust !)
-  transferred 942 bytes in *.* seconds (*) (glob) (rust !)
+  * files to transfer, * bytes of data (glob)
+  transferred * bytes in * seconds (* */sec) (glob)
 
 Test clone bundle retrieved through bundle2
 
--- a/tests/test-debugcommands.t	Thu Mar 21 12:24:42 2024 +0100
+++ b/tests/test-debugcommands.t	Thu Mar 21 12:26:46 2024 +0100
@@ -652,12 +652,7 @@
   .hg/cache/rbc-revs-v1
   .hg/cache/rbc-names-v1
   .hg/cache/hgtagsfnodes1
-  .hg/cache/branch2-visible-hidden
-  .hg/cache/branch2-visible
-  .hg/cache/branch2-served.hidden
   .hg/cache/branch2-served
-  .hg/cache/branch2-immutable
-  .hg/cache/branch2-base
 
 Test debug::unbundle
 
--- a/tests/test-hardlinks.t	Thu Mar 21 12:24:42 2024 +0100
+++ b/tests/test-hardlinks.t	Thu Mar 21 12:26:46 2024 +0100
@@ -263,11 +263,7 @@
   2 r4/.hg/00changelog.i
   [24] r4/.hg/branch (re)
   2 r4/.hg/cache/branch2-base
-  2 r4/.hg/cache/branch2-immutable
   2 r4/.hg/cache/branch2-served
-  2 r4/.hg/cache/branch2-served.hidden
-  2 r4/.hg/cache/branch2-visible
-  2 r4/.hg/cache/branch2-visible-hidden
   2 r4/.hg/cache/rbc-names-v1
   2 r4/.hg/cache/rbc-revs-v1
   2 r4/.hg/cache/tags2
@@ -320,11 +316,7 @@
   2 r4/.hg/00changelog.i
   1 r4/.hg/branch
   2 r4/.hg/cache/branch2-base
-  2 r4/.hg/cache/branch2-immutable
   2 r4/.hg/cache/branch2-served
-  2 r4/.hg/cache/branch2-served.hidden
-  2 r4/.hg/cache/branch2-visible
-  2 r4/.hg/cache/branch2-visible-hidden
   2 r4/.hg/cache/rbc-names-v1
   2 r4/.hg/cache/rbc-revs-v1
   2 r4/.hg/cache/tags2
--- a/tests/test-server-view.t	Thu Mar 21 12:24:42 2024 +0100
+++ b/tests/test-server-view.t	Thu Mar 21 12:26:46 2024 +0100
@@ -36,12 +36,7 @@
   $ hg -R test --config experimental.extra-filter-revs='not public()' debugupdatecache
   $ ls -1 test/.hg/cache/
   branch2-base%89c45d2fa07e
-  branch2-immutable%89c45d2fa07e
   branch2-served
-  branch2-served%89c45d2fa07e
-  branch2-served.hidden%89c45d2fa07e
-  branch2-visible%89c45d2fa07e
-  branch2-visible-hidden%89c45d2fa07e
   hgtagsfnodes1
   rbc-names-v1
   rbc-revs-v1
--- a/tests/test-share.t	Thu Mar 21 12:24:42 2024 +0100
+++ b/tests/test-share.t	Thu Mar 21 12:26:46 2024 +0100
@@ -63,11 +63,7 @@
   1 files updated, 0 files merged, 0 files removed, 0 files unresolved
   $ ls -1 ../repo2-clone/.hg/cache
   branch2-base
-  branch2-immutable
   branch2-served
-  branch2-served.hidden
-  branch2-visible
-  branch2-visible-hidden
   rbc-names-v1
   rbc-revs-v1
   tags2
--- a/tests/test-ssh.t	Thu Mar 21 12:24:42 2024 +0100
+++ b/tests/test-ssh.t	Thu Mar 21 12:26:46 2024 +0100
@@ -72,8 +72,8 @@
   $ hg -R local-stream book mybook
   $ hg clone --stream ssh://user@dummy/local-stream stream2
   streaming all changes
-  16 files to transfer, * of data (glob) (no-rust !)
-  18 files to transfer, * of data (glob) (rust !)
+  12 files to transfer, * of data (glob) (no-rust !)
+  14 files to transfer, * of data (glob) (rust !)
   transferred * in * seconds (*) (glob)
   updating to branch default
   2 files updated, 0 files merged, 0 files removed, 0 files unresolved
--- a/tests/test-strip-branch-cache.t	Thu Mar 21 12:24:42 2024 +0100
+++ b/tests/test-strip-branch-cache.t	Thu Mar 21 12:26:46 2024 +0100
@@ -1,3 +1,5 @@
+This test covers a bug that no longer exists: the branchmap cache could be left
+mentioning changesets that had been stripped, crashing a later pull.
+
 Define helpers.
 
   $ hg_log () { hg log -G -T "{rev}:{node|short}"; }
@@ -18,7 +20,10 @@
 
   $ hg pull -q ../repo
 
-  $ cat .hg/cache/branch2-visible
+  $ ls -1 .hg/cache/branch?*
+  .hg/cache/branch2-base
+  .hg/cache/branch2-served
+  $ cat .hg/cache/branch?-served
   222ae9789a75703f9836e44de7db179cbfd420ee 2
   a3498d6e39376d2456425dd8c692367bdbf00fa2 o default
   222ae9789a75703f9836e44de7db179cbfd420ee o default
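+
+(branch2 cache format: the first line is "<tip node> <tip rev>"; each later
+line is "<head node> <o|c> <branch>", with "o" marking an open branch head and
+"c" a closed one.)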
@@ -33,24 +38,36 @@
 
   $ strip '1:'
 
-The branchmap cache is not adjusted on strip.
-Now mentions a changelog entry that has been stripped.
+After the strip, the "served" cache is now identical to the "base" one, and the
+older one has been actively deleted.
 
-  $ cat .hg/cache/branch2-visible
-  222ae9789a75703f9836e44de7db179cbfd420ee 2
-  a3498d6e39376d2456425dd8c692367bdbf00fa2 o default
-  222ae9789a75703f9836e44de7db179cbfd420ee o default
+  $ ls -1 .hg/cache/branch?*
+  .hg/cache/branch2-base
+  $ cat .hg/cache/branch?-base
+  7ab0a3bd758a58b9f79557ce708533e627776cce 0
+  7ab0a3bd758a58b9f79557ce708533e627776cce o default
+
+We do a new commit and we get a new valid branchmap for the served version.
 
   $ commit c
-
-Not adjusted on commit, either.
+  $ ls -1 .hg/cache/branch?*
+  .hg/cache/branch2-base
+  .hg/cache/branch2-served
+  $ cat .hg/cache/branch?-served
+  a1602b357cfca067600406eb19060c7128804d72 1
+  a1602b357cfca067600406eb19060c7128804d72 o default
 
-  $ cat .hg/cache/branch2-visible
-  222ae9789a75703f9836e44de7db179cbfd420ee 2
-  a3498d6e39376d2456425dd8c692367bdbf00fa2 o default
-  222ae9789a75703f9836e44de7db179cbfd420ee o default
 
 On pull we end up with the same tip; the invalid-cache reuse that used to crash
 here is now handled correctly.
 
-  $ hg pull ../repo 2>&1 | grep 'ValueError:'
-  ValueError: node a3498d6e39376d2456425dd8c692367bdbf00fa2 does not exist (known-bad-output !)
+  $ hg pull ../repo --quiet
+  $ hg heads -T '{rev} {node} {branch}\n'
+  2 222ae9789a75703f9836e44de7db179cbfd420ee default
+  1 a1602b357cfca067600406eb19060c7128804d72 default
+  $ ls -1 .hg/cache/branch?*
+  .hg/cache/branch2-base
+  .hg/cache/branch2-served
+  $ cat .hg/cache/branch?-served
+  222ae9789a75703f9836e44de7db179cbfd420ee 2
+  a1602b357cfca067600406eb19060c7128804d72 o default
+  222ae9789a75703f9836e44de7db179cbfd420ee o default
--- a/tests/test-tags.t	Thu Mar 21 12:24:42 2024 +0100
+++ b/tests/test-tags.t	Thu Mar 21 12:26:46 2024 +0100
@@ -792,11 +792,6 @@
 
   $ ls tagsclient/.hg/cache
   branch2-base
-  branch2-immutable
-  branch2-served
-  branch2-served.hidden
-  branch2-visible
-  branch2-visible-hidden
   hgtagsfnodes1
   rbc-names-v1
   rbc-revs-v1
@@ -823,11 +818,6 @@
 
   $ ls tagsclient/.hg/cache
   branch2-base
-  branch2-immutable
-  branch2-served
-  branch2-served.hidden
-  branch2-visible
-  branch2-visible-hidden
   hgtagsfnodes1
   rbc-names-v1
   rbc-revs-v1
--- a/tests/test-treemanifest.t	Thu Mar 21 12:24:42 2024 +0100
+++ b/tests/test-treemanifest.t	Thu Mar 21 12:26:46 2024 +0100
@@ -761,8 +761,8 @@
   $ hg clone --config experimental.changegroup3=True --stream -U \
   >   http://localhost:$HGPORT1 stream-clone-basicstore
   streaming all changes
-  29 files to transfer, * of data (glob) (no-rust !)
-  31 files to transfer, * of data (glob) (rust !)
+  24 files to transfer, * of data (glob) (no-rust !)
+  26 files to transfer, * of data (glob) (rust !)
   transferred * in * seconds (*) (glob)
   $ hg -R stream-clone-basicstore verify -q
   $ cat port-1-errors.log
@@ -771,8 +771,8 @@
   $ hg clone --config experimental.changegroup3=True --stream -U \
   >   http://localhost:$HGPORT2 stream-clone-encodedstore
   streaming all changes
-  29 files to transfer, * of data (glob) (no-rust !)
-  31 files to transfer, * of data (glob) (rust !)
+  24 files to transfer, * of data (glob) (no-rust !)
+  26 files to transfer, * of data (glob) (rust !)
   transferred * in * seconds (*) (glob)
   $ hg -R stream-clone-encodedstore verify -q
   $ cat port-2-errors.log