changeset 51648:6454c117c6a4 stable 6.8rc0

branching: merge default into stable for 6.8rc0
author Raphaël Gomès <rgomes@octobus.net>
date Mon, 24 Jun 2024 12:05:31 +0200
parents 96cb63a86ee5 (current diff) 56eb076a08aa (diff)
children a57e12229bc9
files
diffstat 70 files changed, 3294 insertions(+), 1602 deletions(-)
--- a/contrib/all-revsets.txt	Thu Jun 13 09:52:39 2024 +0200
+++ b/contrib/all-revsets.txt	Mon Jun 24 12:05:31 2024 +0200
@@ -46,8 +46,8 @@
 # Used in revision c1546d7400ef
 min(0::)
 # Used in revision 546fa6576815
-author(lmoscovicz) or author(olivia)
-author(olivia) or author(lmoscovicz)
+author(lmoscovicz) or author("pierre-yves")
+author("pierre-yves") or author(lmoscovicz)
 # Used in revision 9bfe68357c01
 public() and id("d82e2223f132")
 # Used in revision ba89f7b542c9
@@ -100,7 +100,7 @@
 draft() and ::tip
 ::tip and draft()
 author(lmoscovicz)
-author(olivia)
+author("pierre-yves")
 ::p1(p1(tip))::
 public()
 :10000 and public()
@@ -130,7 +130,7 @@
 head()
 head() - public()
 draft() and head()
-head() and author("olivia")
+head() and author("pierre-yves")
 
 # testing the mutable phases set
 draft()
--- a/contrib/base-revsets.txt	Thu Jun 13 09:52:39 2024 +0200
+++ b/contrib/base-revsets.txt	Mon Jun 24 12:05:31 2024 +0200
@@ -25,9 +25,9 @@
 0::tip
 roots(0::tip)
 author(lmoscovicz)
-author(olivia)
-author(lmoscovicz) or author(olivia)
-author(olivia) or author(lmoscovicz)
+author("pierre-yves")
+author(lmoscovicz) or author("pierre-yves")
+author("pierre-yves") or author(lmoscovicz)
 tip:0
 0::
 # those two `roots(...)` inputs are close to what phase movement uses.
--- a/contrib/perf.py	Thu Jun 13 09:52:39 2024 +0200
+++ b/contrib/perf.py	Mon Jun 24 12:05:31 2024 +0200
@@ -20,7 +20,10 @@
 
 ``profile-benchmark``
   Enable profiling for the benchmarked section.
-  (The first iteration is benchmarked)
+  (by default, the first iteration is profiled)
+
+``profiled-runs``
+  list of iterations to profile (starting from 0)
 
 ``run-limits``
   Control the number of runs each benchmark will perform. The option value
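
Taken together, ``profile-benchmark`` switches profiling on while ``profiled-runs`` selects which iterations (0-based) are profiled. A minimal standalone sketch of that selection logic, with the profiler factory left as a stand-in:

    import contextlib
    import cProfile

    def bench(func, runs, profiled_runs=frozenset({0}), make_profiler=None):
        """Run `func` `runs` times, profiling only the iterations whose
        0-based index appears in `profiled_runs`."""
        for count in range(runs):
            if make_profiler is not None and count in profiled_runs:
                prof = make_profiler()  # fresh profiler per selected run
            else:
                prof = contextlib.nullcontext()
            with prof:
                func()

    # e.g. profile only the first and fifth iterations:
    # bench(my_benchmark, 10, {0, 4}, cProfile.Profile)
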
@@ -318,6 +321,11 @@
     )
     configitem(
         b'perf',
+        b'profiled-runs',
+        default=mercurial.configitems.dynamicdefault,
+    )
+    configitem(
+        b'perf',
         b'run-limits',
         default=mercurial.configitems.dynamicdefault,
         experimental=True,
@@ -354,7 +362,7 @@
     )
     configitem(
         b'perf',
-        b'profile-benchmark',
+        b'profiled-runs',
         default=mercurial.configitems.dynamicdefault,
     )
     configitem(
@@ -491,9 +499,12 @@
         limits = DEFAULTLIMITS
 
     profiler = None
+    profiled_runs = set()
     if profiling is not None:
         if ui.configbool(b"perf", b"profile-benchmark", False):
-            profiler = profiling.profile(ui)
+            profiler = lambda: profiling.profile(ui)
+            for run in ui.configlist(b"perf", b"profiled-runs", [0]):
+                profiled_runs.add(int(run))
 
     prerun = getint(ui, b"perf", b"pre-run", 0)
     t = functools.partial(
@@ -503,6 +514,7 @@
         limits=limits,
         prerun=prerun,
         profiler=profiler,
+        profiled_runs=profiled_runs,
     )
     return t, fm
 
@@ -547,27 +559,32 @@
     limits=DEFAULTLIMITS,
     prerun=0,
     profiler=None,
+    profiled_runs=(0,),
 ):
     gc.collect()
     results = []
-    begin = util.timer()
     count = 0
     if profiler is None:
-        profiler = NOOPCTX
+        profiler = lambda: NOOPCTX
     for i in range(prerun):
         if setup is not None:
             setup()
         with context():
             func()
+    begin = util.timer()
     keepgoing = True
     while keepgoing:
+        if count in profiled_runs:
+            prof = profiler()
+        else:
+            prof = NOOPCTX
         if setup is not None:
             setup()
         with context():
-            with profiler:
+            gc.collect()
+            with prof:
                 with timeone() as item:
                     r = func()
-        profiler = NOOPCTX
         count += 1
         results.append(item[0])
         cstop = util.timer()
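
The hunk above also moves `begin = util.timer()` past the warm-up runs and adds a `gc.collect()` inside the benchmarked context; the intent, reduced to a toy, is to keep allocator noise from earlier iterations out of the measurement:

    import gc
    import time

    def timed_once(func):
        # Collect garbage right before the timed region so objects left over
        # from previous iterations are not reclaimed inside the measurement.
        gc.collect()
        start = time.perf_counter()
        func()
        return time.perf_counter() - start
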
@@ -2029,6 +2046,19 @@
     fm.end()
 
 
+def _clear_store_audit_cache(repo):
+    vfs = getsvfs(repo)
+    # unwrap the fncache proxy
+    if not hasattr(vfs, "audit"):
+        vfs = getattr(vfs, "vfs", vfs)
+    auditor = vfs.audit
+    if hasattr(auditor, "clear_audit_cache"):
+        auditor.clear_audit_cache()
+    elif hasattr(auditor, "audited"):
+        auditor.audited.clear()
+        auditor.auditeddir.clear()
+
+
 def _find_stream_generator(version):
     """find the proper generator function for this stream version"""
     import mercurial.streamclone
@@ -2040,7 +2070,7 @@
     if generatev1 is not None:
 
         def generate(repo):
-            entries, bytes, data = generatev2(repo, None, None, True)
+            entries, bytes, data = generatev1(repo, None, None, True)
             return data
 
         available[b'v1'] = generatev1
@@ -2058,8 +2088,7 @@
     if generatev3 is not None:
 
         def generate(repo):
-            entries, bytes, data = generatev3(repo, None, None, True)
-            return data
+            return generatev3(repo, None, None, True)
 
         available[b'v3-exp'] = generate
 
@@ -2085,7 +2114,8 @@
             b'',
             b'stream-version',
             b'latest',
-            b'stream version to use ("v1", "v2", "v3" or "latest", (the default))',
+            b'stream version to use ("v1", "v2", "v3-exp" '
+            b'or "latest" (the default))',
         ),
     ]
     + formatteropts,
@@ -2102,6 +2132,9 @@
 
     def setupone():
         result_holder[0] = None
+        # This is important for the full generation; even if it does not
+        # currently matter, it seems safer to also clear it here.
+        _clear_store_audit_cache(repo)
 
     generate = _find_stream_generator(stream_version)
 
@@ -2120,7 +2153,8 @@
             b'',
             b'stream-version',
             b'latest',
-            b'stream version to us ("v1", "v2" or "latest", (the default))',
+            b'stream version to use ("v1", "v2", "v3-exp" '
+            b'or "latest" (the default))',
         ),
     ]
     + formatteropts,
@@ -2136,12 +2170,15 @@
 
     generate = _find_stream_generator(stream_version)
 
+    def setup():
+        _clear_store_audit_cache(repo)
+
     def runone():
         # the lock is held for the duration of the initialisation
         for chunk in generate(repo):
             pass
 
-    timer(runone, title=b"generate")
+    timer(runone, setup=setup, title=b"generate")
     fm.end()
 
 
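
Both stream-generation benchmarks now pass a `setup` hook so the store audit cache starts cold on every run. The hook pattern itself is generic; a sketch with `timer` reduced to its essentials:

    import time

    def timer(run, setup=None, rounds=3):
        """Measure `run` several times, calling `setup` before each round so
        per-round state (here: the store audit cache) starts cold."""
        results = []
        for _ in range(rounds):
            if setup is not None:
                setup()  # e.g. _clear_store_audit_cache(repo)
            start = time.perf_counter()
            run()
            results.append(time.perf_counter() - start)
        return results
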
@@ -2187,10 +2224,18 @@
 
     run_variables = [None, None]
 
+    # we create the new repository next to the other one for two reasons:
+    # - this way we use the same file system, which is relevant for benchmarks
+    # - if /tmp/ is small, the operation could overfill it.
+    source_repo_dir = os.path.dirname(repo.root)
+
     @contextlib.contextmanager
     def context():
         with open(filename, mode='rb') as bundle:
-            with tempfile.TemporaryDirectory() as tmp_dir:
+            with tempfile.TemporaryDirectory(
+                prefix=b'hg-perf-stream-consume-',
+                dir=source_repo_dir,
+            ) as tmp_dir:
                 tmp_dir = fsencode(tmp_dir)
                 run_variables[0] = bundle
                 run_variables[1] = tmp_dir
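
The placement trick is plain `tempfile`: passing `dir=` keeps the scratch repository on the same filesystem as the source and off a possibly small /tmp. A self-contained sketch (the source path is a stand-in):

    import os
    import tempfile

    source_repo = os.path.join(os.getcwd(), "big-repo")  # stand-in path

    # Create the scratch directory next to the source so both sit on the
    # same filesystem and a small /tmp cannot overflow during the benchmark.
    with tempfile.TemporaryDirectory(
        prefix="hg-perf-stream-consume-",
        dir=os.path.dirname(source_repo),
    ) as tmp_dir:
        print(tmp_dir)
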
@@ -2201,11 +2246,15 @@
     def runone():
         bundle = run_variables[0]
         tmp_dir = run_variables[1]
+
+        # we actually want to copy all config to ensure the repo config is
+        # taken into account during the benchmark
+        new_ui = repo.ui.__class__(repo.ui)
         # only pass ui when no srcrepo
         localrepo.createrepository(
-            repo.ui, tmp_dir, requirements=repo.requirements
+            new_ui, tmp_dir, requirements=repo.requirements
         )
-        target = hg.repository(repo.ui, tmp_dir)
+        target = hg.repository(new_ui, tmp_dir)
         gen = exchange.readbundle(target.ui, bundle, bundle.name)
         # stream v1
         if util.safehasattr(gen, 'apply'):
@@ -4205,15 +4254,24 @@
         # add unfiltered
         allfilters.append(None)
 
-    if util.safehasattr(branchmap.branchcache, 'fromfile'):
+    old_branch_cache_from_file = None
+    branchcacheread = None
+    if util.safehasattr(branchmap, 'branch_cache_from_file'):
+        old_branch_cache_from_file = branchmap.branch_cache_from_file
+        branchmap.branch_cache_from_file = lambda *args: None
+    elif util.safehasattr(branchmap.branchcache, 'fromfile'):
         branchcacheread = safeattrsetter(branchmap.branchcache, b'fromfile')
         branchcacheread.set(classmethod(lambda *args: None))
     else:
         # older versions
         branchcacheread = safeattrsetter(branchmap, b'read')
         branchcacheread.set(lambda *args: None)
-    branchcachewrite = safeattrsetter(branchmap.branchcache, b'write')
-    branchcachewrite.set(lambda *args: None)
+    if util.safehasattr(branchmap, '_LocalBranchCache'):
+        branchcachewrite = safeattrsetter(branchmap._LocalBranchCache, b'write')
+        branchcachewrite.set(lambda *args: None)
+    else:
+        branchcachewrite = safeattrsetter(branchmap.branchcache, b'write')
+        branchcachewrite.set(lambda *args: None)
     try:
         for name in allfilters:
             printname = name
@@ -4221,7 +4279,10 @@
                 printname = b'unfiltered'
             timer(getbranchmap(name), title=printname)
     finally:
-        branchcacheread.restore()
+        if old_branch_cache_from_file is not None:
+            branchmap.branch_cache_from_file = old_branch_cache_from_file
+        if branchcacheread is not None:
+            branchcacheread.restore()
         branchcachewrite.restore()
     fm.end()
 
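
This hunk juggles three generations of branchmap internals, but the underlying idiom is always patch-then-restore. Stripped of `safeattrsetter` (a perf.py helper), it is a few lines of stdlib Python:

    import contextlib

    @contextlib.contextmanager
    def patched(obj, name, replacement):
        """Temporarily replace `obj.name`, restoring it even on error."""
        original = getattr(obj, name)
        setattr(obj, name, replacement)
        try:
            yield
        finally:
            setattr(obj, name, original)

    # e.g. silence cache writes while timing (class name as in the hunk):
    # with patched(branchmap._LocalBranchCache, 'write', lambda *args: None):
    #     run_benchmark()
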
@@ -4303,6 +4364,19 @@
         baserepo = repo.filtered(b'__perf_branchmap_update_base')
         targetrepo = repo.filtered(b'__perf_branchmap_update_target')
 
+        bcache = repo.branchmap()
+        copy_method = 'copy'
+
+        copy_base_kwargs = copy_target_kwargs = {}
+        if hasattr(bcache, 'copy'):
+            if 'repo' in getargspec(bcache.copy).args:
+                copy_base_kwargs = {"repo": baserepo}
+                copy_target_kwargs = {"repo": targetrepo}
+        else:
+            copy_method = 'inherit_for'
+            copy_base_kwargs = {"repo": baserepo}
+            copy_target_kwargs = {"repo": targetrepo}
+
         # try to find an existing branchmap to reuse
         subsettable = getbranchmapsubsettable()
         candidatefilter = subsettable.get(None)
@@ -4311,7 +4385,7 @@
             if candidatebm.validfor(baserepo):
                 filtered = repoview.filterrevs(repo, candidatefilter)
                 missing = [r for r in allbaserevs if r in filtered]
-                base = candidatebm.copy()
+                base = getattr(candidatebm, copy_method)(**copy_base_kwargs)
                 base.update(baserepo, missing)
                 break
             candidatefilter = subsettable.get(candidatefilter)
@@ -4321,7 +4395,7 @@
             base.update(baserepo, allbaserevs)
 
         def setup():
-            x[0] = base.copy()
+            x[0] = getattr(base, copy_method)(**copy_target_kwargs)
             if clearcaches:
                 unfi._revbranchcache = None
                 clearchangelog(repo)
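
The `getargspec` probe decides at runtime whether the installed `copy` accepts a `repo` argument. The same idea with the stdlib, hedged as a sketch since both method names are unstable internals taken from this hunk:

    import inspect

    def copy_branch_cache(bcache, repo):
        # Newer caches expose inherit_for(repo); older ones expose copy(),
        # which may or may not accept a repo keyword.
        if hasattr(bcache, 'copy'):
            if 'repo' in inspect.getfullargspec(bcache.copy).args:
                return bcache.copy(repo=repo)
            return bcache.copy()
        return bcache.inherit_for(repo=repo)
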
@@ -4368,10 +4442,10 @@
 
     repo.branchmap()  # make sure we have a relevant, up to date branchmap
 
-    try:
-        fromfile = branchmap.branchcache.fromfile
-    except AttributeError:
-        # older versions
+    fromfile = getattr(branchmap, 'branch_cache_from_file', None)
+    if fromfile is None:
+        fromfile = getattr(branchmap.branchcache, 'fromfile', None)
+    if fromfile is None:
         fromfile = branchmap.read
 
     currentfilter = filter
--- a/hgext/largefiles/lfutil.py	Thu Jun 13 09:52:39 2024 +0200
+++ b/hgext/largefiles/lfutil.py	Mon Jun 24 12:05:31 2024 +0200
@@ -430,6 +430,7 @@
     def composedmatchfn(f):
         return isstandin(f) and rmatcher.matchfn(splitstandin(f))
 
+    smatcher._was_tampered_with = True
     smatcher.matchfn = composedmatchfn
 
     return smatcher
@@ -716,6 +717,7 @@
         return match
 
     lfiles = listlfiles(repo)
+    match._was_tampered_with = True
     match._files = repo._subdirlfs(match.files(), lfiles)
 
     # Case 2: user calls commit with specified patterns: refresh
@@ -746,6 +748,7 @@
     # user.  Have to modify _files to prevent commit() from
     # complaining "not tracked" for big files.
     match = copy.copy(match)
+    match._was_tampered_with = True
     origmatchfn = match.matchfn
 
     # Check both the list of largefiles and the list of
--- a/hgext/largefiles/overrides.py	Thu Jun 13 09:52:39 2024 +0200
+++ b/hgext/largefiles/overrides.py	Mon Jun 24 12:05:31 2024 +0200
@@ -71,6 +71,7 @@
     """create a matcher that matches only the largefiles in the original
     matcher"""
     m = copy.copy(match)
+    m._was_tampered_with = True
     lfile = lambda f: lfutil.standin(f) in manifest
     m._files = [lf for lf in m._files if lfile(lf)]
     m._fileset = set(m._files)
@@ -86,6 +87,7 @@
         excluded.update(exclude)
 
     m = copy.copy(match)
+    m._was_tampered_with = True
     notlfile = lambda f: not (
         lfutil.isstandin(f) or lfutil.standin(f) in manifest or f in excluded
     )
@@ -442,6 +444,8 @@
 
         pats.update(fixpats(f, tostandin) for f in p)
 
+        m._was_tampered_with = True
+
         for i in range(0, len(m._files)):
             # Don't add '.hglf' to m.files, since that is already covered by '.'
             if m._files[i] == b'.':
@@ -849,6 +853,7 @@
                     newpats.append(pat)
             match = orig(ctx, newpats, opts, globbed, default, badfn=badfn)
             m = copy.copy(match)
+            m._was_tampered_with = True
             lfile = lambda f: lfutil.standin(f) in manifest
             m._files = [lfutil.standin(f) for f in m._files if lfile(f)]
             m._fileset = set(m._files)
@@ -967,6 +972,7 @@
                 opts = {}
             match = orig(mctx, pats, opts, globbed, default, badfn=badfn)
             m = copy.copy(match)
+            m._was_tampered_with = True
 
             # revert supports recursing into subrepos, and though largefiles
             # currently doesn't work correctly in that case, this match is
@@ -1595,6 +1601,7 @@
     # confused state later.
     if s.deleted:
         m = copy.copy(matcher)
+        m._was_tampered_with = True
 
         # The m._files and m._map attributes are not changed to the deleted list
         # because that affects the m.exact() test, which in turn governs whether
@@ -1721,6 +1728,7 @@
     err = 1
     notbad = set()
     m = scmutil.match(ctx, (file1,) + pats, pycompat.byteskwargs(opts))
+    m._was_tampered_with = True
     origmatchfn = m.matchfn
 
     def lfmatchfn(f):
--- a/hgext/largefiles/reposetup.py	Thu Jun 13 09:52:39 2024 +0200
+++ b/hgext/largefiles/reposetup.py	Mon Jun 24 12:05:31 2024 +0200
@@ -181,6 +181,7 @@
                     return newfiles
 
                 m = copy.copy(match)
+                m._was_tampered_with = True
                 m._files = tostandins(m._files)
 
                 result = orig(
@@ -193,6 +194,7 @@
                         dirstate = self.dirstate
                         return sf in dirstate or dirstate.hasdir(sf)
 
+                    match._was_tampered_with = True
                     match._files = [f for f in match._files if sfindirstate(f)]
                     # Don't waste time getting the ignored and unknown
                     # files from lfdirstate
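
Every largefiles hunk in this changeset applies the same convention: set `_was_tampered_with = True` on a (copied) matcher before overriding its internals, so other code can tell the matcher is no longer pristine. A toy version of the pattern:

    import copy

    def restrict(match, keep):
        """Return a copy of `match` narrowed to `keep`, flagged as tampered."""
        m = copy.copy(match)
        m._was_tampered_with = True  # advertise the mutation downstream
        original_matchfn = m.matchfn
        m.matchfn = lambda f: f in keep and original_matchfn(f)
        return m
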
--- a/hgext/rebase.py	Thu Jun 13 09:52:39 2024 +0200
+++ b/hgext/rebase.py	Mon Jun 24 12:05:31 2024 +0200
@@ -2133,16 +2133,16 @@
             )
 
             revsprepull = len(repo)
-            origpostincoming = commands.postincoming
+            origpostincoming = cmdutil.postincoming
 
             def _dummy(*args, **kwargs):
                 pass
 
-            commands.postincoming = _dummy
+            cmdutil.postincoming = _dummy
             try:
                 ret = orig(ui, repo, *args, **opts)
             finally:
-                commands.postincoming = origpostincoming
+                cmdutil.postincoming = origpostincoming
             revspostpull = len(repo)
             if revspostpull > revsprepull:
             # --rev option from pull conflicts with rebase's own --rev
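
The fix is about where the monkeypatch lands: `postincoming` now lives in `cmdutil`, and a temporary override only takes effect on the module callers resolve the name from. A self-contained demonstration of that rule:

    import types
    from unittest import mock

    mod = types.ModuleType("mod")
    mod.postincoming = lambda: "real"

    # Patch the attribute on the module callers look it up from; patching
    # a stale alias elsewhere would be a no-op for them.
    with mock.patch.object(mod, "postincoming", lambda: "stub"):
        assert mod.postincoming() == "stub"
    assert mod.postincoming() == "real"  # restored, as in the finally: above
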
--- a/i18n/ja.po	Thu Jun 13 09:52:39 2024 +0200
+++ b/i18n/ja.po	Mon Jun 24 12:05:31 2024 +0200
@@ -34780,8 +34780,8 @@
 msgstr "廃止マーカの作成機能は無効化されています"
 
 #, python-format
-msgid "obsolete feature not enabled but %i markers found!\n"
-msgstr "obsolete 機能は無効ですが、 %i 個の廃止情報マーカが存在します!\n"
+msgid "\"obsolete\" feature not enabled but %i markers found!\n"
+msgstr "\"obsolete\" 機能は無効ですが、 %i 個の廃止情報マーカが存在します!\n"
 
 #, python-format
 msgid "unknown key: %r"
--- a/i18n/pt_BR.po	Thu Jun 13 09:52:39 2024 +0200
+++ b/i18n/pt_BR.po	Mon Jun 24 12:05:31 2024 +0200
@@ -36049,9 +36049,9 @@
 "repositório"
 
 #, python-format
-msgid "obsolete feature not enabled but %i markers found!\n"
-msgstr ""
-"a funcionalidade obsolete não está habilitada, mas foram encontradas %i "
+msgid "\"obsolete\" feature not enabled but %i markers found!\n"
+msgstr ""
+"a funcionalidade \"obsolete\" não está habilitada, mas foram encontradas %i "
 "marcações!\n"
 
 #, python-format
--- a/mercurial/branchmap.py	Thu Jun 13 09:52:39 2024 +0200
+++ b/mercurial/branchmap.py	Mon Jun 24 12:05:31 2024 +0200
@@ -15,6 +15,7 @@
 )
 
 from typing import (
+    Any,
     Callable,
     Dict,
     Iterable,
@@ -24,6 +25,7 @@
     TYPE_CHECKING,
     Tuple,
     Union,
+    cast,
 )
 
 from . import (
@@ -59,7 +61,37 @@
 
     def __getitem__(self, repo):
         self.updatecache(repo)
-        return self._per_filter[repo.filtername]
+        bcache = self._per_filter[repo.filtername]
+        bcache._ensure_populated(repo)
+        assert bcache._filtername == repo.filtername, (
+            bcache._filtername,
+            repo.filtername,
+        )
+        return bcache
+
+    def update_disk(self, repo, detect_pure_topo=False):
+        """ensure an up-to-date cache is (or will be) written on disk
+
+        The cache for this repository view is updated if needed and written on
+        disk.
+
+        If a transaction is in progress, the write is scheduled at transaction
+        close. See the `BranchMapCache.write_dirty` method.
+
+        This method exists independently of __getitem__ as it is sometimes
+        useful to signal that we do not intend to use the data in memory yet.
+        """
+        self.updatecache(repo)
+        bcache = self._per_filter[repo.filtername]
+        assert bcache._filtername == repo.filtername, (
+            bcache._filtername,
+            repo.filtername,
+        )
+        if detect_pure_topo:
+            bcache._detect_pure_topo(repo)
+        tr = repo.currenttransaction()
+        if getattr(tr, 'finalized', True):
+            bcache.sync_disk(repo)
 
     def updatecache(self, repo):
         """Update the cache for the given filtered view on a repository"""
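
`update_disk` writes immediately only when no transaction is open, otherwise it leaves the cache dirty for `write_dirty` at transaction close; the gate is `getattr(tr, 'finalized', True)`, which treats "no transaction" like "finalized". A toy of that decision:

    class FakeTransaction:
        finalized = False

    def maybe_sync(cache, tr):
        # tr is None (no transaction) or already finalized: write now.
        # Open transaction: defer; write_dirty runs at transaction close.
        if getattr(tr, 'finalized', True):
            cache.sync_disk()

    # maybe_sync(cache, None)               -> writes immediately
    # maybe_sync(cache, FakeTransaction())  -> deferred
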
@@ -72,7 +104,7 @@
         bcache = self._per_filter.get(filtername)
         if bcache is None or not bcache.validfor(repo):
             # cache object missing or cache object stale? Read from disk
-            bcache = branchcache.fromfile(repo)
+            bcache = branch_cache_from_file(repo)
 
         revs = []
         if bcache is None:
@@ -82,12 +114,13 @@
             subsetname = subsettable.get(filtername)
             if subsetname is not None:
                 subset = repo.filtered(subsetname)
-                bcache = self[subset].copy()
+                self.updatecache(subset)
+                bcache = self._per_filter[subset.filtername].inherit_for(repo)
                 extrarevs = subset.changelog.filteredrevs - cl.filteredrevs
                 revs.extend(r for r in extrarevs if r <= bcache.tiprev)
             else:
                 # nothing to fall back on, start empty.
-                bcache = branchcache(repo)
+                bcache = new_branch_cache(repo)
 
         revs.extend(cl.revs(start=bcache.tiprev + 1))
         if revs:
@@ -118,7 +151,7 @@
 
         if rbheads:
             rtiprev = max((int(clrev(node)) for node in rbheads))
-            cache = branchcache(
+            cache = new_branch_cache(
                 repo,
                 remotebranchmap,
                 repo[rtiprev].node(),
@@ -131,19 +164,26 @@
             for candidate in (b'base', b'immutable', b'served'):
                 rview = repo.filtered(candidate)
                 if cache.validfor(rview):
+                    cache._filtername = candidate
                     self._per_filter[candidate] = cache
+                    cache._state = STATE_DIRTY
                     cache.write(rview)
                     return
 
     def clear(self):
         self._per_filter.clear()
 
-    def write_delayed(self, repo):
+    def write_dirty(self, repo):
         unfi = repo.unfiltered()
-        for filtername, cache in self._per_filter.items():
-            if cache._delayed:
+        for filtername in repoviewutil.get_ordered_subset():
+            cache = self._per_filter.get(filtername)
+            if cache is None:
+                continue
+            if filtername is None:
+                repo = unfi
+            else:
                 repo = unfi.filtered(filtername)
-                cache.write(repo)
+            cache.sync_disk(repo)
 
 
 def _unknownnode(node):
@@ -158,26 +198,11 @@
         return b'branch cache'
 
 
-class branchcache:
+class _BaseBranchCache:
     """A dict like object that hold branches heads cache.
 
     This cache is used to avoid costly computations to determine all the
     branch heads of a repo.
-
-    The cache is serialized on disk in the following format:
-
-    <tip hex node> <tip rev number> [optional filtered repo hex hash]
-    <branch head hex node> <open/closed state> <branch name>
-    <branch head hex node> <open/closed state> <branch name>
-    ...
-
-    The first line is used to check if the cache is still valid. If the
-    branch cache is for a filtered repo view, an optional third hash is
-    included that hashes the hashes of all filtered and obsolete revisions.
-
-    The open/closed state is represented by a single letter 'o' or 'c'.
-    This field can be used to avoid changelog reads when determining if a
-    branch head closes a branch or not.
     """
 
     def __init__(
@@ -186,64 +211,18 @@
         entries: Union[
             Dict[bytes, List[bytes]], Iterable[Tuple[bytes, List[bytes]]]
         ] = (),
-        tipnode: Optional[bytes] = None,
-        tiprev: Optional[int] = nullrev,
-        filteredhash: Optional[bytes] = None,
-        closednodes: Optional[Set[bytes]] = None,
-        hasnode: Optional[Callable[[bytes], bool]] = None,
+        closed_nodes: Optional[Set[bytes]] = None,
     ) -> None:
         """hasnode is a function which can be used to verify whether changelog
         has a given node or not. If it's not provided, we assume that every node
         we have exists in changelog"""
-        self._repo = repo
-        self._delayed = False
-        if tipnode is None:
-            self.tipnode = repo.nullid
-        else:
-            self.tipnode = tipnode
-        self.tiprev = tiprev
-        self.filteredhash = filteredhash
         # closednodes is a set of nodes that close their branch. If the branch
         # cache has been updated, it may contain nodes that are no longer
         # heads.
-        if closednodes is None:
-            self._closednodes = set()
-        else:
-            self._closednodes = closednodes
+        if closed_nodes is None:
+            closed_nodes = set()
+        self._closednodes = set(closed_nodes)
         self._entries = dict(entries)
-        # whether closed nodes are verified or not
-        self._closedverified = False
-        # branches for which nodes are verified
-        self._verifiedbranches = set()
-        self._hasnode = hasnode
-        if self._hasnode is None:
-            self._hasnode = lambda x: True
-
-    def _verifyclosed(self):
-        """verify the closed nodes we have"""
-        if self._closedverified:
-            return
-        for node in self._closednodes:
-            if not self._hasnode(node):
-                _unknownnode(node)
-
-        self._closedverified = True
-
-    def _verifybranch(self, branch):
-        """verify head nodes for the given branch."""
-        if branch not in self._entries or branch in self._verifiedbranches:
-            return
-        for n in self._entries[branch]:
-            if not self._hasnode(n):
-                _unknownnode(n)
-
-        self._verifiedbranches.add(branch)
-
-    def _verifyall(self):
-        """verifies nodes of all the branches"""
-        needverification = set(self._entries.keys()) - self._verifiedbranches
-        for b in needverification:
-            self._verifybranch(b)
 
     def __iter__(self):
         return iter(self._entries)
@@ -252,115 +231,20 @@
         self._entries[key] = value
 
     def __getitem__(self, key):
-        self._verifybranch(key)
         return self._entries[key]
 
     def __contains__(self, key):
-        self._verifybranch(key)
         return key in self._entries
 
     def iteritems(self):
-        for k, v in self._entries.items():
-            self._verifybranch(k)
-            yield k, v
+        return self._entries.items()
 
     items = iteritems
 
     def hasbranch(self, label):
         """checks whether a branch of this name exists or not"""
-        self._verifybranch(label)
         return label in self._entries
 
-    @classmethod
-    def fromfile(cls, repo):
-        f = None
-        try:
-            f = repo.cachevfs(cls._filename(repo))
-            lineiter = iter(f)
-            cachekey = next(lineiter).rstrip(b'\n').split(b" ", 2)
-            last, lrev = cachekey[:2]
-            last, lrev = bin(last), int(lrev)
-            filteredhash = None
-            hasnode = repo.changelog.hasnode
-            if len(cachekey) > 2:
-                filteredhash = bin(cachekey[2])
-            bcache = cls(
-                repo,
-                tipnode=last,
-                tiprev=lrev,
-                filteredhash=filteredhash,
-                hasnode=hasnode,
-            )
-            if not bcache.validfor(repo):
-                # invalidate the cache
-                raise ValueError('tip differs')
-            bcache.load(repo, lineiter)
-        except (IOError, OSError):
-            return None
-
-        except Exception as inst:
-            if repo.ui.debugflag:
-                msg = b'invalid %s: %s\n'
-                repo.ui.debug(
-                    msg
-                    % (
-                        _branchcachedesc(repo),
-                        stringutil.forcebytestr(inst),
-                    )
-                )
-            bcache = None
-
-        finally:
-            if f:
-                f.close()
-
-        return bcache
-
-    def load(self, repo, lineiter):
-        """fully loads the branchcache by reading from the file using the line
-        iterator passed"""
-        for line in lineiter:
-            line = line.rstrip(b'\n')
-            if not line:
-                continue
-            node, state, label = line.split(b" ", 2)
-            if state not in b'oc':
-                raise ValueError('invalid branch state')
-            label = encoding.tolocal(label.strip())
-            node = bin(node)
-            self._entries.setdefault(label, []).append(node)
-            if state == b'c':
-                self._closednodes.add(node)
-
-    @staticmethod
-    def _filename(repo):
-        """name of a branchcache file for a given repo or repoview"""
-        filename = b"branch2"
-        if repo.filtername:
-            filename = b'%s-%s' % (filename, repo.filtername)
-        return filename
-
-    def validfor(self, repo):
-        """check that cache contents are valid for (a subset of) this repo
-
-        - False when the order of changesets changed or if we detect a strip.
-        - True when cache is up-to-date for the current repo or its subset."""
-        try:
-            node = repo.changelog.node(self.tiprev)
-        except IndexError:
-            # changesets were stripped and now we don't even have enough to
-            # find tiprev
-            return False
-        if self.tipnode != node:
-            # tiprev doesn't correspond to tipnode: repo was stripped, or this
-            # repo has a different order of changesets
-            return False
-        tiphash = scmutil.filteredhash(repo, self.tiprev, needobsolete=True)
-        # hashes don't match if this repo view has a different set of filtered
-        # revisions (e.g. due to phase changes) or obsolete revisions (e.g.
-        # history was rewritten)
-        return self.filteredhash == tiphash
-
     def _branchtip(self, heads):
         """Return tuple with last open head in heads and false,
         otherwise return last closed head and true."""
@@ -383,7 +267,6 @@
         return (n for n in nodes if n not in self._closednodes)
 
     def branchheads(self, branch, closed=False):
-        self._verifybranch(branch)
         heads = self._entries[branch]
         if not closed:
             heads = list(self.iteropen(heads))
@@ -395,60 +278,8 @@
 
     def iterheads(self):
         """returns all the heads"""
-        self._verifyall()
         return self._entries.values()
 
-    def copy(self):
-        """return an deep copy of the branchcache object"""
-        return type(self)(
-            self._repo,
-            self._entries,
-            self.tipnode,
-            self.tiprev,
-            self.filteredhash,
-            self._closednodes,
-        )
-
-    def write(self, repo):
-        tr = repo.currenttransaction()
-        if not getattr(tr, 'finalized', True):
-            # Avoid premature writing.
-            #
-            # (The cache warming setup by localrepo will update the file later.)
-            self._delayed = True
-            return
-        try:
-            filename = self._filename(repo)
-            with repo.cachevfs(filename, b"w", atomictemp=True) as f:
-                cachekey = [hex(self.tipnode), b'%d' % self.tiprev]
-                if self.filteredhash is not None:
-                    cachekey.append(hex(self.filteredhash))
-                f.write(b" ".join(cachekey) + b'\n')
-                nodecount = 0
-                for label, nodes in sorted(self._entries.items()):
-                    label = encoding.fromlocal(label)
-                    for node in nodes:
-                        nodecount += 1
-                        if node in self._closednodes:
-                            state = b'c'
-                        else:
-                            state = b'o'
-                        f.write(b"%s %s %s\n" % (hex(node), state, label))
-            repo.ui.log(
-                b'branchcache',
-                b'wrote %s with %d labels and %d nodes\n',
-                _branchcachedesc(repo),
-                len(self._entries),
-                nodecount,
-            )
-            self._delayed = False
-        except (IOError, OSError, error.Abort) as inst:
-            # Abort may be raised by read only opener, so log and continue
-            repo.ui.debug(
-                b"couldn't write branch cache: %s\n"
-                % stringutil.forcebytestr(inst)
-            )
-
     def update(self, repo, revgen):
         """Given a branchhead cache, self, that may have extra nodes or be
         missing heads, and a generator of nodes that are strictly a superset of
@@ -456,29 +287,69 @@
         """
         starttime = util.timer()
         cl = repo.changelog
+        # Faster than using ctx.obsolete()
+        obsrevs = obsolete.getrevs(repo, b'obsolete')
         # collect new branch entries
         newbranches = {}
+        new_closed = set()
+        obs_ignored = set()
         getbranchinfo = repo.revbranchcache().branchinfo
+        max_rev = -1
         for r in revgen:
+            max_rev = max(max_rev, r)
+            if r in obsrevs:
+                # We ignore obsolete changesets as they shouldn't be
+                # considered heads.
+                obs_ignored.add(r)
+                continue
             branch, closesbranch = getbranchinfo(r)
             newbranches.setdefault(branch, []).append(r)
             if closesbranch:
-                self._closednodes.add(cl.node(r))
+                new_closed.add(r)
+        if max_rev < 0:
+            msg = "running branchcache.update without revision to update"
+            raise error.ProgrammingError(msg)
+
+        self._process_new(
+            repo,
+            newbranches,
+            new_closed,
+            obs_ignored,
+            max_rev,
+        )
+
+        self._closednodes.update(cl.node(rev) for rev in new_closed)
 
-        # new tip revision which we found after iterating items from new
-        # branches
-        ntiprev = self.tiprev
+        duration = util.timer() - starttime
+        repo.ui.log(
+            b'branchcache',
+            b'updated %s in %.4f seconds\n',
+            _branchcachedesc(repo),
+            duration,
+        )
+        return max_rev
 
+    def _process_new(
+        self,
+        repo,
+        newbranches,
+        new_closed,
+        obs_ignored,
+        max_rev,
+    ):
+        """update the branchmap from a set of new information"""
         # Delay fetching the topological heads until they are needed.
         # A repository without non-continuous branches can skip this part.
         topoheads = None
 
+        cl = repo.changelog
+        getbranchinfo = repo.revbranchcache().branchinfo
+        # Faster than using ctx.obsolete()
+        obsrevs = obsolete.getrevs(repo, b'obsolete')
+
         # If a changeset is visible, its parents must be visible too, so
         # use the faster unfiltered parent accessor.
-        parentrevs = repo.unfiltered().changelog.parentrevs
-
-        # Faster than using ctx.obsolete()
-        obsrevs = obsolete.getrevs(repo, b'obsolete')
+        parentrevs = cl._uncheckedparentrevs
 
         for branch, newheadrevs in newbranches.items():
             # For every branch, compute the new branchheads.
@@ -520,11 +391,6 @@
             bheadset = {cl.rev(node) for node in bheads}
             uncertain = set()
             for newrev in sorted(newheadrevs):
-                if newrev in obsrevs:
-                    # We ignore obsolete changesets as they shouldn't be
-                    # considered heads.
-                    continue
-
                 if not bheadset:
                     bheadset.add(newrev)
                     continue
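
The reworked `update` sets obsolete revisions aside before any head computation (they must never become branch heads) and records them in `obs_ignored` for `_process_new`. The partitioning, on stand-in data:

    # Stand-in data: revision numbers, some flagged obsolete.
    obsrevs = {4, 7}
    revgen = [3, 4, 5, 6, 7]

    newbranches = {}
    obs_ignored = set()
    for r in revgen:
        if r in obsrevs:
            obs_ignored.add(r)  # never considered as a head
            continue
        newbranches.setdefault(b'default', []).append(r)

    assert obs_ignored == {4, 7}
    assert newbranches == {b'default': [3, 5, 6]}
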
@@ -561,50 +427,665 @@
                         bheadset -= ancestors
             if bheadset:
                 self[branch] = [cl.node(rev) for rev in sorted(bheadset)]
-            tiprev = max(newheadrevs)
-            if tiprev > ntiprev:
-                ntiprev = tiprev
+
+
+STATE_CLEAN = 1
+STATE_INHERITED = 2
+STATE_DIRTY = 3
+
+
+class _LocalBranchCache(_BaseBranchCache):
+    """base class of branch-map info for a local repo or repoview"""
+
+    _base_filename = None
+    _default_key_hashes: Tuple[bytes] = cast(Tuple[bytes], ())
+
+    def __init__(
+        self,
+        repo: "localrepo.localrepository",
+        entries: Union[
+            Dict[bytes, List[bytes]], Iterable[Tuple[bytes, List[bytes]]]
+        ] = (),
+        tipnode: Optional[bytes] = None,
+        tiprev: Optional[int] = nullrev,
+        key_hashes: Optional[Tuple[bytes]] = None,
+        closednodes: Optional[Set[bytes]] = None,
+        hasnode: Optional[Callable[[bytes], bool]] = None,
+        verify_node: bool = False,
+        inherited: bool = False,
+    ) -> None:
+        """hasnode is a function which can be used to verify whether changelog
+        has a given node or not. If it's not provided, we assume that every node
+        we have exists in changelog"""
+        self._filtername = repo.filtername
+        if tipnode is None:
+            self.tipnode = repo.nullid
+        else:
+            self.tipnode = tipnode
+        self.tiprev = tiprev
+        if key_hashes is None:
+            self.key_hashes = self._default_key_hashes
+        else:
+            self.key_hashes = key_hashes
+        self._state = STATE_CLEAN
+        if inherited:
+            self._state = STATE_INHERITED
+
+        super().__init__(repo=repo, entries=entries, closed_nodes=closednodes)
+        # closednodes is a set of nodes that close their branch. If the branch
+        # cache has been updated, it may contain nodes that are no longer
+        # heads.
+
+        # Do we need to verify branches at all?
+        self._verify_node = verify_node
+        # branches for which nodes are verified
+        self._verifiedbranches = set()
+        self._hasnode = None
+        if self._verify_node:
+            self._hasnode = repo.changelog.hasnode
+
+    def _compute_key_hashes(self, repo) -> Tuple[bytes]:
+        raise NotImplementedError
+
+    def _ensure_populated(self, repo):
+        """make sure any lazily loaded values are fully populated"""
+
+    def _detect_pure_topo(self, repo) -> None:
+        pass
+
+    def validfor(self, repo):
+        """check that cache contents are valid for (a subset of) this repo
+
+        - False when the order of changesets changed or if we detect a strip.
+        - True when cache is up-to-date for the current repo or its subset."""
+        try:
+            node = repo.changelog.node(self.tiprev)
+        except IndexError:
+            # changesets were stripped and now we don't even have enough to
+            # find tiprev
+            return False
+        if self.tipnode != node:
+            # tiprev doesn't correspond to tipnode: repo was stripped, or this
+            # repo has a different order of changesets
+            return False
+        repo_key_hashes = self._compute_key_hashes(repo)
+        # hashes don't match if this repo view has a different set of filtered
+        # revisions (e.g. due to phase changes) or obsolete revisions (e.g.
+        # history was rewritten)
+        return self.key_hashes == repo_key_hashes
+
+    @classmethod
+    def fromfile(cls, repo):
+        f = None
+        try:
+            f = repo.cachevfs(cls._filename(repo))
+            lineiter = iter(f)
+            init_kwargs = cls._load_header(repo, lineiter)
+            bcache = cls(
+                repo,
+                verify_node=True,
+                **init_kwargs,
+            )
+            if not bcache.validfor(repo):
+                # invalidate the cache
+                raise ValueError('tip differs')
+            bcache._load_heads(repo, lineiter)
+        except (IOError, OSError):
+            return None
+
+        except Exception as inst:
+            if repo.ui.debugflag:
+                msg = b'invalid %s: %s\n'
+                msg %= (
+                    _branchcachedesc(repo),
+                    stringutil.forcebytestr(inst),
+                )
+                repo.ui.debug(msg)
+            bcache = None
+
+        finally:
+            if f:
+                f.close()
+
+        return bcache
+
+    @classmethod
+    def _load_header(cls, repo, lineiter) -> "dict[str, Any]":
+        raise NotImplementedError
+
+    def _load_heads(self, repo, lineiter):
+        """fully loads the branchcache by reading from the file using the line
+        iterator passed"""
+        for line in lineiter:
+            line = line.rstrip(b'\n')
+            if not line:
+                continue
+            node, state, label = line.split(b" ", 2)
+            if state not in b'oc':
+                raise ValueError('invalid branch state')
+            label = encoding.tolocal(label.strip())
+            node = bin(node)
+            self._entries.setdefault(label, []).append(node)
+            if state == b'c':
+                self._closednodes.add(node)
 
-        if ntiprev > self.tiprev:
-            self.tiprev = ntiprev
-            self.tipnode = cl.node(ntiprev)
+    @classmethod
+    def _filename(cls, repo):
+        """name of a branchcache file for a given repo or repoview"""
+        filename = cls._base_filename
+        assert filename is not None
+        if repo.filtername:
+            filename = b'%s-%s' % (filename, repo.filtername)
+        return filename
+
+    def inherit_for(self, repo):
+        """return a deep copy of the branchcache object"""
+        assert repo.filtername != self._filtername
+        other = type(self)(
+            repo=repo,
+            # we always do a shallow copy of self._entries, and the values are
+            # always replaced, so no need to deepcopy as long as the above
+            # remains true.
+            entries=self._entries,
+            tipnode=self.tipnode,
+            tiprev=self.tiprev,
+            key_hashes=self.key_hashes,
+            closednodes=set(self._closednodes),
+            verify_node=self._verify_node,
+            inherited=True,
+        )
+        # also copy information about the current verification state
+        other._verifiedbranches = set(self._verifiedbranches)
+        return other
+
+    def sync_disk(self, repo):
+        """synchronise the on disk file with the cache state
+
+        If new values specific to this filter level need to be written, the
+        file is updated; if the state of the branchcache is inherited from a
+        subset, any stale on-disk file is deleted.
+
+        This method does nothing if there is nothing to do.
+        """
+        if self._state == STATE_DIRTY:
+            self.write(repo)
+        elif self._state == STATE_INHERITED:
+            filename = self._filename(repo)
+            repo.cachevfs.tryunlink(filename)
+
+    def write(self, repo):
+        assert self._filtername == repo.filtername, (
+            self._filtername,
+            repo.filtername,
+        )
+        assert self._state == STATE_DIRTY, self._state
+        # This method should not be called during an open transaction
+        tr = repo.currenttransaction()
+        if not getattr(tr, 'finalized', True):
+            msg = "writing branchcache in the middle of a transaction"
+            raise error.ProgrammingError(msg)
+        try:
+            filename = self._filename(repo)
+            with repo.cachevfs(filename, b"w", atomictemp=True) as f:
+                self._write_header(f)
+                nodecount = self._write_heads(repo, f)
+            repo.ui.log(
+                b'branchcache',
+                b'wrote %s with %d labels and %d nodes\n',
+                _branchcachedesc(repo),
+                len(self._entries),
+                nodecount,
+            )
+            self._state = STATE_CLEAN
+        except (IOError, OSError, error.Abort) as inst:
+            # Abort may be raised by read only opener, so log and continue
+            repo.ui.debug(
+                b"couldn't write branch cache: %s\n"
+                % stringutil.forcebytestr(inst)
+            )
+
+    def _write_header(self, fp) -> None:
+        raise NotImplementedError
+
+    def _write_heads(self, repo, fp) -> int:
+        """write list of heads to a file
+
+        Return the number of heads written."""
+        nodecount = 0
+        for label, nodes in sorted(self._entries.items()):
+            label = encoding.fromlocal(label)
+            for node in nodes:
+                nodecount += 1
+                if node in self._closednodes:
+                    state = b'c'
+                else:
+                    state = b'o'
+                fp.write(b"%s %s %s\n" % (hex(node), state, label))
+        return nodecount
+
+    def _verifybranch(self, branch):
+        """verify head nodes for the given branch."""
+        if not self._verify_node:
+            return
+        if branch not in self._entries or branch in self._verifiedbranches:
+            return
+        assert self._hasnode is not None
+        for n in self._entries[branch]:
+            if not self._hasnode(n):
+                _unknownnode(n)
+
+        self._verifiedbranches.add(branch)
+
+    def _verifyall(self):
+        """verifies nodes of all the branches"""
+        for b in self._entries.keys():
+            if b not in self._verifiedbranches:
+                self._verifybranch(b)
+
+    def __getitem__(self, key):
+        self._verifybranch(key)
+        return super().__getitem__(key)
+
+    def __contains__(self, key):
+        self._verifybranch(key)
+        return super().__contains__(key)
+
+    def iteritems(self):
+        self._verifyall()
+        return super().iteritems()
+
+    items = iteritems
+
+    def iterheads(self):
+        """returns all the heads"""
+        self._verifyall()
+        return super().iterheads()
+
+    def hasbranch(self, label):
+        """checks whether a branch of this name exists or not"""
+        self._verifybranch(label)
+        return super().hasbranch(label)
+
+    def branchheads(self, branch, closed=False):
+        self._verifybranch(branch)
+        return super().branchheads(branch, closed=closed)
+
+    def update(self, repo, revgen):
+        assert self._filtername == repo.filtername, (
+            self._filtername,
+            repo.filtername,
+        )
+        cl = repo.changelog
+        max_rev = super().update(repo, revgen)
+        # new tip revision which we found after iterating items from new
+        # branches
+        if max_rev is not None and max_rev > self.tiprev:
+            self.tiprev = max_rev
+            self.tipnode = cl.node(max_rev)
+        else:
+            # We should not be here if this is false
+            assert cl.node(self.tiprev) == self.tipnode
 
         if not self.validfor(repo):
-            # old cache key is now invalid for the repo, but we've just updated
-            # the cache and we assume it's valid, so let's make the cache key
-            # valid as well by recomputing it from the cached data
-            self.tipnode = repo.nullid
-            self.tiprev = nullrev
-            for heads in self.iterheads():
-                if not heads:
-                    # all revisions on a branch are obsolete
-                    continue
-                # note: tiprev is not necessarily the tip revision of repo,
-                # because the tip could be obsolete (i.e. not a head)
-                tiprev = max(cl.rev(node) for node in heads)
-                if tiprev > self.tiprev:
-                    self.tipnode = cl.node(tiprev)
-                    self.tiprev = tiprev
-        self.filteredhash = scmutil.filteredhash(
-            repo, self.tiprev, needobsolete=True
+            # the tiprev and tipnode should be aligned, so if the current repo
+            # is not seen as valid, it is because the old cache key is now
+            # invalid for the repo.
+            #
+            # However. we've just updated the cache and we assume it's valid,
+            # so let's make the cache key valid as well by recomputing it from
+            # the cached data
+            self.key_hashes = self._compute_key_hashes(repo)
+            self.filteredhash = scmutil.combined_filtered_and_obsolete_hash(
+                repo,
+                self.tiprev,
+            )
+
+        self._state = STATE_DIRTY
+        tr = repo.currenttransaction()
+        if getattr(tr, 'finalized', True):
+            # Avoid premature writing.
+            #
+            # (The cache warming setup by localrepo will update the file later.)
+            self.write(repo)
+
+
+def branch_cache_from_file(repo) -> Optional[_LocalBranchCache]:
+    """Build a branch cache from on-disk data if possible
+
+    Return a branch cache of the right format depending on the repository.
+    """
+    if repo.ui.configbool(b"experimental", b"branch-cache-v3"):
+        return BranchCacheV3.fromfile(repo)
+    else:
+        return BranchCacheV2.fromfile(repo)
+
+
+def new_branch_cache(repo, *args, **kwargs):
+    """Build a new branch cache from arguments
+
+    Return a branch cache of the right format depending on the repository.
+    """
+    if repo.ui.configbool(b"experimental", b"branch-cache-v3"):
+        return BranchCacheV3(repo, *args, **kwargs)
+    else:
+        return BranchCacheV2(repo, *args, **kwargs)
+
+
+class BranchCacheV2(_LocalBranchCache):
+    """a branch cache using version 2 of the format on disk
+
+    The cache is serialized on disk in the following format:
+
+    <tip hex node> <tip rev number> [optional filtered repo hex hash]
+    <branch head hex node> <open/closed state> <branch name>
+    <branch head hex node> <open/closed state> <branch name>
+    ...
+
+    The first line is used to check if the cache is still valid. If the
+    branch cache is for a filtered repo view, an optional third hash is
+    included that hashes the hashes of all filtered and obsolete revisions.
+
+    The open/closed state is represented by a single letter 'o' or 'c'.
+    This field can be used to avoid changelog reads when determining if a
+    branch head closes a branch or not.
+    """
+
+    _base_filename = b"branch2"
+
+    @classmethod
+    def _load_header(cls, repo, lineiter) -> "dict[str, Any]":
+        """parse the head of a branchmap file
+
+        return parameters to pass to a newly created class instance.
+        """
+        cachekey = next(lineiter).rstrip(b'\n').split(b" ", 2)
+        last, lrev = cachekey[:2]
+        last, lrev = bin(last), int(lrev)
+        filteredhash = ()
+        if len(cachekey) > 2:
+            filteredhash = (bin(cachekey[2]),)
+        return {
+            "tipnode": last,
+            "tiprev": lrev,
+            "key_hashes": filteredhash,
+        }
+
+    def _write_header(self, fp) -> None:
+        """write the branch cache header to a file"""
+        cachekey = [hex(self.tipnode), b'%d' % self.tiprev]
+        if self.key_hashes:
+            cachekey.append(hex(self.key_hashes[0]))
+        fp.write(b" ".join(cachekey) + b'\n')
+
+    def _compute_key_hashes(self, repo) -> Tuple[bytes]:
+        """return the cache key hashes that match this repoview state"""
+        filtered_hash = scmutil.combined_filtered_and_obsolete_hash(
+            repo,
+            self.tiprev,
+            needobsolete=True,
+        )
+        keys: Tuple[bytes] = cast(Tuple[bytes], ())
+        if filtered_hash is not None:
+            keys: Tuple[bytes] = (filtered_hash,)
+        return keys
+
+
+class BranchCacheV3(_LocalBranchCache):
+    """a branch cache using version 3 of the format on disk
+
+    This version is still EXPERIMENTAL and the format is subject to changes.
+
+    The cache is serialized on disk in the following format:
+
+    <cache-key-xxx>=<xxx-value> <cache-key-yyy>=<yyy-value> […]
+    <branch head hex node> <open/closed state> <branch name>
+    <branch head hex node> <open/closed state> <branch name>
+    ...
+
+    The first line is used to check if the cache is still valid. It is a series
+    of key-value pairs. The following keys are recognized:
+
+    - tip-rev: the rev-num of the tip-most revision seen by this cache
+    - tip-node: the node-id of the tip-most revision seen by this cache
+    - filtered-hash: the hash of all filtered revisions (before tip-rev)
+                     ignored by this cache.
+    - obsolete-hash: the hash of all non-filtered obsolete revisions (before
+                     tip-rev) ignored by this cache.
+
+    The tip-rev is used to know how far behind the values in the file are
+    compared to the current repository state.
+
+    The tip-node, filtered-hash and obsolete-hash are used to detect if this
+    cache can be used for this repository state at all.
+
+    The open/closed state is represented by a single letter 'o' or 'c'.
+    This field can be used to avoid changelog reads when determining if a
+    branch head closes a branch or not.
+
+    Topological heads are not included in the listing and should be dispatched
+    on the right branch at read time. Obsolete topological heads should be
+    ignored.
+    """
+
+    _base_filename = b"branch3-exp"
+    _default_key_hashes = (None, None)
+
+    def __init__(self, *args, pure_topo_branch=None, **kwargs):
+        super().__init__(*args, **kwargs)
+        self._pure_topo_branch = pure_topo_branch
+        self._needs_populate = self._pure_topo_branch is not None
+
+    def inherit_for(self, repo):
+        new = super().inherit_for(repo)
+        new._pure_topo_branch = self._pure_topo_branch
+        new._needs_populate = self._needs_populate
+        return new
+
+    def _get_topo_heads(self, repo):
+        """returns the topological heads of a repoview's content up to self.tiprev"""
+        cl = repo.changelog
+        if self.tiprev == nullrev:
+            return []
+        elif self.tiprev == cl.tiprev():
+            return cl.headrevs()
+        else:
+            # XXX passing tiprev as ceiling of cl.headrevs could be faster
+            heads = cl.headrevs(cl.revs(stop=self.tiprev))
+            return heads
+
+    def _write_header(self, fp) -> None:
+        cache_keys = {
+            b"tip-node": hex(self.tipnode),
+            b"tip-rev": b'%d' % self.tiprev,
+        }
+        if self.key_hashes:
+            if self.key_hashes[0] is not None:
+                cache_keys[b"filtered-hash"] = hex(self.key_hashes[0])
+            if self.key_hashes[1] is not None:
+                cache_keys[b"obsolete-hash"] = hex(self.key_hashes[1])
+        if self._pure_topo_branch is not None:
+            cache_keys[b"topo-mode"] = b"pure"
+        pieces = (b"%s=%s" % i for i in sorted(cache_keys.items()))
+        fp.write(b" ".join(pieces) + b'\n')
+        if self._pure_topo_branch is not None:
+            label = encoding.fromlocal(self._pure_topo_branch)
+            fp.write(label + b'\n')
+
+    def _write_heads(self, repo, fp) -> int:
+        """write list of heads to a file
+
+        Return the number of heads written."""
+        nodecount = 0
+        topo_heads = None
+        if self._pure_topo_branch is None:
+            topo_heads = set(self._get_topo_heads(repo))
+        to_rev = repo.changelog.index.rev
+        for label, nodes in sorted(self._entries.items()):
+            if label == self._pure_topo_branch:
+                # no need to write anything, the header took care of that
+                continue
+            label = encoding.fromlocal(label)
+            for node in nodes:
+                if topo_heads is not None:
+                    rev = to_rev(node)
+                    if rev in topo_heads:
+                        continue
+                if node in self._closednodes:
+                    state = b'c'
+                else:
+                    state = b'o'
+                nodecount += 1
+                fp.write(b"%s %s %s\n" % (hex(node), state, label))
+        return nodecount
+
+    @classmethod
+    def _load_header(cls, repo, lineiter):
+        header_line = next(lineiter)
+        pieces = header_line.rstrip(b'\n').split(b" ")
+        cache_keys = dict(p.split(b'=', 1) for p in pieces)
+
+        args = {}
+        filtered_hash = None
+        obsolete_hash = None
+        has_pure_topo_heads = False
+        for k, v in cache_keys.items():
+            if k == b"tip-rev":
+                args["tiprev"] = int(v)
+            elif k == b"tip-node":
+                args["tipnode"] = bin(v)
+            elif k == b"filtered-hash":
+                filtered_hash = bin(v)
+            elif k == b"obsolete-hash":
+                obsolete_hash = bin(v)
+            elif k == b"topo-mode":
+                if v == b"pure":
+                    has_pure_topo_heads = True
+                else:
+                    msg = b"unknown topo-mode: %r" % v
+                    raise ValueError(msg)
+            else:
+                msg = b"unknown cache key: %r" % k
+                raise ValueError(msg)
+        args["key_hashes"] = (filtered_hash, obsolete_hash)
+        if has_pure_topo_heads:
+            pure_line = next(lineiter).rstrip(b'\n')
+            args["pure_topo_branch"] = encoding.tolocal(pure_line)
+        return args
+
+    def _load_heads(self, repo, lineiter):
+        """fully loads the branchcache by reading from the file using the line
+        iterator passed"""
+        super()._load_heads(repo, lineiter)
+        if self._pure_topo_branch is not None:
+            # no need to read the repository heads, we know their value already.
+            return
+        cl = repo.changelog
+        getbranchinfo = repo.revbranchcache().branchinfo
+        obsrevs = obsolete.getrevs(repo, b'obsolete')
+        to_node = cl.node
+        touched_branch = set()
+        for head in self._get_topo_heads(repo):
+            if head in obsrevs:
+                continue
+            node = to_node(head)
+            branch, closed = getbranchinfo(head)
+            self._entries.setdefault(branch, []).append(node)
+            if closed:
+                self._closednodes.add(node)
+            touched_branch.add(branch)
+        to_rev = cl.index.rev
+        for branch in touched_branch:
+            self._entries[branch].sort(key=to_rev)
+
+    def _compute_key_hashes(
+        self, repo
+    ) -> Tuple[Optional[bytes], Optional[bytes]]:
+        """return the cache key hashes that match this repoview state"""
+        return scmutil.filtered_and_obsolete_hash(
+            repo,
+            self.tiprev,
         )
 
-        duration = util.timer() - starttime
-        repo.ui.log(
-            b'branchcache',
-            b'updated %s in %.4f seconds\n',
-            _branchcachedesc(repo),
-            duration,
+    def _process_new(
+        self,
+        repo,
+        newbranches,
+        new_closed,
+        obs_ignored,
+        max_rev,
+    ) -> None:
+        if (
+            # note: the check about `obs_ignored` is too strict as the
+            # obsolete revisions could be non-topological, but let's keep
+            # things simple for now.
+            #
+            # The same applies to `new_closed`: if the closed changesets
+            # are not heads, we don't care that they are closed, but let's
+            # keep things simple here too.
+            not (obs_ignored or new_closed)
+            and (
+                not newbranches
+                or (
+                    len(newbranches) == 1
+                    and (
+                        self.tiprev == nullrev
+                        or self._pure_topo_branch in newbranches
+                    )
+                )
+            )
+        ):
+            if newbranches:
+                assert len(newbranches) == 1
+                self._pure_topo_branch = list(newbranches.keys())[0]
+                self._needs_populate = True
+                self._entries.pop(self._pure_topo_branch, None)
+            return
+
+        self._ensure_populated(repo)
+        self._pure_topo_branch = None
+        super()._process_new(
+            repo,
+            newbranches,
+            new_closed,
+            obs_ignored,
+            max_rev,
         )
 
-        self.write(repo)
+    def _ensure_populated(self, repo):
+        """make sure any lazily loaded values are fully populated"""
+        if self._needs_populate:
+            assert self._pure_topo_branch is not None
+            cl = repo.changelog
+            to_node = cl.node
+            topo_heads = self._get_topo_heads(repo)
+            heads = [to_node(r) for r in topo_heads]
+            self._entries[self._pure_topo_branch] = heads
+            self._needs_populate = False
+
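+    # a cache is "pure topological" when a single branch owns every
+    # topological head; in that case the heads list does not need to be
+    # written out, the header (branch label and tip) is enough, and
+    # _ensure_populated() can rebuild the entries on demand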
+    def _detect_pure_topo(self, repo) -> None:
+        if self._pure_topo_branch is not None:
+            # we are pure topological already
+            return
+        to_node = repo.changelog.node
+        topo_heads = [to_node(r) for r in self._get_topo_heads(repo)]
+        if any(n in self._closednodes for n in topo_heads):
+            return
+        for branch, heads in self._entries.items():
+            if heads == topo_heads:
+                self._pure_topo_branch = branch
+                break
 
 
-class remotebranchcache(branchcache):
+class remotebranchcache(_BaseBranchCache):
     """Branchmap info for a remote connection, should not write locally"""
 
-    def write(self, repo):
-        pass
+    def __init__(
+        self,
+        repo: "localrepo.localrepository",
+        entries: Union[
+            Dict[bytes, List[bytes]], Iterable[Tuple[bytes, List[bytes]]]
+        ] = (),
+        closednodes: Optional[Set[bytes]] = None,
+    ) -> None:
+        super().__init__(repo=repo, entries=entries, closed_nodes=closednodes)
 
 
 # Revision branch info cache
--- a/mercurial/bundle2.py	Thu Jun 13 09:52:39 2024 +0200
+++ b/mercurial/bundle2.py	Mon Jun 24 12:05:31 2024 +0200
@@ -1728,9 +1728,10 @@
     caps = {}
     if opts.get(b'obsolescence', False):
         caps[b'obsmarkers'] = (b'V1',)
-    if opts.get(b'streamv2'):
+    stream_version = opts.get(b'stream', b"")
+    if stream_version == b"v2":
         caps[b'stream'] = [b'v2']
-    elif opts.get(b'streamv3-exp'):
+    elif stream_version == b"v3-exp":
         caps[b'stream'] = [b'v3-exp']
     bundle = bundle20(ui, caps)
     bundle.setcompression(compression, compopts)
@@ -1774,10 +1775,10 @@
     if repository.REPO_FEATURE_SIDE_DATA in repo.features:
         part.addparam(b'exp-sidedata', b'1')
 
-    if opts.get(b'streamv2', False):
+    if opts.get(b'stream', b"") == b"v2":
         addpartbundlestream2(bundler, repo, stream=True)
 
-    if opts.get(b'streamv3-exp', False):
+    if opts.get(b'stream', b"") == b"v3-exp":
         addpartbundlestream2(bundler, repo, stream=True)
 
     if opts.get(b'tagsfnodescache', True):
@@ -1787,7 +1788,7 @@
         addpartrevbranchcache(repo, bundler, outgoing)
 
     if opts.get(b'obsolescence', False):
-        obsmarkers = repo.obsstore.relevantmarkers(outgoing.missing)
+        obsmarkers = repo.obsstore.relevantmarkers(nodes=outgoing.missing)
         buildobsmarkerspart(
             bundler,
             obsmarkers,
--- a/mercurial/bundlecaches.py	Thu Jun 13 09:52:39 2024 +0200
+++ b/mercurial/bundlecaches.py	Mon Jun 24 12:05:31 2024 +0200
@@ -6,6 +6,8 @@
 import collections
 
 from typing import (
+    Dict,
+    Union,
     cast,
 )
 
@@ -106,7 +108,7 @@
 }
 
 # Maps bundle version with content opts to choose which part to bundle
-_bundlespeccontentopts = {
+_bundlespeccontentopts: Dict[bytes, Dict[bytes, Union[bool, bytes]]] = {
     b'v1': {
         b'changegroup': True,
         b'cg.version': b'01',
@@ -136,7 +138,7 @@
         b'cg.version': b'02',
         b'obsolescence': False,
         b'phases': False,
-        b"streamv2": True,
+        b"stream": b"v2",
         b'tagsfnodescache': False,
         b'revbranchcache': False,
     },
@@ -145,7 +147,7 @@
         b'cg.version': b'03',
         b'obsolescence': False,
         b'phases': False,
-        b"streamv3-exp": True,
+        b"stream": b"v3-exp",
         b'tagsfnodescache': False,
         b'revbranchcache': False,
     },
@@ -158,8 +160,6 @@
 }
 _bundlespeccontentopts[b'bundle2'] = _bundlespeccontentopts[b'v2']
 
-_bundlespecvariants = {b"streamv2": {}}
-
 # Compression engines allowed in version 1. THIS SHOULD NEVER CHANGE.
 _bundlespecv1compengines = {b'gzip', b'bzip2', b'none'}
 
@@ -391,10 +391,7 @@
     if (
         bundlespec.wirecompression == b'UN'
         and bundlespec.wireversion == b'02'
-        and (
-            bundlespec.contentopts.get(b'streamv2')
-            or bundlespec.contentopts.get(b'streamv3-exp')
-        )
+        and bundlespec.contentopts.get(b'stream', None) in (b"v2", b"v3-exp")
     ):
         return True
 
--- a/mercurial/cacheutil.py	Thu Jun 13 09:52:39 2024 +0200
+++ b/mercurial/cacheutil.py	Mon Jun 24 12:05:31 2024 +0200
@@ -14,6 +14,8 @@
     # ones. Therefore copy all branch caches over.
     cachefiles = [b'branch2']
     cachefiles += [b'branch2-%s' % f for f in repoview.filtertable]
+    cachefiles += [b'branch3']
+    cachefiles += [b'branch3-%s' % f for f in repoview.filtertable]
     cachefiles += [b'rbc-names-v1', b'rbc-revs-v1']
     cachefiles += [b'tags2']
     cachefiles += [b'tags2-%s' % f for f in repoview.filtertable]
--- a/mercurial/changelog.py	Thu Jun 13 09:52:39 2024 +0200
+++ b/mercurial/changelog.py	Mon Jun 24 12:05:31 2024 +0200
@@ -327,6 +327,9 @@
         self._filteredrevs_hashcache = {}
         self._copiesstorage = opener.options.get(b'copies-storage')
 
+    def __contains__(self, rev):
+        return (0 <= rev < len(self)) and rev not in self._filteredrevs
+
     @property
     def filteredrevs(self):
         return self._filteredrevs
--- a/mercurial/cmdutil.py	Thu Jun 13 09:52:39 2024 +0200
+++ b/mercurial/cmdutil.py	Mon Jun 24 12:05:31 2024 +0200
@@ -35,11 +35,13 @@
 
 from . import (
     bookmarks,
+    bundle2,
     changelog,
     copies,
     crecord as crecordmod,
     encoding,
     error,
+    exchange,
     formatter,
     logcmdutil,
     match as matchmod,
@@ -56,6 +58,7 @@
     rewriteutil,
     scmutil,
     state as statemod,
+    streamclone,
     subrepoutil,
     templatekw,
     templater,
@@ -66,6 +69,7 @@
 from .utils import (
     dateutil,
     stringutil,
+    urlutil,
 )
 
 from .revlogutils import (
@@ -4135,3 +4139,90 @@
     with repo.wlock():
         graftstate = statemod.cmdstate(repo, b'graftstate')
         return abortgraft(ui, repo, graftstate)
+
+
+def postincoming(ui, repo, modheads, optupdate, checkout, brev):
+    """Run after a changegroup has been added via pull/unbundle
+
+    This takes the following arguments:
+
+    :modheads: change of heads by pull/unbundle
+    :optupdate: whether updating the working directory is needed
+    :checkout: update destination revision (or None to default destination)
+    :brev: a name, which might be a bookmark to be activated after updating
+
+    return True if the update raised any conflict, False otherwise.
+    """
+    if modheads == 0:
+        return False
+    if optupdate:
+        # avoid circular import
+        from . import hg
+
+        try:
+            return hg.updatetotally(ui, repo, checkout, brev)
+        except error.UpdateAbort as inst:
+            msg = _(b"not updating: %s") % stringutil.forcebytestr(inst)
+            hint = inst.hint
+            raise error.UpdateAbort(msg, hint=hint)
+    if ui.quiet:
+        pass  # we won't report anything so the other clauses are useless.
+    elif modheads is not None and modheads > 1:
+        currentbranchheads = len(repo.branchheads())
+        if currentbranchheads == modheads:
+            ui.status(
+                _(b"(run 'hg heads' to see heads, 'hg merge' to merge)\n")
+            )
+        elif currentbranchheads > 1:
+            ui.status(
+                _(b"(run 'hg heads .' to see heads, 'hg merge' to merge)\n")
+            )
+        else:
+            ui.status(_(b"(run 'hg heads' to see heads)\n"))
+    elif not ui.configbool(b'commands', b'update.requiredest'):
+        ui.status(_(b"(run 'hg update' to get a working copy)\n"))
+    return False
+
+
+def unbundle_files(ui, repo, fnames, unbundle_source=b'unbundle'):
+    """utility for `hg unbundle` and `hg debug::unbundle`"""
+    assert fnames
+    # avoid circular import
+    from . import hg
+
+    with repo.lock():
+        for fname in fnames:
+            f = hg.openpath(ui, fname)
+            gen = exchange.readbundle(ui, f, fname)
+            if isinstance(gen, streamclone.streamcloneapplier):
+                raise error.InputError(
+                    _(
+                        b'packed bundles cannot be applied with '
+                        b'"hg unbundle"'
+                    ),
+                    hint=_(b'use "hg debugapplystreamclonebundle"'),
+                )
+            url = b'bundle:' + fname
+            try:
+                txnname = b'unbundle'
+                if not isinstance(gen, bundle2.unbundle20):
+                    txnname = b'unbundle\n%s' % urlutil.hidepassword(url)
+                with repo.transaction(txnname) as tr:
+                    op = bundle2.applybundle(
+                        repo,
+                        gen,
+                        tr,
+                        source=unbundle_source,  # used by debug::unbundle
+                        url=url,
+                    )
+            except error.BundleUnknownFeatureError as exc:
+                raise error.Abort(
+                    _(b'%s: unknown bundle feature, %s') % (fname, exc),
+                    hint=_(
+                        b"see https://mercurial-scm.org/"
+                        b"wiki/BundleFeature for more "
+                        b"information"
+                    ),
+                )
+            modheads = bundle2.combinechangegroupresults(op)
+    return modheads
--- a/mercurial/commands.py	Thu Jun 13 09:52:39 2024 +0200
+++ b/mercurial/commands.py	Mon Jun 24 12:05:31 2024 +0200
@@ -60,7 +60,6 @@
     server,
     shelve as shelvemod,
     state as statemod,
-    streamclone,
     tags as tagsmod,
     ui as uimod,
     util,
@@ -1627,6 +1626,8 @@
             pycompat.bytestr(e),
             hint=_(b"see 'hg help bundlespec' for supported values for --type"),
         )
+
+    has_changegroup = bundlespec.params.get(b"changegroup", False)
     cgversion = bundlespec.params[b"cg.version"]
 
     # Packed bundles are a pseudo bundle format for now.
@@ -1663,7 +1664,8 @@
             base = [nullrev]
     else:
         base = None
-    if cgversion not in changegroup.supportedoutgoingversions(repo):
+    supported_cg_versions = changegroup.supportedoutgoingversions(repo)
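+    # bundle types that carry no changegroup part (e.g. stream bundles)
+    # should not fail the changegroup-version check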
+    if has_changegroup and cgversion not in supported_cg_versions:
         raise error.Abort(
             _(b"repository does not support bundle version %s") % cgversion
         )
@@ -5375,44 +5377,6 @@
     return ret
 
 
-def postincoming(ui, repo, modheads, optupdate, checkout, brev):
-    """Run after a changegroup has been added via pull/unbundle
-
-    This takes arguments below:
-
-    :modheads: change of heads by pull/unbundle
-    :optupdate: updating working directory is needed or not
-    :checkout: update destination revision (or None to default destination)
-    :brev: a name, which might be a bookmark to be activated after updating
-
-    return True if update raise any conflict, False otherwise.
-    """
-    if modheads == 0:
-        return False
-    if optupdate:
-        try:
-            return hg.updatetotally(ui, repo, checkout, brev)
-        except error.UpdateAbort as inst:
-            msg = _(b"not updating: %s") % stringutil.forcebytestr(inst)
-            hint = inst.hint
-            raise error.UpdateAbort(msg, hint=hint)
-    if modheads is not None and modheads > 1:
-        currentbranchheads = len(repo.branchheads())
-        if currentbranchheads == modheads:
-            ui.status(
-                _(b"(run 'hg heads' to see heads, 'hg merge' to merge)\n")
-            )
-        elif currentbranchheads > 1:
-            ui.status(
-                _(b"(run 'hg heads .' to see heads, 'hg merge' to merge)\n")
-            )
-        else:
-            ui.status(_(b"(run 'hg heads' to see heads)\n"))
-    elif not ui.configbool(b'commands', b'update.requiredest'):
-        ui.status(_(b"(run 'hg update' to get a working copy)\n"))
-    return False
-
-
 @command(
     b'pull',
     [
@@ -5608,7 +5572,7 @@
                 # for pushes.
                 repo._subtoppath = path.loc
                 try:
-                    update_conflict = postincoming(
+                    update_conflict = cmdutil.postincoming(
                         ui, repo, modheads, opts.get('update'), checkout, brev
                     )
                 except error.FilteredRepoLookupError as exc:
@@ -7730,7 +7694,7 @@
     _(b'[-u] FILE...'),
     helpcategory=command.CATEGORY_IMPORT_EXPORT,
 )
-def unbundle(ui, repo, fname1, *fnames, _unbundle_source=b'unbundle', **opts):
+def unbundle(ui, repo, fname1, *fnames, **opts):
     """apply one or more bundle files
 
     Apply one or more bundle files generated by :hg:`bundle`.
@@ -7738,44 +7702,9 @@
     Returns 0 on success, 1 if an update has unresolved files.
     """
     fnames = (fname1,) + fnames
-
-    with repo.lock():
-        for fname in fnames:
-            f = hg.openpath(ui, fname)
-            gen = exchange.readbundle(ui, f, fname)
-            if isinstance(gen, streamclone.streamcloneapplier):
-                raise error.InputError(
-                    _(
-                        b'packed bundles cannot be applied with '
-                        b'"hg unbundle"'
-                    ),
-                    hint=_(b'use "hg debugapplystreamclonebundle"'),
-                )
-            url = b'bundle:' + fname
-            try:
-                txnname = b'unbundle'
-                if not isinstance(gen, bundle2.unbundle20):
-                    txnname = b'unbundle\n%s' % urlutil.hidepassword(url)
-                with repo.transaction(txnname) as tr:
-                    op = bundle2.applybundle(
-                        repo,
-                        gen,
-                        tr,
-                        source=_unbundle_source,  # used by debug::unbundle
-                        url=url,
-                    )
-            except error.BundleUnknownFeatureError as exc:
-                raise error.Abort(
-                    _(b'%s: unknown bundle feature, %s') % (fname, exc),
-                    hint=_(
-                        b"see https://mercurial-scm.org/"
-                        b"wiki/BundleFeature for more "
-                        b"information"
-                    ),
-                )
-            modheads = bundle2.combinechangegroupresults(op)
-
-    if postincoming(ui, repo, modheads, opts.get('update'), None, None):
+    modheads = cmdutil.unbundle_files(ui, repo, fnames)
+
+    if cmdutil.postincoming(ui, repo, modheads, opts.get('update'), None, None):
         return 1
     else:
         return 0
--- a/mercurial/configitems.toml	Thu Jun 13 09:52:39 2024 +0200
+++ b/mercurial/configitems.toml	Mon Jun 24 12:05:31 2024 +0200
@@ -719,6 +719,15 @@
 name = "auto-publish"
 default = "publish"
 
+
+# The current implementation of the filtering/injecting of topological heads
+# is naive and needs proper benchmarking and optimisation before we can
+# envision moving the v3 of the branch-cache format out of experimental.
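+#
+# To opt in from a configuration file:
+#
+#   [experimental]
+#   branch-cache-v3 = yes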
+[[items]]
+section = "experimental"
+name = "branch-cache-v3"
+default = false
+
 [[items]]
 section = "experimental"
 name = "bundle-phases"
--- a/mercurial/debugcommands.py	Thu Jun 13 09:52:39 2024 +0200
+++ b/mercurial/debugcommands.py	Mon Jun 24 12:05:31 2024 +0200
@@ -4078,26 +4078,17 @@
 
 @command(
     b'debug::unbundle',
-    [
-        (
-            b'u',
-            b'update',
-            None,
-            _(b'update to new branch head if changesets were unbundled'),
-        )
-    ],
-    _(b'[-u] FILE...'),
+    [],
+    _(b'FILE...'),
     helpcategory=command.CATEGORY_IMPORT_EXPORT,
 )
-def debugunbundle(ui, repo, *args, **kwargs):
+def debugunbundle(ui, repo, fname1, *fnames):
     """same as `hg unbundle`, but pretent to come from a push
 
     This is useful to debug behavior and performance change in this case.
     """
-    from . import commands  # avoid cycle
-
-    unbundle = cmdutil.findcmd(b'unbundle', commands.table)[1][0]
-    return unbundle(ui, repo, *args, _unbundle_source=b'push', **kwargs)
+    fnames = (fname1,) + fnames
+    cmdutil.unbundle_files(ui, repo, fnames, unbundle_source=b'push')
 
 
 @command(
--- a/mercurial/dirstate.py	Thu Jun 13 09:52:39 2024 +0200
+++ b/mercurial/dirstate.py	Mon Jun 24 12:05:31 2024 +0200
@@ -1639,16 +1639,6 @@
 
         use_rust = True
 
-        allowed_matchers = (
-            matchmod.alwaysmatcher,
-            matchmod.differencematcher,
-            matchmod.exactmatcher,
-            matchmod.includematcher,
-            matchmod.intersectionmatcher,
-            matchmod.nevermatcher,
-            matchmod.unionmatcher,
-        )
-
         if rustmod is None:
             use_rust = False
         elif self._checkcase:
@@ -1656,9 +1646,6 @@
             use_rust = False
         elif subrepos:
             use_rust = False
-        elif not isinstance(match, allowed_matchers):
-            # Some matchers have yet to be implemented
-            use_rust = False
 
         # Get the time from the filesystem so we can disambiguate files that
         # appear modified in the present or future.
--- a/mercurial/discovery.py	Thu Jun 13 09:52:39 2024 +0200
+++ b/mercurial/discovery.py	Mon Jun 24 12:05:31 2024 +0200
@@ -18,6 +18,7 @@
     bookmarks,
     branchmap,
     error,
+    node as nodemod,
     obsolete,
     phases,
     pycompat,
@@ -98,29 +99,62 @@
     def __init__(
         self, repo, commonheads=None, ancestorsof=None, missingroots=None
     ):
-        # at least one of them must not be set
-        assert None in (commonheads, missingroots)
+        # at most one of them may be set
+        if commonheads is not None and missingroots is not None:
+            m = 'commonheads and missingroots arguments are mutually exclusive'
+            raise error.ProgrammingError(m)
         cl = repo.changelog
+        unfi = repo.unfiltered()
+        ucl = unfi.changelog
+        to_node = ucl.node
+        missing = None
+        common = None
+        arg_anc = ancestorsof
         if ancestorsof is None:
             ancestorsof = cl.heads()
-        if missingroots:
+
+        # XXX-perf: do we need all of these to be node lists? They would be
+        # simpler as rev-num sets (and smartsets)
+        if missingroots == [nodemod.nullrev] or missingroots == []:
+            commonheads = [repo.nullid]
+            common = set()
+            if arg_anc is None:
+                missing = [to_node(r) for r in cl]
+            else:
+                missing_rev = repo.revs('::%ln', ancestorsof)
+                missing = [to_node(r) for r in missing_rev]
+        elif missingroots is not None:
             # TODO remove call to nodesbetween.
-            # TODO populate attributes on outgoing instance instead of setting
-            # discbases.
-            csets, roots, heads = cl.nodesbetween(missingroots, ancestorsof)
-            included = set(csets)
-            discbases = []
-            for n in csets:
-                discbases.extend([p for p in cl.parents(n) if p != repo.nullid])
-            ancestorsof = heads
-            commonheads = [n for n in discbases if n not in included]
+            missing_rev = repo.revs('%ln::%ln', missingroots, ancestorsof)
+            ancestorsof = [to_node(r) for r in ucl.headrevs(missing_rev)]
+            parent_revs = ucl.parentrevs
+            common_legs = set()
+            for r in missing_rev:
+                p1, p2 = parent_revs(r)
+                if p1 not in missing_rev:
+                    common_legs.add(p1)
+                if p2 not in missing_rev:
+                    common_legs.add(p2)
+            common_legs.discard(nodemod.nullrev)
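+            # `common_legs` now holds every parent of a missing revision
+            # that is not itself missing; its antichain gives the common
+            # heads and everything below them is the common set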
+            if not common_legs:
+                commonheads = [repo.nullid]
+                common = set()
+            else:
+                commonheads_revs = unfi.revs(
+                    'heads(%ld::%ld)',
+                    common_legs,
+                    common_legs,
+                )
+                commonheads = [to_node(r) for r in commonheads_revs]
+                common = ucl.ancestors(commonheads_revs, inclusive=True)
+            missing = [to_node(r) for r in missing_rev]
         elif not commonheads:
             commonheads = [repo.nullid]
         self.commonheads = commonheads
         self.ancestorsof = ancestorsof
         self._revlog = cl
-        self._common = None
-        self._missing = None
+        self._common = common
+        self._missing = missing
         self.excluded = []
 
     def _computecommonmissing(self):
@@ -190,7 +224,12 @@
         if len(missing) == len(allmissing):
             ancestorsof = onlyheads
         else:  # update missing heads
-            ancestorsof = phases.newheads(repo, onlyheads, excluded)
+            to_rev = repo.changelog.index.rev
+            to_node = repo.changelog.node
+            excluded_revs = [to_rev(r) for r in excluded]
+            onlyheads_revs = [to_rev(r) for r in onlyheads]
+            new_heads = phases.new_heads(repo, onlyheads_revs, excluded_revs)
+            ancestorsof = [to_node(r) for r in new_heads]
         og.ancestorsof = ancestorsof
     if portable:
         # recompute common and ancestorsof as if -r<rev> had been given for
--- a/mercurial/exchange.py	Thu Jun 13 09:52:39 2024 +0200
+++ b/mercurial/exchange.py	Mon Jun 24 12:05:31 2024 +0200
@@ -344,32 +344,56 @@
             # not target to push, all common are relevant
             return self.outgoing.commonheads
         unfi = self.repo.unfiltered()
-        # I want cheads = heads(::ancestorsof and ::commonheads)
-        # (ancestorsof is revs with secret changeset filtered out)
+        # I want cheads = heads(::push_heads and ::commonheads)
+        #
+        # To push, we already computed
+        #     common = (::commonheads)
+        #     missing = ((commonheads::push_heads) - commonheads)
+        #
+        # So we basically search
         #
-        # This can be expressed as:
-        #     cheads = ( (ancestorsof and ::commonheads)
-        #              + (commonheads and ::ancestorsof))"
-        #              )
+        #     almost_heads = heads((parents(missing) + push_heads) & common)
         #
-        # while trying to push we already computed the following:
-        #     common = (::commonheads)
-        #     missing = ((commonheads::ancestorsof) - commonheads)
+        # We use "almost" here as this can return revision that are ancestors
+        # of other in the set and we need to explicitly turn it into an
+        # antichain later. We can do so using:
+        #
+        #     cheads = heads(almost_heads::almost_heads)
         #
-        # We can pick:
-        # * ancestorsof part of common (::commonheads)
+        # In practice the code is a bit more convoluted, to avoid some
+        # extra computation, but it aims at doing the same computation as
+        # highlighted above.
         common = self.outgoing.common
-        rev = self.repo.changelog.index.rev
-        cheads = [node for node in self.revs if rev(node) in common]
-        # and
-        # * commonheads parents on missing
-        revset = unfi.set(
-            b'%ln and parents(roots(%ln))',
-            self.outgoing.commonheads,
-            self.outgoing.missing,
-        )
-        cheads.extend(c.node() for c in revset)
-        return cheads
+        unfi = self.repo.unfiltered()
+        cl = unfi.changelog
+        to_rev = cl.index.rev
+        to_node = cl.node
+        parent_revs = cl.parentrevs
+        unselected = []
+        cheads = set()
+        # XXX-perf: `self.revs` and `outgoing.missing` could hold revs directly
+        for n in self.revs:
+            r = to_rev(n)
+            if r in common:
+                cheads.add(r)
+            else:
+                unselected.append(r)
+        known_non_heads = cl.ancestors(cheads, inclusive=True)
+        if unselected:
+            missing_revs = {to_rev(n) for n in self.outgoing.missing}
+            missing_revs.add(nullrev)
+            root_points = set()
+            for r in missing_revs:
+                p1, p2 = parent_revs(r)
+                if p1 not in missing_revs and p1 not in known_non_heads:
+                    root_points.add(p1)
+                if p2 not in missing_revs and p2 not in known_non_heads:
+                    root_points.add(p2)
+            if root_points:
+                heads = unfi.revs('heads(%ld::%ld)', root_points, root_points)
+                cheads.update(heads)
+        # XXX-perf: could this be a set of revisions?
+        return [to_node(r) for r in sorted(cheads)]
 
     @property
     def commonheads(self):
@@ -600,7 +624,10 @@
 
     (computed for both success and failure case for changesets push)"""
     outgoing = pushop.outgoing
-    unfi = pushop.repo.unfiltered()
+    repo = pushop.repo
+    unfi = repo.unfiltered()
+    cl = unfi.changelog
+    to_rev = cl.index.rev
     remotephases = listkeys(pushop.remote, b'phases')
 
     if (
@@ -622,38 +649,43 @@
         pushop.fallbackoutdatedphases = []
         return
 
-    pushop.remotephases = phases.remotephasessummary(
-        pushop.repo, pushop.fallbackheads, remotephases
+    fallbackheads_rev = {to_rev(n) for n in pushop.fallbackheads}
+    pushop.remotephases = phases.RemotePhasesSummary(
+        pushop.repo,
+        fallbackheads_rev,
+        remotephases,
     )
-    droots = pushop.remotephases.draftroots
-
-    extracond = b''
-    if not pushop.remotephases.publishing:
-        extracond = b' and public()'
-    revset = b'heads((%%ln::%%ln) %s)' % extracond
-    # Get the list of all revs draft on remote by public here.
-    # XXX Beware that revset break if droots is not strictly
-    # XXX root we may want to ensure it is but it is costly
-    fallback = list(unfi.set(revset, droots, pushop.fallbackheads))
-    if not pushop.remotephases.publishing and pushop.publish:
-        future = list(
-            unfi.set(
-                b'%ln and (not public() or %ln::)', pushop.futureheads, droots
-            )
-        )
-    elif not outgoing.missing:
-        future = fallback
+    droots = set(pushop.remotephases.draft_roots)
+
+    fallback_publishing = pushop.remotephases.publishing
+    push_publishing = pushop.remotephases.publishing or pushop.publish
+    missing_revs = {to_rev(n) for n in outgoing.missing}
+    drafts = unfi._phasecache.get_raw_set(unfi, phases.draft)
+
+    if fallback_publishing:
+        fallback_roots = droots - missing_revs
+        revset = b'heads(%ld::%ld)'
     else:
-        # adds changeset we are going to push as draft
-        #
-        # should not be necessary for publishing server, but because of an
-        # issue fixed in xxxxx we have to do it anyway.
-        fdroots = list(
-            unfi.set(b'roots(%ln  + %ln::)', outgoing.missing, droots)
-        )
-        fdroots = [f.node() for f in fdroots]
-        future = list(unfi.set(revset, fdroots, pushop.futureheads))
-    pushop.outdatedphases = future
+        fallback_roots = droots - drafts
+        fallback_roots -= missing_revs
+        # Get the list of all revs draft on remote but public here.
+        revset = b'heads((%ld::%ld) and public())'
+    if not fallback_roots:
+        fallback = fallback_rev = []
+    else:
+        fallback_rev = unfi.revs(revset, fallback_roots, fallbackheads_rev)
+        fallback = [repo[r] for r in fallback_rev]
+
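+    # figure out which revisions should end up public on the remote after a
+    # successful push: the pushed revisions themselves (minus local drafts
+    # when nothing will publish them), plus the relevant fallback heads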
+    if push_publishing:
+        published = missing_revs.copy()
+    else:
+        published = missing_revs - drafts
+    if pushop.publish:
+        published.update(fallbackheads_rev & drafts)
+    elif fallback:
+        published.update(fallback_rev)
+
+    pushop.outdatedphases = [repo[r] for r in cl.headrevs(published)]
     pushop.fallbackoutdatedphases = fallback
 
 
@@ -671,8 +703,8 @@
     repo = pushop.repo
     # very naive computation, that can be quite expensive on big repo.
     # However: evolution is currently slow on them anyway.
-    nodes = (c.node() for c in repo.set(b'::%ln', pushop.futureheads))
-    pushop.outobsmarkers = pushop.repo.obsstore.relevantmarkers(nodes)
+    revs = repo.revs(b'::%ln', pushop.futureheads)
+    pushop.outobsmarkers = pushop.repo.obsstore.relevantmarkers(revs=revs)
 
 
 @pushdiscovery(b'bookmarks')
@@ -888,8 +920,13 @@
     if pushop.remotephases is not None and hasphaseheads:
         # check that the remote phase has not changed
         checks = {p: [] for p in phases.allphases}
-        checks[phases.public].extend(pushop.remotephases.publicheads)
-        checks[phases.draft].extend(pushop.remotephases.draftroots)
+        to_node = pushop.repo.unfiltered().changelog.node
+        checks[phases.public].extend(
+            to_node(r) for r in pushop.remotephases.public_heads
+        )
+        checks[phases.draft].extend(
+            to_node(r) for r in pushop.remotephases.draft_roots
+        )
         if any(checks.values()):
             for phase in checks:
                 checks[phase].sort()
@@ -1293,8 +1330,16 @@
         _localphasemove(pushop, cheads)
         # don't push any phase data as there is nothing to push
     else:
-        ana = phases.analyzeremotephases(pushop.repo, cheads, remotephases)
-        pheads, droots = ana
+        unfi = pushop.repo.unfiltered()
+        to_rev = unfi.changelog.index.rev
+        to_node = unfi.changelog.node
+        cheads_revs = [to_rev(n) for n in cheads]
+        pheads_revs, _dr = phases.analyze_remote_phases(
+            pushop.repo,
+            cheads_revs,
+            remotephases,
+        )
+        pheads = [to_node(r) for r in pheads_revs]
         ### Apply remote phase on local
         if remotephases.get(b'publishing', False):
             _localphasemove(pushop, cheads)
@@ -2048,10 +2093,17 @@
     pullop.stepsdone.add(b'phases')
     publishing = bool(remotephases.get(b'publishing', False))
     if remotephases and not publishing:
+        unfi = pullop.repo.unfiltered()
+        to_rev = unfi.changelog.index.rev
+        to_node = unfi.changelog.node
+        pulledsubset_revs = [to_rev(n) for n in pullop.pulledsubset]
         # remote is new and non-publishing
-        pheads, _dr = phases.analyzeremotephases(
-            pullop.repo, pullop.pulledsubset, remotephases
+        pheads_revs, _dr = phases.analyze_remote_phases(
+            pullop.repo,
+            pulledsubset_revs,
+            remotephases,
         )
+        pheads = [to_node(r) for r in pheads_revs]
         dheads = pullop.pulledsubset
     else:
         # Remote is old or publishing all common changesets
@@ -2553,8 +2605,8 @@
     if kwargs.get('obsmarkers', False):
         if heads is None:
             heads = repo.heads()
-        subset = [c.node() for c in repo.set(b'::%ln', heads)]
-        markers = repo.obsstore.relevantmarkers(subset)
+        revs = repo.revs(b'::%ln', heads)
+        markers = repo.obsstore.relevantmarkers(revs=revs)
         markers = obsutil.sortedmarkers(markers)
         bundle2.buildobsmarkerspart(bundler, markers)
 
--- a/mercurial/interfaces/repository.py	Thu Jun 13 09:52:39 2024 +0200
+++ b/mercurial/interfaces/repository.py	Mon Jun 24 12:05:31 2024 +0200
@@ -54,6 +54,8 @@
 CACHE_BRANCHMAP_SERVED = b"branchmap-served"
 # Warm internal changelog cache (eg: persistent nodemap)
 CACHE_CHANGELOG_CACHE = b"changelog-cache"
+# check if a branchmap can use the "pure topo" mode
+CACHE_BRANCHMAP_DETECT_PURE_TOPO = b"branchmap-detect-pure-topo"
 # Warm full manifest cache
 CACHE_FULL_MANIFEST = b"full-manifest"
 # Warm file-node-tags cache
@@ -78,6 +80,7 @@
 CACHES_ALL = {
     CACHE_BRANCHMAP_SERVED,
     CACHE_BRANCHMAP_ALL,
+    CACHE_BRANCHMAP_DETECT_PURE_TOPO,
     CACHE_CHANGELOG_CACHE,
     CACHE_FILE_NODE_TAGS,
     CACHE_FULL_MANIFEST,
--- a/mercurial/localrepo.py	Thu Jun 13 09:52:39 2024 +0200
+++ b/mercurial/localrepo.py	Mon Jun 24 12:05:31 2024 +0200
@@ -2923,12 +2923,14 @@
 
         if repository.CACHE_BRANCHMAP_SERVED in caches:
             if tr is None or tr.changes[b'origrepolen'] < len(self):
-                # accessing the 'served' branchmap should refresh all the others,
                 self.ui.debug(b'updating the branch cache\n')
-                self.filtered(b'served').branchmap()
-                self.filtered(b'served.hidden').branchmap()
-                # flush all possibly delayed write.
-                self._branchcaches.write_delayed(self)
+                dpt = repository.CACHE_BRANCHMAP_DETECT_PURE_TOPO in caches
+                served = self.filtered(b'served')
+                self._branchcaches.update_disk(served, detect_pure_topo=dpt)
+                served_hidden = self.filtered(b'served.hidden')
+                self._branchcaches.update_disk(
+                    served_hidden, detect_pure_topo=dpt
+                )
 
         if repository.CACHE_CHANGELOG_CACHE in caches:
             self.changelog.update_caches(transaction=tr)
@@ -2957,7 +2959,7 @@
 
         if repository.CACHE_FILE_NODE_TAGS in caches:
             # accessing fnode cache warms the cache
-            tagsmod.fnoderevs(self.ui, unfi, unfi.changelog.revs())
+            tagsmod.warm_cache(self)
 
         if repository.CACHE_TAGS_DEFAULT in caches:
             # accessing tags warm the cache
@@ -2971,9 +2973,14 @@
             # even if they haven't explicitly been requested yet (if they've
             # never been used by hg, they won't ever have been written, even if
             # they're a subset of another kind of cache that *has* been used).
+            dpt = repository.CACHE_BRANCHMAP_DETECT_PURE_TOPO in caches
+
             for filt in repoview.filtertable.keys():
                 filtered = self.filtered(filt)
-                filtered.branchmap().write(filtered)
+                self._branchcaches.update_disk(filtered, detect_pure_topo=dpt)
+
+        # flush all possibly delayed writes.
+        self._branchcaches.write_dirty(self)
 
     def invalidatecaches(self):
         if '_tagscache' in vars(self):
--- a/mercurial/match.py	Thu Jun 13 09:52:39 2024 +0200
+++ b/mercurial/match.py	Mon Jun 24 12:05:31 2024 +0200
@@ -395,9 +395,18 @@
 
 class basematcher:
     def __init__(self, badfn=None):
+        self._was_tampered_with = False
         if badfn is not None:
             self.bad = badfn
 
+    def was_tampered_with_nonrec(self):
+        # [_was_tampered_with] is used to track whether extensions changed
+        # the matcher behavior (crazy stuff!), so we can disable the rust
+        # fast path.
+        return self._was_tampered_with
+
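+    # recursive variant: matchers that combine other matchers override this
+    # below to also consult their sub-matchers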
+    def was_tampered_with(self):
+        return self.was_tampered_with_nonrec()
+
     def __call__(self, fn):
         return self.matchfn(fn)
 
@@ -638,6 +647,11 @@
         super(patternmatcher, self).__init__(badfn)
         kindpats.sort()
 
+        if rustmod is not None:
+            # We need to pass the patterns to Rust because they can contain
+            # patterns from the user interface
+            self._kindpats = kindpats
+
         roots, dirs, parents = _rootsdirsandparents(kindpats)
         self._files = _explicitfiles(kindpats)
         self._dirs_explicit = set(dirs)
@@ -880,6 +894,13 @@
         self.bad = m1.bad
         self.traversedir = m1.traversedir
 
+    def was_tampered_with(self):
+        return (
+            self.was_tampered_with_nonrec()
+            or self._m1.was_tampered_with()
+            or self._m2.was_tampered_with()
+        )
+
     def matchfn(self, f):
         return self._m1(f) and not self._m2(f)
 
@@ -963,6 +984,13 @@
         self.bad = m1.bad
         self.traversedir = m1.traversedir
 
+    def was_tampered_with(self):
+        return (
+            self.was_tampered_with_nonrec()
+            or self._m1.was_tampered_with()
+            or self._m2.was_tampered_with()
+        )
+
     @propertycache
     def _files(self):
         if self.isexact():
@@ -1060,6 +1088,11 @@
         if matcher.prefix():
             self._always = any(f == path for f in matcher._files)
 
+    def was_tampered_with(self):
+        return (
+            self.was_tampered_with_nonrec() or self._matcher.was_tampered_with()
+        )
+
     def bad(self, f, msg):
         self._matcher.bad(self._path + b"/" + f, msg)
 
@@ -1194,6 +1227,11 @@
         self.traversedir = m1.traversedir
         self._matchers = matchers
 
+    def was_tampered_with(self):
+        return self.was_tampered_with_nonrec() or any(
+            map(lambda m: m.was_tampered_with(), self._matchers)
+        )
+
     def matchfn(self, f):
         for match in self._matchers:
             if match(f):
--- a/mercurial/obsolete.py	Thu Jun 13 09:52:39 2024 +0200
+++ b/mercurial/obsolete.py	Mon Jun 24 12:05:31 2024 +0200
@@ -771,10 +771,11 @@
             _addchildren(self.children, markers)
         _checkinvalidmarkers(self.repo, markers)
 
-    def relevantmarkers(self, nodes):
-        """return a set of all obsolescence markers relevant to a set of nodes.
+    def relevantmarkers(self, nodes=None, revs=None):
+        """return a set of all obsolescence markers relevant to a set of
+        nodes or revisions.
 
-        "relevant" to a set of nodes mean:
+        "relevant" to a set of nodes or revisions mean:
 
         - marker that use this changeset as successor
         - prune marker of direct children on this changeset
@@ -782,8 +783,21 @@
           markers
 
         It is a set so you cannot rely on order."""
+        if nodes is None:
+            nodes = set()
+        if revs is None:
+            revs = set()
 
-        pendingnodes = set(nodes)
+        get_rev = self.repo.unfiltered().changelog.index.get_rev
+        pendingnodes = set()
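+        # seed the walk: scan every marker once, keeping the nodes that
+        # appear (as predecessor, successor, or recorded parent) in the
+        # requested `nodes` or `revs`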
+        for marker in self._all:
+            for node in (marker[0],) + marker[1] + (marker[5] or ()):
+                if node in nodes:
+                    pendingnodes.add(node)
+                elif revs:
+                    rev = get_rev(node)
+                    if rev is not None and rev in revs:
+                        pendingnodes.add(node)
         seenmarkers = set()
         seennodes = set(pendingnodes)
         precursorsmarkers = self.predecessors
@@ -818,7 +832,7 @@
     store = obsstore(repo, repo.svfs, readonly=readonly, **kwargs)
     if store and readonly:
         ui.warn(
-            _(b'obsolete feature not enabled but %i markers found!\n')
+            _(b'"obsolete" feature not enabled but %i markers found!\n')
             % len(list(store))
         )
     return store
--- a/mercurial/obsutil.py	Thu Jun 13 09:52:39 2024 +0200
+++ b/mercurial/obsutil.py	Mon Jun 24 12:05:31 2024 +0200
@@ -108,7 +108,7 @@
     elif exclusive:
         rawmarkers = exclusivemarkers(repo, nodes)
     else:
-        rawmarkers = repo.obsstore.relevantmarkers(nodes)
+        rawmarkers = repo.obsstore.relevantmarkers(nodes=nodes)
 
     for markerdata in rawmarkers:
         yield marker(repo, markerdata)
--- a/mercurial/pathutil.py	Thu Jun 13 09:52:39 2024 +0200
+++ b/mercurial/pathutil.py	Mon Jun 24 12:05:31 2024 +0200
@@ -180,6 +180,13 @@
                 self.auditeddir.clear()
                 self._cached = False
 
+    def clear_audit_cache(self):
+        """reset all audit cache
+
+        intended for debug and performance benchmark purposes"""
+        self.audited.clear()
+        self.auditeddir.clear()
+
 
 def canonpath(
     root: bytes,
--- a/mercurial/phases.py	Thu Jun 13 09:52:39 2024 +0200
+++ b/mercurial/phases.py	Mon Jun 24 12:05:31 2024 +0200
@@ -109,6 +109,7 @@
 from typing import (
     Any,
     Callable,
+    Collection,
     Dict,
     Iterable,
     List,
@@ -127,7 +128,6 @@
 )
 from . import (
     error,
-    pycompat,
     requirements,
     smartset,
     txnutil,
@@ -414,6 +414,27 @@
             ]
         )
 
+    def get_raw_set(
+        self,
+        repo: "localrepo.localrepository",
+        phase: int,
+    ) -> Set[int]:
+        """return the set of revision in that phase
+
+        The returned set is not filtered and might contains revision filtered
+        for the passed repoview.
+
+        The returned set might be the internal one and MUST NOT be mutated to
+        avoid side effect.
+        """
+        if phase == public:
+            raise error.ProgrammingError("cannot get_set for public phase")
+        self._ensure_phase_sets(repo.unfiltered())
+        revs = self._phasesets.get(phase)
+        if revs is None:
+            return set()
+        return revs
+
     def getrevset(
         self,
         repo: "localrepo.localrepository",
@@ -1095,7 +1116,11 @@
             advanceboundary(repo, trgetter(), phase, heads)
 
 
-def analyzeremotephases(repo, subset, roots):
+def analyze_remote_phases(
+    repo,
+    subset: Collection[int],
+    roots: Dict[bytes, bytes],
+) -> Tuple[Collection[int], Collection[int]]:
     """Compute phases heads and root in a subset of node from root dict
 
     * subset is heads of the subset
@@ -1105,8 +1130,8 @@
     """
     repo = repo.unfiltered()
     # build list from dictionary
-    draftroots = []
-    has_node = repo.changelog.index.has_node  # to filter unknown nodes
+    draft_roots = []
+    to_rev = repo.changelog.index.get_rev
     for nhex, phase in roots.items():
         if nhex == b'publishing':  # ignore data related to publish option
             continue
@@ -1114,49 +1139,53 @@
         phase = int(phase)
         if phase == public:
             if node != repo.nullid:
-                repo.ui.warn(
-                    _(
-                        b'ignoring inconsistent public root'
-                        b' from remote: %s\n'
-                    )
-                    % nhex
-                )
+                msg = _(b'ignoring inconsistent public root from remote: %s\n')
+                repo.ui.warn(msg % nhex)
         elif phase == draft:
-            if has_node(node):
-                draftroots.append(node)
+            rev = to_rev(node)
+            if rev is not None:  # to filter unknown nodes
+                draft_roots.append(rev)
         else:
-            repo.ui.warn(
-                _(b'ignoring unexpected root from remote: %i %s\n')
-                % (phase, nhex)
-            )
+            msg = _(b'ignoring unexpected root from remote: %i %s\n')
+            repo.ui.warn(msg % (phase, nhex))
     # compute heads
-    publicheads = newheads(repo, subset, draftroots)
-    return publicheads, draftroots
+    public_heads = new_heads(repo, subset, draft_roots)
+    return public_heads, draft_roots
 
 
-class remotephasessummary:
+class RemotePhasesSummary:
     """summarize phase information on the remote side
 
     :publishing: True if the remote is publishing
-    :publicheads: list of remote public phase heads (nodes)
-    :draftheads: list of remote draft phase heads (nodes)
-    :draftroots: list of remote draft phase root (nodes)
+    :public_heads: list of remote public phase heads (revs)
+    :draft_heads: list of remote draft phase heads (revs)
+    :draft_roots: list of remote draft phase root (revs)
     """
 
-    def __init__(self, repo, remotesubset, remoteroots):
+    def __init__(
+        self,
+        repo,
+        remote_subset: Collection[int],
+        remote_roots: Dict[bytes, bytes],
+    ):
         unfi = repo.unfiltered()
-        self._allremoteroots = remoteroots
-
-        self.publishing = remoteroots.get(b'publishing', False)
+        self._allremoteroots: Dict[bytes, bytes] = remote_roots
 
-        ana = analyzeremotephases(repo, remotesubset, remoteroots)
-        self.publicheads, self.draftroots = ana
+        self.publishing: bool = bool(remote_roots.get(b'publishing', False))
+
+        heads, roots = analyze_remote_phases(repo, remote_subset, remote_roots)
+        self.public_heads: Collection[int] = heads
+        self.draft_roots: Collection[int] = roots
         # Get the list of all "heads" revs draft on remote
-        dheads = unfi.set(b'heads(%ln::%ln)', self.draftroots, remotesubset)
-        self.draftheads = [c.node() for c in dheads]
+        dheads = unfi.revs(b'heads(%ld::%ld)', roots, remote_subset)
+        self.draft_heads: Collection[int] = dheads
 
 
-def newheads(repo, heads, roots):
+def new_heads(
+    repo,
+    heads: Collection[int],
+    roots: Collection[int],
+) -> Collection[int]:
     """compute new head of a subset minus another
 
     * `heads`: define the first subset
@@ -1165,16 +1194,15 @@
     # phases > dagop > patch > copies > scmutil > obsolete > obsutil > phases
     from . import dagop
 
-    repo = repo.unfiltered()
-    cl = repo.changelog
-    rev = cl.index.get_rev
     if not roots:
         return heads
-    if not heads or heads == [repo.nullid]:
+    if not heads or heads == [nullrev]:
         return []
     # The logic operated on revisions, convert arguments early for convenience
-    new_heads = {rev(n) for n in heads if n != repo.nullid}
-    roots = [rev(n) for n in roots]
+    # PERF-XXX: maybe heads could directly come as a set without impacting
+    # other users of that value
+    new_heads = set(heads)
+    new_heads.discard(nullrev)
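+    # conceptually, we are computing the heads of `::heads - (roots::)`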
     # compute the area we need to remove
     affected_zone = repo.revs(b"(%ld::%ld)", roots, new_heads)
     # heads in the area are no longer heads
@@ -1192,7 +1220,9 @@
         pruned = dagop.reachableroots(repo, candidates, prunestart)
         new_heads.difference_update(pruned)
 
-    return pycompat.maplist(cl.node, sorted(new_heads))
+    # PERF-XXX: do we actually need a sorted list here? Could we simply
+    # return a set?
+    return sorted(new_heads)
 
 
 def newcommitphase(ui: "uimod.ui") -> int:
--- a/mercurial/profiling.py	Thu Jun 13 09:52:39 2024 +0200
+++ b/mercurial/profiling.py	Mon Jun 24 12:05:31 2024 +0200
@@ -70,6 +70,7 @@
             stats = lsprof.Stats(p.getstats())
             stats.sort(pycompat.sysstr(field))
             stats.pprint(limit=limit, file=fp, climit=climit)
+        fp.flush()
 
 
 @contextlib.contextmanager
@@ -97,14 +98,15 @@
     finally:
         thread.stop()
         thread.join()
-        print(
-            b'Collected %d stack frames (%d unique) in %2.2f seconds.'
-            % (
+        m = b'Collected %d stack frames (%d unique) in %2.2f seconds.'
+        m %= (
                 util.timer() - start_time,
                 thread.num_frames(),
                 thread.num_frames(unique=True),
-            )
         )
+        print(m, flush=True)
 
 
 @contextlib.contextmanager
@@ -170,6 +172,7 @@
             kwargs['showtime'] = showtime
 
         statprof.display(fp, data=data, format=displayformat, **kwargs)
+        fp.flush()
 
 
 class profile:
--- a/mercurial/repoview.py	Thu Jun 13 09:52:39 2024 +0200
+++ b/mercurial/repoview.py	Mon Jun 24 12:05:31 2024 +0200
@@ -397,6 +397,9 @@
     """
 
     def __init__(self, repo, filtername, visibilityexceptions=None):
+        if filtername is None:
+            msg = "repoview should have a non-None filtername"
+            raise error.ProgrammingError(msg)
         object.__setattr__(self, '_unfilteredrepo', repo)
         object.__setattr__(self, 'filtername', filtername)
         object.__setattr__(self, '_clcachekey', None)
--- a/mercurial/revset.py	Thu Jun 13 09:52:39 2024 +0200
+++ b/mercurial/revset.py	Mon Jun 24 12:05:31 2024 +0200
@@ -149,6 +149,22 @@
         return x & subset
 
 
+def raw_node_set(repo, subset, x, order):
+    """argument is a list of nodeid, resolve and use them"""
+    nodes = _ordered_node_set(repo, x)
+    if order == followorder:
+        return subset & nodes
+    else:
+        return nodes & subset
+
+
+def _ordered_node_set(repo, nodes):
+    if not nodes:
+        return baseset()
+    to_rev = repo.changelog.index.rev
+    return baseset([to_rev(r) for r in nodes])
+
+
 def rangeset(repo, subset, x, y, order):
     m = getset(repo, fullreposet(repo), x)
     n = getset(repo, fullreposet(repo), y)
@@ -2772,6 +2788,7 @@
     b"parent": parentspec,
     b"parentpost": parentpost,
     b"smartset": rawsmartset,
+    b"nodeset": raw_node_set,
 }
 
 relations = {
--- a/mercurial/revsetlang.py	Thu Jun 13 09:52:39 2024 +0200
+++ b/mercurial/revsetlang.py	Mon Jun 24 12:05:31 2024 +0200
@@ -392,7 +392,7 @@
     elif op == b'negate':
         s = getstring(x[1], _(b"can't negate that"))
         return _analyze((b'string', b'-' + s))
-    elif op in (b'string', b'symbol', b'smartset'):
+    elif op in (b'string', b'symbol', b'smartset', b'nodeset'):
         return x
     elif op == b'rangeall':
         return (op, None)
@@ -441,8 +441,9 @@
         return 0, x
 
     op = x[0]
-    if op in (b'string', b'symbol', b'smartset'):
-        return 0.5, x  # single revisions are small
+    if op in (b'string', b'symbol', b'smartset', b'nodeset'):
+        # single revisions are small, and sets of already computed
+        # revisions are assumed to be cheap.
+        return 0.5, x
     elif op == b'and':
         wa, ta = _optimize(x[1])
         wb, tb = _optimize(x[2])
@@ -784,6 +785,8 @@
             if isinstance(arg, set):
                 arg = sorted(arg)
             ret.append(_formatintlist(list(arg)))
+        elif t == b'nodeset':
+            ret.append(_formatlistexp(list(arg), b"n"))
         else:
             raise error.ProgrammingError(b"unknown revspec item type: %r" % t)
     return b''.join(ret)
@@ -801,6 +804,10 @@
             newtree = (b'smartset', smartset.baseset(arg))
             inputs.append(newtree)
             ret.append(b"$")
+        elif t == b'nodeset':
+            newtree = (b'nodeset', arg)
+            inputs.append(newtree)
+            ret.append(b"$")
         else:
             raise error.ProgrammingError(b"unknown revspec item type: %r" % t)
     expr = b''.join(ret)
@@ -863,6 +870,12 @@
                 ret.append((b'baseset', arg))
                 pos += 1
                 continue
+            elif islist and d == b'n' and arg:
+                # we cannot turn the nodes into revisions yet, but not
+                # serializing them will save a lot of time for large sets.
+                ret.append((b'nodeset', arg))
+                pos += 1
+                continue
             try:
                 ret.append((None, f(list(arg), d)))
             except (TypeError, ValueError):
--- a/mercurial/scmposix.py	Thu Jun 13 09:52:39 2024 +0200
+++ b/mercurial/scmposix.py	Mon Jun 24 12:05:31 2024 +0200
@@ -60,8 +60,6 @@
 def userrcpath() -> List[bytes]:
     if pycompat.sysplatform == b'plan9':
         return [encoding.environ[b'home'] + b'/lib/hgrc']
-    elif pycompat.isdarwin:
-        return [os.path.expanduser(b'~/.hgrc')]
     else:
         confighome = encoding.environ.get(b'XDG_CONFIG_HOME')
         if confighome is None or not os.path.isabs(confighome):
--- a/mercurial/scmutil.py	Thu Jun 13 09:52:39 2024 +0200
+++ b/mercurial/scmutil.py	Mon Jun 24 12:05:31 2024 +0200
@@ -349,7 +349,7 @@
         self._newfiles.add(f)
 
 
-def filteredhash(repo, maxrev, needobsolete=False):
+def combined_filtered_and_obsolete_hash(repo, maxrev, needobsolete=False):
     """build hash of filtered revisions in the current repoview.
 
     Multiple caches perform up-to-date validation by checking that the
@@ -375,16 +375,69 @@
 
     result = cl._filteredrevs_hashcache.get(key)
     if not result:
-        revs = sorted(r for r in cl.filteredrevs | obsrevs if r <= maxrev)
+        revs, obs_revs = _filtered_and_obs_revs(repo, maxrev)
+        if needobsolete:
+            revs = revs | obs_revs
+        revs = sorted(revs)
         if revs:
-            s = hashutil.sha1()
-            for rev in revs:
-                s.update(b'%d;' % rev)
-            result = s.digest()
+            result = _hash_revs(revs)
             cl._filteredrevs_hashcache[key] = result
     return result
 
 
+def filtered_and_obsolete_hash(repo, maxrev):
+    """build hashs of filtered and obsolete revisions in the current repoview.
+
+    Multiple caches perform up-to-date validation by checking that the
+    tiprev and tipnode stored in the cache file match the current repository.
+    However, this is not sufficient for validating repoviews because the set
+    of revisions in the view may change without the repository tiprev and
+    tipnode changing.
+
+    This function hashes all the revs filtered from the view up to maxrev and
+    returns that SHA-1 digest. The obsolete revisions hashed are only the
+    non-filtered ones.
+    """
+    cl = repo.changelog
+    obs_set = obsolete.getrevs(repo, b'obsolete')
+    key = (maxrev, hash(cl.filteredrevs), hash(obs_set))
+
+    result = cl._filteredrevs_hashcache.get(key)
+    if result is None:
+        filtered_hash = None
+        obs_hash = None
+        filtered_revs, obs_revs = _filtered_and_obs_revs(repo, maxrev)
+        if filtered_revs:
+            filtered_hash = _hash_revs(filtered_revs)
+        if obs_revs:
+            obs_hash = _hash_revs(obs_revs)
+        result = (filtered_hash, obs_hash)
+        cl._filteredrevs_hashcache[key] = result
+    return result
+
+
+def _filtered_and_obs_revs(repo, max_rev):
+    """return the set of filtered and non-filtered obsolete revision"""
+    cl = repo.changelog
+    obs_set = obsolete.getrevs(repo, b'obsolete')
+    filtered_set = cl.filteredrevs
+    if cl.filteredrevs:
+        obs_set = obs_set - cl.filteredrevs
+    if max_rev < (len(cl) - 1):
+        # there might be revisions to filter out
+        filtered_set = set(r for r in filtered_set if r <= max_rev)
+        obs_set = set(r for r in obs_set if r <= max_rev)
+    return (filtered_set, obs_set)
+
+
+def _hash_revs(revs):
+    """return a hash from a list of revision numbers"""
+    s = hashutil.sha1()
+    for rev in revs:
+        s.update(b'%d;' % rev)
+    return s.digest()
+
+
 def walkrepos(path, followsym=False, seen_dirs=None, recurse=False):
     """yield every hg repository under path, always recursively.
     The recurse flag will only control recursion into repo working dirs"""
--- a/mercurial/store.py	Thu Jun 13 09:52:39 2024 +0200
+++ b/mercurial/store.py	Mon Jun 24 12:05:31 2024 +0200
@@ -453,6 +453,10 @@
                 self._file_size = 0
         return self._file_size
 
+    @property
+    def has_size(self):
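+        """True when the size of this file is already known"""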
+        return self._file_size is not None
+
     def get_stream(self, vfs, copies):
         """return data "stream" information for this file
 
@@ -480,6 +484,8 @@
 
     This is returned by `store.walk` and represent some data in the store."""
 
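+    # conservative default: assume this entry may contain volatile files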
+    maybe_volatile = True
+
     def files(self) -> List[StoreFile]:
         raise NotImplementedError
 
@@ -505,6 +511,7 @@
 
     is_revlog = False
 
+    maybe_volatile = attr.ib()
     _entry_path = attr.ib()
     _is_volatile = attr.ib(default=False)
     _file_size = attr.ib(default=None)
@@ -521,6 +528,7 @@
         self._is_volatile = is_volatile
         self._file_size = file_size
         self._files = None
+        self.maybe_volatile = is_volatile
 
     def files(self) -> List[StoreFile]:
         if self._files is None:
@@ -542,6 +550,7 @@
 
     revlog_type = attr.ib(default=None)
     target_id = attr.ib(default=None)
+    maybe_volatile = attr.ib(default=True)
     _path_prefix = attr.ib(default=None)
     _details = attr.ib(default=None)
     _files = attr.ib(default=None)
@@ -558,6 +567,12 @@
         self.target_id = target_id
         self._path_prefix = path_prefix
         assert b'.i' in details, (path_prefix, details)
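+        # an entry may hold volatile files only if one of its extensions is
+        # volatile; the for/else sets maybe_volatile to False when none match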
+        for ext in details:
+            if ext.endswith(REVLOG_FILES_VOLATILE_EXT):
+                self.maybe_volatile = True
+                break
+        else:
+            self.maybe_volatile = False
         self._details = details
         self._files = None
 
@@ -601,7 +616,8 @@
         max_changeset=None,
         preserve_file_count=False,
     ):
-        if (
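+        # the fast path below can only be used when every file size is
+        # already known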
+        pre_sized = all(f.has_size for f in self.files())
+        if pre_sized and (
             repo is None
             or max_changeset is None
             # This use revlog-v2, ignore for now
--- a/mercurial/streamclone.py	Thu Jun 13 09:52:39 2024 +0200
+++ b/mercurial/streamclone.py	Mon Jun 24 12:05:31 2024 +0200
@@ -646,11 +646,12 @@
 
     max_linkrev = len(repo)
     file_count = totalfilesize = 0
-    # record the expected size of every file
-    for k, vfs, e in entries:
-        for f in e.files():
-            file_count += 1
-            totalfilesize += f.file_size(vfs)
+    with util.nogc():
+        # record the expected size of every file
+        for k, vfs, e in entries:
+            for f in e.files():
+                file_count += 1
+                totalfilesize += f.file_size(vfs)
 
     progress = repo.ui.makeprogress(
         _(b'bundle'), total=totalfilesize, unit=_(b'bytes')
@@ -722,10 +723,12 @@
     with TempCopyManager() as copy, progress:
         # create a copy of volatile files
         for k, vfs, e in entries:
-            for f in e.files():
-                f.file_size(vfs)  # record the expected size under lock
-                if f.is_volatile:
-                    copy(vfs.join(f.unencoded_path))
+            if e.maybe_volatile:
+                for f in e.files():
+                    if f.is_volatile:
+                        # record the expected size under lock
+                        f.file_size(vfs)
+                        copy(vfs.join(f.unencoded_path))
         # the first yield release the lock on the repository
         yield None
 
@@ -770,23 +773,26 @@
         matcher = narrowspec.match(repo.root, includes, excludes)
 
     phase = not repo.publishing()
-    entries = _walkstreamfiles(
-        repo,
-        matcher,
-        phase=phase,
-        obsolescence=includeobsmarkers,
-    )
-    for entry in entries:
-        yield (_srcstore, entry)
+    # Python's gc goes crazy over all the small containers we create;
+    # disabling it for the duration helps performance a lot.
+    with util.nogc():
+        entries = _walkstreamfiles(
+            repo,
+            matcher,
+            phase=phase,
+            obsolescence=includeobsmarkers,
+        )
+        for entry in entries:
+            yield (_srcstore, entry)
 
-    for name in cacheutil.cachetocopy(repo):
-        if repo.cachevfs.exists(name):
-            # not really a StoreEntry, but close enough
-            entry = store.SimpleStoreEntry(
-                entry_path=name,
-                is_volatile=True,
-            )
-            yield (_srccache, entry)
+        for name in cacheutil.cachetocopy(repo):
+            if repo.cachevfs.exists(name):
+                # not really a StoreEntry, but close enough
+                entry = store.SimpleStoreEntry(
+                    entry_path=name,
+                    is_volatile=True,
+                )
+                yield (_srccache, entry)
 
 
 def generatev2(repo, includes, excludes, includeobsmarkers):
@@ -847,7 +853,10 @@
     - ways to adjust the number of expected entries/files ?
     """
 
-    with repo.lock():
+    # Python's gc goes crazy over all the small containers we create while
+    # considering the files to preserve; disabling it for the duration helps
+    # performance a lot.
+    with repo.lock(), util.nogc():
 
         repo.ui.debug(b'scanning\n')
 
--- a/mercurial/tags.py	Thu Jun 13 09:52:39 2024 +0200
+++ b/mercurial/tags.py	Mon Jun 24 12:05:31 2024 +0200
@@ -21,6 +21,7 @@
     short,
 )
 from .i18n import _
+from .revlogutils.constants import ENTRY_NODE_ID
 from . import (
     encoding,
     error,
@@ -30,6 +31,7 @@
 )
 from .utils import stringutil
 
+
 # Tags computation can be expensive and caches exist to make it fast in
 # the common case.
 #
@@ -80,6 +82,34 @@
 # setting it) for each tag is last.
 
 
+def warm_cache(repo):
+    """ensure the cache is properly filled"""
+    unfi = repo.unfiltered()
+    fnodescache = hgtagsfnodescache(unfi)
+    validated_fnodes = set()
+    unknown_entries = set()
+    flog = None
+
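+    # read (node, rev) pairs straight from the changelog index, avoiding a
+    # node-to-rev lookup for every revision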
+    entries = enumerate(repo.changelog.index)
+    node_revs = ((e[ENTRY_NODE_ID], rev) for (rev, e) in entries)
+
+    for node, rev in node_revs:
+        fnode = fnodescache.getfnode(node=node, rev=rev)
+        if fnode != repo.nullid:
+            if fnode not in validated_fnodes:
+                if flog is None:
+                    flog = repo.file(b'.hgtags')
+                if flog.hasnode(fnode):
+                    validated_fnodes.add(fnode)
+                else:
+                    unknown_entries.add(node)
+
+    if unknown_entries:
+        fnodescache.refresh_invalid_nodes(unknown_entries)
+
+    fnodescache.write()
+
+
 def fnoderevs(ui, repo, revs):
     """return the list of '.hgtags' fnodes used in a set revisions
 
@@ -433,7 +463,11 @@
     if (
         cacherev == tiprev
         and cachenode == tipnode
-        and cachehash == scmutil.filteredhash(repo, tiprev)
+        and cachehash
+        == scmutil.combined_filtered_and_obsolete_hash(
+            repo,
+            tiprev,
+        )
     ):
         tags = _readtags(ui, repo, cachelines, cachefile.name)
         cachefile.close()
@@ -441,7 +475,14 @@
     if cachefile:
         cachefile.close()  # ignore rest of file
 
-    valid = (tiprev, tipnode, scmutil.filteredhash(repo, tiprev))
+    valid = (
+        tiprev,
+        tipnode,
+        scmutil.combined_filtered_and_obsolete_hash(
+            repo,
+            tiprev,
+        ),
+    )
 
     repoheads = repo.heads()
     # Case 2 (uncommon): empty repo; get out quickly and don't bother
@@ -479,7 +520,7 @@
     return (repoheads, cachefnode, valid, None, True)
 
 
-def _getfnodes(ui, repo, nodes):
+def _getfnodes(ui, repo, nodes=None, revs=None):
     """return .hgtags fnodes for a list of changeset nodes
 
     Return value is a {node: fnode} mapping. There will be no entry for nodes
@@ -491,9 +532,21 @@
     validated_fnodes = set()
     unknown_entries = set()
 
+    if nodes is None and revs is None:
+        raise error.ProgrammingError("need to specify either nodes or revs")
+    elif nodes is not None and revs is None:
+        to_rev = repo.changelog.index.rev
+        nodes_revs = ((n, to_rev(n)) for n in nodes)
+    elif nodes is None and revs is not None:
+        to_node = repo.changelog.node
+        nodes_revs = ((to_node(r), r) for r in revs)
+    else:
+        msg = "need to specify only one of nodes or revs"
+        raise error.ProgrammingError(msg)
+
     flog = None
-    for node in nodes:
-        fnode = fnodescache.getfnode(node)
+    for node, rev in nodes_revs:
+        fnode = fnodescache.getfnode(node=node, rev=rev)
         if fnode != repo.nullid:
             if fnode not in validated_fnodes:
                 if flog is None:
@@ -746,7 +799,7 @@
             # TODO: zero fill entire record, because it's invalid not missing?
             self._raw.extend(b'\xff' * (wantedlen - rawlen))
 
-    def getfnode(self, node, computemissing=True):
+    def getfnode(self, node, computemissing=True, rev=None):
         """Obtain the filenode of the .hgtags file at a specified revision.
 
         If the value is in the cache, the entry will be validated and returned.
@@ -761,7 +814,8 @@
         if node == self._repo.nullid:
             return node
 
-        rev = self._repo.changelog.rev(node)
+        if rev is None:
+            rev = self._repo.changelog.rev(node)
 
         self.lookupcount += 1
 
--- a/mercurial/util.py	Thu Jun 13 09:52:39 2024 +0200
+++ b/mercurial/util.py	Mon Jun 24 12:05:31 2024 +0200
@@ -35,6 +35,7 @@
 import warnings
 
 from typing import (
+    Any,
     Iterable,
     Iterator,
     List,
@@ -1812,7 +1813,7 @@
     return False
 
 
-def nogc(func):
+def nogc(func=None) -> Any:
     """disable garbage collector
 
     Python's garbage collector triggers a GC each time a certain number of
@@ -1825,15 +1826,27 @@
     This garbage collector issue have been fixed in 2.7. But it still affect
     CPython's performance.
     """
-
+    if func is None:
+        return _nogc_context()
+    else:
+        return _nogc_decorator(func)
+
+
+@contextlib.contextmanager
+def _nogc_context():
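+    """disable the garbage collector for the duration of the context"""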
+    gcenabled = gc.isenabled()
+    gc.disable()
+    try:
+        yield
+    finally:
+        if gcenabled:
+            gc.enable()
+
+
+def _nogc_decorator(func):
     def wrapper(*args, **kwargs):
-        gcenabled = gc.isenabled()
-        gc.disable()
-        try:
+        with _nogc_context():
             return func(*args, **kwargs)
-        finally:
-            if gcenabled:
-                gc.enable()
 
     return wrapper
 
--- a/mercurial/utils/repoviewutil.py	Thu Jun 13 09:52:39 2024 +0200
+++ b/mercurial/utils/repoviewutil.py	Mon Jun 24 12:05:31 2024 +0200
@@ -6,6 +6,7 @@
 # This software may be used and distributed according to the terms of the
 # GNU General Public License version 2 or any later version.
 
+from .. import error
 
 ### Nearest subset relation
 # Nearest subset of filter X is a filter Y so that:
@@ -21,3 +22,30 @@
     b'served': b'immutable',
     b'immutable': b'base',
 }
+
+
+def get_ordered_subset():
+    """return a list of subset name from dependencies to dependents"""
+    _unfinalized = set(subsettable.values())
+    ordered = []
+
+    # the subset table is expected to be small so we do the stupid N² version
+    # of the algorithm
+    while _unfinalized:
+        this_level = []
+        for candidate in _unfinalized:
+            dependency = subsettable.get(candidate)
+            if dependency not in _unfinalized:
+                this_level.append(candidate)
+
+        if not this_level:
+            msg = "cyclic dependencies in repoview subset %r"
+            msg %= subsettable
+            raise error.ProgrammingError(msg)
+
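+        # sort each level for deterministic output; a None entry sorts via
+        # the empty string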
+        this_level.sort(key=lambda x: x if x is not None else '')
+
+        ordered.extend(this_level)
+        _unfinalized.difference_update(this_level)
+
+    return ordered
--- a/mercurial/vfs.py	Thu Jun 13 09:52:39 2024 +0200
+++ b/mercurial/vfs.py	Mon Jun 24 12:05:31 2024 +0200
@@ -616,6 +616,10 @@
     def options(self, value):
         self.vfs.options = value
 
+    @property
+    def audit(self):
+        return self.vfs.audit
+
 
 class filtervfs(proxyvfs, abstractvfs):
     '''Wrapper vfs for filtering filenames with a function.'''
--- a/mercurial/wireprotov1server.py	Thu Jun 13 09:52:39 2024 +0200
+++ b/mercurial/wireprotov1server.py	Mon Jun 24 12:05:31 2024 +0200
@@ -312,6 +312,7 @@
         if line.startswith(bundlecaches.CLONEBUNDLESCHEME):
             continue
         modified_manifest.append(line)
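+    # append an empty element so the final join ends with a trailing newline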
+    modified_manifest.append(b'')
     return wireprototypes.bytesresponse(b'\n'.join(modified_manifest))
 
 
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/relnotes/6.8	Mon Jun 24 12:05:31 2024 +0200
@@ -0,0 +1,72 @@
+= Mercurial 6.8rc0 =
+
+/!\ This is a tentative release; any and all notes below are subject to change or removal.
+
+As usual, a *lot* of patches don't make it to this list.
+
+== New Features or performance improvements ==
+
+ * Phases have been reworked to improve their general performance
+    * revset: stop serializing node when using "%ln"
+    * phases: convert remote phase root to node while reading them
+    * phases: use revision number in new_heads
+    * phases: use revision number in analyze_remote_phases
+    * phases: stop using `repo.set` in `remotephasessummary`
+    * phases: move RemotePhasesSummary to revision number
+    * phases: use revision number in `_pushdiscoveryphase`
+    * phases: introduce an efficient way to access revisions in a set
+    * phases: rework the logic of _pushdiscoveryphase to bound complexity
+ * The Rust working copy code is being used by more places now:
+     * matchers: support patternmatcher in rust
+     * dirstate: remove the python-side whitelist of allowed matchers
+ * stream-clone: disable gc for `_entries_walk` duration
+ * stream-clone: disable gc for the initial section for the v3 format
+ * postincoming: avoid computing branchhead if no report will be posted
+ * stream-clone: disable gc for the entry listing section for the v2 format
+ * perf: allow profiling of more than one run
+ * perf: run the gc before each run
+ * perf: start recording total time after warming
+ * perf: clear vfs audit_cache before each run
+ * outgoing: rework the handling of the `missingroots` case to be faster
+ * outgoing: add a simple fastpath when there is no common
+ * tags-cache: skip the filternode step if we are not going to use it
+ * tags-cache: directly operate on rev-num warming hgtagsfnodescache
+ * tags-cache: directly perform a minimal walk for hgtagsfnodescache warming
+ * exchange: improve computation of relevant markers for large repos
+
+
+== New Experimental Features ==
+
+ * Introduce a new experimental branch cache "v3":
+    * branchcache: add more test for the logic around obsolescence and branch heads
+    * branchcache: skip entries that are topological heads in the on disk file
+    * branchcache: add a "pure topological head" fast path
+    * branchcache: allow to detect "pure topological case" for branchmap
+
+
+== Bug Fixes ==
+
+ * perf-stream-locked-section: actually use v1 generation when requested
+ * perf-stream-locked-section: fix the call to the v3 generator
+ * perf-stream-locked-section: advertise the right version key in the help
+ * stream: in v3, skip the "size" fast path if the entries have some unknown size
+ * stream-clone: stop getting the file size of all file in v3
+ * streamclone: stop listing files for entries that have no volatile files
+ * perf-stream-consume: use the source repository config when applying
+ * bundle: do not check the changegroup version if no changegroup is included
+ * perf: create the temporary target next to the source in stream-consume
+ * bundlespec: fix the "streamv2" and "streamv3-exp" variant
+ * push: rework the computation of fallbackheads to be correct
+ * profiler: flush after writing the profiler output
+ * base-revsets: use an author that actually exercises a lot of changesets
+ * hgrc: search XDG_CONFIG_HOME on mac
+ * clonebundles: add missing newline to legacy response
+ * narrow: add a test for linkrev computation done during widen
+
+== Backwards Compatibility Changes ==
+
+== Internal API Changes ==
+
+== Miscellaneous ==
+
+ * obsolete: quote the feature name
\ No newline at end of file
--- a/rust/hg-core/src/filepatterns.rs	Thu Jun 13 09:52:39 2024 +0200
+++ b/rust/hg-core/src/filepatterns.rs	Mon Jun 24 12:05:31 2024 +0200
@@ -150,21 +150,21 @@
         .collect()
 }
 
-pub fn parse_pattern_syntax(
+pub fn parse_pattern_syntax_kind(
     kind: &[u8],
 ) -> Result<PatternSyntax, PatternError> {
     match kind {
-        b"re:" => Ok(PatternSyntax::Regexp),
-        b"path:" => Ok(PatternSyntax::Path),
-        b"filepath:" => Ok(PatternSyntax::FilePath),
-        b"relpath:" => Ok(PatternSyntax::RelPath),
-        b"rootfilesin:" => Ok(PatternSyntax::RootFilesIn),
-        b"relglob:" => Ok(PatternSyntax::RelGlob),
-        b"relre:" => Ok(PatternSyntax::RelRegexp),
-        b"glob:" => Ok(PatternSyntax::Glob),
-        b"rootglob:" => Ok(PatternSyntax::RootGlob),
-        b"include:" => Ok(PatternSyntax::Include),
-        b"subinclude:" => Ok(PatternSyntax::SubInclude),
+        b"re" => Ok(PatternSyntax::Regexp),
+        b"path" => Ok(PatternSyntax::Path),
+        b"filepath" => Ok(PatternSyntax::FilePath),
+        b"relpath" => Ok(PatternSyntax::RelPath),
+        b"rootfilesin" => Ok(PatternSyntax::RootFilesIn),
+        b"relglob" => Ok(PatternSyntax::RelGlob),
+        b"relre" => Ok(PatternSyntax::RelRegexp),
+        b"glob" => Ok(PatternSyntax::Glob),
+        b"rootglob" => Ok(PatternSyntax::RootGlob),
+        b"include" => Ok(PatternSyntax::Include),
+        b"subinclude" => Ok(PatternSyntax::SubInclude),
         _ => Err(PatternError::UnsupportedSyntax(
             String::from_utf8_lossy(kind).to_string(),
         )),
--- a/rust/hg-core/src/lib.rs	Thu Jun 13 09:52:39 2024 +0200
+++ b/rust/hg-core/src/lib.rs	Mon Jun 24 12:05:31 2024 +0200
@@ -41,7 +41,7 @@
 
 use crate::utils::hg_path::{HgPathBuf, HgPathError};
 pub use filepatterns::{
-    parse_pattern_syntax, read_pattern_file, IgnorePattern,
+    parse_pattern_syntax_kind, read_pattern_file, IgnorePattern,
     PatternFileWarning, PatternSyntax,
 };
 use std::collections::HashMap;
--- a/rust/hg-core/src/revlog/index.rs	Thu Jun 13 09:52:39 2024 +0200
+++ b/rust/hg-core/src/revlog/index.rs	Mon Jun 24 12:05:31 2024 +0200
@@ -18,11 +18,12 @@
 };
 
 pub const INDEX_ENTRY_SIZE: usize = 64;
+pub const INDEX_HEADER_SIZE: usize = 4;
 pub const COMPRESSION_MODE_INLINE: u8 = 2;
 
 #[derive(Debug)]
 pub struct IndexHeader {
-    pub(super) header_bytes: [u8; 4],
+    pub(super) header_bytes: [u8; INDEX_HEADER_SIZE],
 }
 
 #[derive(Copy, Clone)]
@@ -92,14 +93,21 @@
     truncation: Option<usize>,
     /// Bytes that were added after reading the index
     added: Vec<u8>,
+    first_entry: [u8; INDEX_ENTRY_SIZE],
 }
 
 impl IndexData {
     pub fn new(bytes: Box<dyn Deref<Target = [u8]> + Send + Sync>) -> Self {
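+        // On disk, the first 4 bytes of the first index entry double as the
+        // index header, so keep a patched copy of that entry with those bytes
+        // zeroed out (the data offset of revision 0 is always 0).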
+        let mut first_entry = [0; INDEX_ENTRY_SIZE];
+        if bytes.len() >= INDEX_ENTRY_SIZE {
+            first_entry[INDEX_HEADER_SIZE..]
+                .copy_from_slice(&bytes[INDEX_HEADER_SIZE..INDEX_ENTRY_SIZE])
+        }
         Self {
             bytes,
             truncation: None,
             added: vec![],
+            first_entry,
         }
     }
 
@@ -356,7 +364,6 @@
                 let end = offset + INDEX_ENTRY_SIZE;
                 let entry = IndexEntry {
                     bytes: &bytes[offset..end],
-                    offset_override: None,
                 };
 
                 offset += INDEX_ENTRY_SIZE + entry.compressed_len() as usize;
@@ -449,11 +456,17 @@
         if rev == NULL_REVISION {
             return None;
         }
-        Some(if self.is_inline() {
-            self.get_entry_inline(rev)
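+        // revision 0 is served from the patched copy, because the first 4
+        // bytes of its on-disk entry are occupied by the index header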
+        if rev.0 == 0 {
+            Some(IndexEntry {
+                bytes: &self.bytes.first_entry[..],
+            })
         } else {
-            self.get_entry_separated(rev)
-        })
+            Some(if self.is_inline() {
+                self.get_entry_inline(rev)
+            } else {
+                self.get_entry_separated(rev)
+            })
+        }
     }
 
     /// Return the binary content of the index entry for the given revision
@@ -512,13 +525,7 @@
         let end = start + INDEX_ENTRY_SIZE;
         let bytes = &self.bytes[start..end];
 
-        // See IndexEntry for an explanation of this override.
-        let offset_override = Some(end);
-
-        IndexEntry {
-            bytes,
-            offset_override,
-        }
+        IndexEntry { bytes }
     }
 
     fn get_entry_separated(&self, rev: Revision) -> IndexEntry {
@@ -526,20 +533,12 @@
         let end = start + INDEX_ENTRY_SIZE;
         let bytes = &self.bytes[start..end];
 
-        // Override the offset of the first revision as its bytes are used
-        // for the index's metadata (saving space because it is always 0)
-        let offset_override = if rev == Revision(0) { Some(0) } else { None };
-
-        IndexEntry {
-            bytes,
-            offset_override,
-        }
+        IndexEntry { bytes }
     }
 
     fn null_entry(&self) -> IndexEntry {
         IndexEntry {
             bytes: &[0; INDEX_ENTRY_SIZE],
-            offset_override: Some(0),
         }
     }
 
@@ -755,13 +754,20 @@
         revision_data: RevisionDataParams,
     ) -> Result<(), RevlogError> {
         revision_data.validate()?;
+        let entry_v1 = revision_data.into_v1();
+        let entry_bytes = entry_v1.as_bytes();
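+        // when appending the very first entry, also populate the patched
+        // copy used to serve revision 0 (its header bytes stay zeroed)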
+        if self.bytes.len() == 0 {
+            self.bytes.first_entry[INDEX_HEADER_SIZE..].copy_from_slice(
+                &entry_bytes[INDEX_HEADER_SIZE..INDEX_ENTRY_SIZE],
+            )
+        }
         if self.is_inline() {
             let new_offset = self.bytes.len();
             if let Some(offsets) = &mut *self.get_offsets_mut() {
                 offsets.push(new_offset)
             }
         }
-        self.bytes.added.extend(revision_data.into_v1().as_bytes());
+        self.bytes.added.extend(entry_bytes);
         self.clear_head_revs();
         Ok(())
     }
@@ -1654,7 +1660,6 @@
         let end = offset + INDEX_ENTRY_SIZE;
         let entry = IndexEntry {
             bytes: &bytes[offset..end],
-            offset_override: None,
         };
 
         offset += INDEX_ENTRY_SIZE + entry.compressed_len() as usize;
@@ -1678,29 +1683,14 @@
 #[derive(Debug)]
 pub struct IndexEntry<'a> {
     bytes: &'a [u8],
-    /// Allows to override the offset value of the entry.
-    ///
-    /// For interleaved index and data, the offset stored in the index
-    /// corresponds to the separated data offset.
-    /// It has to be overridden with the actual offset in the interleaved
-    /// index which is just after the index block.
-    ///
-    /// For separated index and data, the offset stored in the first index
-    /// entry is mixed with the index headers.
-    /// It has to be overridden with 0.
-    offset_override: Option<usize>,
 }
 
 impl<'a> IndexEntry<'a> {
     /// Return the offset of the data.
     pub fn offset(&self) -> usize {
-        if let Some(offset_override) = self.offset_override {
-            offset_override
-        } else {
-            let mut bytes = [0; 8];
-            bytes[2..8].copy_from_slice(&self.bytes[0..=5]);
-            BigEndian::read_u64(&bytes[..]) as usize
-        }
+        let mut bytes = [0; 8];
+        bytes[2..8].copy_from_slice(&self.bytes[0..=5]);
+        BigEndian::read_u64(&bytes[..]) as usize
     }
     pub fn raw_offset(&self) -> u64 {
         BigEndian::read_u64(&self.bytes[0..8])
@@ -1956,32 +1946,15 @@
     #[test]
     fn test_offset() {
         let bytes = IndexEntryBuilder::new().with_offset(1).build();
-        let entry = IndexEntry {
-            bytes: &bytes,
-            offset_override: None,
-        };
+        let entry = IndexEntry { bytes: &bytes };
 
         assert_eq!(entry.offset(), 1)
     }
 
     #[test]
-    fn test_with_overridden_offset() {
-        let bytes = IndexEntryBuilder::new().with_offset(1).build();
-        let entry = IndexEntry {
-            bytes: &bytes,
-            offset_override: Some(2),
-        };
-
-        assert_eq!(entry.offset(), 2)
-    }
-
-    #[test]
     fn test_compressed_len() {
         let bytes = IndexEntryBuilder::new().with_compressed_len(1).build();
-        let entry = IndexEntry {
-            bytes: &bytes,
-            offset_override: None,
-        };
+        let entry = IndexEntry { bytes: &bytes };
 
         assert_eq!(entry.compressed_len(), 1)
     }
@@ -1989,10 +1962,7 @@
     #[test]
     fn test_uncompressed_len() {
         let bytes = IndexEntryBuilder::new().with_uncompressed_len(1).build();
-        let entry = IndexEntry {
-            bytes: &bytes,
-            offset_override: None,
-        };
+        let entry = IndexEntry { bytes: &bytes };
 
         assert_eq!(entry.uncompressed_len(), 1)
     }
@@ -2002,10 +1972,7 @@
         let bytes = IndexEntryBuilder::new()
             .with_base_revision_or_base_of_delta_chain(Revision(1))
             .build();
-        let entry = IndexEntry {
-            bytes: &bytes,
-            offset_override: None,
-        };
+        let entry = IndexEntry { bytes: &bytes };
 
         assert_eq!(entry.base_revision_or_base_of_delta_chain(), 1.into())
     }
@@ -2016,10 +1983,7 @@
             .with_link_revision(Revision(123))
             .build();
 
-        let entry = IndexEntry {
-            bytes: &bytes,
-            offset_override: None,
-        };
+        let entry = IndexEntry { bytes: &bytes };
 
         assert_eq!(entry.link_revision(), 123.into());
     }
@@ -2028,10 +1992,7 @@
     fn p1_test() {
         let bytes = IndexEntryBuilder::new().with_p1(Revision(123)).build();
 
-        let entry = IndexEntry {
-            bytes: &bytes,
-            offset_override: None,
-        };
+        let entry = IndexEntry { bytes: &bytes };
 
         assert_eq!(entry.p1(), 123.into());
     }
@@ -2040,10 +2001,7 @@
     fn p2_test() {
         let bytes = IndexEntryBuilder::new().with_p2(Revision(123)).build();
 
-        let entry = IndexEntry {
-            bytes: &bytes,
-            offset_override: None,
-        };
+        let entry = IndexEntry { bytes: &bytes };
 
         assert_eq!(entry.p2(), 123.into());
     }
@@ -2054,10 +2012,7 @@
             .unwrap();
         let bytes = IndexEntryBuilder::new().with_node(node).build();
 
-        let entry = IndexEntry {
-            bytes: &bytes,
-            offset_override: None,
-        };
+        let entry = IndexEntry { bytes: &bytes };
 
         assert_eq!(*entry.hash(), node);
     }
--- a/rust/hg-core/src/revlog/mod.rs	Thu Jun 13 09:52:39 2024 +0200
+++ b/rust/hg-core/src/revlog/mod.rs	Mon Jun 24 12:05:31 2024 +0200
@@ -29,6 +29,7 @@
 use self::node::{NODE_BYTES_LENGTH, NULL_NODE};
 use self::nodemap_docket::NodeMapDocket;
 use super::index::Index;
+use super::index::INDEX_ENTRY_SIZE;
 use super::nodemap::{NodeMap, NodeMapError};
 use crate::errors::HgError;
 use crate::vfs::Vfs;
@@ -537,7 +538,12 @@
             .index
             .get_entry(rev)
             .ok_or(RevlogError::InvalidRevision)?;
-        let start = index_entry.offset();
+        let offset = index_entry.offset();
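+        // For inline revlogs, the stored offset only accounts for data
+        // bytes; skip the (rev + 1) index entries interleaved before this
+        // revision's data.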
+        let start = if self.index.is_inline() {
+            offset + ((rev.0 as usize + 1) * INDEX_ENTRY_SIZE)
+        } else {
+            offset
+        };
         let end = start + index_entry.compressed_len() as usize;
         let data = if self.index.is_inline() {
             self.index.data(start, end)
@@ -865,7 +871,7 @@
 #[cfg(test)]
 mod tests {
     use super::*;
-    use crate::index::{IndexEntryBuilder, INDEX_ENTRY_SIZE};
+    use crate::index::IndexEntryBuilder;
     use itertools::Itertools;
 
     #[test]
@@ -903,15 +909,10 @@
             .is_first(true)
             .with_version(1)
             .with_inline(true)
-            .with_offset(INDEX_ENTRY_SIZE)
             .with_node(node0)
             .build();
-        let entry1_bytes = IndexEntryBuilder::new()
-            .with_offset(INDEX_ENTRY_SIZE)
-            .with_node(node1)
-            .build();
+        let entry1_bytes = IndexEntryBuilder::new().with_node(node1).build();
         let entry2_bytes = IndexEntryBuilder::new()
-            .with_offset(INDEX_ENTRY_SIZE)
             .with_p1(Revision(0))
             .with_p2(Revision(1))
             .with_node(node2)
@@ -977,13 +978,9 @@
             .is_first(true)
             .with_version(1)
             .with_inline(true)
-            .with_offset(INDEX_ENTRY_SIZE)
             .with_node(node0)
             .build();
-        let entry1_bytes = IndexEntryBuilder::new()
-            .with_offset(INDEX_ENTRY_SIZE)
-            .with_node(node1)
-            .build();
+        let entry1_bytes = IndexEntryBuilder::new().with_node(node1).build();
         let contents = vec![entry0_bytes, entry1_bytes]
             .into_iter()
             .flatten()
--- a/rust/hg-cpython/src/dirstate/status.rs	Thu Jun 13 09:52:39 2024 +0200
+++ b/rust/hg-cpython/src/dirstate/status.rs	Mon Jun 24 12:05:31 2024 +0200
@@ -11,23 +11,23 @@
 
 use crate::{dirstate::DirstateMap, exceptions::FallbackError};
 use cpython::{
-    exc::ValueError, ObjectProtocol, PyBytes, PyErr, PyList, PyObject,
+    exc::ValueError, ObjectProtocol, PyBool, PyBytes, PyErr, PyList, PyObject,
     PyResult, PyTuple, Python, PythonObject, ToPyObject,
 };
 use hg::dirstate::status::StatusPath;
 use hg::matchers::{
     DifferenceMatcher, IntersectionMatcher, Matcher, NeverMatcher,
-    UnionMatcher,
+    PatternMatcher, UnionMatcher,
 };
 use hg::{
     matchers::{AlwaysMatcher, FileMatcher, IncludeMatcher},
-    parse_pattern_syntax,
+    parse_pattern_syntax_kind,
     utils::{
         files::{get_bytes_from_path, get_path_from_bytes},
         hg_path::{HgPath, HgPathBuf},
     },
-    BadMatch, DirstateStatus, IgnorePattern, PatternFileWarning, StatusError,
-    StatusOptions,
+    BadMatch, DirstateStatus, IgnorePattern, PatternError, PatternFileWarning,
+    StatusError, StatusOptions,
 };
 use std::borrow::Borrow;
 
@@ -153,11 +153,46 @@
     )
 }
 
+fn collect_kindpats(
+    py: Python,
+    matcher: PyObject,
+) -> PyResult<Vec<IgnorePattern>> {
+    matcher
+        .getattr(py, "_kindpats")?
+        .iter(py)?
+        .map(|k| {
+            let k = k?;
+            let syntax = parse_pattern_syntax_kind(
+                k.get_item(py, 0)?.extract::<PyBytes>(py)?.data(py),
+            )
+            .map_err(|e| handle_fallback(py, StatusError::Pattern(e)))?;
+            let pattern = k.get_item(py, 1)?.extract::<PyBytes>(py)?;
+            let pattern = pattern.data(py);
+            let source = k.get_item(py, 2)?.extract::<PyBytes>(py)?;
+            let source = get_path_from_bytes(source.data(py));
+            let new = IgnorePattern::new(syntax, pattern, source);
+            Ok(new)
+        })
+        .collect()
+}
+
 /// Transform a Python matcher into a Rust matcher.
 fn extract_matcher(
     py: Python,
     matcher: PyObject,
 ) -> PyResult<Box<dyn Matcher + Sync>> {
+    let tampered = matcher
+        .call_method(py, "was_tampered_with_nonrec", PyTuple::empty(py), None)?
+        .extract::<PyBool>(py)?
+        .is_true();
+    if tampered {
+        return Err(handle_fallback(
+            py,
+            StatusError::Pattern(PatternError::UnsupportedSyntax(
+                "Pattern matcher was tampered with!".to_string(),
+            )),
+        ));
+    };
     match matcher.get_type(py).name(py).borrow() {
         "alwaysmatcher" => Ok(Box::new(AlwaysMatcher)),
         "nevermatcher" => Ok(Box::new(NeverMatcher)),
@@ -187,33 +222,7 @@
             // Get the patterns from Python even though most of them are
             // redundant with those we will parse later on, as they include
             // those passed from the command line.
-            let ignore_patterns: PyResult<Vec<_>> = matcher
-                .getattr(py, "_kindpats")?
-                .iter(py)?
-                .map(|k| {
-                    let k = k?;
-                    let syntax = parse_pattern_syntax(
-                        &[
-                            k.get_item(py, 0)?
-                                .extract::<PyBytes>(py)?
-                                .data(py),
-                            &b":"[..],
-                        ]
-                        .concat(),
-                    )
-                    .map_err(|e| {
-                        handle_fallback(py, StatusError::Pattern(e))
-                    })?;
-                    let pattern = k.get_item(py, 1)?.extract::<PyBytes>(py)?;
-                    let pattern = pattern.data(py);
-                    let source = k.get_item(py, 2)?.extract::<PyBytes>(py)?;
-                    let source = get_path_from_bytes(source.data(py));
-                    let new = IgnorePattern::new(syntax, pattern, source);
-                    Ok(new)
-                })
-                .collect();
-
-            let ignore_patterns = ignore_patterns?;
+            let ignore_patterns = collect_kindpats(py, matcher)?;
 
             let matcher = IncludeMatcher::new(ignore_patterns)
                 .map_err(|e| handle_fallback(py, e.into()))?;
@@ -241,6 +250,14 @@
 
             Ok(Box::new(DifferenceMatcher::new(m1, m2)))
         }
+        "patternmatcher" => {
+            let patterns = collect_kindpats(py, matcher)?;
+
+            let matcher = PatternMatcher::new(patterns)
+                .map_err(|e| handle_fallback(py, e.into()))?;
+
+            Ok(Box::new(matcher))
+        }
         e => Err(PyErr::new::<FallbackError, _>(
             py,
             format!("Unsupported matcher {}", e),
--- a/setup.py	Thu Jun 13 09:52:39 2024 +0200
+++ b/setup.py	Mon Jun 24 12:05:31 2024 +0200
@@ -252,7 +252,10 @@
         if (
             not e.startswith(b'not trusting file')
             and not e.startswith(b'warning: Not importing')
-            and not e.startswith(b'obsolete feature not enabled')
+            and not (
+                e.startswith(b'obsolete feature not enabled')
+                or e.startswith(b'"obsolete" feature not enabled')
+            )
             and not e.startswith(b'*** failed to import extension')
             and not e.startswith(b'devel-warn:')
             and not (
--- a/tests/common-pattern.py	Thu Jun 13 09:52:39 2024 +0200
+++ b/tests/common-pattern.py	Mon Jun 24 12:05:31 2024 +0200
@@ -114,14 +114,6 @@
         br'(.*file:/)/?(/\$TESTTMP.*)',
         lambda m: m.group(1) + b'*' + m.group(2) + b' (glob)',
     ),
-    # `hg clone --stream` output
-    (
-        br'transferred (\S+?) KB in \S+? seconds \(.+?/sec\)(?: \(glob\))?(.*)',
-        lambda m: (
-            br'transferred %s KB in * seconds (* */sec) (glob)%s'
-            % (m.group(1), m.group(2))
-        ),
-    ),
     # `discovery debug output
     (
         br'\b(\d+) total queries in \d.\d\d\d\ds\b',
--- a/tests/test-acl.t	Thu Jun 13 09:52:39 2024 +0200
+++ b/tests/test-acl.t	Mon Jun 24 12:05:31 2024 +0200
@@ -167,7 +167,6 @@
   listing keys for "phases"
   checking for updated bookmarks
   listing keys for "bookmarks"
-  invalid branch cache (served): tip differs
   listing keys for "bookmarks"
   3 changesets found
   list of changesets:
@@ -187,7 +186,6 @@
   bundle2-input-part: total payload size * (glob)
   bundle2-input-part: "check:updated-heads" supported
   bundle2-input-part: total payload size * (glob)
-  invalid branch cache (served): tip differs
   bundle2-input-part: "changegroup" (params: 1 mandatory) supported
   adding changesets
   add changeset ef1ea85a6374
@@ -237,7 +235,6 @@
   listing keys for "phases"
   checking for updated bookmarks
   listing keys for "bookmarks"
-  invalid branch cache (served): tip differs
   listing keys for "bookmarks"
   3 changesets found
   list of changesets:
@@ -257,7 +254,6 @@
   bundle2-input-part: total payload size * (glob)
   bundle2-input-part: "check:updated-heads" supported
   bundle2-input-part: total payload size * (glob)
-  invalid branch cache (served): tip differs
   bundle2-input-part: "changegroup" (params: 1 mandatory) supported
   adding changesets
   add changeset ef1ea85a6374
@@ -317,7 +313,6 @@
   listing keys for "phases"
   checking for updated bookmarks
   listing keys for "bookmarks"
-  invalid branch cache (served): tip differs
   listing keys for "bookmarks"
   3 changesets found
   list of changesets:
@@ -337,7 +332,6 @@
   bundle2-input-part: total payload size * (glob)
   bundle2-input-part: "check:updated-heads" supported
   bundle2-input-part: total payload size * (glob)
-  invalid branch cache (served): tip differs
   bundle2-input-part: "changegroup" (params: 1 mandatory) supported
   adding changesets
   add changeset ef1ea85a6374
@@ -388,7 +382,6 @@
   listing keys for "phases"
   checking for updated bookmarks
   listing keys for "bookmarks"
-  invalid branch cache (served): tip differs
   listing keys for "bookmarks"
   3 changesets found
   list of changesets:
@@ -408,7 +401,6 @@
   bundle2-input-part: total payload size * (glob)
   bundle2-input-part: "check:updated-heads" supported
   bundle2-input-part: total payload size * (glob)
-  invalid branch cache (served): tip differs
   bundle2-input-part: "changegroup" (params: 1 mandatory) supported
   adding changesets
   add changeset ef1ea85a6374
@@ -463,7 +455,6 @@
   listing keys for "phases"
   checking for updated bookmarks
   listing keys for "bookmarks"
-  invalid branch cache (served): tip differs
   listing keys for "bookmarks"
   3 changesets found
   list of changesets:
@@ -483,7 +474,6 @@
   bundle2-input-part: total payload size * (glob)
   bundle2-input-part: "check:updated-heads" supported
   bundle2-input-part: total payload size * (glob)
-  invalid branch cache (served): tip differs
   bundle2-input-part: "changegroup" (params: 1 mandatory) supported
   adding changesets
   add changeset ef1ea85a6374
@@ -535,7 +525,6 @@
   listing keys for "phases"
   checking for updated bookmarks
   listing keys for "bookmarks"
-  invalid branch cache (served): tip differs
   listing keys for "bookmarks"
   3 changesets found
   list of changesets:
@@ -555,7 +544,6 @@
   bundle2-input-part: total payload size * (glob)
   bundle2-input-part: "check:updated-heads" supported
   bundle2-input-part: total payload size * (glob)
-  invalid branch cache (served): tip differs
   bundle2-input-part: "changegroup" (params: 1 mandatory) supported
   adding changesets
   add changeset ef1ea85a6374
@@ -612,7 +600,6 @@
   listing keys for "phases"
   checking for updated bookmarks
   listing keys for "bookmarks"
-  invalid branch cache (served): tip differs
   listing keys for "bookmarks"
   3 changesets found
   list of changesets:
@@ -632,7 +619,6 @@
   bundle2-input-part: total payload size * (glob)
   bundle2-input-part: "check:updated-heads" supported
   bundle2-input-part: total payload size * (glob)
-  invalid branch cache (served): tip differs
   bundle2-input-part: "changegroup" (params: 1 mandatory) supported
   adding changesets
   add changeset ef1ea85a6374
@@ -686,7 +672,6 @@
   listing keys for "phases"
   checking for updated bookmarks
   listing keys for "bookmarks"
-  invalid branch cache (served): tip differs
   listing keys for "bookmarks"
   3 changesets found
   list of changesets:
@@ -706,7 +691,6 @@
   bundle2-input-part: total payload size * (glob)
   bundle2-input-part: "check:updated-heads" supported
   bundle2-input-part: total payload size * (glob)
-  invalid branch cache (served): tip differs
   bundle2-input-part: "changegroup" (params: 1 mandatory) supported
   adding changesets
   add changeset ef1ea85a6374
@@ -761,7 +745,6 @@
   listing keys for "phases"
   checking for updated bookmarks
   listing keys for "bookmarks"
-  invalid branch cache (served): tip differs
   listing keys for "bookmarks"
   1 changesets found
   list of changesets:
@@ -783,7 +766,6 @@
   bundle2-input-part: total payload size * (glob)
   bundle2-input-part: "check:updated-heads" supported
   bundle2-input-part: total payload size * (glob)
-  invalid branch cache (served): tip differs
   bundle2-input-part: "changegroup" (params: 1 mandatory) supported
   adding changesets
   add changeset ef1ea85a6374
@@ -810,7 +792,6 @@
   acl: bookmark access granted: "ef1ea85a6374b77d6da9dcda9541f498f2d17df7" on bookmark "moving-bookmark"
   bundle2-input-bundle: 7 parts total
   updating the branch cache
-  invalid branch cache (served.hidden): tip differs
   added 1 changesets with 1 changes to 1 files
   bundle2-output-bundle: "HG20", 1 parts total
   bundle2-output-part: "reply:changegroup" (advisory) (params: 0 advisory) empty payload
@@ -850,7 +831,6 @@
   listing keys for "phases"
   checking for updated bookmarks
   listing keys for "bookmarks"
-  invalid branch cache (served): tip differs
   listing keys for "bookmarks"
   1 changesets found
   list of changesets:
@@ -872,7 +852,6 @@
   bundle2-input-part: total payload size * (glob)
   bundle2-input-part: "check:updated-heads" supported
   bundle2-input-part: total payload size * (glob)
-  invalid branch cache (served): tip differs
   bundle2-input-part: "changegroup" (params: 1 mandatory) supported
   adding changesets
   add changeset ef1ea85a6374
@@ -939,7 +918,6 @@
   listing keys for "phases"
   checking for updated bookmarks
   listing keys for "bookmarks"
-  invalid branch cache (served): tip differs
   listing keys for "bookmarks"
   3 changesets found
   list of changesets:
@@ -959,7 +937,6 @@
   bundle2-input-part: total payload size * (glob)
   bundle2-input-part: "check:updated-heads" supported
   bundle2-input-part: total payload size * (glob)
-  invalid branch cache (served): tip differs
   bundle2-input-part: "changegroup" (params: 1 mandatory) supported
   adding changesets
   add changeset ef1ea85a6374
@@ -1025,7 +1002,6 @@
   listing keys for "phases"
   checking for updated bookmarks
   listing keys for "bookmarks"
-  invalid branch cache (served): tip differs
   listing keys for "bookmarks"
   3 changesets found
   list of changesets:
@@ -1045,7 +1021,6 @@
   bundle2-input-part: total payload size * (glob)
   bundle2-input-part: "check:updated-heads" supported
   bundle2-input-part: total payload size * (glob)
-  invalid branch cache (served): tip differs
   bundle2-input-part: "changegroup" (params: 1 mandatory) supported
   adding changesets
   add changeset ef1ea85a6374
@@ -1109,7 +1084,6 @@
   listing keys for "phases"
   checking for updated bookmarks
   listing keys for "bookmarks"
-  invalid branch cache (served): tip differs
   listing keys for "bookmarks"
   3 changesets found
   list of changesets:
@@ -1129,7 +1103,6 @@
   bundle2-input-part: total payload size * (glob)
   bundle2-input-part: "check:updated-heads" supported
   bundle2-input-part: total payload size * (glob)
-  invalid branch cache (served): tip differs
   bundle2-input-part: "changegroup" (params: 1 mandatory) supported
   adding changesets
   add changeset ef1ea85a6374
@@ -1187,7 +1160,6 @@
   listing keys for "phases"
   checking for updated bookmarks
   listing keys for "bookmarks"
-  invalid branch cache (served): tip differs
   listing keys for "bookmarks"
   3 changesets found
   list of changesets:
@@ -1207,7 +1179,6 @@
   bundle2-input-part: total payload size * (glob)
   bundle2-input-part: "check:updated-heads" supported
   bundle2-input-part: total payload size * (glob)
-  invalid branch cache (served): tip differs
   bundle2-input-part: "changegroup" (params: 1 mandatory) supported
   adding changesets
   add changeset ef1ea85a6374
@@ -1276,7 +1247,6 @@
   listing keys for "phases"
   checking for updated bookmarks
   listing keys for "bookmarks"
-  invalid branch cache (served): tip differs
   listing keys for "bookmarks"
   3 changesets found
   list of changesets:
@@ -1296,7 +1266,6 @@
   bundle2-input-part: total payload size * (glob)
   bundle2-input-part: "check:updated-heads" supported
   bundle2-input-part: total payload size * (glob)
-  invalid branch cache (served): tip differs
   bundle2-input-part: "changegroup" (params: 1 mandatory) supported
   adding changesets
   add changeset ef1ea85a6374
@@ -1366,7 +1335,6 @@
   listing keys for "phases"
   checking for updated bookmarks
   listing keys for "bookmarks"
-  invalid branch cache (served): tip differs
   listing keys for "bookmarks"
   3 changesets found
   list of changesets:
@@ -1386,7 +1354,6 @@
   bundle2-input-part: total payload size * (glob)
   bundle2-input-part: "check:updated-heads" supported
   bundle2-input-part: total payload size * (glob)
-  invalid branch cache (served): tip differs
   bundle2-input-part: "changegroup" (params: 1 mandatory) supported
   adding changesets
   add changeset ef1ea85a6374
@@ -1453,7 +1420,6 @@
   listing keys for "phases"
   checking for updated bookmarks
   listing keys for "bookmarks"
-  invalid branch cache (served): tip differs
   listing keys for "bookmarks"
   3 changesets found
   list of changesets:
@@ -1473,7 +1439,6 @@
   bundle2-input-part: total payload size * (glob)
   bundle2-input-part: "check:updated-heads" supported
   bundle2-input-part: total payload size * (glob)
-  invalid branch cache (served): tip differs
   bundle2-input-part: "changegroup" (params: 1 mandatory) supported
   adding changesets
   add changeset ef1ea85a6374
@@ -1536,7 +1501,6 @@
   listing keys for "phases"
   checking for updated bookmarks
   listing keys for "bookmarks"
-  invalid branch cache (served): tip differs
   listing keys for "bookmarks"
   3 changesets found
   list of changesets:
@@ -1556,7 +1520,6 @@
   bundle2-input-part: total payload size * (glob)
   bundle2-input-part: "check:updated-heads" supported
   bundle2-input-part: total payload size * (glob)
-  invalid branch cache (served): tip differs
   bundle2-input-part: "changegroup" (params: 1 mandatory) supported
   adding changesets
   add changeset ef1ea85a6374
@@ -1623,7 +1586,6 @@
   listing keys for "phases"
   checking for updated bookmarks
   listing keys for "bookmarks"
-  invalid branch cache (served): tip differs
   listing keys for "bookmarks"
   3 changesets found
   list of changesets:
@@ -1643,7 +1605,6 @@
   bundle2-input-part: total payload size * (glob)
   bundle2-input-part: "check:updated-heads" supported
   bundle2-input-part: total payload size * (glob)
-  invalid branch cache (served): tip differs
   bundle2-input-part: "changegroup" (params: 1 mandatory) supported
   adding changesets
   add changeset ef1ea85a6374
@@ -1797,7 +1758,6 @@
   bundle2-input-part: total payload size * (glob)
   bundle2-input-bundle: 5 parts total
   updating the branch cache
-  invalid branch cache (served.hidden): tip differs
   added 4 changesets with 4 changes to 4 files (+1 heads)
   bundle2-output-bundle: "HG20", 1 parts total
   bundle2-output-part: "reply:changegroup" (advisory) (params: 0 advisory) empty payload
@@ -2104,7 +2064,6 @@
   bundle2-input-part: total payload size * (glob)
   bundle2-input-bundle: 5 parts total
   updating the branch cache
-  invalid branch cache (served.hidden): tip differs
   added 4 changesets with 4 changes to 4 files (+1 heads)
   bundle2-output-bundle: "HG20", 1 parts total
   bundle2-output-part: "reply:changegroup" (advisory) (params: 0 advisory) empty payload
@@ -2196,7 +2155,6 @@
   bundle2-input-part: total payload size * (glob)
   bundle2-input-bundle: 5 parts total
   updating the branch cache
-  invalid branch cache (served.hidden): tip differs
   added 4 changesets with 4 changes to 4 files (+1 heads)
   bundle2-output-bundle: "HG20", 1 parts total
   bundle2-output-part: "reply:changegroup" (advisory) (params: 0 advisory) empty payload
@@ -2360,7 +2318,6 @@
   bundle2-input-part: total payload size * (glob)
   bundle2-input-bundle: 5 parts total
   updating the branch cache
-  invalid branch cache (served.hidden): tip differs
   added 4 changesets with 4 changes to 4 files (+1 heads)
   bundle2-output-bundle: "HG20", 1 parts total
   bundle2-output-part: "reply:changegroup" (advisory) (params: 0 advisory) empty payload
--- a/tests/test-blackbox.t	Thu Jun 13 09:52:39 2024 +0200
+++ b/tests/test-blackbox.t	Mon Jun 24 12:05:31 2024 +0200
@@ -127,13 +127,11 @@
   added 1 changesets with 1 changes to 1 files
   new changesets d02f48003e62
   (run 'hg update' to get a working copy)
-  $ hg blackbox -l 6
+  $ hg blackbox -l 4
   1970-01-01 00:00:00.000 bob @6563da9dcf87b1949716e38ff3e3dfaa3198eb06 (5000)> wrote branch cache (served) with 1 labels and 2 nodes
-  1970-01-01 00:00:00.000 bob @6563da9dcf87b1949716e38ff3e3dfaa3198eb06 (5000)> updated branch cache (served.hidden) in * seconds (glob)
-  1970-01-01 00:00:00.000 bob @6563da9dcf87b1949716e38ff3e3dfaa3198eb06 (5000)> wrote branch cache (served.hidden) with 1 labels and 2 nodes
   1970-01-01 00:00:00.000 bob @6563da9dcf87b1949716e38ff3e3dfaa3198eb06 (5000)> 1 incoming changes - new heads: d02f48003e62
   1970-01-01 00:00:00.000 bob @6563da9dcf87b1949716e38ff3e3dfaa3198eb06 (5000)> pull exited 0 after * seconds (glob)
-  1970-01-01 00:00:00.000 bob @6563da9dcf87b1949716e38ff3e3dfaa3198eb06 (5000)> blackbox -l 6
+  1970-01-01 00:00:00.000 bob @6563da9dcf87b1949716e38ff3e3dfaa3198eb06 (5000)> blackbox -l 4
 
 we must not cause a failure if we cannot write to the log
 
@@ -190,13 +188,11 @@
   $ hg strip tip
   0 files updated, 0 files merged, 1 files removed, 0 files unresolved
   saved backup bundle to $TESTTMP/blackboxtest2/.hg/strip-backup/*-backup.hg (glob)
-  $ hg blackbox -l 6
+  $ hg blackbox -l 4
   1970-01-01 00:00:00.000 bob @73f6ee326b27d820b0472f1a825e3a50f3dc489b (5000)> strip tip
   1970-01-01 00:00:00.000 bob @6563da9dcf87b1949716e38ff3e3dfaa3198eb06 (5000)> saved backup bundle to $TESTTMP/blackboxtest2/.hg/strip-backup/73f6ee326b27-7612e004-backup.hg
-  1970-01-01 00:00:00.000 bob @6563da9dcf87b1949716e38ff3e3dfaa3198eb06 (5000)> updated branch cache (immutable) in * seconds (glob)
-  1970-01-01 00:00:00.000 bob @6563da9dcf87b1949716e38ff3e3dfaa3198eb06 (5000)> wrote branch cache (immutable) with 1 labels and 2 nodes
   1970-01-01 00:00:00.000 bob @6563da9dcf87b1949716e38ff3e3dfaa3198eb06 (5000)> strip tip exited 0 after * seconds (glob)
-  1970-01-01 00:00:00.000 bob @6563da9dcf87b1949716e38ff3e3dfaa3198eb06 (5000)> blackbox -l 6
+  1970-01-01 00:00:00.000 bob @6563da9dcf87b1949716e38ff3e3dfaa3198eb06 (5000)> blackbox -l 4
 
 extension and python hooks - use the eol extension for a pythonhook
 
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/tests/test-branches-obsolete.t	Mon Jun 24 12:05:31 2024 +0200
@@ -0,0 +1,563 @@
+================================================================
+test the interaction of the branch cache with obsolete changeset
+================================================================
+
+Some corner cases are already covered by unrelated tests (like the rebase
+ones); this file is meant to gather explicit testing of those.
+
+See also: test-obsolete-checkheads.t
+
+#testcases v2 v3
+
+  $ cat >> $HGRCPATH << EOF
+  > [phases]
+  > publish = false
+  > [experimental]
+  > evolution = all
+  > server.allow-hidden-access = *
+  > EOF
+
+#if v3
+  $ cat <<EOF >> $HGRCPATH
+  > [experimental]
+  > branch-cache-v3=yes
+  > EOF
+  $ CACHE_PREFIX=branch3-exp
+#else
+  $ cat <<EOF >> $HGRCPATH
+  > [experimental]
+  > branch-cache-v3=no
+  > EOF
+  $ CACHE_PREFIX=branch2
+#endif
+
+  $ show_cache() {
+  >     for cache_file in .hg/cache/$CACHE_PREFIX*; do
+  >         echo "##### $cache_file"
+  >         cat $cache_file
+  >     done
+  > }
+
+Setup graph
+#############
+
+  $ . $RUNTESTDIR/testlib/common.sh
+
+graph with a single branch
+--------------------------
+
+We want some branching and some obsolescence
+
+  $ hg init main-single-branch
+  $ cd main-single-branch
+  $ mkcommit root
+  $ mkcommit A_1
+  $ mkcommit A_2
+  $ hg update 'desc("A_2")' --quiet
+  $ mkcommit B_1
+  $ mkcommit B_2
+  $ mkcommit B_3
+  $ mkcommit B_4
+  $ hg update 'desc("A_2")' --quiet
+  $ mkcommit A_3
+  created new head
+  $ mkcommit A_4
+  $ hg up null --quiet
+  $ hg clone --noupdate . ../main-single-branch-pre-ops
+  $ hg log -r 'desc("A_1")' -T '{node}' > ../main-single-branch-node_A1
+  $ hg log -r 'desc("A_2")' -T '{node}' > ../main-single-branch-node_A2
+  $ hg log -r 'desc("A_3")' -T '{node}' > ../main-single-branch-node_A3
+  $ hg log -r 'desc("A_4")' -T '{node}' > ../main-single-branch-node_A4
+  $ hg log -r 'desc("B_1")' -T '{node}' > ../main-single-branch-node_B1
+  $ hg log -r 'desc("B_2")' -T '{node}' > ../main-single-branch-node_B2
+  $ hg log -r 'desc("B_3")' -T '{node}' > ../main-single-branch-node_B3
+  $ hg log -r 'desc("B_4")' -T '{node}' > ../main-single-branch-node_B4
+
+(double check the heads are right before we obsolete)
+
+  $ hg log -R ../main-single-branch-pre-ops -G -T '{desc}\n'
+  o  A_4
+  |
+  o  A_3
+  |
+  | o  B_4
+  | |
+  | o  B_3
+  | |
+  | o  B_2
+  | |
+  | o  B_1
+  |/
+  o  A_2
+  |
+  o  A_1
+  |
+  o  root
+  
+  $ hg log -G -T '{desc}\n'
+  o  A_4
+  |
+  o  A_3
+  |
+  | o  B_4
+  | |
+  | o  B_3
+  | |
+  | o  B_2
+  | |
+  | o  B_1
+  |/
+  o  A_2
+  |
+  o  A_1
+  |
+  o  root
+  
+
+#if v2
+  $ show_cache
+  ##### .hg/cache/branch2-served
+  3d808bbc94408ea19da905596d4079357a1f28be 8
+  63ba7cd843d1e95aac1a24435befeb1909c53619 o default
+  3d808bbc94408ea19da905596d4079357a1f28be o default
+#else
+  $ show_cache
+  ##### .hg/cache/branch3-exp-served
+  tip-node=3d808bbc94408ea19da905596d4079357a1f28be tip-rev=8 topo-mode=pure
+  default
+#endif
+  $ hg log -T '{desc}\n' --rev 'head()'
+  B_4
+  A_4
+
+Obsolete a couple of changesets
+
+  $ for d in B2 B3 B4 A4; do
+  >   hg debugobsolete --record-parents `cat ../main-single-branch-node_$d`;
+  > done
+  1 new obsolescence markers
+  obsoleted 1 changesets
+  2 new orphan changesets
+  1 new obsolescence markers
+  obsoleted 1 changesets
+  1 new obsolescence markers
+  obsoleted 1 changesets
+  1 new obsolescence markers
+  obsoleted 1 changesets
+
+(double check the result is okay)
+
+  $ hg log -G -T '{desc}\n'
+  o  A_3
+  |
+  | o  B_1
+  |/
+  o  A_2
+  |
+  o  A_1
+  |
+  o  root
+  
+  $ hg heads -T '{desc}\n'
+  A_3
+  B_1
+#if v2
+  $ show_cache
+  ##### .hg/cache/branch2-served
+  7c29ff2453bf38c75ee8982935739103c38a9284 7 f8006d64a10d35c011a5c5fa88be1e25c5929514
+  550bb31f072912453ccbb503de1d554616911e88 o default
+  7c29ff2453bf38c75ee8982935739103c38a9284 o default
+#else
+  $ show_cache
+  ##### .hg/cache/branch3-exp-served
+  filtered-hash=f8006d64a10d35c011a5c5fa88be1e25c5929514 tip-node=7c29ff2453bf38c75ee8982935739103c38a9284 tip-rev=7 topo-mode=pure
+  default
+#endif
+  $ cd ..
+
+
+Actual testing
+##############
+
+Revealing obsolete changeset
+----------------------------
+
+Check that revealing obsolete changesets does not confuse branch computation and checks
+
+Revealing tipmost changeset
+~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+
+  $ cp -R ./main-single-branch tmp-repo
+  $ cd tmp-repo
+  $ hg update --hidden --rev 'desc("A_4")' --quiet
+  updated to hidden changeset 3d808bbc9440
+  (hidden revision '3d808bbc9440' is pruned)
+  $ hg log -G -T '{desc}\n'
+  @  A_4
+  |
+  o  A_3
+  |
+  | o  B_1
+  |/
+  o  A_2
+  |
+  o  A_1
+  |
+  o  root
+  
+  $ hg heads -T '{desc}\n'
+  A_3
+  B_1
+#if v2
+  $ show_cache
+  ##### .hg/cache/branch2
+  3d808bbc94408ea19da905596d4079357a1f28be 8 a943c3355ad9e93654d58b1c934c7c4329a5d1d4
+  550bb31f072912453ccbb503de1d554616911e88 o default
+  7c29ff2453bf38c75ee8982935739103c38a9284 o default
+  ##### .hg/cache/branch2-served
+  3d808bbc94408ea19da905596d4079357a1f28be 8 a943c3355ad9e93654d58b1c934c7c4329a5d1d4
+  550bb31f072912453ccbb503de1d554616911e88 o default
+  7c29ff2453bf38c75ee8982935739103c38a9284 o default
+#else
+  $ show_cache
+  ##### .hg/cache/branch3-exp
+  obsolete-hash=b6d2b1f5b70f09c25c835edcae69be35f681605c tip-node=3d808bbc94408ea19da905596d4079357a1f28be tip-rev=8
+  7c29ff2453bf38c75ee8982935739103c38a9284 o default
+  ##### .hg/cache/branch3-exp-served
+  filtered-hash=f8006d64a10d35c011a5c5fa88be1e25c5929514 obsolete-hash=ac5282439f301518f362f37547fcd52bcc670373 tip-node=3d808bbc94408ea19da905596d4079357a1f28be tip-rev=8
+  7c29ff2453bf38c75ee8982935739103c38a9284 o default
+#endif
+
+Even when computing branches from scratch
+
+  $ rm -rf .hg/cache/branch*
+  $ rm -rf .hg/wcache/branch*
+  $ hg heads -T '{desc}\n'
+  A_3
+  B_1
+#if v2
+  $ show_cache
+  ##### .hg/cache/branch2-served
+  3d808bbc94408ea19da905596d4079357a1f28be 8 a943c3355ad9e93654d58b1c934c7c4329a5d1d4
+  550bb31f072912453ccbb503de1d554616911e88 o default
+  7c29ff2453bf38c75ee8982935739103c38a9284 o default
+#else
+  $ show_cache
+  ##### .hg/cache/branch3-exp-served
+  filtered-hash=f8006d64a10d35c011a5c5fa88be1e25c5929514 obsolete-hash=ac5282439f301518f362f37547fcd52bcc670373 tip-node=3d808bbc94408ea19da905596d4079357a1f28be tip-rev=8
+  7c29ff2453bf38c75ee8982935739103c38a9284 o default
+#endif
+
+And we can get back to normal
+
+  $ hg update null --quiet
+  $ hg heads -T '{desc}\n'
+  A_3
+  B_1
+#if v2
+  $ show_cache
+  ##### .hg/cache/branch2-served
+  7c29ff2453bf38c75ee8982935739103c38a9284 7 f8006d64a10d35c011a5c5fa88be1e25c5929514
+  550bb31f072912453ccbb503de1d554616911e88 o default
+  7c29ff2453bf38c75ee8982935739103c38a9284 o default
+#else
+  $ show_cache
+  ##### .hg/cache/branch3-exp-served
+  filtered-hash=f8006d64a10d35c011a5c5fa88be1e25c5929514 tip-node=7c29ff2453bf38c75ee8982935739103c38a9284 tip-rev=7 topo-mode=pure
+  default
+#endif
+
+  $ cd ..
+  $ rm -rf tmp-repo
+
+Revealing a changeset in the middle of the changelog
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Check that revealing an obsolete changeset does not confuse branch computation and related checks
+
+  $ cp -R ./main-single-branch tmp-repo
+  $ cd tmp-repo
+  $ hg update --hidden --rev 'desc("B_3")' --quiet
+  updated to hidden changeset 9c996d7674bb
+  (hidden revision '9c996d7674bb' is pruned)
+  $ hg log -G -T '{desc}\n'
+  o  A_3
+  |
+  | @  B_3
+  | |
+  | x  B_2
+  | |
+  | o  B_1
+  |/
+  o  A_2
+  |
+  o  A_1
+  |
+  o  root
+  
+  $ hg heads -T '{desc}\n'
+  A_3
+  B_1
+#if v2
+  $ show_cache
+  ##### .hg/cache/branch2
+  3d808bbc94408ea19da905596d4079357a1f28be 8 a943c3355ad9e93654d58b1c934c7c4329a5d1d4
+  550bb31f072912453ccbb503de1d554616911e88 o default
+  7c29ff2453bf38c75ee8982935739103c38a9284 o default
+  ##### .hg/cache/branch2-served
+  7c29ff2453bf38c75ee8982935739103c38a9284 7 f8006d64a10d35c011a5c5fa88be1e25c5929514
+  550bb31f072912453ccbb503de1d554616911e88 o default
+  7c29ff2453bf38c75ee8982935739103c38a9284 o default
+#else
+  $ show_cache
+  ##### .hg/cache/branch3-exp
+  obsolete-hash=b6d2b1f5b70f09c25c835edcae69be35f681605c tip-node=3d808bbc94408ea19da905596d4079357a1f28be tip-rev=8
+  7c29ff2453bf38c75ee8982935739103c38a9284 o default
+  ##### .hg/cache/branch3-exp-served
+  filtered-hash=f1456c0d675980582dda9b8edc7f13f503ce544f obsolete-hash=3e74f5349008671629e39d13d7e00d9ba94c74f7 tip-node=7c29ff2453bf38c75ee8982935739103c38a9284 tip-rev=7
+  550bb31f072912453ccbb503de1d554616911e88 o default
+#endif
+
+Even when computing branches from scratch
+
+  $ rm -rf .hg/cache/branch*
+  $ rm -rf .hg/wcache/branch*
+  $ hg heads -T '{desc}\n'
+  A_3
+  B_1
+#if v2
+  $ show_cache
+  ##### .hg/cache/branch2-served
+  7c29ff2453bf38c75ee8982935739103c38a9284 7 f8006d64a10d35c011a5c5fa88be1e25c5929514
+  550bb31f072912453ccbb503de1d554616911e88 o default
+  7c29ff2453bf38c75ee8982935739103c38a9284 o default
+#else
+  $ show_cache
+  ##### .hg/cache/branch3-exp-served
+  filtered-hash=f1456c0d675980582dda9b8edc7f13f503ce544f obsolete-hash=3e74f5349008671629e39d13d7e00d9ba94c74f7 tip-node=7c29ff2453bf38c75ee8982935739103c38a9284 tip-rev=7
+  550bb31f072912453ccbb503de1d554616911e88 o default
+#endif
+
+And we can get back to normal
+
+  $ hg update null --quiet
+  $ hg heads -T '{desc}\n'
+  A_3
+  B_1
+#if v2
+  $ show_cache
+  ##### .hg/cache/branch2-served
+  7c29ff2453bf38c75ee8982935739103c38a9284 7 f8006d64a10d35c011a5c5fa88be1e25c5929514
+  550bb31f072912453ccbb503de1d554616911e88 o default
+  7c29ff2453bf38c75ee8982935739103c38a9284 o default
+#else
+  $ show_cache
+  ##### .hg/cache/branch3-exp-served
+  filtered-hash=f8006d64a10d35c011a5c5fa88be1e25c5929514 tip-node=7c29ff2453bf38c75ee8982935739103c38a9284 tip-rev=7 topo-mode=pure
+  default
+#endif
+
+  $ cd ..
+  $ rm -rf tmp-repo
+
+Getting the obsolescence marker after the fact for the tip rev
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+  $ cp -R ./main-single-branch-pre-ops tmp-repo
+  $ cd tmp-repo
+  $ hg update --hidden --rev 'desc("A_4")' --quiet
+  $ hg log -G -T '{desc}\n'
+  @  A_4
+  |
+  o  A_3
+  |
+  | o  B_4
+  | |
+  | o  B_3
+  | |
+  | o  B_2
+  | |
+  | o  B_1
+  |/
+  o  A_2
+  |
+  o  A_1
+  |
+  o  root
+  
+  $ hg heads -T '{desc}\n'
+  A_4
+  B_4
+  $ hg pull --rev `cat ../main-single-branch-node_A4` --remote-hidden
+  pulling from $TESTTMP/main-single-branch
+  no changes found
+  1 new obsolescence markers
+  obsoleted 1 changesets
+
+branch heads are okay
+
+  $ hg heads -T '{desc}\n'
+  A_3
+  B_4
+#if v2
+  $ show_cache
+  ##### .hg/cache/branch2-served
+  3d808bbc94408ea19da905596d4079357a1f28be 8 ac5282439f301518f362f37547fcd52bcc670373
+  63ba7cd843d1e95aac1a24435befeb1909c53619 o default
+  7c29ff2453bf38c75ee8982935739103c38a9284 o default
+#else
+  $ show_cache
+  ##### .hg/cache/branch3-exp-served
+  obsolete-hash=ac5282439f301518f362f37547fcd52bcc670373 tip-node=3d808bbc94408ea19da905596d4079357a1f28be tip-rev=8
+  7c29ff2453bf38c75ee8982935739103c38a9284 o default
+#endif
+
+Even when computing branches from scratch
+
+  $ rm -rf .hg/cache/branch*
+  $ rm -rf .hg/wcache/branch*
+  $ hg heads -T '{desc}\n'
+  A_3
+  B_4
+#if v2
+  $ show_cache
+  ##### .hg/cache/branch2-served
+  3d808bbc94408ea19da905596d4079357a1f28be 8 ac5282439f301518f362f37547fcd52bcc670373
+  63ba7cd843d1e95aac1a24435befeb1909c53619 o default
+  7c29ff2453bf38c75ee8982935739103c38a9284 o default
+#else
+  $ show_cache
+  ##### .hg/cache/branch3-exp-served
+  obsolete-hash=ac5282439f301518f362f37547fcd52bcc670373 tip-node=3d808bbc94408ea19da905596d4079357a1f28be tip-rev=8
+  7c29ff2453bf38c75ee8982935739103c38a9284 o default
+#endif
+
+And we can get back to normal
+
+  $ hg update null --quiet
+  $ hg heads -T '{desc}\n'
+  A_3
+  B_4
+#if v2
+  $ show_cache
+  ##### .hg/cache/branch2-served
+  7c29ff2453bf38c75ee8982935739103c38a9284 7
+  63ba7cd843d1e95aac1a24435befeb1909c53619 o default
+  7c29ff2453bf38c75ee8982935739103c38a9284 o default
+#else
+  $ show_cache
+  ##### .hg/cache/branch3-exp-served
+  tip-node=7c29ff2453bf38c75ee8982935739103c38a9284 tip-rev=7 topo-mode=pure
+  default
+#endif
+
+  $ cd ..
+  $ rm -rf tmp-repo
+
+Getting the obsolescence marker after the fact for another rev
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+  $ cp -R ./main-single-branch-pre-ops tmp-repo
+  $ cd tmp-repo
+  $ hg update --hidden --rev 'desc("B_3")' --quiet
+  $ hg log -G -T '{desc}\n'
+  o  A_4
+  |
+  o  A_3
+  |
+  | o  B_4
+  | |
+  | @  B_3
+  | |
+  | o  B_2
+  | |
+  | o  B_1
+  |/
+  o  A_2
+  |
+  o  A_1
+  |
+  o  root
+  
+  $ hg heads -T '{desc}\n'
+  A_4
+  B_4
+#if v2
+  $ show_cache
+  ##### .hg/cache/branch2-served
+  3d808bbc94408ea19da905596d4079357a1f28be 8
+  63ba7cd843d1e95aac1a24435befeb1909c53619 o default
+  3d808bbc94408ea19da905596d4079357a1f28be o default
+#else
+  $ show_cache
+  ##### .hg/cache/branch3-exp-served
+  tip-node=3d808bbc94408ea19da905596d4079357a1f28be tip-rev=8 topo-mode=pure
+  default
+#endif
+
+  $ hg pull --rev `cat ../main-single-branch-node_B4` --remote-hidden
+  pulling from $TESTTMP/main-single-branch
+  no changes found
+  3 new obsolescence markers
+  obsoleted 3 changesets
+
+branch heads are okay
+
+  $ hg heads -T '{desc}\n'
+  A_4
+  B_1
+#if v2
+  $ show_cache
+  ##### .hg/cache/branch2-served
+  3d808bbc94408ea19da905596d4079357a1f28be 8 f8006d64a10d35c011a5c5fa88be1e25c5929514
+  550bb31f072912453ccbb503de1d554616911e88 o default
+  3d808bbc94408ea19da905596d4079357a1f28be o default
+#else
+  $ show_cache
+  ##### .hg/cache/branch3-exp-served
+  filtered-hash=f1456c0d675980582dda9b8edc7f13f503ce544f obsolete-hash=3e74f5349008671629e39d13d7e00d9ba94c74f7 tip-node=3d808bbc94408ea19da905596d4079357a1f28be tip-rev=8
+  550bb31f072912453ccbb503de1d554616911e88 o default
+#endif
+
+Even when computing branches from scratch
+
+  $ rm -rf .hg/cache/branch*
+  $ rm -rf .hg/wcache/branch*
+  $ hg heads -T '{desc}\n'
+  A_4
+  B_1
+#if v2
+  $ show_cache
+  ##### .hg/cache/branch2-served
+  3d808bbc94408ea19da905596d4079357a1f28be 8 f8006d64a10d35c011a5c5fa88be1e25c5929514
+  550bb31f072912453ccbb503de1d554616911e88 o default
+  3d808bbc94408ea19da905596d4079357a1f28be o default
+#else
+  $ show_cache
+  ##### .hg/cache/branch3-exp-served
+  filtered-hash=f1456c0d675980582dda9b8edc7f13f503ce544f obsolete-hash=3e74f5349008671629e39d13d7e00d9ba94c74f7 tip-node=3d808bbc94408ea19da905596d4079357a1f28be tip-rev=8
+  550bb31f072912453ccbb503de1d554616911e88 o default
+#endif
+
+And we can get back to normal
+
+  $ hg update null --quiet
+  $ hg heads -T '{desc}\n'
+  A_4
+  B_1
+#if v2
+  $ show_cache
+  ##### .hg/cache/branch2-served
+  3d808bbc94408ea19da905596d4079357a1f28be 8 f8006d64a10d35c011a5c5fa88be1e25c5929514
+  550bb31f072912453ccbb503de1d554616911e88 o default
+  3d808bbc94408ea19da905596d4079357a1f28be o default
+#else
+  $ show_cache
+  ##### .hg/cache/branch3-exp-served
+  filtered-hash=f8006d64a10d35c011a5c5fa88be1e25c5929514 tip-node=3d808bbc94408ea19da905596d4079357a1f28be tip-rev=8 topo-mode=pure
+  default
+#endif
+
+  $ cd ..
+  $ rm -rf tmp-repo
--- a/tests/test-branches.t	Thu Jun 13 09:52:39 2024 +0200
+++ b/tests/test-branches.t	Mon Jun 24 12:05:31 2024 +0200
@@ -1,4 +1,5 @@
 #testcases mmap nommap
+#testcases v2 v3
 
 #if mmap
   $ cat <<EOF >> $HGRCPATH
@@ -7,6 +8,18 @@
   > EOF
 #endif
 
+#if v3
+  $ cat <<EOF >> $HGRCPATH
+  > [experimental]
+  > branch-cache-v3=yes
+  > EOF
+#else
+  $ cat <<EOF >> $HGRCPATH
+  > [experimental]
+  > branch-cache-v3=no
+  > EOF
+#endif
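+(the `#testcases v2 v3` line at the top makes the test harness run this whole
+file once per case; the snippet above pins `experimental.branch-cache-v3` on
+or off accordingly for every repository created below)
+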
+
   $ hg init a
   $ cd a
 
@@ -825,6 +838,7 @@
   truncating cache/rbc-revs-v1 to 160
   $ f --size .hg/cache/rbc-revs*
   .hg/cache/rbc-revs-v1: size=160
+
 recovery from invalid cache file with partial last record
   $ mv .hg/cache/rbc-revs-v1 .
   $ f -qDB 119 rbc-revs-v1 > .hg/cache/rbc-revs-v1
@@ -835,6 +849,7 @@
   truncating cache/rbc-revs-v1 to 112
   $ f --size .hg/cache/rbc-revs*
   .hg/cache/rbc-revs-v1: size=160
+
 recovery from invalid cache file with missing record - no truncation
   $ mv .hg/cache/rbc-revs-v1 .
   $ f -qDB 112 rbc-revs-v1 > .hg/cache/rbc-revs-v1
@@ -842,6 +857,7 @@
   5
   $ f --size .hg/cache/rbc-revs*
   .hg/cache/rbc-revs-v1: size=160
+
 recovery from invalid cache file with some bad records
   $ mv .hg/cache/rbc-revs-v1 .
   $ f -qDB 8 rbc-revs-v1 > .hg/cache/rbc-revs-v1
@@ -851,7 +867,7 @@
   $ f --size .hg/cache/rbc-revs*
   .hg/cache/rbc-revs-v1: size=120
   $ hg log -r 'branch(.)' -T '{rev} ' --debug
-  history modification detected - truncating revision branch cache to revision 13
+  history modification detected - truncating revision branch cache to revision * (glob)
   history modification detected - truncating revision branch cache to revision 1
   3 4 8 9 10 11 12 13 truncating cache/rbc-revs-v1 to 8
   $ rm -f .hg/cache/branch* && hg head a -T '{rev}\n' --debug
@@ -860,6 +876,7 @@
   $ f --size --hexdump --bytes=16 .hg/cache/rbc-revs*
   .hg/cache/rbc-revs-v1: size=160
   0000: 19 70 9c 5a 00 00 00 00 dd 6b 44 0d 00 00 00 01 |.p.Z.....kD.....|
+
 cache is updated when committing
   $ hg branch i-will-regret-this
   marked working directory as branch i-will-regret-this
@@ -867,30 +884,17 @@
   $ f --size .hg/cache/rbc-*
   .hg/cache/rbc-names-v1: size=111
   .hg/cache/rbc-revs-v1: size=168
+
 update after rollback - the cache will be correct but rbc-names will will still
 contain the branch name even though it no longer is used
   $ hg up -qr '.^'
   $ hg rollback -qf
-  $ f --size --hexdump .hg/cache/rbc-*
+  $ f --size .hg/cache/rbc-names-*
   .hg/cache/rbc-names-v1: size=111
-  0000: 64 65 66 61 75 6c 74 00 61 00 62 00 63 00 61 20 |default.a.b.c.a |
-  0010: 62 72 61 6e 63 68 20 6e 61 6d 65 20 6d 75 63 68 |branch name much|
-  0020: 20 6c 6f 6e 67 65 72 20 74 68 61 6e 20 74 68 65 | longer than the|
-  0030: 20 64 65 66 61 75 6c 74 20 6a 75 73 74 69 66 69 | default justifi|
-  0040: 63 61 74 69 6f 6e 20 75 73 65 64 20 62 79 20 62 |cation used by b|
-  0050: 72 61 6e 63 68 65 73 00 6d 00 6d 64 00 69 2d 77 |ranches.m.md.i-w|
-  0060: 69 6c 6c 2d 72 65 67 72 65 74 2d 74 68 69 73    |ill-regret-this|
+  $ grep "i-will-regret-this" .hg/cache/rbc-names-* > /dev/null
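+(the grep succeeding silently, exit code 0, confirms the now-unused branch
+name is indeed still recorded)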
+  $ f --size .hg/cache/rbc-revs-*
   .hg/cache/rbc-revs-v1: size=160
-  0000: 19 70 9c 5a 00 00 00 00 dd 6b 44 0d 00 00 00 01 |.p.Z.....kD.....|
-  0010: 88 1f e2 b9 00 00 00 01 ac 22 03 33 00 00 00 02 |.........".3....|
-  0020: ae e3 9c d1 00 00 00 02 d8 cb c6 1d 00 00 00 01 |................|
-  0030: 58 97 36 a2 00 00 00 03 10 ff 58 95 00 00 00 04 |X.6.......X.....|
-  0040: ee bb 94 44 00 00 00 02 5f 40 61 bb 00 00 00 02 |...D...._@a.....|
-  0050: bf be 84 1b 00 00 00 02 d3 f1 63 45 80 00 00 02 |..........cE....|
-  0060: e3 d4 9c 05 80 00 00 02 e2 3b 55 05 00 00 00 02 |.........;U.....|
-  0070: f8 94 c2 56 80 00 00 03 f3 44 76 37 00 00 00 05 |...V.....Dv7....|
-  0080: a5 8c a5 d3 00 00 00 05 df 34 3b 0d 00 00 00 05 |.........4;.....|
-  0090: c9 14 c9 9f 00 00 00 06 cd 21 a8 0b 80 00 00 05 |.........!......|
+
 cache is updated/truncated when stripping - it is thus very hard to get in a
 situation where the cache is out of sync and the hash check detects it
   $ hg --config extensions.strip= strip -r tip --nob
@@ -902,38 +906,30 @@
   $ hg log -r '5:&branch(.)' -T '{rev} ' --debug
   referenced branch names not found - rebuilding revision branch cache from scratch
   8 9 10 11 12 13 truncating cache/rbc-revs-v1 to 40
-  $ f --size --hexdump .hg/cache/rbc-*
+  $ f --size .hg/cache/rbc-names-*
   .hg/cache/rbc-names-v1: size=84
-  0000: 62 00 61 00 63 00 61 20 62 72 61 6e 63 68 20 6e |b.a.c.a branch n|
-  0010: 61 6d 65 20 6d 75 63 68 20 6c 6f 6e 67 65 72 20 |ame much longer |
-  0020: 74 68 61 6e 20 74 68 65 20 64 65 66 61 75 6c 74 |than the default|
-  0030: 20 6a 75 73 74 69 66 69 63 61 74 69 6f 6e 20 75 | justification u|
-  0040: 73 65 64 20 62 79 20 62 72 61 6e 63 68 65 73 00 |sed by branches.|
-  0050: 6d 00 6d 64                                     |m.md|
+  $ grep "i-will-regret-this" .hg/cache/rbc-names-* > /dev/null
+  [1]
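+(grep exiting 1 shows the rebuilt cache no longer references the stripped
+branch name)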
+  $ f --size .hg/cache/rbc-revs-*
   .hg/cache/rbc-revs-v1: size=152
-  0000: 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 |................|
-  0010: 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 |................|
-  0020: 00 00 00 00 00 00 00 00 d8 cb c6 1d 00 00 00 01 |................|
-  0030: 58 97 36 a2 00 00 00 02 10 ff 58 95 00 00 00 03 |X.6.......X.....|
-  0040: ee bb 94 44 00 00 00 00 5f 40 61 bb 00 00 00 00 |...D...._@a.....|
-  0050: bf be 84 1b 00 00 00 00 d3 f1 63 45 80 00 00 00 |..........cE....|
-  0060: e3 d4 9c 05 80 00 00 00 e2 3b 55 05 00 00 00 00 |.........;U.....|
-  0070: f8 94 c2 56 80 00 00 02 f3 44 76 37 00 00 00 04 |...V.....Dv7....|
-  0080: a5 8c a5 d3 00 00 00 04 df 34 3b 0d 00 00 00 04 |.........4;.....|
-  0090: c9 14 c9 9f 00 00 00 05                         |........|
 
 Test that cache files are created and grows correctly:
 
   $ rm .hg/cache/rbc*
   $ hg log -r "5 & branch(5)" -T "{rev}\n"
   5
-  $ f --size --hexdump .hg/cache/rbc-*
+
+(here v3 queries branch info for the heads, so it warms much more of the cache)
+
+#if v2
+  $ f --size .hg/cache/rbc-*
   .hg/cache/rbc-names-v1: size=1
-  0000: 61                                              |a|
   .hg/cache/rbc-revs-v1: size=48
-  0000: 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 |................|
-  0010: 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 |................|
-  0020: 00 00 00 00 00 00 00 00 d8 cb c6 1d 00 00 00 00 |................|
+#else
+  $ f --size .hg/cache/rbc-*
+  .hg/cache/rbc-names-v1: size=84
+  .hg/cache/rbc-revs-v1: size=152
+#endif
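+
+(as a reminder, assuming the v1 rev-branch-cache format: rbc-revs-v1 holds
+one 8-byte record per cached revision, a 4-byte node hash prefix plus a
+4-byte branch index whose highest bit marks a closed branch, and
+rbc-names-v1 is the '\0'-separated list of branch names those indexes point
+into, so the sizes above directly reflect how many revisions each variant
+warmed)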
 
   $ cd ..
 
@@ -948,22 +944,20 @@
   $ hg branch -q branch
   $ hg ci -Amf
 
-  $ f --size --hexdump .hg/cache/rbc-*
-  .hg/cache/rbc-names-v1: size=14
-  0000: 64 65 66 61 75 6c 74 00 62 72 61 6e 63 68       |default.branch|
-  .hg/cache/rbc-revs-v1: size=24
-  0000: 66 e5 f5 aa 00 00 00 00 fa 4c 04 e5 00 00 00 00 |f........L......|
-  0010: 56 46 78 69 00 00 00 01                         |VFxi....|
+#if v2
+
+  $ f --size --sha256 .hg/cache/rbc-*
+  .hg/cache/rbc-names-v1: size=14, sha256=d376f7eea9a7e28fac6470e78dae753c81a5543c9ad436e96999590e004a281c
+  .hg/cache/rbc-revs-v1: size=24, sha256=ec89032fd4e66e7282cb6e403848c681a855a9c36c6b44d19179218553b78779
+
   $ : > .hg/cache/rbc-revs-v1
 
 No superfluous rebuilding of cache:
   $ hg log -r "branch(null)&branch(branch)" --debug
-  $ f --size --hexdump .hg/cache/rbc-*
-  .hg/cache/rbc-names-v1: size=14
-  0000: 64 65 66 61 75 6c 74 00 62 72 61 6e 63 68       |default.branch|
-  .hg/cache/rbc-revs-v1: size=24
-  0000: 66 e5 f5 aa 00 00 00 00 fa 4c 04 e5 00 00 00 00 |f........L......|
-  0010: 56 46 78 69 00 00 00 01                         |VFxi....|
+  $ f --size --sha256 .hg/cache/rbc-*
+  .hg/cache/rbc-names-v1: size=14, sha256=d376f7eea9a7e28fac6470e78dae753c81a5543c9ad436e96999590e004a281c
+  .hg/cache/rbc-revs-v1: size=24, sha256=ec89032fd4e66e7282cb6e403848c681a855a9c36c6b44d19179218553b78779
+#endif
 
   $ cd ..
 
@@ -1316,9 +1310,15 @@
   new changesets 2ab8003a1750:99ba08759bc7
   updating to branch A
   0 files updated, 0 files merged, 0 files removed, 0 files unresolved
-  $ cat branchmap-update-01/.hg/cache/branch2-served
+#if v3
+  $ cat branchmap-update-01/.hg/cache/branch3-exp-base
+  tip-node=99ba08759bc7f6fdbe5304e83d0387f35c082479 tip-rev=1 topo-mode=pure
+  A
+#else
+  $ cat branchmap-update-01/.hg/cache/branch2-base
   99ba08759bc7f6fdbe5304e83d0387f35c082479 1
   99ba08759bc7f6fdbe5304e83d0387f35c082479 o A
+#endif
   $ hg -R branchmap-update-01 unbundle bundle.hg
   adding changesets
   adding manifests
@@ -1326,9 +1326,15 @@
   added 2 changesets with 0 changes to 0 files
   new changesets a3b807b3ff0b:71ca9a6d524e (2 drafts)
   (run 'hg update' to get a working copy)
+#if v3
+  $ cat branchmap-update-01/.hg/cache/branch3-exp-served
+  tip-node=71ca9a6d524ed3c2a215119b2086ac3b8c4c8286 tip-rev=3 topo-mode=pure
+  A
+#else
   $ cat branchmap-update-01/.hg/cache/branch2-served
   71ca9a6d524ed3c2a215119b2086ac3b8c4c8286 3
   71ca9a6d524ed3c2a215119b2086ac3b8c4c8286 o A
+#endif
 
 aborted Unbundle should not update the on disk cache
 
@@ -1350,9 +1356,15 @@
   updating to branch A
   0 files updated, 0 files merged, 0 files removed, 0 files unresolved
 
-  $ cat branchmap-update-02/.hg/cache/branch2-served
+#if v3
+  $ cat branchmap-update-02/.hg/cache/branch3-exp-base
+  tip-node=99ba08759bc7f6fdbe5304e83d0387f35c082479 tip-rev=1 topo-mode=pure
+  A
+#else
+  $ cat branchmap-update-02/.hg/cache/branch2-base
   99ba08759bc7f6fdbe5304e83d0387f35c082479 1
   99ba08759bc7f6fdbe5304e83d0387f35c082479 o A
+#endif
   $ hg -R branchmap-update-02 unbundle bundle.hg --config "hooks.pretxnclose=python:$TESTTMP/simplehook.py:hook"
   adding changesets
   adding manifests
@@ -1361,6 +1373,12 @@
   rollback completed
   abort: pretxnclose hook failed
   [40]
-  $ cat branchmap-update-02/.hg/cache/branch2-served
+#if v3
+  $ cat branchmap-update-02/.hg/cache/branch3-exp-base
+  tip-node=99ba08759bc7f6fdbe5304e83d0387f35c082479 tip-rev=1 topo-mode=pure
+  A
+#else
+  $ cat branchmap-update-02/.hg/cache/branch2-base
   99ba08759bc7f6fdbe5304e83d0387f35c082479 1
   99ba08759bc7f6fdbe5304e83d0387f35c082479 o A
+#endif
--- a/tests/test-clone-stream.t	Thu Jun 13 09:52:39 2024 +0200
+++ b/tests/test-clone-stream.t	Mon Jun 24 12:05:31 2024 +0200
@@ -109,150 +109,18 @@
 Check uncompressed
 ==================
 
-Cannot stream clone when server.uncompressed is set
+Cannot stream clone when server.uncompressed is set to false
+------------------------------------------------------------
+
+When `server.uncompressed` is disabled, the client should fall back to a
+bundle-based clone with a warning.
+
 
   $ get-with-headers.py $LOCALIP:$HGPORT '?cmd=stream_out'
   200 Script output follows
   
   1
 
-#if stream-legacy
-  $ hg debugcapabilities http://localhost:$HGPORT
-  Main capabilities:
-    batch
-    branchmap
-    $USUAL_BUNDLE2_CAPS_SERVER$
-    changegroupsubset
-    compression=$BUNDLE2_COMPRESSIONS$
-    getbundle
-    httpheader=1024
-    httpmediatype=0.1rx,0.1tx,0.2tx
-    known
-    lookup
-    pushkey
-    unbundle=HG10GZ,HG10BZ,HG10UN
-    unbundlehash
-  Bundle2 capabilities:
-    HG20
-    bookmarks
-    changegroup
-      01
-      02
-      03
-    checkheads
-      related
-    digests
-      md5
-      sha1
-      sha512
-    error
-      abort
-      unsupportedcontent
-      pushraced
-      pushkey
-    hgtagsfnodes
-    listkeys
-    phases
-      heads
-    pushkey
-    remote-changegroup
-      http
-      https
-
-  $ hg clone --stream -U http://localhost:$HGPORT server-disabled
-  warning: stream clone requested but server has them disabled
-  requesting all changes
-  adding changesets
-  adding manifests
-  adding file changes
-  added 3 changesets with 1088 changes to 1088 files
-  new changesets 96ee1d7354c4:5223b5e3265f
-
-  $ get-with-headers.py $LOCALIP:$HGPORT '?cmd=getbundle' content-type --bodyfile body --hgproto 0.2 --requestheader "x-hgarg-1=bundlecaps=HG20%2Cbundle2%3DHG20%250Abookmarks%250Achangegroup%253D01%252C02%252C03%250Adigests%253Dmd5%252Csha1%252Csha512%250Aerror%253Dabort%252Cunsupportedcontent%252Cpushraced%252Cpushkey%250Ahgtagsfnodes%250Alistkeys%250Aphases%253Dheads%250Apushkey%250Aremote-changegroup%253Dhttp%252Chttps&cg=0&common=0000000000000000000000000000000000000000&heads=c17445101a72edac06facd130d14808dfbd5c7c2&stream=1"
-  200 Script output follows
-  content-type: application/mercurial-0.2
-  
-
-  $ f --size body --hexdump --bytes 100
-  body: size=140
-  0000: 04 6e 6f 6e 65 48 47 32 30 00 00 00 00 00 00 00 |.noneHG20.......|
-  0010: 73 0b 45 52 52 4f 52 3a 41 42 4f 52 54 00 00 00 |s.ERROR:ABORT...|
-  0020: 00 01 01 07 3c 04 16 6d 65 73 73 61 67 65 73 74 |....<..messagest|
-  0030: 72 65 61 6d 20 64 61 74 61 20 72 65 71 75 65 73 |ream data reques|
-  0040: 74 65 64 20 62 75 74 20 73 65 72 76 65 72 20 64 |ted but server d|
-  0050: 6f 65 73 20 6e 6f 74 20 61 6c 6c 6f 77 20 74 68 |oes not allow th|
-  0060: 69 73 20 66                                     |is f|
-
-#endif
-#if stream-bundle2-v2
-  $ hg debugcapabilities http://localhost:$HGPORT
-  Main capabilities:
-    batch
-    branchmap
-    $USUAL_BUNDLE2_CAPS_SERVER$
-    changegroupsubset
-    compression=$BUNDLE2_COMPRESSIONS$
-    getbundle
-    httpheader=1024
-    httpmediatype=0.1rx,0.1tx,0.2tx
-    known
-    lookup
-    pushkey
-    unbundle=HG10GZ,HG10BZ,HG10UN
-    unbundlehash
-  Bundle2 capabilities:
-    HG20
-    bookmarks
-    changegroup
-      01
-      02
-      03
-    checkheads
-      related
-    digests
-      md5
-      sha1
-      sha512
-    error
-      abort
-      unsupportedcontent
-      pushraced
-      pushkey
-    hgtagsfnodes
-    listkeys
-    phases
-      heads
-    pushkey
-    remote-changegroup
-      http
-      https
-
-  $ hg clone --stream -U http://localhost:$HGPORT server-disabled
-  warning: stream clone requested but server has them disabled
-  requesting all changes
-  adding changesets
-  adding manifests
-  adding file changes
-  added 3 changesets with 1088 changes to 1088 files
-  new changesets 96ee1d7354c4:5223b5e3265f
-
-  $ get-with-headers.py $LOCALIP:$HGPORT '?cmd=getbundle' content-type --bodyfile body --hgproto 0.2 --requestheader "x-hgarg-1=bundlecaps=HG20%2Cbundle2%3DHG20%250Abookmarks%250Achangegroup%253D01%252C02%252C03%250Adigests%253Dmd5%252Csha1%252Csha512%250Aerror%253Dabort%252Cunsupportedcontent%252Cpushraced%252Cpushkey%250Ahgtagsfnodes%250Alistkeys%250Aphases%253Dheads%250Apushkey%250Aremote-changegroup%253Dhttp%252Chttps&cg=0&common=0000000000000000000000000000000000000000&heads=c17445101a72edac06facd130d14808dfbd5c7c2&stream=1"
-  200 Script output follows
-  content-type: application/mercurial-0.2
-  
-
-  $ f --size body --hexdump --bytes 100
-  body: size=140
-  0000: 04 6e 6f 6e 65 48 47 32 30 00 00 00 00 00 00 00 |.noneHG20.......|
-  0010: 73 0b 45 52 52 4f 52 3a 41 42 4f 52 54 00 00 00 |s.ERROR:ABORT...|
-  0020: 00 01 01 07 3c 04 16 6d 65 73 73 61 67 65 73 74 |....<..messagest|
-  0030: 72 65 61 6d 20 64 61 74 61 20 72 65 71 75 65 73 |ream data reques|
-  0040: 74 65 64 20 62 75 74 20 73 65 72 76 65 72 20 64 |ted but server d|
-  0050: 6f 65 73 20 6e 6f 74 20 61 6c 6c 6f 77 20 74 68 |oes not allow th|
-  0060: 69 73 20 66                                     |is f|
-
-#endif
-#if stream-bundle2-v3
   $ hg debugcapabilities http://localhost:$HGPORT
   Main capabilities:
     batch
@@ -304,23 +172,6 @@
   added 3 changesets with 1088 changes to 1088 files
   new changesets 96ee1d7354c4:5223b5e3265f
 
-  $ get-with-headers.py $LOCALIP:$HGPORT '?cmd=getbundle' content-type --bodyfile body --hgproto 0.2 --requestheader "x-hgarg-1=bundlecaps=HG20%2Cbundle2%3DHG20%250Abookmarks%250Achangegroup%253D01%252C02%252C03%250Adigests%253Dmd5%252Csha1%252Csha512%250Aerror%253Dabort%252Cunsupportedcontent%252Cpushraced%252Cpushkey%250Ahgtagsfnodes%250Alistkeys%250Aphases%253Dheads%250Apushkey%250Aremote-changegroup%253Dhttp%252Chttps&cg=0&common=0000000000000000000000000000000000000000&heads=c17445101a72edac06facd130d14808dfbd5c7c2&stream=1"
-  200 Script output follows
-  content-type: application/mercurial-0.2
-  
-
-  $ f --size body --hexdump --bytes 100
-  body: size=140
-  0000: 04 6e 6f 6e 65 48 47 32 30 00 00 00 00 00 00 00 |.noneHG20.......|
-  0010: 73 0b 45 52 52 4f 52 3a 41 42 4f 52 54 00 00 00 |s.ERROR:ABORT...|
-  0020: 00 01 01 07 3c 04 16 6d 65 73 73 61 67 65 73 74 |....<..messagest|
-  0030: 72 65 61 6d 20 64 61 74 61 20 72 65 71 75 65 73 |ream data reques|
-  0040: 74 65 64 20 62 75 74 20 73 65 72 76 65 72 20 64 |ted but server d|
-  0050: 6f 65 73 20 6e 6f 74 20 61 6c 6c 6f 77 20 74 68 |oes not allow th|
-  0060: 69 73 20 66                                     |is f|
-
-#endif
-
   $ killdaemons.py
   $ cd server
   $ hg serve -p $HGPORT -d --pid-file=hg.pid --error errors.txt
@@ -328,6 +179,13 @@
   $ cd ..
 
 Basic clone
+-----------
+
+Check that --stream triggers a stream clone and results in a valid repository.
+
+We check the associated output for exact byte and file counts, as changes in
+these values imply changes in the transferred data and can reveal unintended
+changes in the process.
 
 #if stream-legacy
   $ hg clone --stream -U http://localhost:$HGPORT clone1
@@ -338,7 +196,6 @@
   transferred 98.8 KB in * seconds (* */sec) (glob) (zstd !)
   searching for changes
   no changes found
-  $ cat server/errors.txt
 #endif
 #if stream-bundle2-v2
   $ hg clone --stream -U http://localhost:$HGPORT clone1
@@ -349,20 +206,8 @@
   transferred 98.9 KB in * seconds (* */sec) (glob) (zstd no-rust !)
   1096 files to transfer, 99.0 KB of data (zstd rust !)
   transferred 99.0 KB in * seconds (* */sec) (glob) (zstd rust !)
+#endif
 
-  $ ls -1 clone1/.hg/cache
-  branch2-base
-  branch2-immutable
-  branch2-served
-  branch2-served.hidden
-  branch2-visible
-  branch2-visible-hidden
-  rbc-names-v1
-  rbc-revs-v1
-  tags2
-  tags2-served
-  $ cat server/errors.txt
-#endif
 #if stream-bundle2-v3
   $ hg clone --stream -U http://localhost:$HGPORT clone1
   streaming all changes
@@ -370,244 +215,68 @@
   transferred 102 KB in * seconds (* */sec) (glob) (no-zstd !)
   transferred 98.9 KB in * seconds (* */sec) (glob) (zstd no-rust !)
   transferred 99.0 KB in * seconds (* */sec) (glob) (zstd rust !)
+#endif
 
+#if no-stream-legacy
   $ ls -1 clone1/.hg/cache
   branch2-base
-  branch2-immutable
   branch2-served
-  branch2-served.hidden
-  branch2-visible
-  branch2-visible-hidden
   rbc-names-v1
   rbc-revs-v1
   tags2
   tags2-served
-  $ cat server/errors.txt
 #endif
 
+  $ hg -R clone1 verify --quiet
+  $ cat server/errors.txt
+
 getbundle requests with stream=1 are uncompressed
+-------------------------------------------------
+
+We check that `getbundle` will return a stream bundle when requested.
+
+XXX manually building the --requestheader is fragile and will drift away from actual usage
 
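+(the x-hgarg-1 value is simply the url-quoted `bundlecaps` blob a real client
+would send, with `stream=1` requesting a stream bundle; e.g. `%2C` is a
+quoted comma and `%253D` a doubly-quoted `=`)
+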
   $ get-with-headers.py $LOCALIP:$HGPORT '?cmd=getbundle' content-type --bodyfile body --hgproto '0.1 0.2 comp=zlib,none' --requestheader "x-hgarg-1=bundlecaps=HG20%2Cbundle2%3DHG20%250Abookmarks%250Achangegroup%253D01%252C02%252C03%250Adigests%253Dmd5%252Csha1%252Csha512%250Aerror%253Dabort%252Cunsupportedcontent%252Cpushraced%252Cpushkey%250Ahgtagsfnodes%250Alistkeys%250Aphases%253Dheads%250Apushkey%250Aremote-changegroup%253Dhttp%252Chttps%250Astream%253Dv2&cg=0&common=0000000000000000000000000000000000000000&heads=c17445101a72edac06facd130d14808dfbd5c7c2&stream=1"
   200 Script output follows
   content-type: application/mercurial-0.2
   
 
-#if no-zstd no-rust
-  $ f --size --hex --bytes 256 body
-  body: size=119140
-  0000: 04 6e 6f 6e 65 48 47 32 30 00 00 00 00 00 00 00 |.noneHG20.......|
-  0010: 62 07 53 54 52 45 41 4d 32 00 00 00 00 03 00 09 |b.STREAM2.......|
-  0020: 06 09 04 0c 26 62 79 74 65 63 6f 75 6e 74 31 30 |....&bytecount10|
-  0030: 34 31 31 35 66 69 6c 65 63 6f 75 6e 74 31 30 39 |4115filecount109|
-  0040: 34 72 65 71 75 69 72 65 6d 65 6e 74 73 67 65 6e |4requirementsgen|
-  0050: 65 72 61 6c 64 65 6c 74 61 25 32 43 72 65 76 6c |eraldelta%2Crevl|
-  0060: 6f 67 76 31 25 32 43 73 70 61 72 73 65 72 65 76 |ogv1%2Csparserev|
-  0070: 6c 6f 67 00 00 80 00 73 08 42 64 61 74 61 2f 30 |log....s.Bdata/0|
-  0080: 2e 69 00 03 00 01 00 00 00 00 00 00 00 02 00 00 |.i..............|
-  0090: 00 01 00 00 00 00 00 00 00 01 ff ff ff ff ff ff |................|
-  00a0: ff ff 80 29 63 a0 49 d3 23 87 bf ce fe 56 67 92 |...)c.I.#....Vg.|
-  00b0: 67 2c 69 d1 ec 39 00 00 00 00 00 00 00 00 00 00 |g,i..9..........|
-  00c0: 00 00 75 30 73 26 45 64 61 74 61 2f 30 30 63 68 |..u0s&Edata/00ch|
-  00d0: 61 6e 67 65 6c 6f 67 2d 61 62 33 34 39 31 38 30 |angelog-ab349180|
-  00e0: 61 30 34 30 35 30 31 30 2e 6e 64 2e 69 00 03 00 |a0405010.nd.i...|
-  00f0: 01 00 00 00 00 00 00 00 05 00 00 00 04 00 00 00 |................|
-#endif
-#if zstd no-rust
-  $ f --size --hex --bytes 256 body
-  body: size=116327 (no-bigendian !)
-  body: size=116322 (bigendian !)
+  $ f --size --hex --bytes 48 body
+  body: size=* (glob)
   0000: 04 6e 6f 6e 65 48 47 32 30 00 00 00 00 00 00 00 |.noneHG20.......|
-  0010: 7c 07 53 54 52 45 41 4d 32 00 00 00 00 03 00 09 ||.STREAM2.......|
-  0020: 06 09 04 0c 40 62 79 74 65 63 6f 75 6e 74 31 30 |....@bytecount10|
-  0030: 31 32 37 36 66 69 6c 65 63 6f 75 6e 74 31 30 39 |1276filecount109| (no-bigendian !)
-  0030: 31 32 37 31 66 69 6c 65 63 6f 75 6e 74 31 30 39 |1271filecount109| (bigendian !)
-  0040: 34 72 65 71 75 69 72 65 6d 65 6e 74 73 67 65 6e |4requirementsgen|
-  0050: 65 72 61 6c 64 65 6c 74 61 25 32 43 72 65 76 6c |eraldelta%2Crevl|
-  0060: 6f 67 2d 63 6f 6d 70 72 65 73 73 69 6f 6e 2d 7a |og-compression-z|
-  0070: 73 74 64 25 32 43 72 65 76 6c 6f 67 76 31 25 32 |std%2Crevlogv1%2|
-  0080: 43 73 70 61 72 73 65 72 65 76 6c 6f 67 00 00 80 |Csparserevlog...|
-  0090: 00 73 08 42 64 61 74 61 2f 30 2e 69 00 03 00 01 |.s.Bdata/0.i....|
-  00a0: 00 00 00 00 00 00 00 02 00 00 00 01 00 00 00 00 |................|
-  00b0: 00 00 00 01 ff ff ff ff ff ff ff ff 80 29 63 a0 |.............)c.|
-  00c0: 49 d3 23 87 bf ce fe 56 67 92 67 2c 69 d1 ec 39 |I.#....Vg.g,i..9|
-  00d0: 00 00 00 00 00 00 00 00 00 00 00 00 75 30 73 26 |............u0s&|
-  00e0: 45 64 61 74 61 2f 30 30 63 68 61 6e 67 65 6c 6f |Edata/00changelo|
-  00f0: 67 2d 61 62 33 34 39 31 38 30 61 30 34 30 35 30 |g-ab349180a04050|
-#endif
-#if zstd rust no-dirstate-v2
-  $ f --size --hex --bytes 256 body
-  body: size=116310 (no-rust !)
-  body: size=116495 (rust no-stream-legacy no-bigendian !)
-  body: size=116490 (rust no-stream-legacy bigendian !)
-  body: size=116327 (rust stream-legacy no-bigendian !)
-  body: size=116322 (rust stream-legacy bigendian !)
-  0000: 04 6e 6f 6e 65 48 47 32 30 00 00 00 00 00 00 00 |.noneHG20.......|
-  0010: 7c 07 53 54 52 45 41 4d 32 00 00 00 00 03 00 09 ||.STREAM2.......|
-  0020: 06 09 04 0c 40 62 79 74 65 63 6f 75 6e 74 31 30 |....@bytecount10|
-  0030: 31 32 37 36 66 69 6c 65 63 6f 75 6e 74 31 30 39 |1276filecount109| (no-rust !)
-  0040: 33 72 65 71 75 69 72 65 6d 65 6e 74 73 67 65 6e |3requirementsgen| (no-rust !)
-  0030: 31 34 30 32 66 69 6c 65 63 6f 75 6e 74 31 30 39 |1402filecount109| (rust no-stream-legacy no-bigendian !)
-  0030: 31 33 39 37 66 69 6c 65 63 6f 75 6e 74 31 30 39 |1397filecount109| (rust no-stream-legacy bigendian !)
-  0040: 36 72 65 71 75 69 72 65 6d 65 6e 74 73 67 65 6e |6requirementsgen| (rust no-stream-legacy !)
-  0030: 31 32 37 36 66 69 6c 65 63 6f 75 6e 74 31 30 39 |1276filecount109| (rust stream-legacy no-bigendian !)
-  0030: 31 32 37 31 66 69 6c 65 63 6f 75 6e 74 31 30 39 |1271filecount109| (rust stream-legacy bigendian !)
-  0040: 34 72 65 71 75 69 72 65 6d 65 6e 74 73 67 65 6e |4requirementsgen| (rust stream-legacy !)
-  0050: 65 72 61 6c 64 65 6c 74 61 25 32 43 72 65 76 6c |eraldelta%2Crevl|
-  0060: 6f 67 2d 63 6f 6d 70 72 65 73 73 69 6f 6e 2d 7a |og-compression-z|
-  0070: 73 74 64 25 32 43 72 65 76 6c 6f 67 76 31 25 32 |std%2Crevlogv1%2|
-  0080: 43 73 70 61 72 73 65 72 65 76 6c 6f 67 00 00 80 |Csparserevlog...|
-  0090: 00 73 08 42 64 61 74 61 2f 30 2e 69 00 03 00 01 |.s.Bdata/0.i....|
-  00a0: 00 00 00 00 00 00 00 02 00 00 00 01 00 00 00 00 |................|
-  00b0: 00 00 00 01 ff ff ff ff ff ff ff ff 80 29 63 a0 |.............)c.|
-  00c0: 49 d3 23 87 bf ce fe 56 67 92 67 2c 69 d1 ec 39 |I.#....Vg.g,i..9|
-  00d0: 00 00 00 00 00 00 00 00 00 00 00 00 75 30 73 26 |............u0s&|
-  00e0: 45 64 61 74 61 2f 30 30 63 68 61 6e 67 65 6c 6f |Edata/00changelo|
-  00f0: 67 2d 61 62 33 34 39 31 38 30 61 30 34 30 35 30 |g-ab349180a04050|
-#endif
-#if zstd dirstate-v2
-  $ f --size --hex --bytes 256 body
-  body: size=109549
-  0000: 04 6e 6f 6e 65 48 47 32 30 00 00 00 00 00 00 00 |.noneHG20.......|
-  0010: c0 07 53 54 52 45 41 4d 32 00 00 00 00 03 00 09 |..STREAM2.......|
-  0020: 05 09 04 0c 85 62 79 74 65 63 6f 75 6e 74 39 35 |.....bytecount95|
-  0030: 38 39 37 66 69 6c 65 63 6f 75 6e 74 31 30 33 30 |897filecount1030|
-  0040: 72 65 71 75 69 72 65 6d 65 6e 74 73 64 6f 74 65 |requirementsdote|
-  0050: 6e 63 6f 64 65 25 32 43 65 78 70 2d 64 69 72 73 |ncode%2Cexp-dirs|
-  0060: 74 61 74 65 2d 76 32 25 32 43 66 6e 63 61 63 68 |tate-v2%2Cfncach|
-  0070: 65 25 32 43 67 65 6e 65 72 61 6c 64 65 6c 74 61 |e%2Cgeneraldelta|
-  0080: 25 32 43 70 65 72 73 69 73 74 65 6e 74 2d 6e 6f |%2Cpersistent-no|
-  0090: 64 65 6d 61 70 25 32 43 72 65 76 6c 6f 67 2d 63 |demap%2Crevlog-c|
-  00a0: 6f 6d 70 72 65 73 73 69 6f 6e 2d 7a 73 74 64 25 |ompression-zstd%|
-  00b0: 32 43 72 65 76 6c 6f 67 76 31 25 32 43 73 70 61 |2Crevlogv1%2Cspa|
-  00c0: 72 73 65 72 65 76 6c 6f 67 25 32 43 73 74 6f 72 |rserevlog%2Cstor|
-  00d0: 65 00 00 80 00 73 08 42 64 61 74 61 2f 30 2e 69 |e....s.Bdata/0.i|
-  00e0: 00 03 00 01 00 00 00 00 00 00 00 02 00 00 00 01 |................|
-  00f0: 00 00 00 00 00 00 00 01 ff ff ff ff ff ff ff ff |................|
-#endif
+  0010: ?? 07 53 54 52 45 41 4d 32 00 00 00 00 03 00 09 |?.STREAM2.......| (glob)
+  0020: 06 09 04 0c ?? 62 79 74 65 63 6f 75 6e 74 31 30 |....?bytecount10| (glob)
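+
+(reading the dump: the leading "04 none" is the length-prefixed compression
+name of the 0.2 wire protocol, "HG20" is the bundle2 magic, and "STREAM2" the
+type of the first part; the globbed bytes cover the part-header length and
+parameter sizes, which vary with the exact stream contents)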
 
 --uncompressed is an alias to --stream
+---------------------------------------
 
-#if stream-legacy
-  $ hg clone --uncompressed -U http://localhost:$HGPORT clone1-uncompressed
-  streaming all changes
-  1091 files to transfer, 102 KB of data (no-zstd !)
-  transferred 102 KB in * seconds (* */sec) (glob) (no-zstd !)
-  1091 files to transfer, 98.8 KB of data (zstd !)
-  transferred 98.8 KB in * seconds (* */sec) (glob) (zstd !)
-  searching for changes
-  no changes found
-#endif
-#if stream-bundle2-v2
+The alias flag should trigger a stream clone too.
+
   $ hg clone --uncompressed -U http://localhost:$HGPORT clone1-uncompressed
   streaming all changes
-  1094 files to transfer, 102 KB of data (no-zstd !)
-  transferred 102 KB in * seconds (* */sec) (glob) (no-zstd !)
-  1094 files to transfer, 98.9 KB of data (zstd no-rust !)
-  transferred 98.9 KB in * seconds (* */sec) (glob) (zstd no-rust !)
-  1096 files to transfer, 99.0 KB of data (zstd rust !)
-  transferred 99.0 KB in * seconds (* */sec) (glob) (zstd rust !)
-#endif
-#if stream-bundle2-v3
-  $ hg clone --uncompressed -U http://localhost:$HGPORT clone1-uncompressed
-  streaming all changes
-  1093 entries to transfer
-  transferred 102 KB in * seconds (* */sec) (glob) (no-zstd !)
-  transferred 98.9 KB in * seconds (* */sec) (glob) (zstd no-rust !)
-  transferred 99.0 KB in * seconds (* */sec) (glob) (zstd rust !)
-#endif
+  * files to transfer* (glob) (no-stream-bundle2-v3 !)
+  * entries to transfer (glob) (stream-bundle2-v3 !)
+  transferred * KB in * seconds (* */sec) (glob)
+  searching for changes (stream-legacy !)
+  no changes found (stream-legacy !)
 
 Clone with background file closing enabled
+-------------------------------------------
 
-#if stream-legacy
-  $ hg --debug --config worker.backgroundclose=true --config worker.backgroundcloseminfilecount=1 clone --stream -U http://localhost:$HGPORT clone-background | grep -v adding
-  using http://localhost:$HGPORT/
-  sending capabilities command
-  sending branchmap command
-  streaming all changes
-  sending stream_out command
-  1091 files to transfer, 102 KB of data (no-zstd !)
-  1091 files to transfer, 98.8 KB of data (zstd !)
-  starting 4 threads for background file closing
-  updating the branch cache
-  transferred 102 KB in * seconds (* */sec) (glob) (no-zstd !)
-  transferred 98.8 KB in * seconds (* */sec) (glob) (zstd !)
-  query 1; heads
-  sending batch command
-  searching for changes
-  all remote heads known locally
-  no changes found
-  sending getbundle command
-  bundle2-input-bundle: with-transaction
-  bundle2-input-part: "listkeys" (params: 1 mandatory) supported
-  bundle2-input-part: "phase-heads" supported
-  bundle2-input-part: total payload size 24
-  bundle2-input-bundle: 2 parts total
-  checking for updated bookmarks
-  updating the branch cache
-  (sent 5 HTTP requests and * bytes; received * bytes in responses) (glob)
-#endif
-#if stream-bundle2-v2
-  $ hg --debug --config worker.backgroundclose=true --config worker.backgroundcloseminfilecount=1 clone --stream -U http://localhost:$HGPORT clone-background | grep -v adding
-  using http://localhost:$HGPORT/
-  sending capabilities command
-  query 1; heads
-  sending batch command
-  streaming all changes
-  sending getbundle command
-  bundle2-input-bundle: with-transaction
-  bundle2-input-part: "stream2" (params: 3 mandatory) supported
-  applying stream bundle
-  1094 files to transfer, 102 KB of data (no-zstd !)
-  1094 files to transfer, 98.9 KB of data (zstd no-rust !)
-  1096 files to transfer, 99.0 KB of data (zstd rust !)
-  starting 4 threads for background file closing
+The background file closing logic should trigger when configured to do so, and
+the result should be a valid repository.
+
+  $ hg --debug --config worker.backgroundclose=true --config worker.backgroundcloseminfilecount=1 clone --stream -U http://localhost:$HGPORT clone-background | grep "background file closing"
   starting 4 threads for background file closing
-  updating the branch cache
-  transferred 102 KB in * seconds (* */sec) (glob) (no-zstd !)
-  bundle2-input-part: total payload size 119001 (no-zstd !)
-  transferred 98.9 KB in * seconds (* */sec) (glob) (zstd no-rust !)
-  transferred 99.0 KB in * seconds (* */sec) (glob) (zstd rust !)
-  bundle2-input-part: total payload size 116162 (zstd no-bigendian no-rust !)
-  bundle2-input-part: total payload size 116330 (zstd no-bigendian rust !)
-  bundle2-input-part: total payload size 116157 (zstd bigendian no-rust !)
-  bundle2-input-part: total payload size 116325 (zstd bigendian rust !)
-  bundle2-input-part: "listkeys" (params: 1 mandatory) supported
-  bundle2-input-bundle: 2 parts total
-  checking for updated bookmarks
-  updating the branch cache
-  (sent 3 HTTP requests and * bytes; received * bytes in responses) (glob)
-#endif
-#if stream-bundle2-v3
-  $ hg --debug --config worker.backgroundclose=true --config worker.backgroundcloseminfilecount=1 clone --stream -U http://localhost:$HGPORT clone-background | grep -v adding
-  using http://localhost:$HGPORT/
-  sending capabilities command
-  query 1; heads
-  sending batch command
-  streaming all changes
-  sending getbundle command
-  bundle2-input-bundle: with-transaction
-  bundle2-input-part: "stream3-exp" (params: 1 mandatory) supported
-  applying stream bundle
-  1093 entries to transfer
-  starting 4 threads for background file closing
-  starting 4 threads for background file closing
-  updating the branch cache
-  transferred 102 KB in * seconds (* */sec) (glob) (no-zstd !)
-  bundle2-input-part: total payload size 120096 (no-zstd !)
-  transferred 98.9 KB in * seconds (* */sec) (glob) (zstd no-rust !)
-  transferred 99.0 KB in * seconds (* */sec) (glob) (zstd rust !)
-  bundle2-input-part: total payload size 117257 (zstd no-rust no-bigendian !)
-  bundle2-input-part: total payload size 117425 (zstd rust no-bigendian !)
-  bundle2-input-part: total payload size 117252 (zstd bigendian no-rust !)
-  bundle2-input-part: total payload size 117420 (zstd bigendian rust !)
-  bundle2-input-part: "listkeys" (params: 1 mandatory) supported
-  bundle2-input-bundle: 2 parts total
-  checking for updated bookmarks
-  updating the branch cache
-  (sent 3 HTTP requests and * bytes; received * bytes in responses) (glob)
-#endif
+  starting 4 threads for background file closing (no-stream-legacy !)
+  $ hg verify -R clone-background --quiet
 
 Cannot stream clone when there are secret changesets
+----------------------------------------------------
+
+If secret changesets are present, they should not be cloned (by default) and
+the clone falls back to a bundle clone.
 
   $ hg -R server phase --force --secret -r tip
   $ hg clone --stream -U http://localhost:$HGPORT secret-denied
@@ -622,44 +291,30 @@
   $ killdaemons.py
 
 Streaming of secrets can be overridden by server config
+-------------------------------------------------------
+
+Secret changesets can still be streamed if the server is configured to do so.
 
   $ cd server
   $ hg serve --config server.uncompressedallowsecret=true -p $HGPORT -d --pid-file=hg.pid
   $ cat hg.pid > $DAEMON_PIDS
   $ cd ..
 
-#if stream-legacy
-  $ hg clone --stream -U http://localhost:$HGPORT secret-allowed
-  streaming all changes
-  1091 files to transfer, 102 KB of data (no-zstd !)
-  transferred 102 KB in * seconds (* */sec) (glob) (no-zstd !)
-  1091 files to transfer, 98.8 KB of data (zstd !)
-  transferred 98.8 KB in * seconds (* */sec) (glob) (zstd !)
-  searching for changes
-  no changes found
-#endif
-#if stream-bundle2-v2
   $ hg clone --stream -U http://localhost:$HGPORT secret-allowed
   streaming all changes
-  1094 files to transfer, 102 KB of data (no-zstd !)
-  transferred 102 KB in * seconds (* */sec) (glob) (no-zstd !)
-  1094 files to transfer, 98.9 KB of data (zstd no-rust !)
-  transferred 98.9 KB in * seconds (* */sec) (glob) (zstd no-rust !)
-  1096 files to transfer, 99.0 KB of data (zstd rust !)
-  transferred 99.0 KB in * seconds (* */sec) (glob) (zstd rust !)
-#endif
-#if stream-bundle2-v3
-  $ hg clone --stream -U http://localhost:$HGPORT secret-allowed
-  streaming all changes
-  1093 entries to transfer
-  transferred 102 KB in * seconds (* */sec) (glob) (no-zstd !)
-  transferred 98.9 KB in * seconds (* */sec) (glob) (zstd no-rust !)
-  transferred 99.0 KB in * seconds (* */sec) (glob) (zstd rust !)
-#endif
+  * files to transfer* (glob) (no-stream-bundle2-v3 !)
+  * entries to transfer (glob) (stream-bundle2-v3 !)
+  transferred * KB in * seconds (* */sec) (glob)
+  searching for changes (stream-legacy !)
+  no changes found (stream-legacy !)
 
   $ killdaemons.py
 
 Verify interaction between preferuncompressed and secret presence
+-----------------------------------------------------------------
+
+The presence of secret changesets still makes the clone fall back to a normal
+bundle even if the server prefers stream clones.
 
   $ cd server
   $ hg serve --config server.preferuncompressed=true -p $HGPORT -d --pid-file=hg.pid
@@ -677,6 +332,9 @@
   $ killdaemons.py
 
 Clone not allowed when full bundles disabled and can't serve secrets
+--------------------------------------------------------------------
+
+The clone should fail as no valid option is found.
 
   $ cd server
   $ hg serve --config server.disablefullbundle=true -p $HGPORT -d --pid-file=hg.pid
@@ -692,6 +350,8 @@
   [100]
 
 Local stream clone with secrets involved
+----------------------------------------
+
 (This is just a test over behavior: if you have access to the repo's files,
 there is no security so it isn't important to prevent a clone here.)
 
@@ -704,12 +364,20 @@
   added 2 changesets with 1025 changes to 1025 files
   new changesets 96ee1d7354c4:c17445101a72
 
+(revert introduction of secret changeset)
+
+  $ hg -R server phase --draft 'secret()'
+
 Stream clone while repo is changing:
+------------------------------------
+
+We should send a repository in a valid state, ignoring the ongoing transaction.
 
   $ mkdir changing
   $ cd changing
 
 prepare repo with small and big file to cover both code paths in emitrevlogdata
+(inline and non-inline revlogs).
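+
+(a revlog starts out "inline", with its data interleaved in the .i index
+file, and is split into separate .i and .d files once it grows past the
+inline threshold; the small and the big file exercise both layouts)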
 
   $ hg init repo
   $ touch repo/f1
@@ -740,15 +408,14 @@
   $ $RUNTESTDIR/testlib/wait-on-file 10 $HG_TEST_STREAM_WALKED_FILE_3
   $ hg -R clone id
   000000000000
+  $ hg -R clone verify --quiet
   $ cat errors.log
   $ cd ..
 
 Stream repository with bookmarks
 --------------------------------
 
-(revert introduction of secret changeset)
-
-  $ hg -R server phase --draft 'secret()'
+The bookmark file should be sent over in the stream bundle.
 
 add a bookmark
 
@@ -756,40 +423,17 @@
 
 clone it
 
-#if stream-legacy
-  $ hg clone --stream http://localhost:$HGPORT with-bookmarks
-  streaming all changes
-  1091 files to transfer, 102 KB of data (no-zstd !)
-  transferred 102 KB in * seconds (* */sec) (glob) (no-zstd !)
-  1091 files to transfer, 98.8 KB of data (zstd !)
-  transferred 98.8 KB in * seconds (* */sec) (glob) (zstd !)
-  searching for changes
-  no changes found
-  updating to branch default
-  1088 files updated, 0 files merged, 0 files removed, 0 files unresolved
-#endif
-#if stream-bundle2-v2
   $ hg clone --stream http://localhost:$HGPORT with-bookmarks
   streaming all changes
-  1097 files to transfer, 102 KB of data (no-zstd !)
-  transferred 102 KB in * seconds (* */sec) (glob) (no-zstd !)
-  1097 files to transfer, 99.1 KB of data (zstd no-rust !)
-  transferred 99.1 KB in * seconds (* */sec) (glob) (zstd no-rust !)
-  1099 files to transfer, 99.2 KB of data (zstd rust !)
-  transferred 99.2 KB in * seconds (* */sec) (glob) (zstd rust !)
+  1091 files to transfer, * KB of data (glob) (stream-legacy !)
+  1097 files to transfer, * KB of data (glob) (stream-bundle2-v2 no-rust !)
+  1099 files to transfer, * KB of data (glob) (stream-bundle2-v2 rust !)
+  1096 entries to transfer (stream-bundle2-v3 !)
+  transferred * KB in * seconds (* */sec) (glob)
+  searching for changes (stream-legacy !)
+  no changes found (stream-legacy !)
   updating to branch default
   1088 files updated, 0 files merged, 0 files removed, 0 files unresolved
-#endif
-#if stream-bundle2-v3
-  $ hg clone --stream http://localhost:$HGPORT with-bookmarks
-  streaming all changes
-  1096 entries to transfer
-  transferred 102 KB in * seconds (* */sec) (glob) (no-zstd !)
-  transferred 99.1 KB in * seconds (* */sec) (glob) (zstd no-rust !)
-  transferred 99.2 KB in * seconds (* */sec) (glob) (zstd rust !)
-  updating to branch default
-  1088 files updated, 0 files merged, 0 files removed, 0 files unresolved
-#endif
   $ hg verify -R with-bookmarks -q
   $ hg -R with-bookmarks bookmarks
      some-bookmark             2:5223b5e3265f
@@ -797,6 +441,9 @@
 Stream repository with phases
 -----------------------------
 
+The file storing phases information (e.g. phaseroots) should be sent as part of
+the stream bundle.
+
 Clone as publishing
 
   $ hg -R server phase -r 'all()'
@@ -804,40 +451,17 @@
   1: draft
   2: draft
 
-#if stream-legacy
-  $ hg clone --stream http://localhost:$HGPORT phase-publish
-  streaming all changes
-  1091 files to transfer, 102 KB of data (no-zstd !)
-  transferred 102 KB in * seconds (* */sec) (glob) (no-zstd !)
-  1091 files to transfer, 98.8 KB of data (zstd !)
-  transferred 98.8 KB in * seconds (* */sec) (glob) (zstd !)
-  searching for changes
-  no changes found
-  updating to branch default
-  1088 files updated, 0 files merged, 0 files removed, 0 files unresolved
-#endif
-#if stream-bundle2-v2
   $ hg clone --stream http://localhost:$HGPORT phase-publish
   streaming all changes
-  1097 files to transfer, 102 KB of data (no-zstd !)
-  transferred 102 KB in * seconds (* */sec) (glob) (no-zstd !)
-  1097 files to transfer, 99.1 KB of data (zstd no-rust !)
-  transferred 99.1 KB in * seconds (* */sec) (glob) (zstd  no-rust !)
-  1099 files to transfer, 99.2 KB of data (zstd rust !)
-  transferred 99.2 KB in * seconds (* */sec) (glob) (zstd rust !)
+  1091 files to transfer, * KB of data (glob) (stream-legacy !)
+  1097 files to transfer, * KB of data (glob) (stream-bundle2-v2 no-rust !)
+  1099 files to transfer, * KB of data (glob) (stream-bundle2-v2 rust !)
+  1096 entries to transfer (stream-bundle2-v3 !)
+  transferred * KB in * seconds (* */sec) (glob)
+  searching for changes (stream-legacy !)
+  no changes found (stream-legacy !)
   updating to branch default
   1088 files updated, 0 files merged, 0 files removed, 0 files unresolved
-#endif
-#if stream-bundle2-v3
-  $ hg clone --stream http://localhost:$HGPORT phase-publish
-  streaming all changes
-  1096 entries to transfer
-  transferred 102 KB in * seconds (* */sec) (glob) (no-zstd !)
-  transferred 99.1 KB in * seconds (* */sec) (glob) (zstd no-rust !)
-  transferred 99.2 KB in * seconds (* */sec) (glob) (zstd rust !)
-  updating to branch default
-  1088 files updated, 0 files merged, 0 files removed, 0 files unresolved
-#endif
   $ hg verify -R phase-publish -q
   $ hg -R phase-publish phase -r 'all()'
   0: public
@@ -854,73 +478,47 @@
   $ hg -R server serve -p $HGPORT -d --pid-file=hg.pid
   $ cat hg.pid > $DAEMON_PIDS
 
-#if stream-legacy
-
-With v1 of the stream protocol, changeset are always cloned as public. It make
-stream v1 unsuitable for non-publishing repository.
-
-  $ hg clone --stream http://localhost:$HGPORT phase-no-publish
-  streaming all changes
-  1091 files to transfer, 102 KB of data (no-zstd !)
-  transferred 102 KB in * seconds (* */sec) (glob) (no-zstd !)
-  1091 files to transfer, 98.8 KB of data (zstd !)
-  transferred 98.8 KB in * seconds (* */sec) (glob) (zstd !)
-  searching for changes
-  no changes found
-  updating to branch default
-  1088 files updated, 0 files merged, 0 files removed, 0 files unresolved
-  $ hg -R phase-no-publish phase -r 'all()'
-  0: public
-  1: public
-  2: public
-#endif
-#if stream-bundle2-v2
   $ hg clone --stream http://localhost:$HGPORT phase-no-publish
   streaming all changes
-  1098 files to transfer, 102 KB of data (no-zstd !)
-  transferred 102 KB in * seconds (* */sec) (glob) (no-zstd !)
-  1098 files to transfer, 99.1 KB of data (zstd no-rust !)
-  transferred 99.1 KB in * seconds (* */sec) (glob) (zstd no-rust !)
-  1100 files to transfer, 99.2 KB of data (zstd rust !)
-  transferred 99.2 KB in * seconds (* */sec) (glob) (zstd rust !)
+  1091 files to transfer, * KB of data (glob) (stream-legacy !)
+  1098 files to transfer, * KB of data (glob) (stream-bundle2-v2 no-rust !)
+  1100 files to transfer, * KB of data (glob) (stream-bundle2-v2 rust !)
+  1097 entries to transfer (stream-bundle2-v3 !)
+  transferred * KB in * seconds (* */sec) (glob)
+  searching for changes (stream-legacy !)
+  no changes found (stream-legacy !)
   updating to branch default
   1088 files updated, 0 files merged, 0 files removed, 0 files unresolved
+
+Note: with v1 of the stream protocol, changesets are always cloned as public.
+This makes stream v1 unsuitable for non-publishing repositories.
+
   $ hg -R phase-no-publish phase -r 'all()'
-  0: draft
-  1: draft
-  2: draft
-#endif
-#if stream-bundle2-v3
-  $ hg clone --stream http://localhost:$HGPORT phase-no-publish
-  streaming all changes
-  1097 entries to transfer
-  transferred 102 KB in * seconds (* */sec) (glob) (no-zstd !)
-  transferred 99.1 KB in * seconds (* */sec) (glob) (zstd no-rust !)
-  transferred 99.2 KB in * seconds (* */sec) (glob) (zstd rust !)
-  updating to branch default
-  1088 files updated, 0 files merged, 0 files removed, 0 files unresolved
-  $ hg -R phase-no-publish phase -r 'all()'
-  0: draft
-  1: draft
-  2: draft
-#endif
+  0: public (stream-legacy !)
+  1: public (stream-legacy !)
+  2: public (stream-legacy !)
+  0: draft (no-stream-legacy !)
+  1: draft (no-stream-legacy !)
+  2: draft (no-stream-legacy !)
   $ hg verify -R phase-no-publish -q
 
   $ killdaemons.py
 
+
+Stream repository with obsolescence
+-----------------------------------
+
 #if stream-legacy
 
 With v1 of the stream protocol, changeset are always cloned as public. There's
 no obsolescence markers exchange in stream v1.
 
-#endif
-#if stream-bundle2-v2
-
-Stream repository with obsolescence
------------------------------------
+#else
 
 Clone non-publishing with obsolescence
 
+The obsstore file should be sent as part of the stream bundle.
+
   $ cat >> $HGRCPATH << EOF
   > [experimental]
   > evolution=all
@@ -943,62 +541,10 @@
 
   $ hg clone -U --stream http://localhost:$HGPORT with-obsolescence
   streaming all changes
-  1099 files to transfer, 102 KB of data (no-zstd !)
-  transferred 102 KB in * seconds (* */sec) (glob) (no-zstd !)
-  1099 files to transfer, 99.5 KB of data (zstd no-rust !)
-  transferred 99.5 KB in * seconds (* */sec) (glob) (zstd no-rust !)
-  1101 files to transfer, 99.6 KB of data (zstd rust !)
-  transferred 99.6 KB in * seconds (* */sec) (glob) (zstd rust !)
-  $ hg -R with-obsolescence log -T '{rev}: {phase}\n'
-  2: draft
-  1: draft
-  0: draft
-  $ hg debugobsolete -R with-obsolescence
-  8c206a663911c1f97f2f9d7382e417ae55872cfa 0 {5223b5e3265f0df40bb743da62249413d74ac70f} (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
-  $ hg verify -R with-obsolescence -q
-
-  $ hg clone -U --stream --config experimental.evolution=0 http://localhost:$HGPORT with-obsolescence-no-evolution
-  streaming all changes
-  remote: abort: server has obsolescence markers, but client cannot receive them via stream clone
-  abort: pull failed on remote
-  [100]
-
-  $ killdaemons.py
-
-#endif
-#if stream-bundle2-v3
-
-Stream repository with obsolescence
------------------------------------
-
-Clone non-publishing with obsolescence
-
-  $ cat >> $HGRCPATH << EOF
-  > [experimental]
-  > evolution=all
-  > EOF
-
-  $ cd server
-  $ echo foo > foo
-  $ hg -q commit -m 'about to be pruned'
-  $ hg debugobsolete `hg log -r . -T '{node}'` -d '0 0' -u test --record-parents
-  1 new obsolescence markers
-  obsoleted 1 changesets
-  $ hg up null -q
-  $ hg log -T '{rev}: {phase}\n'
-  2: draft
-  1: draft
-  0: draft
-  $ hg serve -p $HGPORT -d --pid-file=hg.pid
-  $ cat hg.pid > $DAEMON_PIDS
-  $ cd ..
-
-  $ hg clone -U --stream http://localhost:$HGPORT with-obsolescence
-  streaming all changes
-  1098 entries to transfer
-  transferred 102 KB in * seconds (* */sec) (glob) (no-zstd !)
-  transferred 99.5 KB in * seconds (* */sec) (glob) (zstd no-rust !)
-  transferred 99.6 KB in * seconds (* */sec) (glob) (zstd rust !)
+  1099 files to transfer, * KB of data (glob) (stream-bundle2-v2 no-rust !)
+  1101 files to transfer, * KB of data (glob) (stream-bundle2-v2 rust !)
+  1098 entries to transfer (no-stream-bundle2-v2 !)
+  transferred * KB in * seconds (* */sec) (glob)
   $ hg -R with-obsolescence log -T '{rev}: {phase}\n'
   2: draft
   1: draft
@@ -1018,19 +564,16 @@
 #endif
 
 Cloning a repo with no requirements doesn't give some obscure error
+-------------------------------------------------------------------
 
   $ mkdir -p empty-repo/.hg
   $ hg clone -q --stream ssh://user@dummy/empty-repo empty-repo2
   $ hg --cwd empty-repo2 verify -q
 
 Cloning a repo with an empty manifestlog doesn't give some weird error
+----------------------------------------------------------------------
 
   $ rm -r empty-repo; hg init empty-repo
   $ (cd empty-repo; touch x; hg commit -Am empty; hg debugstrip -r 0) > /dev/null
   $ hg clone -q --stream ssh://user@dummy/empty-repo empty-repo3
-  $ hg --cwd empty-repo3 verify -q 2>&1 | grep -v warning
-  [1]
-
-The warnings filtered out here are talking about zero-length 'orphan' data files.
-Those are harmless, so that's fine.
-
+  $ hg --cwd empty-repo3 verify -q
--- a/tests/test-clone.t	Thu Jun 13 09:52:39 2024 +0200
+++ b/tests/test-clone.t	Mon Jun 24 12:05:31 2024 +0200
@@ -47,11 +47,7 @@
 
   $ ls .hg/cache
   branch2-base
-  branch2-immutable
   branch2-served
-  branch2-served.hidden
-  branch2-visible
-  branch2-visible-hidden
   rbc-names-v1
   rbc-revs-v1
   tags2
@@ -71,42 +67,34 @@
 
 #if hardlink
   $ hg --debug clone -U . ../c --config progress.debug=true
-  linking: 1/16 files (6.25%) (no-rust !)
-  linking: 2/16 files (12.50%) (no-rust !)
-  linking: 3/16 files (18.75%) (no-rust !)
-  linking: 4/16 files (25.00%) (no-rust !)
-  linking: 5/16 files (31.25%) (no-rust !)
-  linking: 6/16 files (37.50%) (no-rust !)
-  linking: 7/16 files (43.75%) (no-rust !)
-  linking: 8/16 files (50.00%) (no-rust !)
-  linking: 9/16 files (56.25%) (no-rust !)
-  linking: 10/16 files (62.50%) (no-rust !)
-  linking: 11/16 files (68.75%) (no-rust !)
-  linking: 12/16 files (75.00%) (no-rust !)
-  linking: 13/16 files (81.25%) (no-rust !)
-  linking: 14/16 files (87.50%) (no-rust !)
-  linking: 15/16 files (93.75%) (no-rust !)
-  linking: 16/16 files (100.00%) (no-rust !)
-  linked 16 files (no-rust !)
-  linking: 1/18 files (5.56%) (rust !)
-  linking: 2/18 files (11.11%) (rust !)
-  linking: 3/18 files (16.67%) (rust !)
-  linking: 4/18 files (22.22%) (rust !)
-  linking: 5/18 files (27.78%) (rust !)
-  linking: 6/18 files (33.33%) (rust !)
-  linking: 7/18 files (38.89%) (rust !)
-  linking: 8/18 files (44.44%) (rust !)
-  linking: 9/18 files (50.00%) (rust !)
-  linking: 10/18 files (55.56%) (rust !)
-  linking: 11/18 files (61.11%) (rust !)
-  linking: 12/18 files (66.67%) (rust !)
-  linking: 13/18 files (72.22%) (rust !)
-  linking: 14/18 files (77.78%) (rust !)
-  linking: 15/18 files (83.33%) (rust !)
-  linking: 16/18 files (88.89%) (rust !)
-  linking: 17/18 files (94.44%) (rust !)
-  linking: 18/18 files (100.00%) (rust !)
-  linked 18 files (rust !)
+  linking: 1/12 files (8.33%) (no-rust !)
+  linking: 2/12 files (16.67%) (no-rust !)
+  linking: 3/12 files (25.00%) (no-rust !)
+  linking: 4/12 files (33.33%) (no-rust !)
+  linking: 5/12 files (41.67%) (no-rust !)
+  linking: 6/12 files (50.00%) (no-rust !)
+  linking: 7/12 files (58.33%) (no-rust !)
+  linking: 8/12 files (66.67%) (no-rust !)
+  linking: 9/12 files (75.00%) (no-rust !)
+  linking: 10/12 files (83.33%) (no-rust !)
+  linking: 11/12 files (91.67%) (no-rust !)
+  linking: 12/12 files (100.00%) (no-rust !)
+  linked 12 files (no-rust !)
+  linking: 1/14 files (7.14%) (rust !)
+  linking: 2/14 files (14.29%) (rust !)
+  linking: 3/14 files (21.43%) (rust !)
+  linking: 4/14 files (28.57%) (rust !)
+  linking: 5/14 files (35.71%) (rust !)
+  linking: 6/14 files (42.86%) (rust !)
+  linking: 7/14 files (50.00%) (rust !)
+  linking: 8/14 files (57.14%) (rust !)
+  linking: 9/14 files (64.29%) (rust !)
+  linking: 10/14 files (71.43%) (rust !)
+  linking: 11/14 files (78.57%) (rust !)
+  linking: 12/14 files (85.71%) (rust !)
+  linking: 13/14 files (92.86%) (rust !)
+  linking: 14/14 files (100.00%) (rust !)
+  linked 14 files (rust !)
   updating the branch cache
 #else
   $ hg --debug clone -U . ../c --config progress.debug=true
@@ -125,11 +113,7 @@
 
   $ ls .hg/cache
   branch2-base
-  branch2-immutable
   branch2-served
-  branch2-served.hidden
-  branch2-visible
-  branch2-visible-hidden
   rbc-names-v1
   rbc-revs-v1
   tags2
--- a/tests/test-clonebundles.t	Thu Jun 13 09:52:39 2024 +0200
+++ b/tests/test-clonebundles.t	Mon Jun 24 12:05:31 2024 +0200
@@ -394,9 +394,9 @@
   $ hg clone -U http://localhost:$HGPORT stream-clone-no-spec
   applying clone bundle from http://localhost:$HGPORT1/packed.hg
   5 files to transfer, 613 bytes of data (no-rust !)
-  transferred 613 bytes in *.* seconds (*) (glob) (no-rust !)
+  transferred 613 bytes in * seconds (* */sec) (glob) (no-rust !)
   7 files to transfer, 739 bytes of data (rust !)
-  transferred 739 bytes in *.* seconds (*) (glob) (rust !)
+  transferred 739 bytes in * seconds (* */sec) (glob) (rust !)
   finished applying clone bundle
   searching for changes
   no changes found
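
(Harness reminder for the blocks above and below: the "(glob)" annotation
allows "*" wildcards in an expected-output line, which is what lets these
lines match any duration and throughput. For example:

  transferred 613 bytes in * seconds (* */sec) (glob) (no-rust !)

matches whatever timing the run produces, but only on no-rust builds.)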
@@ -409,10 +409,8 @@
 
   $ hg clone -U http://localhost:$HGPORT stream-clone-vanilla-spec
   applying clone bundle from http://localhost:$HGPORT1/packed.hg
-  5 files to transfer, 613 bytes of data (no-rust !)
-  transferred 613 bytes in *.* seconds (*) (glob) (no-rust !)
-  7 files to transfer, 739 bytes of data (rust !)
-  transferred 739 bytes in *.* seconds (*) (glob) (rust !)
+  * files to transfer, * bytes of data (glob)
+  transferred * bytes in * seconds (* */sec) (glob)
   finished applying clone bundle
   searching for changes
   no changes found
@@ -425,10 +423,8 @@
 
   $ hg clone -U http://localhost:$HGPORT stream-clone-supported-requirements
   applying clone bundle from http://localhost:$HGPORT1/packed.hg
-  5 files to transfer, 613 bytes of data (no-rust !)
-  transferred 613 bytes in *.* seconds (*) (glob) (no-rust !)
-  7 files to transfer, 739 bytes of data (rust !)
-  transferred 739 bytes in *.* seconds (*) (glob) (rust !)
+  * files to transfer, * bytes of data (glob)
+  transferred * bytes in * seconds (* */sec) (glob)
   finished applying clone bundle
   searching for changes
   no changes found
@@ -574,10 +570,8 @@
   no compatible clone bundles available on server; falling back to regular clone
   (you may want to report this to the server operator)
   streaming all changes
-  10 files to transfer, 816 bytes of data (no-rust !)
-  transferred 816 bytes in * seconds (*) (glob) (no-rust !)
-  12 files to transfer, 942 bytes of data (rust !)
-  transferred 942 bytes in *.* seconds (*) (glob) (rust !)
+  * files to transfer, * bytes of data (glob)
+  transferred * bytes in * seconds (* */sec) (glob)
 
 A manifest with a stream clone but no BUNDLESPEC
 
@@ -589,10 +583,8 @@
   no compatible clone bundles available on server; falling back to regular clone
   (you may want to report this to the server operator)
   streaming all changes
-  10 files to transfer, 816 bytes of data (no-rust !)
-  transferred 816 bytes in * seconds (*) (glob) (no-rust !)
-  12 files to transfer, 942 bytes of data (rust !)
-  transferred 942 bytes in *.* seconds (*) (glob) (rust !)
+  * files to transfer, * bytes of data (glob)
+  transferred * bytes in * seconds (* */sec) (glob)
 
 A manifest with a gzip bundle and a stream clone
 
@@ -603,10 +595,8 @@
 
   $ hg clone -U --stream http://localhost:$HGPORT uncompressed-gzip-packed
   applying clone bundle from http://localhost:$HGPORT1/packed.hg
-  5 files to transfer, 613 bytes of data (no-rust !)
-  transferred 613 bytes in *.* seconds (*) (glob) (no-rust !)
-  7 files to transfer, 739 bytes of data (rust !)
-  transferred 739 bytes in *.* seconds (*) (glob) (rust !)
+  * files to transfer, * bytes of data (glob)
+  transferred * bytes in * seconds (* */sec) (glob)
   finished applying clone bundle
   searching for changes
   no changes found
@@ -620,10 +610,8 @@
 
   $ hg clone -U --stream http://localhost:$HGPORT uncompressed-gzip-packed-requirements
   applying clone bundle from http://localhost:$HGPORT1/packed.hg
-  5 files to transfer, 613 bytes of data (no-rust !)
-  transferred 613 bytes in *.* seconds (*) (glob) (no-rust !)
-  7 files to transfer, 739 bytes of data (rust !)
-  transferred 739 bytes in *.* seconds (*) (glob) (rust !)
+  * files to transfer, * bytes of data (glob)
+  transferred * bytes in * seconds (* */sec) (glob)
   finished applying clone bundle
   searching for changes
   no changes found
@@ -639,10 +627,8 @@
   no compatible clone bundles available on server; falling back to regular clone
   (you may want to report this to the server operator)
   streaming all changes
-  10 files to transfer, 816 bytes of data (no-rust !)
-  transferred 816 bytes in * seconds (*) (glob) (no-rust !)
-  12 files to transfer, 942 bytes of data (rust !)
-  transferred 942 bytes in *.* seconds (*) (glob) (rust !)
+  * files to transfer, * bytes of data (glob)
+  transferred * bytes in * seconds (* */sec) (glob)
 
 Test clone bundle retrieved through bundle2
 
--- a/tests/test-completion.t	Thu Jun 13 09:52:39 2024 +0200
+++ b/tests/test-completion.t	Mon Jun 24 12:05:31 2024 +0200
@@ -284,7 +284,7 @@
   debug-revlog-stats: changelog, manifest, filelogs, template
   debug::stable-tail-sort: template
   debug::stable-tail-sort-leaps: template, specific
-  debug::unbundle: update
+  debug::unbundle: 
   debugancestor: 
   debugantivirusrunning: 
   debugapplystreamclonebundle: 
--- a/tests/test-contrib-perf.t	Thu Jun 13 09:52:39 2024 +0200
+++ b/tests/test-contrib-perf.t	Mon Jun 24 12:05:31 2024 +0200
@@ -59,8 +59,11 @@
     number of run to perform before starting measurement.
   
   "profile-benchmark"
-    Enable profiling for the benchmarked section. (The first iteration is
-    benchmarked)
+    Enable profiling for the benchmarked section. (by default, the first
+    iteration is benchmarked)
+  
+  "profiled-runs"
+    list of iteration to profile (starting from 0)
   
   "run-limits"
     Control the number of runs each benchmark will perform. The option value
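
(For illustration, a sketch of how the two profiling options above combine; the
config names come from the help text, while the specific perf command and the
run indices are only an example:)

  $ hg perfstatus --config perf.profile-benchmark=yes \
  >               --config perf.profiled-runs=1,2

This would profile the second and third iterations (indices start at 0) instead
of only the first.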
--- a/tests/test-debugcommands.t	Thu Jun 13 09:52:39 2024 +0200
+++ b/tests/test-debugcommands.t	Mon Jun 24 12:05:31 2024 +0200
@@ -652,12 +652,7 @@
   .hg/cache/rbc-revs-v1
   .hg/cache/rbc-names-v1
   .hg/cache/hgtagsfnodes1
-  .hg/cache/branch2-visible-hidden
-  .hg/cache/branch2-visible
-  .hg/cache/branch2-served.hidden
   .hg/cache/branch2-served
-  .hg/cache/branch2-immutable
-  .hg/cache/branch2-base
 
 Test debug::unbundle
 
@@ -668,9 +663,6 @@
   adding manifests
   adding file changes
   added 0 changesets with 0 changes to 1 files (no-pure !)
-  9 local changesets published (no-pure !)
-  3 local changesets published (pure !)
-  (run 'hg update' to get a working copy)
 
 Test debugcolor
 
--- a/tests/test-hardlinks.t	Thu Jun 13 09:52:39 2024 +0200
+++ b/tests/test-hardlinks.t	Mon Jun 24 12:05:31 2024 +0200
@@ -263,11 +263,7 @@
   2 r4/.hg/00changelog.i
   [24] r4/.hg/branch (re)
   2 r4/.hg/cache/branch2-base
-  2 r4/.hg/cache/branch2-immutable
   2 r4/.hg/cache/branch2-served
-  2 r4/.hg/cache/branch2-served.hidden
-  2 r4/.hg/cache/branch2-visible
-  2 r4/.hg/cache/branch2-visible-hidden
   2 r4/.hg/cache/rbc-names-v1
   2 r4/.hg/cache/rbc-revs-v1
   2 r4/.hg/cache/tags2
@@ -320,11 +316,7 @@
   2 r4/.hg/00changelog.i
   1 r4/.hg/branch
   2 r4/.hg/cache/branch2-base
-  2 r4/.hg/cache/branch2-immutable
   2 r4/.hg/cache/branch2-served
-  2 r4/.hg/cache/branch2-served.hidden
-  2 r4/.hg/cache/branch2-visible
-  2 r4/.hg/cache/branch2-visible-hidden
   2 r4/.hg/cache/rbc-names-v1
   2 r4/.hg/cache/rbc-revs-v1
   2 r4/.hg/cache/tags2
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/tests/test-narrow-widen-linkrev-computation.t	Mon Jun 24 12:05:31 2024 +0200
@@ -0,0 +1,627 @@
+============================================================================================
+Test the computation of the linkrevs needed when sending file content after their changesets
+============================================================================================
+
+Setup
+=====
+
+The tree/flat test cases make the hashes unstable and are annoying; reinstate them later.
+.. #testcases tree flat
+  $ . "$TESTDIR/narrow-library.sh"
+
+.. #if tree
+..   $ cat << EOF >> $HGRCPATH
+..   > [experimental]
+..   > treemanifest = 1
+..   > EOF
+.. #endif
+
+  $ hg init server
+  $ cd server
+
+We build a non-linear history with some filenames that exist in parallel.
+
+  $ echo foo > readme.txt
+  $ hg add readme.txt
+  $ hg ci -m 'root'
+  $ mkdir dir_x
+  $ echo foo > dir_x/f1
+  $ echo fo0 > dir_x/f2
+  $ echo f0o > dir_x/f3
+  $ mkdir dir_y
+  $ echo bar > dir_y/f1
+  $ echo 8ar > dir_y/f2
+  $ echo ba9 > dir_y/f3
+  $ hg add dir_x dir_y
+  adding dir_x/f1
+  adding dir_x/f2
+  adding dir_x/f3
+  adding dir_y/f1
+  adding dir_y/f2
+  adding dir_y/f3
+  $ hg ci -m 'rev_a_'
+
+  $ hg update 'desc("rev_a_")'
+  0 files updated, 0 files merged, 0 files removed, 0 files unresolved
+  $ echo foo-01 > dir_x/f1
+  $ hg ci -m 'rev_b_0_'
+
+  $ hg update 'desc("rev_b_0_")'
+  0 files updated, 0 files merged, 0 files removed, 0 files unresolved
+  $ echo foo-02 > dir_x/f1
+  $ hg ci -m 'rev_b_1_'
+
+  $ hg update 'desc("rev_a_")'
+  1 files updated, 0 files merged, 0 files removed, 0 files unresolved
+  $ mkdir dir_z
+  $ echo bar-01 > dir_y/f1
+  $ echo 8ar-01 > dir_y/f2
+  $ echo babar > dir_z/f1
+  $ hg add dir_z
+  adding dir_z/f1
+  $ hg ci -m 'rev_c_0_'
+  created new head
+
+  $ hg update 'desc("rev_c_0_")'
+  0 files updated, 0 files merged, 0 files removed, 0 files unresolved
+  $ echo celeste > dir_z/f2
+  $ echo zephir > dir_z/f1
+  $ hg add dir_z
+  adding dir_z/f2
+  $ hg ci -m 'rev_c_1_'
+
+  $ hg update 'desc("rev_b_1_")'
+  3 files updated, 0 files merged, 2 files removed, 0 files unresolved
+  $ echo fo0-01 > dir_x/f2
+  $ mkdir dir_z
+  $ ls dir_z
+  $ echo babar > dir_z/f1
+  $ echo celeste > dir_z/f2
+  $ echo foo > dir_z/f3
+  $ hg add dir_z
+  adding dir_z/f1
+  adding dir_z/f2
+  adding dir_z/f3
+  $ hg ci -m 'rev_b_2_'
+
+  $ hg update 'desc("rev_b_2_")'
+  0 files updated, 0 files merged, 0 files removed, 0 files unresolved
+  $ echo f0o-01 > dir_x/f3
+  $ echo zephir > dir_z/f1
+  $ echo arthur > dir_z/f2
+  $ hg ci -m 'rev_b_3_'
+
+  $ hg update 'desc("rev_c_1_")'
+  6 files updated, 0 files merged, 1 files removed, 0 files unresolved
+  $ echo bar-02 > dir_y/f1
+  $ echo ba9-01 > dir_y/f3
+  $ echo bar > dir_z/f4
+  $ hg add dir_z/
+  adding dir_z/f4
+  $ echo arthur > dir_z/f2
+  $ hg ci -m 'rev_c_2_'
+
+  $ hg update 'desc("rev_b_3_")'
+  7 files updated, 0 files merged, 1 files removed, 0 files unresolved
+  $ hg merge 'desc("rev_c_2_")'
+  4 files updated, 0 files merged, 0 files removed, 0 files unresolved
+  (branch merge, don't forget to commit)
+  $ echo flore > dir_z/f1
+  $ echo foo-04 > dir_x/f1
+  $ echo foo-01 > dir_z/f3
+  $ hg ci -m 'rev_d_0_'
+  $ echo alexandre > dir_z/f1
+  $ echo bar-01 > dir_z/f4
+  $ echo bar-04 > dir_y/f1
+  $ hg ci -m 'rev_d_1_'
+  $ hg status
+  $ hg status -A
+  C dir_x/f1
+  C dir_x/f2
+  C dir_x/f3
+  C dir_y/f1
+  C dir_y/f2
+  C dir_y/f3
+  C dir_z/f1
+  C dir_z/f2
+  C dir_z/f3
+  C dir_z/f4
+  C readme.txt
+  $ hg up null
+  0 files updated, 0 files merged, 11 files removed, 0 files unresolved
+
+Resulting graph
+
+  $ hg log -GT "{rev}:{node|short}: {desc}\n  {files}\n"
+  o  10:71e6a9c7a6a2: rev_d_1_
+  |    dir_y/f1 dir_z/f1 dir_z/f4
+  o    9:b0a0cbe5ce57: rev_d_0_
+  |\     dir_x/f1 dir_z/f1 dir_z/f3
+  | o  8:d04e01dcc82d: rev_c_2_
+  | |    dir_y/f1 dir_y/f3 dir_z/f2 dir_z/f4
+  o |  7:fc05b303b551: rev_b_3_
+  | |    dir_x/f3 dir_z/f1 dir_z/f2
+  o |  6:17fd34adb43b: rev_b_2_
+  | |    dir_x/f2 dir_z/f1 dir_z/f2 dir_z/f3
+  | o  5:fa05dbe8eed1: rev_c_1_
+  | |    dir_z/f1 dir_z/f2
+  | o  4:59b4258b00dc: rev_c_0_
+  | |    dir_y/f1 dir_y/f2 dir_z/f1
+  o |  3:328f8ced5276: rev_b_1_
+  | |    dir_x/f1
+  o |  2:0ccce83dd29b: rev_b_0_
+  |/     dir_x/f1
+  o  1:63f468a0fdac: rev_a_
+  |    dir_x/f1 dir_x/f2 dir_x/f3 dir_y/f1 dir_y/f2 dir_y/f3
+  o  0:4978c5c7386b: root
+       readme.txt
+
+Save some useful nodes:
+
+  $ hg log -T '{node}' > ../rev_c_2_ --rev 'desc("rev_c_2_")'
+  $ hg log -T '{node}' > ../rev_b_3_ --rev 'desc("rev_b_3_")'
+
+Reference output
+
+Since we have the same file content on each side, we should get a limited
+number of file revisions (and the associated linkrevs). The linkrev of a
+filelog revision is the changelog revision that introduced it, so a file
+revision shared by two branches should point at the earlier of the two
+changesets.
+
+These shared file revisions, and the associated linkrev computation, are what
+fuel the complexity tests in this file.
+
+  $ cat > ../linkrev-check.sh << EOF
+  > echo '# expected linkrev for dir_z/f1'
+  > hg log -T '0 {rev}\n' --rev 'min(desc(rev_b_2_) or desc(rev_c_0_))'
+  > hg log -T '1 {rev}\n' --rev 'min(desc(rev_b_3_) or desc(rev_c_1_))'
+  > hg log -T '2 {rev}\n' --rev 'min(desc(rev_d_0_))'
+  > hg log -T '3 {rev}\n' --rev 'min(desc(rev_d_1_))'
+  > hg debugindex dir_z/f1
+  > #   rev linkrev       nodeid    p1-nodeid    p2-nodeid
+  > #     0       4 360afd990eef 000000000000 000000000000
+  > #     1       5 7054ee088631 360afd990eef 000000000000
+  > #     2       9 6bb290463f21 7054ee088631 000000000000
+  > #     3      10 91fec784ff86 6bb290463f21 000000000000
+  > echo '# expected linkrev for dir_z/f2'
+  > hg log -T '0 {rev}\n' --rev 'min(desc(rev_c_1_) or desc(rev_b_2_))'
+  > hg log -T '1 {rev}\n' --rev 'min(desc(rev_c_2_) or desc(rev_b_3_))'
+  > hg debugindex dir_z/f2
+  > #    rev linkrev       nodeid    p1-nodeid    p2-nodeid
+  > #      0       5 093bb0f8a0fb 000000000000 000000000000
+  > #      1       7 0f47e254cb19 093bb0f8a0fb 000000000000
+  > if hg files --rev tip | grep dir_z/f3 > /dev/null; then
+  >     echo '# expected linkrev for dir_z/f3'
+  >     hg log -T '0 {rev}\n' --rev 'desc(rev_b_2_)'
+  >     hg log -T '1 {rev}\n' --rev 'desc(rev_d_0_)'
+  >     hg debugindex dir_z/f3
+  >     #    rev linkrev       nodeid    p1-nodeid    p2-nodeid
+  >     #      0       6 2ed2a3912a0b 000000000000 000000000000
+  >     #      1       9 7c6d649320ae 2ed2a3912a0b 000000000000
+  > fi
+  > if hg files --rev tip | grep dir_z/f4 > /dev/null; then
+  >     echo '# expected linkrev for dir_z/f4'
+  >     hg log -T '0 {rev}\n' --rev 'desc(rev_c_2_)'
+  >     hg log -T '1 {rev}\n' --rev 'desc(rev_d_1_)'
+  >     hg debugindex dir_z/f4
+  >     #   rev linkrev       nodeid    p1-nodeid    p2-nodeid
+  >     #     0       8 b004912a8510 000000000000 000000000000
+  >     #     1      10 9f85b3b95e70 b004912a8510 000000000000
+  > fi
+  > echo '# verify the repository'
+  > hg verify
+  > EOF
+  $ sh ../linkrev-check.sh
+  # expected linkrev for dir_z/f1
+  0 4
+  1 5
+  2 9
+  3 10
+     rev linkrev       nodeid    p1-nodeid    p2-nodeid
+       0       4 360afd990eef 000000000000 000000000000
+       1       5 7054ee088631 360afd990eef 000000000000
+       2       9 6bb290463f21 7054ee088631 000000000000
+       3      10 91fec784ff86 6bb290463f21 000000000000
+  # expected linkrev for dir_z/f2
+  0 5
+  1 7
+     rev linkrev       nodeid    p1-nodeid    p2-nodeid
+       0       5 093bb0f8a0fb 000000000000 000000000000
+       1       7 0f47e254cb19 093bb0f8a0fb 000000000000
+  # expected linkrev for dir_z/f3
+  0 6
+  1 9
+     rev linkrev       nodeid    p1-nodeid    p2-nodeid
+       0       6 2ed2a3912a0b 000000000000 000000000000
+       1       9 7c6d649320ae 2ed2a3912a0b 000000000000
+  # expected linkrev for dir_z/f4
+  0 8
+  1 10
+     rev linkrev       nodeid    p1-nodeid    p2-nodeid
+       0       8 b004912a8510 000000000000 000000000000
+       1      10 9f85b3b95e70 b004912a8510 000000000000
+  # verify the repository
+  checking changesets
+  checking manifests
+  crosschecking files in changesets and manifests
+  checking files
+  checking dirstate
+  checked 11 changesets with 27 changes to 11 files
+
+  $ cd ..
+
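+As a quick sanity check of the reference output above (a sketch reusing the
+commands already introduced; the expected numbers are taken from the reference
+output):
+
+  $ hg -R server log -r 'min(desc(rev_b_2_) or desc(rev_c_0_))' -T '{rev}\n'
+  4
+  $ hg -R server debugindex dir_z/f1 | head -n 2
+     rev linkrev       nodeid    p1-nodeid    p2-nodeid
+       0       4 360afd990eef 000000000000 000000000000
+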
+Test linkrev computation for various widening scenarios
+=======================================================
+
+Cloning all revisions initially
+-------------------------------
+
+  $ hg clone --narrow ssh://user@dummy/server --include dir_x --include dir_y client_xy_rev_all  --noupdate
+  requesting all changes
+  adding changesets
+  adding manifests
+  adding file changes
+  added 11 changesets with 16 changes to 6 files
+  new changesets 4978c5c7386b:71e6a9c7a6a2
+  $ cd client_xy_rev_all
+  $ hg log -GT "{rev}:{node|short}: {desc}\n  {files}\n"
+  o  10:71e6a9c7a6a2: rev_d_1_
+  |    dir_y/f1 dir_z/f1 dir_z/f4
+  o    9:b0a0cbe5ce57: rev_d_0_
+  |\     dir_x/f1 dir_z/f1 dir_z/f3
+  | o  8:d04e01dcc82d: rev_c_2_
+  | |    dir_y/f1 dir_y/f3 dir_z/f2 dir_z/f4
+  o |  7:fc05b303b551: rev_b_3_
+  | |    dir_x/f3 dir_z/f1 dir_z/f2
+  o |  6:17fd34adb43b: rev_b_2_
+  | |    dir_x/f2 dir_z/f1 dir_z/f2 dir_z/f3
+  | o  5:fa05dbe8eed1: rev_c_1_
+  | |    dir_z/f1 dir_z/f2
+  | o  4:59b4258b00dc: rev_c_0_
+  | |    dir_y/f1 dir_y/f2 dir_z/f1
+  o |  3:328f8ced5276: rev_b_1_
+  | |    dir_x/f1
+  o |  2:0ccce83dd29b: rev_b_0_
+  |/     dir_x/f1
+  o  1:63f468a0fdac: rev_a_
+  |    dir_x/f1 dir_x/f2 dir_x/f3 dir_y/f1 dir_y/f2 dir_y/f3
+  o  0:4978c5c7386b: root
+       readme.txt
+
+  $ hg tracked --addinclude dir_z
+  comparing with ssh://user@dummy/server
+  searching for changes
+  adding changesets
+  adding manifests
+  adding file changes
+  added 0 changesets with 10 changes to 4 files
+  $ sh ../linkrev-check.sh
+  # expected linkrev for dir_z/f1
+  0 4
+  1 5
+  2 9
+  3 10
+     rev linkrev       nodeid    p1-nodeid    p2-nodeid
+       0       4 360afd990eef 000000000000 000000000000
+       1       5 7054ee088631 360afd990eef 000000000000
+       2       9 6bb290463f21 7054ee088631 000000000000
+       3      10 91fec784ff86 6bb290463f21 000000000000
+  # expected linkrev for dir_z/f2
+  0 5
+  1 7
+     rev linkrev       nodeid    p1-nodeid    p2-nodeid
+       0       5 093bb0f8a0fb 000000000000 000000000000
+       1       7 0f47e254cb19 093bb0f8a0fb 000000000000
+  # expected linkrev for dir_z/f3
+  0 6
+  1 9
+     rev linkrev       nodeid    p1-nodeid    p2-nodeid
+       0       6 2ed2a3912a0b 000000000000 000000000000
+       1       9 7c6d649320ae 2ed2a3912a0b 000000000000
+  # expected linkrev for dir_z/f4
+  0 8
+  1 10
+     rev linkrev       nodeid    p1-nodeid    p2-nodeid
+       0       8 b004912a8510 000000000000 000000000000
+       1      10 9f85b3b95e70 b004912a8510 000000000000
+  # verify the repository
+  checking changesets
+  checking manifests
+  crosschecking files in changesets and manifests
+  checking files
+  checking dirstate
+  checked 11 changesets with 26 changes to 10 files
+  $ cd ..
+
+
+Cloning only branch b
+---------------------
+
+  $ hg clone --narrow ssh://user@dummy/server --rev `cat ./rev_b_3_` --include dir_x --include dir_y client_xy_rev_from_b_only  --noupdate
+  adding changesets
+  adding manifests
+  adding file changes
+  added 6 changesets with 10 changes to 6 files
+  new changesets 4978c5c7386b:fc05b303b551
+  $ cd client_xy_rev_from_b_only
+  $ hg log -GT "{rev}:{node|short}: {desc}\n  {files}\n"
+  o  5:fc05b303b551: rev_b_3_
+  |    dir_x/f3 dir_z/f1 dir_z/f2
+  o  4:17fd34adb43b: rev_b_2_
+  |    dir_x/f2 dir_z/f1 dir_z/f2 dir_z/f3
+  o  3:328f8ced5276: rev_b_1_
+  |    dir_x/f1
+  o  2:0ccce83dd29b: rev_b_0_
+  |    dir_x/f1
+  o  1:63f468a0fdac: rev_a_
+  |    dir_x/f1 dir_x/f2 dir_x/f3 dir_y/f1 dir_y/f2 dir_y/f3
+  o  0:4978c5c7386b: root
+       readme.txt
+
+  $ hg tracked --addinclude dir_z
+  comparing with ssh://user@dummy/server
+  searching for changes
+  adding changesets
+  adding manifests
+  adding file changes
+  added 0 changesets with 5 changes to 3 files
+  $ sh ../linkrev-check.sh
+  # expected linkrev for dir_z/f1
+  0 4
+  1 5
+     rev linkrev       nodeid    p1-nodeid    p2-nodeid
+       0       4 360afd990eef 000000000000 000000000000
+       1       5 7054ee088631 360afd990eef 000000000000
+  # expected linkrev for dir_z/f2
+  0 4
+  1 5
+     rev linkrev       nodeid    p1-nodeid    p2-nodeid
+       0       4 093bb0f8a0fb 000000000000 000000000000
+       1       5 0f47e254cb19 093bb0f8a0fb 000000000000
+  # expected linkrev for dir_z/f3
+  0 4
+     rev linkrev       nodeid    p1-nodeid    p2-nodeid
+       0       4 2ed2a3912a0b 000000000000 000000000000
+  # verify the repository
+  checking changesets
+  checking manifests
+  crosschecking files in changesets and manifests
+  checking files
+  checking dirstate
+  checked 6 changesets with 15 changes to 9 files
+  $ cd ..
+
+
+Cloning only branch c
+---------------------
+
+  $ hg clone --narrow ssh://user@dummy/server --rev `cat ./rev_c_2_` --include dir_x --include dir_y client_xy_rev_from_c_only --noupdate
+  adding changesets
+  adding manifests
+  adding file changes
+  added 5 changesets with 10 changes to 6 files
+  new changesets 4978c5c7386b:d04e01dcc82d
+  $ cd client_xy_rev_from_c_only
+  $ hg log -GT "{rev}:{node|short}: {desc}\n  {files}\n"
+  o  4:d04e01dcc82d: rev_c_2_
+  |    dir_y/f1 dir_y/f3 dir_z/f2 dir_z/f4
+  o  3:fa05dbe8eed1: rev_c_1_
+  |    dir_z/f1 dir_z/f2
+  o  2:59b4258b00dc: rev_c_0_
+  |    dir_y/f1 dir_y/f2 dir_z/f1
+  o  1:63f468a0fdac: rev_a_
+  |    dir_x/f1 dir_x/f2 dir_x/f3 dir_y/f1 dir_y/f2 dir_y/f3
+  o  0:4978c5c7386b: root
+       readme.txt
+
+  $ hg tracked --addinclude dir_z
+  comparing with ssh://user@dummy/server
+  searching for changes
+  adding changesets
+  adding manifests
+  adding file changes
+  added 0 changesets with 5 changes to 3 files
+  $ sh ../linkrev-check.sh
+  # expected linkrev for dir_z/f1
+  0 2
+  1 3
+     rev linkrev       nodeid    p1-nodeid    p2-nodeid
+       0       2 360afd990eef 000000000000 000000000000
+       1       3 7054ee088631 360afd990eef 000000000000
+  # expected linkrev for dir_z/f2
+  0 3
+  1 4
+     rev linkrev       nodeid    p1-nodeid    p2-nodeid
+       0       3 093bb0f8a0fb 000000000000 000000000000
+       1       4 0f47e254cb19 093bb0f8a0fb 000000000000
+  # expected linkrev for dir_z/f4
+  0 4
+     rev linkrev       nodeid    p1-nodeid    p2-nodeid
+       0       4 b004912a8510 000000000000 000000000000
+  # verify the repository
+  checking changesets
+  checking manifests
+  crosschecking files in changesets and manifests
+  checking files
+  checking dirstate
+  checked 5 changesets with 15 changes to 9 files
+  $ cd ..
+
+Cloning branch b first
+----------------------
+
+  $ hg clone --narrow ssh://user@dummy/server --rev `cat ./rev_b_3_` --include dir_x --include dir_y client_xy_rev_from_b_first  --noupdate
+  adding changesets
+  adding manifests
+  adding file changes
+  added 6 changesets with 10 changes to 6 files
+  new changesets 4978c5c7386b:fc05b303b551
+  $ cd client_xy_rev_from_b_first
+  $ hg pull
+  pulling from ssh://user@dummy/server
+  searching for changes
+  adding changesets
+  adding manifests
+  adding file changes
+  added 5 changesets with 6 changes to 4 files
+  new changesets 59b4258b00dc:71e6a9c7a6a2
+  (run 'hg update' to get a working copy)
+  $ hg log -GT "{rev}:{node|short}: {desc}\n  {files}\n"
+  o  10:71e6a9c7a6a2: rev_d_1_
+  |    dir_y/f1 dir_z/f1 dir_z/f4
+  o    9:b0a0cbe5ce57: rev_d_0_
+  |\     dir_x/f1 dir_z/f1 dir_z/f3
+  | o  8:d04e01dcc82d: rev_c_2_
+  | |    dir_y/f1 dir_y/f3 dir_z/f2 dir_z/f4
+  | o  7:fa05dbe8eed1: rev_c_1_
+  | |    dir_z/f1 dir_z/f2
+  | o  6:59b4258b00dc: rev_c_0_
+  | |    dir_y/f1 dir_y/f2 dir_z/f1
+  o |  5:fc05b303b551: rev_b_3_
+  | |    dir_x/f3 dir_z/f1 dir_z/f2
+  o |  4:17fd34adb43b: rev_b_2_
+  | |    dir_x/f2 dir_z/f1 dir_z/f2 dir_z/f3
+  o |  3:328f8ced5276: rev_b_1_
+  | |    dir_x/f1
+  o |  2:0ccce83dd29b: rev_b_0_
+  |/     dir_x/f1
+  o  1:63f468a0fdac: rev_a_
+  |    dir_x/f1 dir_x/f2 dir_x/f3 dir_y/f1 dir_y/f2 dir_y/f3
+  o  0:4978c5c7386b: root
+       readme.txt
+
+  $ hg tracked --addinclude dir_z
+  comparing with ssh://user@dummy/server
+  searching for changes
+  adding changesets
+  adding manifests
+  adding file changes
+  added 0 changesets with 10 changes to 4 files
+  $ sh ../linkrev-check.sh
+  # expected linkrev for dir_z/f1
+  0 4
+  1 5
+  2 9
+  3 10
+     rev linkrev       nodeid    p1-nodeid    p2-nodeid
+       0       6 360afd990eef 000000000000 000000000000 (known-bad-output !)
+       0       4 360afd990eef 000000000000 000000000000 (missing-correct-output !)
+       1       7 7054ee088631 360afd990eef 000000000000 (known-bad-output !)
+       1       5 7054ee088631 360afd990eef 000000000000 (missing-correct-output !)
+       2       9 6bb290463f21 7054ee088631 000000000000
+       3      10 91fec784ff86 6bb290463f21 000000000000
+  # expected linkrev for dir_z/f2
+  0 4
+  1 5
+     rev linkrev       nodeid    p1-nodeid    p2-nodeid
+       0       7 093bb0f8a0fb 000000000000 000000000000 (known-bad-output !)
+       0       4 093bb0f8a0fb 000000000000 000000000000 (missing-correct-output !)
+       1       5 0f47e254cb19 093bb0f8a0fb 000000000000
+  # expected linkrev for dir_z/f3
+  0 4
+  1 9
+     rev linkrev       nodeid    p1-nodeid    p2-nodeid
+       0       4 2ed2a3912a0b 000000000000 000000000000
+       1       9 7c6d649320ae 2ed2a3912a0b 000000000000
+  # expected linkrev for dir_z/f4
+  0 8
+  1 10
+     rev linkrev       nodeid    p1-nodeid    p2-nodeid
+       0       8 b004912a8510 000000000000 000000000000
+       1      10 9f85b3b95e70 b004912a8510 000000000000
+  # verify the repository
+  checking changesets
+  checking manifests
+  crosschecking files in changesets and manifests
+  checking files
+  checking dirstate
+  checked 11 changesets with 26 changes to 10 files
+  $ cd ..
+
+
+Cloning branch c first
+----------------------
+
+  $ hg clone --narrow ssh://user@dummy/server --rev `cat ./rev_c_2_` --include dir_x --include dir_y client_xy_rev_from_c_first --noupdate
+  adding changesets
+  adding manifests
+  adding file changes
+  added 5 changesets with 10 changes to 6 files
+  new changesets 4978c5c7386b:d04e01dcc82d
+  $ cd client_xy_rev_from_c_first
+  $ hg pull
+  pulling from ssh://user@dummy/server
+  searching for changes
+  adding changesets
+  adding manifests
+  adding file changes
+  added 6 changesets with 6 changes to 4 files
+  new changesets 0ccce83dd29b:71e6a9c7a6a2
+  (run 'hg update' to get a working copy)
+  $ hg log -GT "{rev}:{node|short}: {desc}\n  {files}\n"
+  o  10:71e6a9c7a6a2: rev_d_1_
+  |    dir_y/f1 dir_z/f1 dir_z/f4
+  o    9:b0a0cbe5ce57: rev_d_0_
+  |\     dir_x/f1 dir_z/f1 dir_z/f3
+  | o  8:fc05b303b551: rev_b_3_
+  | |    dir_x/f3 dir_z/f1 dir_z/f2
+  | o  7:17fd34adb43b: rev_b_2_
+  | |    dir_x/f2 dir_z/f1 dir_z/f2 dir_z/f3
+  | o  6:328f8ced5276: rev_b_1_
+  | |    dir_x/f1
+  | o  5:0ccce83dd29b: rev_b_0_
+  | |    dir_x/f1
+  o |  4:d04e01dcc82d: rev_c_2_
+  | |    dir_y/f1 dir_y/f3 dir_z/f2 dir_z/f4
+  o |  3:fa05dbe8eed1: rev_c_1_
+  | |    dir_z/f1 dir_z/f2
+  o |  2:59b4258b00dc: rev_c_0_
+  |/     dir_y/f1 dir_y/f2 dir_z/f1
+  o  1:63f468a0fdac: rev_a_
+  |    dir_x/f1 dir_x/f2 dir_x/f3 dir_y/f1 dir_y/f2 dir_y/f3
+  o  0:4978c5c7386b: root
+       readme.txt
+
+  $ hg tracked --addinclude dir_z
+  comparing with ssh://user@dummy/server
+  searching for changes
+  adding changesets
+  adding manifests
+  adding file changes
+  added 0 changesets with 10 changes to 4 files
+  $ sh ../linkrev-check.sh
+  # expected linkrev for dir_z/f1
+  0 2
+  1 3
+  2 9
+  3 10
+     rev linkrev       nodeid    p1-nodeid    p2-nodeid
+       0       2 360afd990eef 000000000000 000000000000
+       1       3 7054ee088631 360afd990eef 000000000000
+       2       9 6bb290463f21 7054ee088631 000000000000
+       3      10 91fec784ff86 6bb290463f21 000000000000
+  # expected linkrev for dir_z/f2
+  0 3
+  1 4
+     rev linkrev       nodeid    p1-nodeid    p2-nodeid
+       0       3 093bb0f8a0fb 000000000000 000000000000
+       1       8 0f47e254cb19 093bb0f8a0fb 000000000000 (known-bad-output !)
+       1       4 0f47e254cb19 093bb0f8a0fb 000000000000 (missing-correct-output !)
+  # expected linkrev for dir_z/f3
+  0 7
+  1 9
+     rev linkrev       nodeid    p1-nodeid    p2-nodeid
+       0       7 2ed2a3912a0b 000000000000 000000000000
+       1       9 7c6d649320ae 2ed2a3912a0b 000000000000
+  # expected linkrev for dir_z/f4
+  0 4
+  1 10
+     rev linkrev       nodeid    p1-nodeid    p2-nodeid
+       0       4 b004912a8510 000000000000 000000000000
+       1      10 9f85b3b95e70 b004912a8510 000000000000
+  # verify the repository
+  checking changesets
+  checking manifests
+  crosschecking files in changesets and manifests
+  checking files
+  checking dirstate
+  checked 11 changesets with 26 changes to 10 files
+  $ cd ..
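+
+(A note on the "(known-bad-output !)" / "(missing-correct-output !)" pairs
+above: this is the harness idiom for documenting a known bug, matching what
+the code currently prints while also recording what it should print. For
+example, the pair
+
+       0       6 360afd990eef 000000000000 000000000000 (known-bad-output !)
+       0       4 360afd990eef 000000000000 000000000000 (missing-correct-output !)
+
+records that this filelog revision currently gets linkrev 6 where 4 would be
+correct.)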
--- a/tests/test-phabricator.t	Thu Jun 13 09:52:39 2024 +0200
+++ b/tests/test-phabricator.t	Mon Jun 24 12:05:31 2024 +0200
@@ -730,7 +730,7 @@
   $ hg amend --config experimental.evolution=all --config extensions.amend=
   1 new orphan changesets
   $ hg up 3
-  obsolete feature not enabled but 1 markers found!
+  "obsolete" feature not enabled but 1 markers found!
   1 files updated, 0 files merged, 1 files removed, 0 files unresolved
   $ hg rebase --config experimental.evolution=all --config extensions.rebase=
   note: not rebasing 2:832553266fe8 "two: second commit to review", already in destination as 4:0124e5474c88 tip "two: second commit to review"
@@ -741,7 +741,7 @@
 
   $ echo y | hg phabsend --fold --confirm -r 1:: \
   >          --test-vcr "$VCR/phabsend-fold-updated.json"
-  obsolete feature not enabled but 2 markers found!
+  "obsolete" feature not enabled but 2 markers found!
   602c4e738243 mapped to old nodes ['602c4e738243']
   0124e5474c88 mapped to old nodes ['832553266fe8']
   e4edb1fe3565 mapped to old nodes ['921f8265efbd']
@@ -752,11 +752,11 @@
   D8387 - updated - 1:602c4e738243 "one: first commit to review"
   D8387 - updated - 4:0124e5474c88 "two: second commit to review"
   D8387 - updated - 5:e4edb1fe3565 tip "3: a commit with no detailed message"
-  obsolete feature not enabled but 2 markers found! (?)
+  "obsolete" feature not enabled but 2 markers found! (?)
   updating local commit list for D8387
   new commits: ['602c4e738243', '0124e5474c88', 'e4edb1fe3565']
   $ hg log -Tcompact
-  obsolete feature not enabled but 2 markers found!
+  "obsolete" feature not enabled but 2 markers found!
   5[tip]   e4edb1fe3565   1970-01-01 00:00 +0000   test
     3: a commit with no detailed message
   
@@ -773,17 +773,17 @@
 updated, and nothing is changed locally afterward.
 
   $ hg phabsend --fold -r 1:: --test-vcr "$VCR/phabsend-fold-no-changes.json"
-  obsolete feature not enabled but 2 markers found!
+  "obsolete" feature not enabled but 2 markers found!
   602c4e738243 mapped to old nodes ['602c4e738243']
   0124e5474c88 mapped to old nodes ['0124e5474c88']
   e4edb1fe3565 mapped to old nodes ['e4edb1fe3565']
   D8387 - updated - 1:602c4e738243 "one: first commit to review"
   D8387 - updated - 4:0124e5474c88 "two: second commit to review"
   D8387 - updated - 5:e4edb1fe3565 tip "3: a commit with no detailed message"
-  obsolete feature not enabled but 2 markers found! (?)
+  "obsolete" feature not enabled but 2 markers found! (?)
   local commit list for D8387 is already up-to-date
   $ hg log -Tcompact
-  obsolete feature not enabled but 2 markers found!
+  "obsolete" feature not enabled but 2 markers found!
   5[tip]   e4edb1fe3565   1970-01-01 00:00 +0000   test
     3: a commit with no detailed message
   
@@ -800,7 +800,7 @@
 
   $ echo 'another mod' > file2.txt
   $ hg ci -m 'four: extend the fold range'
-  obsolete feature not enabled but 2 markers found!
+  "obsolete" feature not enabled but 2 markers found!
   $ hg phabsend --fold -r 1:: --test-vcr "$VCR/phabsend-fold-extend-end.json" \
   >             --config experimental.evolution=all
   602c4e738243 mapped to old nodes ['602c4e738243']
@@ -817,7 +817,7 @@
   
   Differential Revision: https://phab.mercurial-scm.org/D8387
   $ hg log -T'{rev} {if(phabreview, "{phabreview.url} {phabreview.id}")}\n' -r 1::
-  obsolete feature not enabled but 3 markers found!
+  "obsolete" feature not enabled but 3 markers found!
   1 https://phab.mercurial-scm.org/D8387 D8387
   4 https://phab.mercurial-scm.org/D8387 D8387
   5 https://phab.mercurial-scm.org/D8387 D8387
@@ -846,7 +846,7 @@
   new commits: ['15e9b14b4b4c', '6320b7d714cf', '3ee132d41dbc', '30682b960804', 'ac7db67f0991']
 
   $ hg log -T '{rev}:{node|short}\n{indent(desc, "  ")}\n'
-  obsolete feature not enabled but 8 markers found!
+  "obsolete" feature not enabled but 8 markers found!
   12:ac7db67f0991
     four: extend the fold range
   
@@ -962,7 +962,7 @@
   new commits: ['15e9b14b4b4c', '6320b7d714cf', '3ee132d41dbc', '30682b960804', 'e919cdf3d4fe']
 
   $ hg log -r tip -v
-  obsolete feature not enabled but 12 markers found!
+  "obsolete" feature not enabled but 12 markers found!
   changeset:   16:e919cdf3d4fe
   tag:         tip
   parent:      11:30682b960804
--- a/tests/test-server-view.t	Thu Jun 13 09:52:39 2024 +0200
+++ b/tests/test-server-view.t	Mon Jun 24 12:05:31 2024 +0200
@@ -36,12 +36,7 @@
   $ hg -R test --config experimental.extra-filter-revs='not public()' debugupdatecache
   $ ls -1 test/.hg/cache/
   branch2-base%89c45d2fa07e
-  branch2-immutable%89c45d2fa07e
   branch2-served
-  branch2-served%89c45d2fa07e
-  branch2-served.hidden%89c45d2fa07e
-  branch2-visible%89c45d2fa07e
-  branch2-visible-hidden%89c45d2fa07e
   hgtagsfnodes1
   rbc-names-v1
   rbc-revs-v1
--- a/tests/test-share.t	Thu Jun 13 09:52:39 2024 +0200
+++ b/tests/test-share.t	Mon Jun 24 12:05:31 2024 +0200
@@ -63,11 +63,7 @@
   1 files updated, 0 files merged, 0 files removed, 0 files unresolved
   $ ls -1 ../repo2-clone/.hg/cache
   branch2-base
-  branch2-immutable
   branch2-served
-  branch2-served.hidden
-  branch2-visible
-  branch2-visible-hidden
   rbc-names-v1
   rbc-revs-v1
   tags2
--- a/tests/test-ssh.t	Thu Jun 13 09:52:39 2024 +0200
+++ b/tests/test-ssh.t	Mon Jun 24 12:05:31 2024 +0200
@@ -72,8 +72,8 @@
   $ hg -R local-stream book mybook
   $ hg clone --stream ssh://user@dummy/local-stream stream2
   streaming all changes
-  16 files to transfer, * of data (glob) (no-rust !)
-  18 files to transfer, * of data (glob) (rust !)
+  12 files to transfer, * of data (glob) (no-rust !)
+  14 files to transfer, * of data (glob) (rust !)
   transferred * in * seconds (*) (glob)
   updating to branch default
   2 files updated, 0 files merged, 0 files removed, 0 files unresolved
--- a/tests/test-stream-bundle-v2.t	Thu Jun 13 09:52:39 2024 +0200
+++ b/tests/test-stream-bundle-v2.t	Mon Jun 24 12:05:31 2024 +0200
@@ -74,6 +74,23 @@
   none-v2;stream=v3-exp;requirements%3Dgeneraldelta%2Crevlog-compression-zstd%2Crevlogv1%2Csparserevlog (stream-v3 zstd no-rust !)
   none-v2;stream=v3-exp;requirements%3Dgeneraldelta%2Crevlog-compression-zstd%2Crevlogv1%2Csparserevlog (stream-v3 rust !)
 
+  $ hg bundle -a --type="none-$bundle_format" bundle.hg
+  $ hg debugbundle bundle.hg
+  Stream params: {}
+  stream2 -- {bytecount: 1693, filecount: 12, requirements: generaldelta%2Crevlogv1%2Csparserevlog} (mandatory: True) (stream-v2 no-zstd !)
+  stream2 -- {bytecount: 1693, filecount: 12, requirements: generaldelta%2Crevlog-compression-zstd%2Crevlogv1%2Csparserevlog} (mandatory: True) (stream-v2 zstd no-rust !)
+  stream2 -- {bytecount: 1819, filecount: 14, requirements: generaldelta%2Crevlog-compression-zstd%2Crevlogv1%2Csparserevlog} (mandatory: True) (stream-v2 rust !)
+  stream3-exp -- {requirements: generaldelta%2Crevlogv1%2Csparserevlog} (mandatory: True) (stream-v3 no-zstd !)
+  stream3-exp -- {requirements: generaldelta%2Crevlog-compression-zstd%2Crevlogv1%2Csparserevlog} (mandatory: True) (stream-v3 zstd no-rust !)
+  stream3-exp -- {requirements: generaldelta%2Crevlog-compression-zstd%2Crevlogv1%2Csparserevlog} (mandatory: True) (stream-v3 rust !)
+  $ hg debugbundle --spec bundle.hg
+  none-v2;stream=v2;requirements%3Dgeneraldelta%2Crevlogv1%2Csparserevlog (stream-v2 no-zstd !)
+  none-v2;stream=v2;requirements%3Dgeneraldelta%2Crevlog-compression-zstd%2Crevlogv1%2Csparserevlog (stream-v2 zstd no-rust !)
+  none-v2;stream=v2;requirements%3Dgeneraldelta%2Crevlog-compression-zstd%2Crevlogv1%2Csparserevlog (stream-v2 rust !)
+  none-v2;stream=v3-exp;requirements%3Dgeneraldelta%2Crevlogv1%2Csparserevlog (stream-v3 no-zstd !)
+  none-v2;stream=v3-exp;requirements%3Dgeneraldelta%2Crevlog-compression-zstd%2Crevlogv1%2Csparserevlog (stream-v3 zstd no-rust !)
+  none-v2;stream=v3-exp;requirements%3Dgeneraldelta%2Crevlog-compression-zstd%2Crevlogv1%2Csparserevlog (stream-v3 rust !)
+
 Test that we can apply the bundle as a stream clone bundle
 
   $ cat > .hg/clonebundles.manifest << EOF
--- a/tests/test-strip-branch-cache.t	Thu Jun 13 09:52:39 2024 +0200
+++ b/tests/test-strip-branch-cache.t	Mon Jun 24 12:05:31 2024 +0200
@@ -1,3 +1,5 @@
+This test covers a bug that no longer exists.
+
 Define helpers.
 
   $ hg_log () { hg log -G -T "{rev}:{node|short}"; }
@@ -18,7 +20,10 @@
 
   $ hg pull -q ../repo
 
-  $ cat .hg/cache/branch2-visible
+  $ ls -1 .hg/cache/branch?*
+  .hg/cache/branch2-base
+  .hg/cache/branch2-served
+  $ cat .hg/cache/branch?-served
   222ae9789a75703f9836e44de7db179cbfd420ee 2
   a3498d6e39376d2456425dd8c692367bdbf00fa2 o default
   222ae9789a75703f9836e44de7db179cbfd420ee o default
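
(An annotated reading of the cache content above, for readers unfamiliar with
the branch2 format; the annotations are ours:)

  222ae9789a75703f9836e44de7db179cbfd420ee 2          <- tip node and rev cached against
  a3498d6e39376d2456425dd8c692367bdbf00fa2 o default  <- open head on "default"
  222ae9789a75703f9836e44de7db179cbfd420ee o default  <- open head on "default"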
@@ -33,24 +38,36 @@
 
   $ strip '1:'
 
-The branchmap cache is not adjusted on strip.
-Now mentions a changelog entry that has been stripped.
+After the strip, the "served" cache would be identical to the "base" one, so
+the now-redundant file has been actively deleted.
 
-  $ cat .hg/cache/branch2-visible
-  222ae9789a75703f9836e44de7db179cbfd420ee 2
-  a3498d6e39376d2456425dd8c692367bdbf00fa2 o default
-  222ae9789a75703f9836e44de7db179cbfd420ee o default
+  $ ls -1 .hg/cache/branch?*
+  .hg/cache/branch2-base
+  $ cat .hg/cache/branch?-base
+  7ab0a3bd758a58b9f79557ce708533e627776cce 0
+  7ab0a3bd758a58b9f79557ce708533e627776cce o default
+
+We make a new commit and get a new valid branchmap for the served version.
 
   $ commit c
-
-Not adjusted on commit, either.
+  $ ls -1 .hg/cache/branch?*
+  .hg/cache/branch2-base
+  .hg/cache/branch2-served
+  $ cat .hg/cache/branch?-served
+  a1602b357cfca067600406eb19060c7128804d72 1
+  a1602b357cfca067600406eb19060c7128804d72 o default
 
-  $ cat .hg/cache/branch2-visible
-  222ae9789a75703f9836e44de7db179cbfd420ee 2
-  a3498d6e39376d2456425dd8c692367bdbf00fa2 o default
-  222ae9789a75703f9836e44de7db179cbfd420ee o default
 
 On pull we used to end up with the same tip, wrongly reuse the invalid cache,
 and crash.
 
-  $ hg pull ../repo 2>&1 | grep 'ValueError:'
-  ValueError: node a3498d6e39376d2456425dd8c692367bdbf00fa2 does not exist (known-bad-output !)
+  $ hg pull ../repo --quiet
+  $ hg heads -T '{rev} {node} {branch}\n'
+  2 222ae9789a75703f9836e44de7db179cbfd420ee default
+  1 a1602b357cfca067600406eb19060c7128804d72 default
+  $ ls -1 .hg/cache/branch?*
+  .hg/cache/branch2-base
+  .hg/cache/branch2-served
+  $ cat .hg/cache/branch?-served
+  222ae9789a75703f9836e44de7db179cbfd420ee 2
+  a1602b357cfca067600406eb19060c7128804d72 o default
+  222ae9789a75703f9836e44de7db179cbfd420ee o default
--- a/tests/test-tags.t	Thu Jun 13 09:52:39 2024 +0200
+++ b/tests/test-tags.t	Mon Jun 24 12:05:31 2024 +0200
@@ -792,11 +792,6 @@
 
   $ ls tagsclient/.hg/cache
   branch2-base
-  branch2-immutable
-  branch2-served
-  branch2-served.hidden
-  branch2-visible
-  branch2-visible-hidden
   hgtagsfnodes1
   rbc-names-v1
   rbc-revs-v1
@@ -823,11 +818,6 @@
 
   $ ls tagsclient/.hg/cache
   branch2-base
-  branch2-immutable
-  branch2-served
-  branch2-served.hidden
-  branch2-visible
-  branch2-visible-hidden
   hgtagsfnodes1
   rbc-names-v1
   rbc-revs-v1
--- a/tests/test-treemanifest.t	Thu Jun 13 09:52:39 2024 +0200
+++ b/tests/test-treemanifest.t	Mon Jun 24 12:05:31 2024 +0200
@@ -761,8 +761,8 @@
   $ hg clone --config experimental.changegroup3=True --stream -U \
   >   http://localhost:$HGPORT1 stream-clone-basicstore
   streaming all changes
-  29 files to transfer, * of data (glob) (no-rust !)
-  31 files to transfer, * of data (glob) (rust !)
+  24 files to transfer, * of data (glob) (no-rust !)
+  26 files to transfer, * of data (glob) (rust !)
   transferred * in * seconds (*) (glob)
   $ hg -R stream-clone-basicstore verify -q
   $ cat port-1-errors.log
@@ -771,8 +771,8 @@
   $ hg clone --config experimental.changegroup3=True --stream -U \
   >   http://localhost:$HGPORT2 stream-clone-encodedstore
   streaming all changes
-  29 files to transfer, * of data (glob) (no-rust !)
-  31 files to transfer, * of data (glob) (rust !)
+  24 files to transfer, * of data (glob) (no-rust !)
+  26 files to transfer, * of data (glob) (rust !)
   transferred * in * seconds (*) (glob)
   $ hg -R stream-clone-encodedstore verify -q
   $ cat port-2-errors.log
--- a/tests/test-xdg.t	Thu Jun 13 09:52:39 2024 +0200
+++ b/tests/test-xdg.t	Mon Jun 24 12:05:31 2024 +0200
@@ -1,4 +1,4 @@
-#if no-windows no-osx
+#if no-windows
 
   $ mkdir -p xdgconf/hg
   $ echo '[ui]' > xdgconf/hg/hgrc