--- a/contrib/perf.py Mon Mar 25 02:09:15 2024 +0100
+++ b/contrib/perf.py Mon Mar 25 16:27:48 2024 +0000
@@ -4205,15 +4205,24 @@
# add unfiltered
allfilters.append(None)
- if util.safehasattr(branchmap.branchcache, 'fromfile'):
+ old_branch_cache_from_file = None
+ branchcacheread = None
+ if util.safehasattr(branchmap, 'branch_cache_from_file'):
+ old_branch_cache_from_file = branchmap.branch_cache_from_file
+ branchmap.branch_cache_from_file = lambda *args: None
+ elif util.safehasattr(branchmap.branchcache, 'fromfile'):
branchcacheread = safeattrsetter(branchmap.branchcache, b'fromfile')
branchcacheread.set(classmethod(lambda *args: None))
else:
# older versions
branchcacheread = safeattrsetter(branchmap, b'read')
branchcacheread.set(lambda *args: None)
- branchcachewrite = safeattrsetter(branchmap.branchcache, b'write')
- branchcachewrite.set(lambda *args: None)
+ if util.safehasattr(branchmap, '_LocalBranchCache'):
+ branchcachewrite = safeattrsetter(branchmap._LocalBranchCache, b'write')
+ branchcachewrite.set(lambda *args: None)
+ else:
+ branchcachewrite = safeattrsetter(branchmap.branchcache, b'write')
+ branchcachewrite.set(lambda *args: None)
try:
for name in allfilters:
printname = name
@@ -4221,7 +4230,10 @@
printname = b'unfiltered'
timer(getbranchmap(name), title=printname)
finally:
- branchcacheread.restore()
+ if old_branch_cache_from_file is not None:
+ branchmap.branch_cache_from_file = old_branch_cache_from_file
+ if branchcacheread is not None:
+ branchcacheread.restore()
branchcachewrite.restore()
fm.end()
@@ -4303,6 +4315,19 @@
baserepo = repo.filtered(b'__perf_branchmap_update_base')
targetrepo = repo.filtered(b'__perf_branchmap_update_target')
+ bcache = repo.branchmap()
+ copy_method = 'copy'
+
+ copy_base_kwargs = copy_base_kwargs = {}
+ if hasattr(bcache, 'copy'):
+ if 'repo' in getargspec(bcache.copy).args:
+ copy_base_kwargs = {"repo": baserepo}
+ copy_target_kwargs = {"repo": targetrepo}
+ else:
+ copy_method = 'inherit_for'
+ copy_base_kwargs = {"repo": baserepo}
+ copy_target_kwargs = {"repo": targetrepo}
+
# try to find an existing branchmap to reuse
subsettable = getbranchmapsubsettable()
candidatefilter = subsettable.get(None)
@@ -4311,7 +4336,7 @@
if candidatebm.validfor(baserepo):
filtered = repoview.filterrevs(repo, candidatefilter)
missing = [r for r in allbaserevs if r in filtered]
- base = candidatebm.copy()
+ base = getattr(candidatebm, copy_method)(**copy_base_kwargs)
base.update(baserepo, missing)
break
candidatefilter = subsettable.get(candidatefilter)
@@ -4321,7 +4346,7 @@
base.update(baserepo, allbaserevs)
def setup():
- x[0] = base.copy()
+ x[0] = getattr(base, copy_method)(**copy_target_kwargs)
if clearcaches:
unfi._revbranchcache = None
clearchangelog(repo)
@@ -4368,10 +4393,10 @@
repo.branchmap() # make sure we have a relevant, up to date branchmap
- try:
- fromfile = branchmap.branchcache.fromfile
- except AttributeError:
- # older versions
+ fromfile = getattr(branchmap, 'branch_cache_from_file', None)
+ if fromfile is None:
+ fromfile = getattr(branchmap.branchcache, 'fromfile', None)
+ if fromfile is None:
fromfile = branchmap.read
currentfilter = filter
--- a/mercurial/branchmap.py Mon Mar 25 02:09:15 2024 +0100
+++ b/mercurial/branchmap.py Mon Mar 25 16:27:48 2024 +0000
@@ -15,6 +15,7 @@
)
from typing import (
+ Any,
Callable,
Dict,
Iterable,
@@ -24,6 +25,7 @@
TYPE_CHECKING,
Tuple,
Union,
+ cast,
)
from . import (
@@ -59,7 +61,37 @@
def __getitem__(self, repo):
self.updatecache(repo)
- return self._per_filter[repo.filtername]
+ bcache = self._per_filter[repo.filtername]
+ bcache._ensure_populated(repo)
+ assert bcache._filtername == repo.filtername, (
+ bcache._filtername,
+ repo.filtername,
+ )
+ return bcache
+
+ def update_disk(self, repo, detect_pure_topo=False):
+ """ensure and up-to-date cache is (or will be) written on disk
+
+ The cache for this repository view is updated if needed and written on
+ disk.
+
+ If a transaction is in progress, the writing is schedule to transaction
+ close. See the `BranchMapCache.write_dirty` method.
+
+ This method exist independently of __getitem__ as it is sometime useful
+ to signal that we have no intend to use the data in memory yet.
+ """
+ self.updatecache(repo)
+ bcache = self._per_filter[repo.filtername]
+ assert bcache._filtername == repo.filtername, (
+ bcache._filtername,
+ repo.filtername,
+ )
+ if detect_pure_topo:
+ bcache._detect_pure_topo(repo)
+ tr = repo.currenttransaction()
+ if getattr(tr, 'finalized', True):
+ bcache.sync_disk(repo)
def updatecache(self, repo):
"""Update the cache for the given filtered view on a repository"""
@@ -72,7 +104,7 @@
bcache = self._per_filter.get(filtername)
if bcache is None or not bcache.validfor(repo):
# cache object missing or cache object stale? Read from disk
- bcache = branchcache.fromfile(repo)
+ bcache = branch_cache_from_file(repo)
revs = []
if bcache is None:
@@ -82,12 +114,13 @@
subsetname = subsettable.get(filtername)
if subsetname is not None:
subset = repo.filtered(subsetname)
- bcache = self[subset].copy()
+ self.updatecache(subset)
+ bcache = self._per_filter[subset.filtername].inherit_for(repo)
extrarevs = subset.changelog.filteredrevs - cl.filteredrevs
revs.extend(r for r in extrarevs if r <= bcache.tiprev)
else:
# nothing to fall back on, start empty.
- bcache = branchcache(repo)
+ bcache = new_branch_cache(repo)
revs.extend(cl.revs(start=bcache.tiprev + 1))
if revs:
@@ -118,7 +151,7 @@
if rbheads:
rtiprev = max((int(clrev(node)) for node in rbheads))
- cache = branchcache(
+ cache = new_branch_cache(
repo,
remotebranchmap,
repo[rtiprev].node(),
@@ -131,19 +164,26 @@
for candidate in (b'base', b'immutable', b'served'):
rview = repo.filtered(candidate)
if cache.validfor(rview):
+ cache._filtername = candidate
self._per_filter[candidate] = cache
+ cache._state = STATE_DIRTY
cache.write(rview)
return
def clear(self):
self._per_filter.clear()
- def write_delayed(self, repo):
+ def write_dirty(self, repo):
unfi = repo.unfiltered()
- for filtername, cache in self._per_filter.items():
- if cache._delayed:
+ for filtername in repoviewutil.get_ordered_subset():
+ cache = self._per_filter.get(filtername)
+ if cache is None:
+ continue
+ if filtername is None:
+ repo = unfi
+ else:
repo = unfi.filtered(filtername)
- cache.write(repo)
+ cache.sync_disk(repo)
def _unknownnode(node):
@@ -158,26 +198,11 @@
return b'branch cache'
-class branchcache:
+class _BaseBranchCache:
"""A dict like object that hold branches heads cache.
This cache is used to avoid costly computations to determine all the
branch heads of a repo.
-
- The cache is serialized on disk in the following format:
-
- <tip hex node> <tip rev number> [optional filtered repo hex hash]
- <branch head hex node> <open/closed state> <branch name>
- <branch head hex node> <open/closed state> <branch name>
- ...
-
- The first line is used to check if the cache is still valid. If the
- branch cache is for a filtered repo view, an optional third hash is
- included that hashes the hashes of all filtered and obsolete revisions.
-
- The open/closed state is represented by a single letter 'o' or 'c'.
- This field can be used to avoid changelog reads when determining if a
- branch head closes a branch or not.
"""
def __init__(
@@ -186,64 +211,18 @@
entries: Union[
Dict[bytes, List[bytes]], Iterable[Tuple[bytes, List[bytes]]]
] = (),
- tipnode: Optional[bytes] = None,
- tiprev: Optional[int] = nullrev,
- filteredhash: Optional[bytes] = None,
- closednodes: Optional[Set[bytes]] = None,
- hasnode: Optional[Callable[[bytes], bool]] = None,
+ closed_nodes: Optional[Set[bytes]] = None,
) -> None:
"""hasnode is a function which can be used to verify whether changelog
has a given node or not. If it's not provided, we assume that every node
we have exists in changelog"""
- self._repo = repo
- self._delayed = False
- if tipnode is None:
- self.tipnode = repo.nullid
- else:
- self.tipnode = tipnode
- self.tiprev = tiprev
- self.filteredhash = filteredhash
# closednodes is a set of nodes that close their branch. If the branch
# cache has been updated, it may contain nodes that are no longer
# heads.
- if closednodes is None:
- self._closednodes = set()
- else:
- self._closednodes = closednodes
+ if closed_nodes is None:
+ closed_nodes = set()
+ self._closednodes = set(closed_nodes)
self._entries = dict(entries)
- # whether closed nodes are verified or not
- self._closedverified = False
- # branches for which nodes are verified
- self._verifiedbranches = set()
- self._hasnode = hasnode
- if self._hasnode is None:
- self._hasnode = lambda x: True
-
- def _verifyclosed(self):
- """verify the closed nodes we have"""
- if self._closedverified:
- return
- for node in self._closednodes:
- if not self._hasnode(node):
- _unknownnode(node)
-
- self._closedverified = True
-
- def _verifybranch(self, branch):
- """verify head nodes for the given branch."""
- if branch not in self._entries or branch in self._verifiedbranches:
- return
- for n in self._entries[branch]:
- if not self._hasnode(n):
- _unknownnode(n)
-
- self._verifiedbranches.add(branch)
-
- def _verifyall(self):
- """verifies nodes of all the branches"""
- needverification = set(self._entries.keys()) - self._verifiedbranches
- for b in needverification:
- self._verifybranch(b)
def __iter__(self):
return iter(self._entries)
@@ -252,115 +231,20 @@
self._entries[key] = value
def __getitem__(self, key):
- self._verifybranch(key)
return self._entries[key]
def __contains__(self, key):
- self._verifybranch(key)
return key in self._entries
def iteritems(self):
- for k, v in self._entries.items():
- self._verifybranch(k)
- yield k, v
+ return self._entries.items()
items = iteritems
def hasbranch(self, label):
"""checks whether a branch of this name exists or not"""
- self._verifybranch(label)
return label in self._entries
- @classmethod
- def fromfile(cls, repo):
- f = None
- try:
- f = repo.cachevfs(cls._filename(repo))
- lineiter = iter(f)
- cachekey = next(lineiter).rstrip(b'\n').split(b" ", 2)
- last, lrev = cachekey[:2]
- last, lrev = bin(last), int(lrev)
- filteredhash = None
- hasnode = repo.changelog.hasnode
- if len(cachekey) > 2:
- filteredhash = bin(cachekey[2])
- bcache = cls(
- repo,
- tipnode=last,
- tiprev=lrev,
- filteredhash=filteredhash,
- hasnode=hasnode,
- )
- if not bcache.validfor(repo):
- # invalidate the cache
- raise ValueError('tip differs')
- bcache.load(repo, lineiter)
- except (IOError, OSError):
- return None
-
- except Exception as inst:
- if repo.ui.debugflag:
- msg = b'invalid %s: %s\n'
- repo.ui.debug(
- msg
- % (
- _branchcachedesc(repo),
- stringutil.forcebytestr(inst),
- )
- )
- bcache = None
-
- finally:
- if f:
- f.close()
-
- return bcache
-
- def load(self, repo, lineiter):
- """fully loads the branchcache by reading from the file using the line
- iterator passed"""
- for line in lineiter:
- line = line.rstrip(b'\n')
- if not line:
- continue
- node, state, label = line.split(b" ", 2)
- if state not in b'oc':
- raise ValueError('invalid branch state')
- label = encoding.tolocal(label.strip())
- node = bin(node)
- self._entries.setdefault(label, []).append(node)
- if state == b'c':
- self._closednodes.add(node)
-
- @staticmethod
- def _filename(repo):
- """name of a branchcache file for a given repo or repoview"""
- filename = b"branch2"
- if repo.filtername:
- filename = b'%s-%s' % (filename, repo.filtername)
- return filename
-
- def validfor(self, repo):
- """check that cache contents are valid for (a subset of) this repo
-
- - False when the order of changesets changed or if we detect a strip.
- - True when cache is up-to-date for the current repo or its subset."""
- try:
- node = repo.changelog.node(self.tiprev)
- except IndexError:
- # changesets were stripped and now we don't even have enough to
- # find tiprev
- return False
- if self.tipnode != node:
- # tiprev doesn't correspond to tipnode: repo was stripped, or this
- # repo has a different order of changesets
- return False
- tiphash = scmutil.filteredhash(repo, self.tiprev, needobsolete=True)
- # hashes don't match if this repo view has a different set of filtered
- # revisions (e.g. due to phase changes) or obsolete revisions (e.g.
- # history was rewritten)
- return self.filteredhash == tiphash
-
def _branchtip(self, heads):
"""Return tuple with last open head in heads and false,
otherwise return last closed head and true."""
@@ -383,7 +267,6 @@
return (n for n in nodes if n not in self._closednodes)
def branchheads(self, branch, closed=False):
- self._verifybranch(branch)
heads = self._entries[branch]
if not closed:
heads = list(self.iteropen(heads))
@@ -395,60 +278,8 @@
def iterheads(self):
"""returns all the heads"""
- self._verifyall()
return self._entries.values()
- def copy(self):
- """return an deep copy of the branchcache object"""
- return type(self)(
- self._repo,
- self._entries,
- self.tipnode,
- self.tiprev,
- self.filteredhash,
- self._closednodes,
- )
-
- def write(self, repo):
- tr = repo.currenttransaction()
- if not getattr(tr, 'finalized', True):
- # Avoid premature writing.
- #
- # (The cache warming setup by localrepo will update the file later.)
- self._delayed = True
- return
- try:
- filename = self._filename(repo)
- with repo.cachevfs(filename, b"w", atomictemp=True) as f:
- cachekey = [hex(self.tipnode), b'%d' % self.tiprev]
- if self.filteredhash is not None:
- cachekey.append(hex(self.filteredhash))
- f.write(b" ".join(cachekey) + b'\n')
- nodecount = 0
- for label, nodes in sorted(self._entries.items()):
- label = encoding.fromlocal(label)
- for node in nodes:
- nodecount += 1
- if node in self._closednodes:
- state = b'c'
- else:
- state = b'o'
- f.write(b"%s %s %s\n" % (hex(node), state, label))
- repo.ui.log(
- b'branchcache',
- b'wrote %s with %d labels and %d nodes\n',
- _branchcachedesc(repo),
- len(self._entries),
- nodecount,
- )
- self._delayed = False
- except (IOError, OSError, error.Abort) as inst:
- # Abort may be raised by read only opener, so log and continue
- repo.ui.debug(
- b"couldn't write branch cache: %s\n"
- % stringutil.forcebytestr(inst)
- )
-
def update(self, repo, revgen):
"""Given a branchhead cache, self, that may have extra nodes or be
missing heads, and a generator of nodes that are strictly a superset of
@@ -456,29 +287,69 @@
"""
starttime = util.timer()
cl = repo.changelog
+ # Faster than using ctx.obsolete()
+ obsrevs = obsolete.getrevs(repo, b'obsolete')
# collect new branch entries
newbranches = {}
+ new_closed = set()
+ obs_ignored = set()
getbranchinfo = repo.revbranchcache().branchinfo
+ max_rev = -1
for r in revgen:
+ max_rev = max(max_rev, r)
+ if r in obsrevs:
+ # We ignore obsolete changesets as they shouldn't be
+ # considered heads.
+ obs_ignored.add(r)
+ continue
branch, closesbranch = getbranchinfo(r)
newbranches.setdefault(branch, []).append(r)
if closesbranch:
- self._closednodes.add(cl.node(r))
+ new_closed.add(r)
+ if max_rev < 0:
+ msg = "running branchcache.update without revision to update"
+ raise error.ProgrammingError(msg)
+
+ self._process_new(
+ repo,
+ newbranches,
+ new_closed,
+ obs_ignored,
+ max_rev,
+ )
+
+ self._closednodes.update(cl.node(rev) for rev in new_closed)
- # new tip revision which we found after iterating items from new
- # branches
- ntiprev = self.tiprev
+ duration = util.timer() - starttime
+ repo.ui.log(
+ b'branchcache',
+ b'updated %s in %.4f seconds\n',
+ _branchcachedesc(repo),
+ duration,
+ )
+ return max_rev
+ def _process_new(
+ self,
+ repo,
+ newbranches,
+ new_closed,
+ obs_ignored,
+ max_rev,
+ ):
+ """update the branchmap from a set of new information"""
# Delay fetching the topological heads until they are needed.
# A repository without non-continous branches can skip this part.
topoheads = None
+ cl = repo.changelog
+ getbranchinfo = repo.revbranchcache().branchinfo
+ # Faster than using ctx.obsolete()
+ obsrevs = obsolete.getrevs(repo, b'obsolete')
+
# If a changeset is visible, its parents must be visible too, so
# use the faster unfiltered parent accessor.
- parentrevs = repo.unfiltered().changelog.parentrevs
-
- # Faster than using ctx.obsolete()
- obsrevs = obsolete.getrevs(repo, b'obsolete')
+ parentrevs = cl._uncheckedparentrevs
for branch, newheadrevs in newbranches.items():
# For every branch, compute the new branchheads.
@@ -520,11 +391,6 @@
bheadset = {cl.rev(node) for node in bheads}
uncertain = set()
for newrev in sorted(newheadrevs):
- if newrev in obsrevs:
- # We ignore obsolete changesets as they shouldn't be
- # considered heads.
- continue
-
if not bheadset:
bheadset.add(newrev)
continue
@@ -561,50 +427,665 @@
bheadset -= ancestors
if bheadset:
self[branch] = [cl.node(rev) for rev in sorted(bheadset)]
- tiprev = max(newheadrevs)
- if tiprev > ntiprev:
- ntiprev = tiprev
+
+
+STATE_CLEAN = 1
+STATE_INHERITED = 2
+STATE_DIRTY = 3
+
+
+class _LocalBranchCache(_BaseBranchCache):
+ """base class of branch-map info for a local repo or repoview"""
+
+ _base_filename = None
+ _default_key_hashes: Tuple[bytes] = cast(Tuple[bytes], ())
+
+ def __init__(
+ self,
+ repo: "localrepo.localrepository",
+ entries: Union[
+ Dict[bytes, List[bytes]], Iterable[Tuple[bytes, List[bytes]]]
+ ] = (),
+ tipnode: Optional[bytes] = None,
+ tiprev: Optional[int] = nullrev,
+ key_hashes: Optional[Tuple[bytes]] = None,
+ closednodes: Optional[Set[bytes]] = None,
+ hasnode: Optional[Callable[[bytes], bool]] = None,
+ verify_node: bool = False,
+ inherited: bool = False,
+ ) -> None:
+ """hasnode is a function which can be used to verify whether changelog
+ has a given node or not. If it's not provided, we assume that every node
+ we have exists in changelog"""
+ self._filtername = repo.filtername
+ if tipnode is None:
+ self.tipnode = repo.nullid
+ else:
+ self.tipnode = tipnode
+ self.tiprev = tiprev
+ if key_hashes is None:
+ self.key_hashes = self._default_key_hashes
+ else:
+ self.key_hashes = key_hashes
+ self._state = STATE_CLEAN
+ if inherited:
+ self._state = STATE_INHERITED
+
+ super().__init__(repo=repo, entries=entries, closed_nodes=closednodes)
+ # closednodes is a set of nodes that close their branch. If the branch
+ # cache has been updated, it may contain nodes that are no longer
+ # heads.
+
+ # Do we need to verify branch at all ?
+ self._verify_node = verify_node
+ # branches for which nodes are verified
+ self._verifiedbranches = set()
+ self._hasnode = None
+ if self._verify_node:
+ self._hasnode = repo.changelog.hasnode
+
+ def _compute_key_hashes(self, repo) -> Tuple[bytes]:
+ raise NotImplementedError
+
+ def _ensure_populated(self, repo):
+ """make sure any lazily loaded values are fully populated"""
+
+ def _detect_pure_topo(self, repo) -> None:
+ pass
+
+ def validfor(self, repo):
+ """check that cache contents are valid for (a subset of) this repo
+
+ - False when the order of changesets changed or if we detect a strip.
+ - True when cache is up-to-date for the current repo or its subset."""
+ try:
+ node = repo.changelog.node(self.tiprev)
+ except IndexError:
+ # changesets were stripped and now we don't even have enough to
+ # find tiprev
+ return False
+ if self.tipnode != node:
+ # tiprev doesn't correspond to tipnode: repo was stripped, or this
+ # repo has a different order of changesets
+ return False
+ repo_key_hashes = self._compute_key_hashes(repo)
+ # hashes don't match if this repo view has a different set of filtered
+ # revisions (e.g. due to phase changes) or obsolete revisions (e.g.
+ # history was rewritten)
+ return self.key_hashes == repo_key_hashes
+
+ @classmethod
+ def fromfile(cls, repo):
+ f = None
+ try:
+ f = repo.cachevfs(cls._filename(repo))
+ lineiter = iter(f)
+ init_kwargs = cls._load_header(repo, lineiter)
+ bcache = cls(
+ repo,
+ verify_node=True,
+ **init_kwargs,
+ )
+ if not bcache.validfor(repo):
+ # invalidate the cache
+ raise ValueError('tip differs')
+ bcache._load_heads(repo, lineiter)
+ except (IOError, OSError):
+ return None
+
+ except Exception as inst:
+ if repo.ui.debugflag:
+ msg = b'invalid %s: %s\n'
+ msg %= (
+ _branchcachedesc(repo),
+ stringutil.forcebytestr(inst),
+ )
+ repo.ui.debug(msg)
+ bcache = None
+
+ finally:
+ if f:
+ f.close()
+
+ return bcache
+
+ @classmethod
+ def _load_header(cls, repo, lineiter) -> "dict[str, Any]":
+ raise NotImplementedError
+
+ def _load_heads(self, repo, lineiter):
+ """fully loads the branchcache by reading from the file using the line
+ iterator passed"""
+ for line in lineiter:
+ line = line.rstrip(b'\n')
+ if not line:
+ continue
+ node, state, label = line.split(b" ", 2)
+ if state not in b'oc':
+ raise ValueError('invalid branch state')
+ label = encoding.tolocal(label.strip())
+ node = bin(node)
+ self._entries.setdefault(label, []).append(node)
+ if state == b'c':
+ self._closednodes.add(node)
- if ntiprev > self.tiprev:
- self.tiprev = ntiprev
- self.tipnode = cl.node(ntiprev)
+ @classmethod
+ def _filename(cls, repo):
+ """name of a branchcache file for a given repo or repoview"""
+ filename = cls._base_filename
+ assert filename is not None
+ if repo.filtername:
+ filename = b'%s-%s' % (filename, repo.filtername)
+ return filename
+
+ def inherit_for(self, repo):
+ """return a deep copy of the branchcache object"""
+ assert repo.filtername != self._filtername
+ other = type(self)(
+ repo=repo,
+ # we always do a shally copy of self._entries, and the values is
+ # always replaced, so no need to deepcopy until the above remains
+ # true.
+ entries=self._entries,
+ tipnode=self.tipnode,
+ tiprev=self.tiprev,
+ key_hashes=self.key_hashes,
+ closednodes=set(self._closednodes),
+ verify_node=self._verify_node,
+ inherited=True,
+ )
+ # also copy information about the current verification state
+ other._verifiedbranches = set(self._verifiedbranches)
+ return other
+
+ def sync_disk(self, repo):
+ """synchronise the on disk file with the cache state
+
+ If new value specific to this filter level need to be written, the file
+ will be updated, if the state of the branchcache is inherited from a
+ subset, any stalled on disk file will be deleted.
+
+ That method does nothing if there is nothing to do.
+ """
+ if self._state == STATE_DIRTY:
+ self.write(repo)
+ elif self._state == STATE_INHERITED:
+ filename = self._filename(repo)
+ repo.cachevfs.tryunlink(filename)
+
+ def write(self, repo):
+ assert self._filtername == repo.filtername, (
+ self._filtername,
+ repo.filtername,
+ )
+ assert self._state == STATE_DIRTY, self._state
+ # This method should not be called during an open transaction
+ tr = repo.currenttransaction()
+ if not getattr(tr, 'finalized', True):
+ msg = "writing branchcache in the middle of a transaction"
+ raise error.ProgrammingError(msg)
+ try:
+ filename = self._filename(repo)
+ with repo.cachevfs(filename, b"w", atomictemp=True) as f:
+ self._write_header(f)
+ nodecount = self._write_heads(repo, f)
+ repo.ui.log(
+ b'branchcache',
+ b'wrote %s with %d labels and %d nodes\n',
+ _branchcachedesc(repo),
+ len(self._entries),
+ nodecount,
+ )
+ self._state = STATE_CLEAN
+ except (IOError, OSError, error.Abort) as inst:
+ # Abort may be raised by read only opener, so log and continue
+ repo.ui.debug(
+ b"couldn't write branch cache: %s\n"
+ % stringutil.forcebytestr(inst)
+ )
+
+ def _write_header(self, fp) -> None:
+ raise NotImplementedError
+
+ def _write_heads(self, repo, fp) -> int:
+ """write list of heads to a file
+
+ Return the number of heads written."""
+ nodecount = 0
+ for label, nodes in sorted(self._entries.items()):
+ label = encoding.fromlocal(label)
+ for node in nodes:
+ nodecount += 1
+ if node in self._closednodes:
+ state = b'c'
+ else:
+ state = b'o'
+ fp.write(b"%s %s %s\n" % (hex(node), state, label))
+ return nodecount
+
+ def _verifybranch(self, branch):
+ """verify head nodes for the given branch."""
+ if not self._verify_node:
+ return
+ if branch not in self._entries or branch in self._verifiedbranches:
+ return
+ assert self._hasnode is not None
+ for n in self._entries[branch]:
+ if not self._hasnode(n):
+ _unknownnode(n)
+
+ self._verifiedbranches.add(branch)
+
+ def _verifyall(self):
+ """verifies nodes of all the branches"""
+ for b in self._entries.keys():
+ if b not in self._verifiedbranches:
+ self._verifybranch(b)
+
+ def __getitem__(self, key):
+ self._verifybranch(key)
+ return super().__getitem__(key)
+
+ def __contains__(self, key):
+ self._verifybranch(key)
+ return super().__contains__(key)
+
+ def iteritems(self):
+ self._verifyall()
+ return super().iteritems()
+
+ items = iteritems
+
+ def iterheads(self):
+ """returns all the heads"""
+ self._verifyall()
+ return super().iterheads()
+
+ def hasbranch(self, label):
+ """checks whether a branch of this name exists or not"""
+ self._verifybranch(label)
+ return super().hasbranch(label)
+
+ def branchheads(self, branch, closed=False):
+ self._verifybranch(branch)
+ return super().branchheads(branch, closed=closed)
+
+ def update(self, repo, revgen):
+ assert self._filtername == repo.filtername, (
+ self._filtername,
+ repo.filtername,
+ )
+ cl = repo.changelog
+ max_rev = super().update(repo, revgen)
+ # new tip revision which we found after iterating items from new
+ # branches
+ if max_rev is not None and max_rev > self.tiprev:
+ self.tiprev = max_rev
+ self.tipnode = cl.node(max_rev)
+ else:
+ # We should not be here is if this is false
+ assert cl.node(self.tiprev) == self.tipnode
if not self.validfor(repo):
- # old cache key is now invalid for the repo, but we've just updated
- # the cache and we assume it's valid, so let's make the cache key
- # valid as well by recomputing it from the cached data
- self.tipnode = repo.nullid
- self.tiprev = nullrev
- for heads in self.iterheads():
- if not heads:
- # all revisions on a branch are obsolete
- continue
- # note: tiprev is not necessarily the tip revision of repo,
- # because the tip could be obsolete (i.e. not a head)
- tiprev = max(cl.rev(node) for node in heads)
- if tiprev > self.tiprev:
- self.tipnode = cl.node(tiprev)
- self.tiprev = tiprev
- self.filteredhash = scmutil.filteredhash(
- repo, self.tiprev, needobsolete=True
+ # the tiprev and tipnode should be aligned, so if the current repo
+ # is not seens as valid this is because old cache key is now
+ # invalid for the repo.
+ #
+ # However. we've just updated the cache and we assume it's valid,
+ # so let's make the cache key valid as well by recomputing it from
+ # the cached data
+ self.key_hashes = self._compute_key_hashes(repo)
+ self.filteredhash = scmutil.combined_filtered_and_obsolete_hash(
+ repo,
+ self.tiprev,
+ )
+
+ self._state = STATE_DIRTY
+ tr = repo.currenttransaction()
+ if getattr(tr, 'finalized', True):
+ # Avoid premature writing.
+ #
+ # (The cache warming setup by localrepo will update the file later.)
+ self.write(repo)
+
+
+def branch_cache_from_file(repo) -> Optional[_LocalBranchCache]:
+ """Build a branch cache from on-disk data if possible
+
+ Return a branch cache of the right format depending of the repository.
+ """
+ if repo.ui.configbool(b"experimental", b"branch-cache-v3"):
+ return BranchCacheV3.fromfile(repo)
+ else:
+ return BranchCacheV2.fromfile(repo)
+
+
+def new_branch_cache(repo, *args, **kwargs):
+ """Build a new branch cache from argument
+
+ Return a branch cache of the right format depending of the repository.
+ """
+ if repo.ui.configbool(b"experimental", b"branch-cache-v3"):
+ return BranchCacheV3(repo, *args, **kwargs)
+ else:
+ return BranchCacheV2(repo, *args, **kwargs)
+
+
+class BranchCacheV2(_LocalBranchCache):
+ """a branch cache using version 2 of the format on disk
+
+ The cache is serialized on disk in the following format:
+
+ <tip hex node> <tip rev number> [optional filtered repo hex hash]
+ <branch head hex node> <open/closed state> <branch name>
+ <branch head hex node> <open/closed state> <branch name>
+ ...
+
+ The first line is used to check if the cache is still valid. If the
+ branch cache is for a filtered repo view, an optional third hash is
+ included that hashes the hashes of all filtered and obsolete revisions.
+
+ The open/closed state is represented by a single letter 'o' or 'c'.
+ This field can be used to avoid changelog reads when determining if a
+ branch head closes a branch or not.
+ """
+
+ _base_filename = b"branch2"
+
+ @classmethod
+ def _load_header(cls, repo, lineiter) -> "dict[str, Any]":
+ """parse the head of a branchmap file
+
+ return parameters to pass to a newly created class instance.
+ """
+ cachekey = next(lineiter).rstrip(b'\n').split(b" ", 2)
+ last, lrev = cachekey[:2]
+ last, lrev = bin(last), int(lrev)
+ filteredhash = ()
+ if len(cachekey) > 2:
+ filteredhash = (bin(cachekey[2]),)
+ return {
+ "tipnode": last,
+ "tiprev": lrev,
+ "key_hashes": filteredhash,
+ }
+
+ def _write_header(self, fp) -> None:
+ """write the branch cache header to a file"""
+ cachekey = [hex(self.tipnode), b'%d' % self.tiprev]
+ if self.key_hashes:
+ cachekey.append(hex(self.key_hashes[0]))
+ fp.write(b" ".join(cachekey) + b'\n')
+
+ def _compute_key_hashes(self, repo) -> Tuple[bytes]:
+ """return the cache key hashes that match this repoview state"""
+ filtered_hash = scmutil.combined_filtered_and_obsolete_hash(
+ repo,
+ self.tiprev,
+ needobsolete=True,
+ )
+ keys: Tuple[bytes] = cast(Tuple[bytes], ())
+ if filtered_hash is not None:
+ keys: Tuple[bytes] = (filtered_hash,)
+ return keys
+
+
+class BranchCacheV3(_LocalBranchCache):
+ """a branch cache using version 3 of the format on disk
+
+ This version is still EXPERIMENTAL and the format is subject to changes.
+
+ The cache is serialized on disk in the following format:
+
+ <cache-key-xxx>=<xxx-value> <cache-key-yyy>=<yyy-value> […]
+ <branch head hex node> <open/closed state> <branch name>
+ <branch head hex node> <open/closed state> <branch name>
+ ...
+
+ The first line is used to check if the cache is still valid. It is a series
+ of key value pair. The following key are recognized:
+
+ - tip-rev: the rev-num of the tip-most revision seen by this cache
+ - tip-node: the node-id of the tip-most revision sen by this cache
+ - filtered-hash: the hash of all filtered revisions (before tip-rev)
+ ignored by this cache.
+ - obsolete-hash: the hash of all non-filtered obsolete revisions (before
+ tip-rev) ignored by this cache.
+
+ The tip-rev is used to know how far behind the value in the file are
+ compared to the current repository state.
+
+ The tip-node, filtered-hash and obsolete-hash are used to detect if this
+ cache can be used for this repository state at all.
+
+ The open/closed state is represented by a single letter 'o' or 'c'.
+ This field can be used to avoid changelog reads when determining if a
+ branch head closes a branch or not.
+
+ Topological heads are not included in the listing and should be dispatched
+ on the right branch at read time. Obsolete topological heads should be
+ ignored.
+ """
+
+ _base_filename = b"branch3"
+ _default_key_hashes = (None, None)
+
+ def __init__(self, *args, pure_topo_branch=None, **kwargs):
+ super().__init__(*args, **kwargs)
+ self._pure_topo_branch = pure_topo_branch
+ self._needs_populate = self._pure_topo_branch is not None
+
+ def inherit_for(self, repo):
+ new = super().inherit_for(repo)
+ new._pure_topo_branch = self._pure_topo_branch
+ new._needs_populate = self._needs_populate
+ return new
+
+ def _get_topo_heads(self, repo):
+ """returns the topological head of a repoview content up to self.tiprev"""
+ cl = repo.changelog
+ if self.tiprev == nullrev:
+ return []
+ elif self.tiprev == cl.tiprev():
+ return cl.headrevs()
+ else:
+ # XXX passing tiprev as ceiling of cl.headrevs could be faster
+ heads = cl.headrevs(cl.revs(stop=self.tiprev))
+ return heads
+
+ def _write_header(self, fp) -> None:
+ cache_keys = {
+ b"tip-node": hex(self.tipnode),
+ b"tip-rev": b'%d' % self.tiprev,
+ }
+ if self.key_hashes:
+ if self.key_hashes[0] is not None:
+ cache_keys[b"filtered-hash"] = hex(self.key_hashes[0])
+ if self.key_hashes[1] is not None:
+ cache_keys[b"obsolete-hash"] = hex(self.key_hashes[1])
+ if self._pure_topo_branch is not None:
+ cache_keys[b"topo-mode"] = b"pure"
+ pieces = (b"%s=%s" % i for i in sorted(cache_keys.items()))
+ fp.write(b" ".join(pieces) + b'\n')
+ if self._pure_topo_branch is not None:
+ label = encoding.fromlocal(self._pure_topo_branch)
+ fp.write(label + b'\n')
+
+ def _write_heads(self, repo, fp) -> int:
+ """write list of heads to a file
+
+ Return the number of heads written."""
+ nodecount = 0
+ topo_heads = None
+ if self._pure_topo_branch is None:
+ topo_heads = set(self._get_topo_heads(repo))
+ to_rev = repo.changelog.index.rev
+ for label, nodes in sorted(self._entries.items()):
+ if label == self._pure_topo_branch:
+ # not need to write anything the header took care of that
+ continue
+ label = encoding.fromlocal(label)
+ for node in nodes:
+ if topo_heads is not None:
+ rev = to_rev(node)
+ if rev in topo_heads:
+ continue
+ if node in self._closednodes:
+ state = b'c'
+ else:
+ state = b'o'
+ nodecount += 1
+ fp.write(b"%s %s %s\n" % (hex(node), state, label))
+ return nodecount
+
+ @classmethod
+ def _load_header(cls, repo, lineiter):
+ header_line = next(lineiter)
+ pieces = header_line.rstrip(b'\n').split(b" ")
+ cache_keys = dict(p.split(b'=', 1) for p in pieces)
+
+ args = {}
+ filtered_hash = None
+ obsolete_hash = None
+ has_pure_topo_heads = False
+ for k, v in cache_keys.items():
+ if k == b"tip-rev":
+ args["tiprev"] = int(v)
+ elif k == b"tip-node":
+ args["tipnode"] = bin(v)
+ elif k == b"filtered-hash":
+ filtered_hash = bin(v)
+ elif k == b"obsolete-hash":
+ obsolete_hash = bin(v)
+ elif k == b"topo-mode":
+ if v == b"pure":
+ has_pure_topo_heads = True
+ else:
+ msg = b"unknown topo-mode: %r" % v
+ raise ValueError(msg)
+ else:
+ msg = b"unknown cache key: %r" % k
+ raise ValueError(msg)
+ args["key_hashes"] = (filtered_hash, obsolete_hash)
+ if has_pure_topo_heads:
+ pure_line = next(lineiter).rstrip(b'\n')
+ args["pure_topo_branch"] = encoding.tolocal(pure_line)
+ return args
+
+ def _load_heads(self, repo, lineiter):
+ """fully loads the branchcache by reading from the file using the line
+ iterator passed"""
+ super()._load_heads(repo, lineiter)
+ if self._pure_topo_branch is not None:
+ # no need to read the repository heads, we know their value already.
+ return
+ cl = repo.changelog
+ getbranchinfo = repo.revbranchcache().branchinfo
+ obsrevs = obsolete.getrevs(repo, b'obsolete')
+ to_node = cl.node
+ touched_branch = set()
+ for head in self._get_topo_heads(repo):
+ if head in obsrevs:
+ continue
+ node = to_node(head)
+ branch, closed = getbranchinfo(head)
+ self._entries.setdefault(branch, []).append(node)
+ if closed:
+ self._closednodes.add(node)
+ touched_branch.add(branch)
+ to_rev = cl.index.rev
+ for branch in touched_branch:
+ self._entries[branch].sort(key=to_rev)
+
+ def _compute_key_hashes(self, repo) -> Tuple[bytes]:
+ """return the cache key hashes that match this repoview state"""
+ return scmutil.filtered_and_obsolete_hash(
+ repo,
+ self.tiprev,
)
- duration = util.timer() - starttime
- repo.ui.log(
- b'branchcache',
- b'updated %s in %.4f seconds\n',
- _branchcachedesc(repo),
- duration,
+ def _process_new(
+ self,
+ repo,
+ newbranches,
+ new_closed,
+ obs_ignored,
+ max_rev,
+ ) -> None:
+ if (
+ # note: the check about `obs_ignored` is too strict as the
+ # obsolete revision could be non-topological, but lets keep
+ # things simple for now
+ #
+ # The same apply to `new_closed` if the closed changeset are
+ # not a head, we don't care that it is closed, but lets keep
+ # things simple here too.
+ not (obs_ignored or new_closed)
+ and (
+ not newbranches
+ or (
+ len(newbranches) == 1
+ and (
+ self.tiprev == nullrev
+ or self._pure_topo_branch in newbranches
+ )
+ )
+ )
+ ):
+ if newbranches:
+ assert len(newbranches) == 1
+ self._pure_topo_branch = list(newbranches.keys())[0]
+ self._needs_populate = True
+ self._entries.pop(self._pure_topo_branch, None)
+ return
+
+ self._ensure_populated(repo)
+ self._pure_topo_branch = None
+ super()._process_new(
+ repo,
+ newbranches,
+ new_closed,
+ obs_ignored,
+ max_rev,
)
- self.write(repo)
+ def _ensure_populated(self, repo):
+ """make sure any lazily loaded values are fully populated"""
+ if self._needs_populate:
+ assert self._pure_topo_branch is not None
+ cl = repo.changelog
+ to_node = cl.node
+ topo_heads = self._get_topo_heads(repo)
+ heads = [to_node(r) for r in topo_heads]
+ self._entries[self._pure_topo_branch] = heads
+ self._needs_populate = False
+
+ def _detect_pure_topo(self, repo) -> None:
+ if self._pure_topo_branch is not None:
+ # we are pure topological already
+ return
+ to_node = repo.changelog.node
+ topo_heads = [to_node(r) for r in self._get_topo_heads(repo)]
+ if any(n in self._closednodes for n in topo_heads):
+ return
+ for branch, heads in self._entries.items():
+ if heads == topo_heads:
+ self._pure_topo_branch = branch
+ break
-class remotebranchcache(branchcache):
+class remotebranchcache(_BaseBranchCache):
"""Branchmap info for a remote connection, should not write locally"""
- def write(self, repo):
- pass
+ def __init__(
+ self,
+ repo: "localrepo.localrepository",
+ entries: Union[
+ Dict[bytes, List[bytes]], Iterable[Tuple[bytes, List[bytes]]]
+ ] = (),
+ closednodes: Optional[Set[bytes]] = None,
+ ) -> None:
+ super().__init__(repo=repo, entries=entries, closed_nodes=closednodes)
# Revision branch info cache
--- a/mercurial/cacheutil.py Mon Mar 25 02:09:15 2024 +0100
+++ b/mercurial/cacheutil.py Mon Mar 25 16:27:48 2024 +0000
@@ -14,6 +14,8 @@
# ones. Therefore copy all branch caches over.
cachefiles = [b'branch2']
cachefiles += [b'branch2-%s' % f for f in repoview.filtertable]
+ cachefiles += [b'branch3']
+ cachefiles += [b'branch3-%s' % f for f in repoview.filtertable]
cachefiles += [b'rbc-names-v1', b'rbc-revs-v1']
cachefiles += [b'tags2']
cachefiles += [b'tags2-%s' % f for f in repoview.filtertable]
--- a/mercurial/changelog.py Mon Mar 25 02:09:15 2024 +0100
+++ b/mercurial/changelog.py Mon Mar 25 16:27:48 2024 +0000
@@ -327,6 +327,9 @@
self._filteredrevs_hashcache = {}
self._copiesstorage = opener.options.get(b'copies-storage')
+ def __contains__(self, rev):
+ return (0 <= rev < len(self)) and rev not in self._filteredrevs
+
@property
def filteredrevs(self):
return self._filteredrevs
--- a/mercurial/configitems.toml Mon Mar 25 02:09:15 2024 +0100
+++ b/mercurial/configitems.toml Mon Mar 25 16:27:48 2024 +0000
@@ -719,6 +719,15 @@
name = "auto-publish"
default = "publish"
+
+# The current implementation of the filtering/injecting of topological heads is
+# naive and need proper benchmark and optimisation because we can envision
+# moving the the v3 of the branch-cache format out of experimental
+[[items]]
+section = "experimental"
+name = "branch-cache-v3"
+default = false
+
[[items]]
section = "experimental"
name = "bundle-phases"
--- a/mercurial/interfaces/repository.py Mon Mar 25 02:09:15 2024 +0100
+++ b/mercurial/interfaces/repository.py Mon Mar 25 16:27:48 2024 +0000
@@ -54,6 +54,8 @@
CACHE_BRANCHMAP_SERVED = b"branchmap-served"
# Warm internal changelog cache (eg: persistent nodemap)
CACHE_CHANGELOG_CACHE = b"changelog-cache"
+# check of a branchmap can use the "pure topo" mode
+CACHE_BRANCHMAP_DETECT_PURE_TOPO = b"branchmap-detect-pure-topo"
# Warm full manifest cache
CACHE_FULL_MANIFEST = b"full-manifest"
# Warm file-node-tags cache
@@ -78,6 +80,7 @@
CACHES_ALL = {
CACHE_BRANCHMAP_SERVED,
CACHE_BRANCHMAP_ALL,
+ CACHE_BRANCHMAP_DETECT_PURE_TOPO,
CACHE_CHANGELOG_CACHE,
CACHE_FILE_NODE_TAGS,
CACHE_FULL_MANIFEST,
--- a/mercurial/localrepo.py Mon Mar 25 02:09:15 2024 +0100
+++ b/mercurial/localrepo.py Mon Mar 25 16:27:48 2024 +0000
@@ -2923,12 +2923,14 @@
if repository.CACHE_BRANCHMAP_SERVED in caches:
if tr is None or tr.changes[b'origrepolen'] < len(self):
- # accessing the 'served' branchmap should refresh all the others,
self.ui.debug(b'updating the branch cache\n')
- self.filtered(b'served').branchmap()
- self.filtered(b'served.hidden').branchmap()
- # flush all possibly delayed write.
- self._branchcaches.write_delayed(self)
+ dpt = repository.CACHE_BRANCHMAP_DETECT_PURE_TOPO in caches
+ served = self.filtered(b'served')
+ self._branchcaches.update_disk(served, detect_pure_topo=dpt)
+ served_hidden = self.filtered(b'served.hidden')
+ self._branchcaches.update_disk(
+ served_hidden, detect_pure_topo=dpt
+ )
if repository.CACHE_CHANGELOG_CACHE in caches:
self.changelog.update_caches(transaction=tr)
@@ -2971,9 +2973,14 @@
# even if they haven't explicitly been requested yet (if they've
# never been used by hg, they won't ever have been written, even if
# they're a subset of another kind of cache that *has* been used).
+ dpt = repository.CACHE_BRANCHMAP_DETECT_PURE_TOPO in caches
+
for filt in repoview.filtertable.keys():
filtered = self.filtered(filt)
- filtered.branchmap().write(filtered)
+ self._branchcaches.update_disk(filtered, detect_pure_topo=dpt)
+
+ # flush all possibly delayed write.
+ self._branchcaches.write_dirty(self)
def invalidatecaches(self):
if '_tagscache' in vars(self):
--- a/mercurial/repoview.py Mon Mar 25 02:09:15 2024 +0100
+++ b/mercurial/repoview.py Mon Mar 25 16:27:48 2024 +0000
@@ -397,6 +397,9 @@
"""
def __init__(self, repo, filtername, visibilityexceptions=None):
+ if filtername is None:
+ msg = "repoview should have a non-None filtername"
+ raise error.ProgrammingError(msg)
object.__setattr__(self, '_unfilteredrepo', repo)
object.__setattr__(self, 'filtername', filtername)
object.__setattr__(self, '_clcachekey', None)
--- a/mercurial/scmutil.py Mon Mar 25 02:09:15 2024 +0100
+++ b/mercurial/scmutil.py Mon Mar 25 16:27:48 2024 +0000
@@ -349,7 +349,7 @@
self._newfiles.add(f)
-def filteredhash(repo, maxrev, needobsolete=False):
+def combined_filtered_and_obsolete_hash(repo, maxrev, needobsolete=False):
"""build hash of filtered revisions in the current repoview.
Multiple caches perform up-to-date validation by checking that the
@@ -375,16 +375,69 @@
result = cl._filteredrevs_hashcache.get(key)
if not result:
- revs = sorted(r for r in cl.filteredrevs | obsrevs if r <= maxrev)
+ revs, obs_revs = _filtered_and_obs_revs(repo, maxrev)
+ if needobsolete:
+ revs = revs | obs_revs
+ revs = sorted(revs)
if revs:
- s = hashutil.sha1()
- for rev in revs:
- s.update(b'%d;' % rev)
- result = s.digest()
+ result = _hash_revs(revs)
cl._filteredrevs_hashcache[key] = result
return result
+def filtered_and_obsolete_hash(repo, maxrev):
+ """build hashs of filtered and obsolete revisions in the current repoview.
+
+ Multiple caches perform up-to-date validation by checking that the
+ tiprev and tipnode stored in the cache file match the current repository.
+ However, this is not sufficient for validating repoviews because the set
+ of revisions in the view may change without the repository tiprev and
+ tipnode changing.
+
+ This function hashes all the revs filtered from the view up to maxrev and
+ returns that SHA-1 digest. The obsolete revisions hashed are only the
+ non-filtered one.
+ """
+ cl = repo.changelog
+ obs_set = obsolete.getrevs(repo, b'obsolete')
+ key = (maxrev, hash(cl.filteredrevs), hash(obs_set))
+
+ result = cl._filteredrevs_hashcache.get(key)
+ if result is None:
+ filtered_hash = None
+ obs_hash = None
+ filtered_revs, obs_revs = _filtered_and_obs_revs(repo, maxrev)
+ if filtered_revs:
+ filtered_hash = _hash_revs(filtered_revs)
+ if obs_revs:
+ obs_hash = _hash_revs(obs_revs)
+ result = (filtered_hash, obs_hash)
+ cl._filteredrevs_hashcache[key] = result
+ return result
+
+
+def _filtered_and_obs_revs(repo, max_rev):
+ """return the set of filtered and non-filtered obsolete revision"""
+ cl = repo.changelog
+ obs_set = obsolete.getrevs(repo, b'obsolete')
+ filtered_set = cl.filteredrevs
+ if cl.filteredrevs:
+ obs_set = obs_set - cl.filteredrevs
+ if max_rev < (len(cl) - 1):
+ # there might be revision to filter out
+ filtered_set = set(r for r in filtered_set if r <= max_rev)
+ obs_set = set(r for r in obs_set if r <= max_rev)
+ return (filtered_set, obs_set)
+
+
+def _hash_revs(revs):
+ """return a hash from a list of revision numbers"""
+ s = hashutil.sha1()
+ for rev in revs:
+ s.update(b'%d;' % rev)
+ return s.digest()
+
+
def walkrepos(path, followsym=False, seen_dirs=None, recurse=False):
"""yield every hg repository under path, always recursively.
The recurse flag will only control recursion into repo working dirs"""
--- a/mercurial/tags.py Mon Mar 25 02:09:15 2024 +0100
+++ b/mercurial/tags.py Mon Mar 25 16:27:48 2024 +0000
@@ -433,7 +433,11 @@
if (
cacherev == tiprev
and cachenode == tipnode
- and cachehash == scmutil.filteredhash(repo, tiprev)
+ and cachehash
+ == scmutil.combined_filtered_and_obsolete_hash(
+ repo,
+ tiprev,
+ )
):
tags = _readtags(ui, repo, cachelines, cachefile.name)
cachefile.close()
@@ -441,7 +445,14 @@
if cachefile:
cachefile.close() # ignore rest of file
- valid = (tiprev, tipnode, scmutil.filteredhash(repo, tiprev))
+ valid = (
+ tiprev,
+ tipnode,
+ scmutil.combined_filtered_and_obsolete_hash(
+ repo,
+ tiprev,
+ ),
+ )
repoheads = repo.heads()
# Case 2 (uncommon): empty repo; get out quickly and don't bother
--- a/mercurial/utils/repoviewutil.py Mon Mar 25 02:09:15 2024 +0100
+++ b/mercurial/utils/repoviewutil.py Mon Mar 25 16:27:48 2024 +0000
@@ -6,6 +6,7 @@
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
+from .. import error
### Nearest subset relation
# Nearest subset of filter X is a filter Y so that:
@@ -21,3 +22,30 @@
b'served': b'immutable',
b'immutable': b'base',
}
+
+
+def get_ordered_subset():
+ """return a list of subset name from dependencies to dependents"""
+ _unfinalized = set(subsettable.values())
+ ordered = []
+
+ # the subset table is expected to be small so we do the stupid N² version
+ # of the algorithm
+ while _unfinalized:
+ this_level = []
+ for candidate in _unfinalized:
+ dependency = subsettable.get(candidate)
+ if dependency not in _unfinalized:
+ this_level.append(candidate)
+
+ if not this_level:
+ msg = "cyclic dependencies in repoview subset %r"
+ msg %= subsettable
+ raise error.ProgrammingError(msg)
+
+ this_level.sort(key=lambda x: x if x is not None else '')
+
+ ordered.extend(this_level)
+ _unfinalized.difference_update(this_level)
+
+ return ordered
--- a/rust/hg-core/src/revlog/index.rs Mon Mar 25 02:09:15 2024 +0100
+++ b/rust/hg-core/src/revlog/index.rs Mon Mar 25 16:27:48 2024 +0000
@@ -18,11 +18,12 @@
};
pub const INDEX_ENTRY_SIZE: usize = 64;
+pub const INDEX_HEADER_SIZE: usize = 4;
pub const COMPRESSION_MODE_INLINE: u8 = 2;
#[derive(Debug)]
pub struct IndexHeader {
- pub(super) header_bytes: [u8; 4],
+ pub(super) header_bytes: [u8; INDEX_HEADER_SIZE],
}
#[derive(Copy, Clone)]
@@ -92,14 +93,21 @@
truncation: Option<usize>,
/// Bytes that were added after reading the index
added: Vec<u8>,
+ first_entry: [u8; INDEX_ENTRY_SIZE],
}
impl IndexData {
pub fn new(bytes: Box<dyn Deref<Target = [u8]> + Send + Sync>) -> Self {
+ let mut first_entry = [0; INDEX_ENTRY_SIZE];
+ if bytes.len() >= INDEX_ENTRY_SIZE {
+ first_entry[INDEX_HEADER_SIZE..]
+ .copy_from_slice(&bytes[INDEX_HEADER_SIZE..INDEX_ENTRY_SIZE])
+ }
Self {
bytes,
truncation: None,
added: vec![],
+ first_entry,
}
}
@@ -356,7 +364,6 @@
let end = offset + INDEX_ENTRY_SIZE;
let entry = IndexEntry {
bytes: &bytes[offset..end],
- offset_override: None,
};
offset += INDEX_ENTRY_SIZE + entry.compressed_len() as usize;
@@ -449,11 +456,17 @@
if rev == NULL_REVISION {
return None;
}
- Some(if self.is_inline() {
- self.get_entry_inline(rev)
+ if rev.0 == 0 {
+ Some(IndexEntry {
+ bytes: &self.bytes.first_entry[..],
+ })
} else {
- self.get_entry_separated(rev)
- })
+ Some(if self.is_inline() {
+ self.get_entry_inline(rev)
+ } else {
+ self.get_entry_separated(rev)
+ })
+ }
}
/// Return the binary content of the index entry for the given revision
@@ -512,13 +525,7 @@
let end = start + INDEX_ENTRY_SIZE;
let bytes = &self.bytes[start..end];
- // See IndexEntry for an explanation of this override.
- let offset_override = Some(end);
-
- IndexEntry {
- bytes,
- offset_override,
- }
+ IndexEntry { bytes }
}
fn get_entry_separated(&self, rev: Revision) -> IndexEntry {
@@ -526,20 +533,12 @@
let end = start + INDEX_ENTRY_SIZE;
let bytes = &self.bytes[start..end];
- // Override the offset of the first revision as its bytes are used
- // for the index's metadata (saving space because it is always 0)
- let offset_override = if rev == Revision(0) { Some(0) } else { None };
-
- IndexEntry {
- bytes,
- offset_override,
- }
+ IndexEntry { bytes }
}
fn null_entry(&self) -> IndexEntry {
IndexEntry {
bytes: &[0; INDEX_ENTRY_SIZE],
- offset_override: Some(0),
}
}
@@ -755,13 +754,20 @@
revision_data: RevisionDataParams,
) -> Result<(), RevlogError> {
revision_data.validate()?;
+ let entry_v1 = revision_data.into_v1();
+ let entry_bytes = entry_v1.as_bytes();
+ if self.bytes.len() == 0 {
+ self.bytes.first_entry[INDEX_HEADER_SIZE..].copy_from_slice(
+ &entry_bytes[INDEX_HEADER_SIZE..INDEX_ENTRY_SIZE],
+ )
+ }
if self.is_inline() {
let new_offset = self.bytes.len();
if let Some(offsets) = &mut *self.get_offsets_mut() {
offsets.push(new_offset)
}
}
- self.bytes.added.extend(revision_data.into_v1().as_bytes());
+ self.bytes.added.extend(entry_bytes);
self.clear_head_revs();
Ok(())
}
@@ -1654,7 +1660,6 @@
let end = offset + INDEX_ENTRY_SIZE;
let entry = IndexEntry {
bytes: &bytes[offset..end],
- offset_override: None,
};
offset += INDEX_ENTRY_SIZE + entry.compressed_len() as usize;
@@ -1678,29 +1683,14 @@
#[derive(Debug)]
pub struct IndexEntry<'a> {
bytes: &'a [u8],
- /// Allows to override the offset value of the entry.
- ///
- /// For interleaved index and data, the offset stored in the index
- /// corresponds to the separated data offset.
- /// It has to be overridden with the actual offset in the interleaved
- /// index which is just after the index block.
- ///
- /// For separated index and data, the offset stored in the first index
- /// entry is mixed with the index headers.
- /// It has to be overridden with 0.
- offset_override: Option<usize>,
}
impl<'a> IndexEntry<'a> {
/// Return the offset of the data.
pub fn offset(&self) -> usize {
- if let Some(offset_override) = self.offset_override {
- offset_override
- } else {
- let mut bytes = [0; 8];
- bytes[2..8].copy_from_slice(&self.bytes[0..=5]);
- BigEndian::read_u64(&bytes[..]) as usize
- }
+ let mut bytes = [0; 8];
+ bytes[2..8].copy_from_slice(&self.bytes[0..=5]);
+ BigEndian::read_u64(&bytes[..]) as usize
}
pub fn raw_offset(&self) -> u64 {
BigEndian::read_u64(&self.bytes[0..8])
@@ -1956,32 +1946,15 @@
#[test]
fn test_offset() {
let bytes = IndexEntryBuilder::new().with_offset(1).build();
- let entry = IndexEntry {
- bytes: &bytes,
- offset_override: None,
- };
+ let entry = IndexEntry { bytes: &bytes };
assert_eq!(entry.offset(), 1)
}
#[test]
- fn test_with_overridden_offset() {
- let bytes = IndexEntryBuilder::new().with_offset(1).build();
- let entry = IndexEntry {
- bytes: &bytes,
- offset_override: Some(2),
- };
-
- assert_eq!(entry.offset(), 2)
- }
-
- #[test]
fn test_compressed_len() {
let bytes = IndexEntryBuilder::new().with_compressed_len(1).build();
- let entry = IndexEntry {
- bytes: &bytes,
- offset_override: None,
- };
+ let entry = IndexEntry { bytes: &bytes };
assert_eq!(entry.compressed_len(), 1)
}
@@ -1989,10 +1962,7 @@
#[test]
fn test_uncompressed_len() {
let bytes = IndexEntryBuilder::new().with_uncompressed_len(1).build();
- let entry = IndexEntry {
- bytes: &bytes,
- offset_override: None,
- };
+ let entry = IndexEntry { bytes: &bytes };
assert_eq!(entry.uncompressed_len(), 1)
}
@@ -2002,10 +1972,7 @@
let bytes = IndexEntryBuilder::new()
.with_base_revision_or_base_of_delta_chain(Revision(1))
.build();
- let entry = IndexEntry {
- bytes: &bytes,
- offset_override: None,
- };
+ let entry = IndexEntry { bytes: &bytes };
assert_eq!(entry.base_revision_or_base_of_delta_chain(), 1.into())
}
@@ -2016,10 +1983,7 @@
.with_link_revision(Revision(123))
.build();
- let entry = IndexEntry {
- bytes: &bytes,
- offset_override: None,
- };
+ let entry = IndexEntry { bytes: &bytes };
assert_eq!(entry.link_revision(), 123.into());
}
@@ -2028,10 +1992,7 @@
fn p1_test() {
let bytes = IndexEntryBuilder::new().with_p1(Revision(123)).build();
- let entry = IndexEntry {
- bytes: &bytes,
- offset_override: None,
- };
+ let entry = IndexEntry { bytes: &bytes };
assert_eq!(entry.p1(), 123.into());
}
@@ -2040,10 +2001,7 @@
fn p2_test() {
let bytes = IndexEntryBuilder::new().with_p2(Revision(123)).build();
- let entry = IndexEntry {
- bytes: &bytes,
- offset_override: None,
- };
+ let entry = IndexEntry { bytes: &bytes };
assert_eq!(entry.p2(), 123.into());
}
@@ -2054,10 +2012,7 @@
.unwrap();
let bytes = IndexEntryBuilder::new().with_node(node).build();
- let entry = IndexEntry {
- bytes: &bytes,
- offset_override: None,
- };
+ let entry = IndexEntry { bytes: &bytes };
assert_eq!(*entry.hash(), node);
}
--- a/rust/hg-core/src/revlog/mod.rs Mon Mar 25 02:09:15 2024 +0100
+++ b/rust/hg-core/src/revlog/mod.rs Mon Mar 25 16:27:48 2024 +0000
@@ -29,6 +29,7 @@
use self::node::{NODE_BYTES_LENGTH, NULL_NODE};
use self::nodemap_docket::NodeMapDocket;
use super::index::Index;
+use super::index::INDEX_ENTRY_SIZE;
use super::nodemap::{NodeMap, NodeMapError};
use crate::errors::HgError;
use crate::vfs::Vfs;
@@ -531,7 +532,12 @@
.index
.get_entry(rev)
.ok_or(RevlogError::InvalidRevision)?;
- let start = index_entry.offset();
+ let offset = index_entry.offset();
+ let start = if self.index.is_inline() {
+ offset + ((rev.0 as usize + 1) * INDEX_ENTRY_SIZE)
+ } else {
+ offset
+ };
let end = start + index_entry.compressed_len() as usize;
let data = if self.index.is_inline() {
self.index.data(start, end)
@@ -859,7 +865,7 @@
#[cfg(test)]
mod tests {
use super::*;
- use crate::index::{IndexEntryBuilder, INDEX_ENTRY_SIZE};
+ use crate::index::IndexEntryBuilder;
use itertools::Itertools;
#[test]
@@ -897,15 +903,10 @@
.is_first(true)
.with_version(1)
.with_inline(true)
- .with_offset(INDEX_ENTRY_SIZE)
.with_node(node0)
.build();
- let entry1_bytes = IndexEntryBuilder::new()
- .with_offset(INDEX_ENTRY_SIZE)
- .with_node(node1)
- .build();
+ let entry1_bytes = IndexEntryBuilder::new().with_node(node1).build();
let entry2_bytes = IndexEntryBuilder::new()
- .with_offset(INDEX_ENTRY_SIZE)
.with_p1(Revision(0))
.with_p2(Revision(1))
.with_node(node2)
@@ -971,13 +972,9 @@
.is_first(true)
.with_version(1)
.with_inline(true)
- .with_offset(INDEX_ENTRY_SIZE)
.with_node(node0)
.build();
- let entry1_bytes = IndexEntryBuilder::new()
- .with_offset(INDEX_ENTRY_SIZE)
- .with_node(node1)
- .build();
+ let entry1_bytes = IndexEntryBuilder::new().with_node(node1).build();
let contents = vec![entry0_bytes, entry1_bytes]
.into_iter()
.flatten()
--- a/tests/common-pattern.py Mon Mar 25 02:09:15 2024 +0100
+++ b/tests/common-pattern.py Mon Mar 25 16:27:48 2024 +0000
@@ -114,14 +114,6 @@
br'(.*file:/)/?(/\$TESTTMP.*)',
lambda m: m.group(1) + b'*' + m.group(2) + b' (glob)',
),
- # `hg clone --stream` output
- (
- br'transferred (\S+?) KB in \S+? seconds \(.+?/sec\)(?: \(glob\))?(.*)',
- lambda m: (
- br'transferred %s KB in * seconds (* */sec) (glob)%s'
- % (m.group(1), m.group(2))
- ),
- ),
# `discovery debug output
(
br'\b(\d+) total queries in \d.\d\d\d\ds\b',
--- a/tests/test-acl.t Mon Mar 25 02:09:15 2024 +0100
+++ b/tests/test-acl.t Mon Mar 25 16:27:48 2024 +0000
@@ -167,7 +167,6 @@
listing keys for "phases"
checking for updated bookmarks
listing keys for "bookmarks"
- invalid branch cache (served): tip differs
listing keys for "bookmarks"
3 changesets found
list of changesets:
@@ -187,7 +186,6 @@
bundle2-input-part: total payload size * (glob)
bundle2-input-part: "check:updated-heads" supported
bundle2-input-part: total payload size * (glob)
- invalid branch cache (served): tip differs
bundle2-input-part: "changegroup" (params: 1 mandatory) supported
adding changesets
add changeset ef1ea85a6374
@@ -237,7 +235,6 @@
listing keys for "phases"
checking for updated bookmarks
listing keys for "bookmarks"
- invalid branch cache (served): tip differs
listing keys for "bookmarks"
3 changesets found
list of changesets:
@@ -257,7 +254,6 @@
bundle2-input-part: total payload size * (glob)
bundle2-input-part: "check:updated-heads" supported
bundle2-input-part: total payload size * (glob)
- invalid branch cache (served): tip differs
bundle2-input-part: "changegroup" (params: 1 mandatory) supported
adding changesets
add changeset ef1ea85a6374
@@ -317,7 +313,6 @@
listing keys for "phases"
checking for updated bookmarks
listing keys for "bookmarks"
- invalid branch cache (served): tip differs
listing keys for "bookmarks"
3 changesets found
list of changesets:
@@ -337,7 +332,6 @@
bundle2-input-part: total payload size * (glob)
bundle2-input-part: "check:updated-heads" supported
bundle2-input-part: total payload size * (glob)
- invalid branch cache (served): tip differs
bundle2-input-part: "changegroup" (params: 1 mandatory) supported
adding changesets
add changeset ef1ea85a6374
@@ -388,7 +382,6 @@
listing keys for "phases"
checking for updated bookmarks
listing keys for "bookmarks"
- invalid branch cache (served): tip differs
listing keys for "bookmarks"
3 changesets found
list of changesets:
@@ -408,7 +401,6 @@
bundle2-input-part: total payload size * (glob)
bundle2-input-part: "check:updated-heads" supported
bundle2-input-part: total payload size * (glob)
- invalid branch cache (served): tip differs
bundle2-input-part: "changegroup" (params: 1 mandatory) supported
adding changesets
add changeset ef1ea85a6374
@@ -463,7 +455,6 @@
listing keys for "phases"
checking for updated bookmarks
listing keys for "bookmarks"
- invalid branch cache (served): tip differs
listing keys for "bookmarks"
3 changesets found
list of changesets:
@@ -483,7 +474,6 @@
bundle2-input-part: total payload size * (glob)
bundle2-input-part: "check:updated-heads" supported
bundle2-input-part: total payload size * (glob)
- invalid branch cache (served): tip differs
bundle2-input-part: "changegroup" (params: 1 mandatory) supported
adding changesets
add changeset ef1ea85a6374
@@ -535,7 +525,6 @@
listing keys for "phases"
checking for updated bookmarks
listing keys for "bookmarks"
- invalid branch cache (served): tip differs
listing keys for "bookmarks"
3 changesets found
list of changesets:
@@ -555,7 +544,6 @@
bundle2-input-part: total payload size * (glob)
bundle2-input-part: "check:updated-heads" supported
bundle2-input-part: total payload size * (glob)
- invalid branch cache (served): tip differs
bundle2-input-part: "changegroup" (params: 1 mandatory) supported
adding changesets
add changeset ef1ea85a6374
@@ -612,7 +600,6 @@
listing keys for "phases"
checking for updated bookmarks
listing keys for "bookmarks"
- invalid branch cache (served): tip differs
listing keys for "bookmarks"
3 changesets found
list of changesets:
@@ -632,7 +619,6 @@
bundle2-input-part: total payload size * (glob)
bundle2-input-part: "check:updated-heads" supported
bundle2-input-part: total payload size * (glob)
- invalid branch cache (served): tip differs
bundle2-input-part: "changegroup" (params: 1 mandatory) supported
adding changesets
add changeset ef1ea85a6374
@@ -686,7 +672,6 @@
listing keys for "phases"
checking for updated bookmarks
listing keys for "bookmarks"
- invalid branch cache (served): tip differs
listing keys for "bookmarks"
3 changesets found
list of changesets:
@@ -706,7 +691,6 @@
bundle2-input-part: total payload size * (glob)
bundle2-input-part: "check:updated-heads" supported
bundle2-input-part: total payload size * (glob)
- invalid branch cache (served): tip differs
bundle2-input-part: "changegroup" (params: 1 mandatory) supported
adding changesets
add changeset ef1ea85a6374
@@ -761,7 +745,6 @@
listing keys for "phases"
checking for updated bookmarks
listing keys for "bookmarks"
- invalid branch cache (served): tip differs
listing keys for "bookmarks"
1 changesets found
list of changesets:
@@ -783,7 +766,6 @@
bundle2-input-part: total payload size * (glob)
bundle2-input-part: "check:updated-heads" supported
bundle2-input-part: total payload size * (glob)
- invalid branch cache (served): tip differs
bundle2-input-part: "changegroup" (params: 1 mandatory) supported
adding changesets
add changeset ef1ea85a6374
@@ -810,7 +792,6 @@
acl: bookmark access granted: "ef1ea85a6374b77d6da9dcda9541f498f2d17df7" on bookmark "moving-bookmark"
bundle2-input-bundle: 7 parts total
updating the branch cache
- invalid branch cache (served.hidden): tip differs
added 1 changesets with 1 changes to 1 files
bundle2-output-bundle: "HG20", 1 parts total
bundle2-output-part: "reply:changegroup" (advisory) (params: 0 advisory) empty payload
@@ -850,7 +831,6 @@
listing keys for "phases"
checking for updated bookmarks
listing keys for "bookmarks"
- invalid branch cache (served): tip differs
listing keys for "bookmarks"
1 changesets found
list of changesets:
@@ -872,7 +852,6 @@
bundle2-input-part: total payload size * (glob)
bundle2-input-part: "check:updated-heads" supported
bundle2-input-part: total payload size * (glob)
- invalid branch cache (served): tip differs
bundle2-input-part: "changegroup" (params: 1 mandatory) supported
adding changesets
add changeset ef1ea85a6374
@@ -939,7 +918,6 @@
listing keys for "phases"
checking for updated bookmarks
listing keys for "bookmarks"
- invalid branch cache (served): tip differs
listing keys for "bookmarks"
3 changesets found
list of changesets:
@@ -959,7 +937,6 @@
bundle2-input-part: total payload size * (glob)
bundle2-input-part: "check:updated-heads" supported
bundle2-input-part: total payload size * (glob)
- invalid branch cache (served): tip differs
bundle2-input-part: "changegroup" (params: 1 mandatory) supported
adding changesets
add changeset ef1ea85a6374
@@ -1025,7 +1002,6 @@
listing keys for "phases"
checking for updated bookmarks
listing keys for "bookmarks"
- invalid branch cache (served): tip differs
listing keys for "bookmarks"
3 changesets found
list of changesets:
@@ -1045,7 +1021,6 @@
bundle2-input-part: total payload size * (glob)
bundle2-input-part: "check:updated-heads" supported
bundle2-input-part: total payload size * (glob)
- invalid branch cache (served): tip differs
bundle2-input-part: "changegroup" (params: 1 mandatory) supported
adding changesets
add changeset ef1ea85a6374
@@ -1109,7 +1084,6 @@
listing keys for "phases"
checking for updated bookmarks
listing keys for "bookmarks"
- invalid branch cache (served): tip differs
listing keys for "bookmarks"
3 changesets found
list of changesets:
@@ -1129,7 +1103,6 @@
bundle2-input-part: total payload size * (glob)
bundle2-input-part: "check:updated-heads" supported
bundle2-input-part: total payload size * (glob)
- invalid branch cache (served): tip differs
bundle2-input-part: "changegroup" (params: 1 mandatory) supported
adding changesets
add changeset ef1ea85a6374
@@ -1187,7 +1160,6 @@
listing keys for "phases"
checking for updated bookmarks
listing keys for "bookmarks"
- invalid branch cache (served): tip differs
listing keys for "bookmarks"
3 changesets found
list of changesets:
@@ -1207,7 +1179,6 @@
bundle2-input-part: total payload size * (glob)
bundle2-input-part: "check:updated-heads" supported
bundle2-input-part: total payload size * (glob)
- invalid branch cache (served): tip differs
bundle2-input-part: "changegroup" (params: 1 mandatory) supported
adding changesets
add changeset ef1ea85a6374
@@ -1276,7 +1247,6 @@
listing keys for "phases"
checking for updated bookmarks
listing keys for "bookmarks"
- invalid branch cache (served): tip differs
listing keys for "bookmarks"
3 changesets found
list of changesets:
@@ -1296,7 +1266,6 @@
bundle2-input-part: total payload size * (glob)
bundle2-input-part: "check:updated-heads" supported
bundle2-input-part: total payload size * (glob)
- invalid branch cache (served): tip differs
bundle2-input-part: "changegroup" (params: 1 mandatory) supported
adding changesets
add changeset ef1ea85a6374
@@ -1366,7 +1335,6 @@
listing keys for "phases"
checking for updated bookmarks
listing keys for "bookmarks"
- invalid branch cache (served): tip differs
listing keys for "bookmarks"
3 changesets found
list of changesets:
@@ -1386,7 +1354,6 @@
bundle2-input-part: total payload size * (glob)
bundle2-input-part: "check:updated-heads" supported
bundle2-input-part: total payload size * (glob)
- invalid branch cache (served): tip differs
bundle2-input-part: "changegroup" (params: 1 mandatory) supported
adding changesets
add changeset ef1ea85a6374
@@ -1453,7 +1420,6 @@
listing keys for "phases"
checking for updated bookmarks
listing keys for "bookmarks"
- invalid branch cache (served): tip differs
listing keys for "bookmarks"
3 changesets found
list of changesets:
@@ -1473,7 +1439,6 @@
bundle2-input-part: total payload size * (glob)
bundle2-input-part: "check:updated-heads" supported
bundle2-input-part: total payload size * (glob)
- invalid branch cache (served): tip differs
bundle2-input-part: "changegroup" (params: 1 mandatory) supported
adding changesets
add changeset ef1ea85a6374
@@ -1536,7 +1501,6 @@
listing keys for "phases"
checking for updated bookmarks
listing keys for "bookmarks"
- invalid branch cache (served): tip differs
listing keys for "bookmarks"
3 changesets found
list of changesets:
@@ -1556,7 +1520,6 @@
bundle2-input-part: total payload size * (glob)
bundle2-input-part: "check:updated-heads" supported
bundle2-input-part: total payload size * (glob)
- invalid branch cache (served): tip differs
bundle2-input-part: "changegroup" (params: 1 mandatory) supported
adding changesets
add changeset ef1ea85a6374
@@ -1623,7 +1586,6 @@
listing keys for "phases"
checking for updated bookmarks
listing keys for "bookmarks"
- invalid branch cache (served): tip differs
listing keys for "bookmarks"
3 changesets found
list of changesets:
@@ -1643,7 +1605,6 @@
bundle2-input-part: total payload size * (glob)
bundle2-input-part: "check:updated-heads" supported
bundle2-input-part: total payload size * (glob)
- invalid branch cache (served): tip differs
bundle2-input-part: "changegroup" (params: 1 mandatory) supported
adding changesets
add changeset ef1ea85a6374
@@ -1797,7 +1758,6 @@
bundle2-input-part: total payload size * (glob)
bundle2-input-bundle: 5 parts total
updating the branch cache
- invalid branch cache (served.hidden): tip differs
added 4 changesets with 4 changes to 4 files (+1 heads)
bundle2-output-bundle: "HG20", 1 parts total
bundle2-output-part: "reply:changegroup" (advisory) (params: 0 advisory) empty payload
@@ -2104,7 +2064,6 @@
bundle2-input-part: total payload size * (glob)
bundle2-input-bundle: 5 parts total
updating the branch cache
- invalid branch cache (served.hidden): tip differs
added 4 changesets with 4 changes to 4 files (+1 heads)
bundle2-output-bundle: "HG20", 1 parts total
bundle2-output-part: "reply:changegroup" (advisory) (params: 0 advisory) empty payload
@@ -2196,7 +2155,6 @@
bundle2-input-part: total payload size * (glob)
bundle2-input-bundle: 5 parts total
updating the branch cache
- invalid branch cache (served.hidden): tip differs
added 4 changesets with 4 changes to 4 files (+1 heads)
bundle2-output-bundle: "HG20", 1 parts total
bundle2-output-part: "reply:changegroup" (advisory) (params: 0 advisory) empty payload
@@ -2360,7 +2318,6 @@
bundle2-input-part: total payload size * (glob)
bundle2-input-bundle: 5 parts total
updating the branch cache
- invalid branch cache (served.hidden): tip differs
added 4 changesets with 4 changes to 4 files (+1 heads)
bundle2-output-bundle: "HG20", 1 parts total
bundle2-output-part: "reply:changegroup" (advisory) (params: 0 advisory) empty payload
--- a/tests/test-blackbox.t Mon Mar 25 02:09:15 2024 +0100
+++ b/tests/test-blackbox.t Mon Mar 25 16:27:48 2024 +0000
@@ -127,13 +127,11 @@
added 1 changesets with 1 changes to 1 files
new changesets d02f48003e62
(run 'hg update' to get a working copy)
- $ hg blackbox -l 6
+ $ hg blackbox -l 4
1970-01-01 00:00:00.000 bob @6563da9dcf87b1949716e38ff3e3dfaa3198eb06 (5000)> wrote branch cache (served) with 1 labels and 2 nodes
- 1970-01-01 00:00:00.000 bob @6563da9dcf87b1949716e38ff3e3dfaa3198eb06 (5000)> updated branch cache (served.hidden) in * seconds (glob)
- 1970-01-01 00:00:00.000 bob @6563da9dcf87b1949716e38ff3e3dfaa3198eb06 (5000)> wrote branch cache (served.hidden) with 1 labels and 2 nodes
1970-01-01 00:00:00.000 bob @6563da9dcf87b1949716e38ff3e3dfaa3198eb06 (5000)> 1 incoming changes - new heads: d02f48003e62
1970-01-01 00:00:00.000 bob @6563da9dcf87b1949716e38ff3e3dfaa3198eb06 (5000)> pull exited 0 after * seconds (glob)
- 1970-01-01 00:00:00.000 bob @6563da9dcf87b1949716e38ff3e3dfaa3198eb06 (5000)> blackbox -l 6
+ 1970-01-01 00:00:00.000 bob @6563da9dcf87b1949716e38ff3e3dfaa3198eb06 (5000)> blackbox -l 4
we must not cause a failure if we cannot write to the log
@@ -190,13 +188,11 @@
$ hg strip tip
0 files updated, 0 files merged, 1 files removed, 0 files unresolved
saved backup bundle to $TESTTMP/blackboxtest2/.hg/strip-backup/*-backup.hg (glob)
- $ hg blackbox -l 6
+ $ hg blackbox -l 4
1970-01-01 00:00:00.000 bob @73f6ee326b27d820b0472f1a825e3a50f3dc489b (5000)> strip tip
1970-01-01 00:00:00.000 bob @6563da9dcf87b1949716e38ff3e3dfaa3198eb06 (5000)> saved backup bundle to $TESTTMP/blackboxtest2/.hg/strip-backup/73f6ee326b27-7612e004-backup.hg
- 1970-01-01 00:00:00.000 bob @6563da9dcf87b1949716e38ff3e3dfaa3198eb06 (5000)> updated branch cache (immutable) in * seconds (glob)
- 1970-01-01 00:00:00.000 bob @6563da9dcf87b1949716e38ff3e3dfaa3198eb06 (5000)> wrote branch cache (immutable) with 1 labels and 2 nodes
1970-01-01 00:00:00.000 bob @6563da9dcf87b1949716e38ff3e3dfaa3198eb06 (5000)> strip tip exited 0 after * seconds (glob)
- 1970-01-01 00:00:00.000 bob @6563da9dcf87b1949716e38ff3e3dfaa3198eb06 (5000)> blackbox -l 6
+ 1970-01-01 00:00:00.000 bob @6563da9dcf87b1949716e38ff3e3dfaa3198eb06 (5000)> blackbox -l 4
extension and python hooks - use the eol extension for a pythonhook
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/tests/test-branches-obsolete.t Mon Mar 25 16:27:48 2024 +0000
@@ -0,0 +1,563 @@
+================================================================
+test the interaction of the branch cache with obsolete changeset
+================================================================
+
+Some corner case have been covered by unrelated test (like rebase ones) this
+file meant to gather explicite testing of those.
+
+See also: test-obsolete-checkheads.t
+
+#testcases v2 v3
+
+ $ cat >> $HGRCPATH << EOF
+ > [phases]
+ > publish = false
+ > [experimental]
+ > evolution = all
+ > server.allow-hidden-access = *
+ > EOF
+
+#if v3
+ $ cat <<EOF >> $HGRCPATH
+ > [experimental]
+ > branch-cache-v3=yes
+ > EOF
+ $ CACHE_PREFIX=branch3
+#else
+ $ cat <<EOF >> $HGRCPATH
+ > [experimental]
+ > branch-cache-v3=no
+ > EOF
+ $ CACHE_PREFIX=branch2
+#endif
+
+ $ show_cache() {
+ > for cache_file in .hg/cache/$CACHE_PREFIX*; do
+ > echo "##### $cache_file"
+ > cat $cache_file
+ > done
+ > }
+
+Setup graph
+#############
+
+ $ . $RUNTESTDIR/testlib/common.sh
+
+graph with a single branch
+--------------------------
+
+We want some branching and some obsolescence
+
+ $ hg init main-single-branch
+ $ cd main-single-branch
+ $ mkcommit root
+ $ mkcommit A_1
+ $ mkcommit A_2
+ $ hg update 'desc("A_2")' --quiet
+ $ mkcommit B_1
+ $ mkcommit B_2
+ $ mkcommit B_3
+ $ mkcommit B_4
+ $ hg update 'desc("A_2")' --quiet
+ $ mkcommit A_3
+ created new head
+ $ mkcommit A_4
+ $ hg up null --quiet
+ $ hg clone --noupdate . ../main-single-branch-pre-ops
+ $ hg log -r 'desc("A_1")' -T '{node}' > ../main-single-branch-node_A1
+ $ hg log -r 'desc("A_2")' -T '{node}' > ../main-single-branch-node_A2
+ $ hg log -r 'desc("A_3")' -T '{node}' > ../main-single-branch-node_A3
+ $ hg log -r 'desc("A_4")' -T '{node}' > ../main-single-branch-node_A4
+ $ hg log -r 'desc("B_1")' -T '{node}' > ../main-single-branch-node_B1
+ $ hg log -r 'desc("B_2")' -T '{node}' > ../main-single-branch-node_B2
+ $ hg log -r 'desc("B_3")' -T '{node}' > ../main-single-branch-node_B3
+ $ hg log -r 'desc("B_4")' -T '{node}' > ../main-single-branch-node_B4
+
+(double check the heads are right before we obsolete)
+
+ $ hg log -R ../main-single-branch-pre-ops -G -T '{desc}\n'
+ o A_4
+ |
+ o A_3
+ |
+ | o B_4
+ | |
+ | o B_3
+ | |
+ | o B_2
+ | |
+ | o B_1
+ |/
+ o A_2
+ |
+ o A_1
+ |
+ o root
+
+ $ hg log -G -T '{desc}\n'
+ o A_4
+ |
+ o A_3
+ |
+ | o B_4
+ | |
+ | o B_3
+ | |
+ | o B_2
+ | |
+ | o B_1
+ |/
+ o A_2
+ |
+ o A_1
+ |
+ o root
+
+
+#if v2
+ $ show_cache
+ ##### .hg/cache/branch2-served
+ 3d808bbc94408ea19da905596d4079357a1f28be 8
+ 63ba7cd843d1e95aac1a24435befeb1909c53619 o default
+ 3d808bbc94408ea19da905596d4079357a1f28be o default
+#else
+ $ show_cache
+ ##### .hg/cache/branch3-served
+ tip-node=3d808bbc94408ea19da905596d4079357a1f28be tip-rev=8 topo-mode=pure
+ default
+#endif
+ $ hg log -T '{desc}\n' --rev 'head()'
+ B_4
+ A_4
+
+Absolete a couple of changes
+
+ $ for d in B2 B3 B4 A4; do
+ > hg debugobsolete --record-parents `cat ../main-single-branch-node_$d`;
+ > done
+ 1 new obsolescence markers
+ obsoleted 1 changesets
+ 2 new orphan changesets
+ 1 new obsolescence markers
+ obsoleted 1 changesets
+ 1 new obsolescence markers
+ obsoleted 1 changesets
+ 1 new obsolescence markers
+ obsoleted 1 changesets
+
+(double check the result is okay)
+
+ $ hg log -G -T '{desc}\n'
+ o A_3
+ |
+ | o B_1
+ |/
+ o A_2
+ |
+ o A_1
+ |
+ o root
+
+ $ hg heads -T '{desc}\n'
+ A_3
+ B_1
+#if v2
+ $ show_cache
+ ##### .hg/cache/branch2-served
+ 7c29ff2453bf38c75ee8982935739103c38a9284 7 f8006d64a10d35c011a5c5fa88be1e25c5929514
+ 550bb31f072912453ccbb503de1d554616911e88 o default
+ 7c29ff2453bf38c75ee8982935739103c38a9284 o default
+#else
+ $ show_cache
+ ##### .hg/cache/branch3-served
+ filtered-hash=f8006d64a10d35c011a5c5fa88be1e25c5929514 tip-node=7c29ff2453bf38c75ee8982935739103c38a9284 tip-rev=7 topo-mode=pure
+ default
+#endif
+ $ cd ..
+
+
+Actual testing
+##############
+
+Revealing obsolete changeset
+----------------------------
+
+Check that revealing obsolete changesets does not confuse branch computation and checks
+
+Revealing tipmost changeset
+~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+
+ $ cp -R ./main-single-branch tmp-repo
+ $ cd tmp-repo
+ $ hg update --hidden --rev 'desc("A_4")' --quiet
+ updated to hidden changeset 3d808bbc9440
+ (hidden revision '3d808bbc9440' is pruned)
+ $ hg log -G -T '{desc}\n'
+ @ A_4
+ |
+ o A_3
+ |
+ | o B_1
+ |/
+ o A_2
+ |
+ o A_1
+ |
+ o root
+
+ $ hg heads -T '{desc}\n'
+ A_3
+ B_1
+#if v2
+ $ show_cache
+ ##### .hg/cache/branch2
+ 3d808bbc94408ea19da905596d4079357a1f28be 8 a943c3355ad9e93654d58b1c934c7c4329a5d1d4
+ 550bb31f072912453ccbb503de1d554616911e88 o default
+ 7c29ff2453bf38c75ee8982935739103c38a9284 o default
+ ##### .hg/cache/branch2-served
+ 3d808bbc94408ea19da905596d4079357a1f28be 8 a943c3355ad9e93654d58b1c934c7c4329a5d1d4
+ 550bb31f072912453ccbb503de1d554616911e88 o default
+ 7c29ff2453bf38c75ee8982935739103c38a9284 o default
+#else
+ $ show_cache
+ ##### .hg/cache/branch3
+ obsolete-hash=b6d2b1f5b70f09c25c835edcae69be35f681605c tip-node=3d808bbc94408ea19da905596d4079357a1f28be tip-rev=8
+ 7c29ff2453bf38c75ee8982935739103c38a9284 o default
+ ##### .hg/cache/branch3-served
+ filtered-hash=f8006d64a10d35c011a5c5fa88be1e25c5929514 obsolete-hash=ac5282439f301518f362f37547fcd52bcc670373 tip-node=3d808bbc94408ea19da905596d4079357a1f28be tip-rev=8
+ 7c29ff2453bf38c75ee8982935739103c38a9284 o default
+#endif
+
+Even when computing branches from scratch
+
+ $ rm -rf .hg/cache/branch*
+ $ rm -rf .hg/wcache/branch*
+ $ hg heads -T '{desc}\n'
+ A_3
+ B_1
+#if v2
+ $ show_cache
+ ##### .hg/cache/branch2-served
+ 3d808bbc94408ea19da905596d4079357a1f28be 8 a943c3355ad9e93654d58b1c934c7c4329a5d1d4
+ 550bb31f072912453ccbb503de1d554616911e88 o default
+ 7c29ff2453bf38c75ee8982935739103c38a9284 o default
+#else
+ $ show_cache
+ ##### .hg/cache/branch3-served
+ filtered-hash=f8006d64a10d35c011a5c5fa88be1e25c5929514 obsolete-hash=ac5282439f301518f362f37547fcd52bcc670373 tip-node=3d808bbc94408ea19da905596d4079357a1f28be tip-rev=8
+ 7c29ff2453bf38c75ee8982935739103c38a9284 o default
+#endif
+
+And we can get back to normal
+
+ $ hg update null --quiet
+ $ hg heads -T '{desc}\n'
+ A_3
+ B_1
+#if v2
+ $ show_cache
+ ##### .hg/cache/branch2-served
+ 7c29ff2453bf38c75ee8982935739103c38a9284 7 f8006d64a10d35c011a5c5fa88be1e25c5929514
+ 550bb31f072912453ccbb503de1d554616911e88 o default
+ 7c29ff2453bf38c75ee8982935739103c38a9284 o default
+#else
+ $ show_cache
+ ##### .hg/cache/branch3-served
+ filtered-hash=f8006d64a10d35c011a5c5fa88be1e25c5929514 tip-node=7c29ff2453bf38c75ee8982935739103c38a9284 tip-rev=7 topo-mode=pure
+ default
+#endif
+
+ $ cd ..
+ $ rm -rf tmp-repo
+
+Revealing changeset in the middle of the changelog
+~~~~~~~~~~~~~~~~~~~~~~~~~~~------------------------
+
+Check that revealing an obsolete changeset does not confuse branch computation and checks
+
+ $ cp -R ./main-single-branch tmp-repo
+ $ cd tmp-repo
+ $ hg update --hidden --rev 'desc("B_3")' --quiet
+ updated to hidden changeset 9c996d7674bb
+ (hidden revision '9c996d7674bb' is pruned)
+ $ hg log -G -T '{desc}\n'
+ o A_3
+ |
+ | @ B_3
+ | |
+ | x B_2
+ | |
+ | o B_1
+ |/
+ o A_2
+ |
+ o A_1
+ |
+ o root
+
+ $ hg heads -T '{desc}\n'
+ A_3
+ B_1
+#if v2
+ $ show_cache
+ ##### .hg/cache/branch2
+ 3d808bbc94408ea19da905596d4079357a1f28be 8 a943c3355ad9e93654d58b1c934c7c4329a5d1d4
+ 550bb31f072912453ccbb503de1d554616911e88 o default
+ 7c29ff2453bf38c75ee8982935739103c38a9284 o default
+ ##### .hg/cache/branch2-served
+ 7c29ff2453bf38c75ee8982935739103c38a9284 7 f8006d64a10d35c011a5c5fa88be1e25c5929514
+ 550bb31f072912453ccbb503de1d554616911e88 o default
+ 7c29ff2453bf38c75ee8982935739103c38a9284 o default
+#else
+ $ show_cache
+ ##### .hg/cache/branch3
+ obsolete-hash=b6d2b1f5b70f09c25c835edcae69be35f681605c tip-node=3d808bbc94408ea19da905596d4079357a1f28be tip-rev=8
+ 7c29ff2453bf38c75ee8982935739103c38a9284 o default
+ ##### .hg/cache/branch3-served
+ filtered-hash=f1456c0d675980582dda9b8edc7f13f503ce544f obsolete-hash=3e74f5349008671629e39d13d7e00d9ba94c74f7 tip-node=7c29ff2453bf38c75ee8982935739103c38a9284 tip-rev=7
+ 550bb31f072912453ccbb503de1d554616911e88 o default
+#endif
+
+Even when computing branches from scratch
+
+ $ rm -rf .hg/cache/branch*
+ $ rm -rf .hg/wcache/branch*
+ $ hg heads -T '{desc}\n'
+ A_3
+ B_1
+#if v2
+ $ show_cache
+ ##### .hg/cache/branch2-served
+ 7c29ff2453bf38c75ee8982935739103c38a9284 7 f8006d64a10d35c011a5c5fa88be1e25c5929514
+ 550bb31f072912453ccbb503de1d554616911e88 o default
+ 7c29ff2453bf38c75ee8982935739103c38a9284 o default
+#else
+ $ show_cache
+ ##### .hg/cache/branch3-served
+ filtered-hash=f1456c0d675980582dda9b8edc7f13f503ce544f obsolete-hash=3e74f5349008671629e39d13d7e00d9ba94c74f7 tip-node=7c29ff2453bf38c75ee8982935739103c38a9284 tip-rev=7
+ 550bb31f072912453ccbb503de1d554616911e88 o default
+#endif
+
+And we can get back to normal
+
+ $ hg update null --quiet
+ $ hg heads -T '{desc}\n'
+ A_3
+ B_1
+#if v2
+ $ show_cache
+ ##### .hg/cache/branch2-served
+ 7c29ff2453bf38c75ee8982935739103c38a9284 7 f8006d64a10d35c011a5c5fa88be1e25c5929514
+ 550bb31f072912453ccbb503de1d554616911e88 o default
+ 7c29ff2453bf38c75ee8982935739103c38a9284 o default
+#else
+ $ show_cache
+ ##### .hg/cache/branch3-served
+ filtered-hash=f8006d64a10d35c011a5c5fa88be1e25c5929514 tip-node=7c29ff2453bf38c75ee8982935739103c38a9284 tip-rev=7 topo-mode=pure
+ default
+#endif
+
+ $ cd ..
+ $ rm -rf tmp-repo
+
+Getting the obsolescence marker after the fact for the tip rev
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+ $ cp -R ./main-single-branch-pre-ops tmp-repo
+ $ cd tmp-repo
+ $ hg update --hidden --rev 'desc("A_4")' --quiet
+ $ hg log -G -T '{desc}\n'
+ @ A_4
+ |
+ o A_3
+ |
+ | o B_4
+ | |
+ | o B_3
+ | |
+ | o B_2
+ | |
+ | o B_1
+ |/
+ o A_2
+ |
+ o A_1
+ |
+ o root
+
+ $ hg heads -T '{desc}\n'
+ A_4
+ B_4
+ $ hg pull --rev `cat ../main-single-branch-node_A4` --remote-hidden
+ pulling from $TESTTMP/main-single-branch
+ no changes found
+ 1 new obsolescence markers
+ obsoleted 1 changesets
+
+branch head are okay
+
+ $ hg heads -T '{desc}\n'
+ A_3
+ B_4
+#if v2
+ $ show_cache
+ ##### .hg/cache/branch2-served
+ 3d808bbc94408ea19da905596d4079357a1f28be 8 ac5282439f301518f362f37547fcd52bcc670373
+ 63ba7cd843d1e95aac1a24435befeb1909c53619 o default
+ 7c29ff2453bf38c75ee8982935739103c38a9284 o default
+#else
+ $ show_cache
+ ##### .hg/cache/branch3-served
+ obsolete-hash=ac5282439f301518f362f37547fcd52bcc670373 tip-node=3d808bbc94408ea19da905596d4079357a1f28be tip-rev=8
+ 7c29ff2453bf38c75ee8982935739103c38a9284 o default
+#endif
+
+Even when computing branches from scratch
+
+ $ rm -rf .hg/cache/branch*
+ $ rm -rf .hg/wcache/branch*
+ $ hg heads -T '{desc}\n'
+ A_3
+ B_4
+#if v2
+ $ show_cache
+ ##### .hg/cache/branch2-served
+ 3d808bbc94408ea19da905596d4079357a1f28be 8 ac5282439f301518f362f37547fcd52bcc670373
+ 63ba7cd843d1e95aac1a24435befeb1909c53619 o default
+ 7c29ff2453bf38c75ee8982935739103c38a9284 o default
+#else
+ $ show_cache
+ ##### .hg/cache/branch3-served
+ obsolete-hash=ac5282439f301518f362f37547fcd52bcc670373 tip-node=3d808bbc94408ea19da905596d4079357a1f28be tip-rev=8
+ 7c29ff2453bf38c75ee8982935739103c38a9284 o default
+#endif
+
+And we can get back to normal
+
+ $ hg update null --quiet
+ $ hg heads -T '{desc}\n'
+ A_3
+ B_4
+#if v2
+ $ show_cache
+ ##### .hg/cache/branch2-served
+ 7c29ff2453bf38c75ee8982935739103c38a9284 7
+ 63ba7cd843d1e95aac1a24435befeb1909c53619 o default
+ 7c29ff2453bf38c75ee8982935739103c38a9284 o default
+#else
+ $ show_cache
+ ##### .hg/cache/branch3-served
+ tip-node=7c29ff2453bf38c75ee8982935739103c38a9284 tip-rev=7 topo-mode=pure
+ default
+#endif
+
+ $ cd ..
+ $ rm -rf tmp-repo
+
+Getting the obsolescence marker after the fact for another rev
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+ $ cp -R ./main-single-branch-pre-ops tmp-repo
+ $ cd tmp-repo
+ $ hg update --hidden --rev 'desc("B_3")' --quiet
+ $ hg log -G -T '{desc}\n'
+ o A_4
+ |
+ o A_3
+ |
+ | o B_4
+ | |
+ | @ B_3
+ | |
+ | o B_2
+ | |
+ | o B_1
+ |/
+ o A_2
+ |
+ o A_1
+ |
+ o root
+
+ $ hg heads -T '{desc}\n'
+ A_4
+ B_4
+#if v2
+ $ show_cache
+ ##### .hg/cache/branch2-served
+ 3d808bbc94408ea19da905596d4079357a1f28be 8
+ 63ba7cd843d1e95aac1a24435befeb1909c53619 o default
+ 3d808bbc94408ea19da905596d4079357a1f28be o default
+#else
+ $ show_cache
+ ##### .hg/cache/branch3-served
+ tip-node=3d808bbc94408ea19da905596d4079357a1f28be tip-rev=8 topo-mode=pure
+ default
+#endif
+
+ $ hg pull --rev `cat ../main-single-branch-node_B4` --remote-hidden
+ pulling from $TESTTMP/main-single-branch
+ no changes found
+ 3 new obsolescence markers
+ obsoleted 3 changesets
+
+branch head are okay
+
+ $ hg heads -T '{desc}\n'
+ A_4
+ B_1
+#if v2
+ $ show_cache
+ ##### .hg/cache/branch2-served
+ 3d808bbc94408ea19da905596d4079357a1f28be 8 f8006d64a10d35c011a5c5fa88be1e25c5929514
+ 550bb31f072912453ccbb503de1d554616911e88 o default
+ 3d808bbc94408ea19da905596d4079357a1f28be o default
+#else
+ $ show_cache
+ ##### .hg/cache/branch3-served
+ filtered-hash=f1456c0d675980582dda9b8edc7f13f503ce544f obsolete-hash=3e74f5349008671629e39d13d7e00d9ba94c74f7 tip-node=3d808bbc94408ea19da905596d4079357a1f28be tip-rev=8
+ 550bb31f072912453ccbb503de1d554616911e88 o default
+#endif
+
+Even when computing branches from scratch
+
+ $ rm -rf .hg/cache/branch*
+ $ rm -rf .hg/wcache/branch*
+ $ hg heads -T '{desc}\n'
+ A_4
+ B_1
+#if v2
+ $ show_cache
+ ##### .hg/cache/branch2-served
+ 3d808bbc94408ea19da905596d4079357a1f28be 8 f8006d64a10d35c011a5c5fa88be1e25c5929514
+ 550bb31f072912453ccbb503de1d554616911e88 o default
+ 3d808bbc94408ea19da905596d4079357a1f28be o default
+#else
+ $ show_cache
+ ##### .hg/cache/branch3-served
+ filtered-hash=f1456c0d675980582dda9b8edc7f13f503ce544f obsolete-hash=3e74f5349008671629e39d13d7e00d9ba94c74f7 tip-node=3d808bbc94408ea19da905596d4079357a1f28be tip-rev=8
+ 550bb31f072912453ccbb503de1d554616911e88 o default
+#endif
+
+And we can get back to normal
+
+ $ hg update null --quiet
+ $ hg heads -T '{desc}\n'
+ A_4
+ B_1
+#if v2
+ $ show_cache
+ ##### .hg/cache/branch2-served
+ 3d808bbc94408ea19da905596d4079357a1f28be 8 f8006d64a10d35c011a5c5fa88be1e25c5929514
+ 550bb31f072912453ccbb503de1d554616911e88 o default
+ 3d808bbc94408ea19da905596d4079357a1f28be o default
+#else
+ $ show_cache
+ ##### .hg/cache/branch3-served
+ filtered-hash=f8006d64a10d35c011a5c5fa88be1e25c5929514 tip-node=3d808bbc94408ea19da905596d4079357a1f28be tip-rev=8 topo-mode=pure
+ default
+#endif
+
+ $ cd ..
+ $ rm -rf tmp-repo
--- a/tests/test-branches.t Mon Mar 25 02:09:15 2024 +0100
+++ b/tests/test-branches.t Mon Mar 25 16:27:48 2024 +0000
@@ -1,4 +1,5 @@
#testcases mmap nommap
+#testcases v2 v3
#if mmap
$ cat <<EOF >> $HGRCPATH
@@ -7,6 +8,18 @@
> EOF
#endif
+#if v3
+ $ cat <<EOF >> $HGRCPATH
+ > [experimental]
+ > branch-cache-v3=yes
+ > EOF
+#else
+ $ cat <<EOF >> $HGRCPATH
+ > [experimental]
+ > branch-cache-v3=no
+ > EOF
+#endif
+
$ hg init a
$ cd a
@@ -825,6 +838,7 @@
truncating cache/rbc-revs-v1 to 160
$ f --size .hg/cache/rbc-revs*
.hg/cache/rbc-revs-v1: size=160
+
recovery from invalid cache file with partial last record
$ mv .hg/cache/rbc-revs-v1 .
$ f -qDB 119 rbc-revs-v1 > .hg/cache/rbc-revs-v1
@@ -835,6 +849,7 @@
truncating cache/rbc-revs-v1 to 112
$ f --size .hg/cache/rbc-revs*
.hg/cache/rbc-revs-v1: size=160
+
recovery from invalid cache file with missing record - no truncation
$ mv .hg/cache/rbc-revs-v1 .
$ f -qDB 112 rbc-revs-v1 > .hg/cache/rbc-revs-v1
@@ -842,6 +857,7 @@
5
$ f --size .hg/cache/rbc-revs*
.hg/cache/rbc-revs-v1: size=160
+
recovery from invalid cache file with some bad records
$ mv .hg/cache/rbc-revs-v1 .
$ f -qDB 8 rbc-revs-v1 > .hg/cache/rbc-revs-v1
@@ -851,7 +867,7 @@
$ f --size .hg/cache/rbc-revs*
.hg/cache/rbc-revs-v1: size=120
$ hg log -r 'branch(.)' -T '{rev} ' --debug
- history modification detected - truncating revision branch cache to revision 13
+ history modification detected - truncating revision branch cache to revision * (glob)
history modification detected - truncating revision branch cache to revision 1
3 4 8 9 10 11 12 13 truncating cache/rbc-revs-v1 to 8
$ rm -f .hg/cache/branch* && hg head a -T '{rev}\n' --debug
@@ -860,6 +876,7 @@
$ f --size --hexdump --bytes=16 .hg/cache/rbc-revs*
.hg/cache/rbc-revs-v1: size=160
0000: 19 70 9c 5a 00 00 00 00 dd 6b 44 0d 00 00 00 01 |.p.Z.....kD.....|
+
cache is updated when committing
$ hg branch i-will-regret-this
marked working directory as branch i-will-regret-this
@@ -867,30 +884,17 @@
$ f --size .hg/cache/rbc-*
.hg/cache/rbc-names-v1: size=111
.hg/cache/rbc-revs-v1: size=168
+
update after rollback - the cache will be correct but rbc-names will will still
contain the branch name even though it no longer is used
$ hg up -qr '.^'
$ hg rollback -qf
- $ f --size --hexdump .hg/cache/rbc-*
+ $ f --size .hg/cache/rbc-names-*
.hg/cache/rbc-names-v1: size=111
- 0000: 64 65 66 61 75 6c 74 00 61 00 62 00 63 00 61 20 |default.a.b.c.a |
- 0010: 62 72 61 6e 63 68 20 6e 61 6d 65 20 6d 75 63 68 |branch name much|
- 0020: 20 6c 6f 6e 67 65 72 20 74 68 61 6e 20 74 68 65 | longer than the|
- 0030: 20 64 65 66 61 75 6c 74 20 6a 75 73 74 69 66 69 | default justifi|
- 0040: 63 61 74 69 6f 6e 20 75 73 65 64 20 62 79 20 62 |cation used by b|
- 0050: 72 61 6e 63 68 65 73 00 6d 00 6d 64 00 69 2d 77 |ranches.m.md.i-w|
- 0060: 69 6c 6c 2d 72 65 67 72 65 74 2d 74 68 69 73 |ill-regret-this|
+ $ grep "i-will-regret-this" .hg/cache/rbc-names-* > /dev/null
+ $ f --size .hg/cache/rbc-revs-*
.hg/cache/rbc-revs-v1: size=160
- 0000: 19 70 9c 5a 00 00 00 00 dd 6b 44 0d 00 00 00 01 |.p.Z.....kD.....|
- 0010: 88 1f e2 b9 00 00 00 01 ac 22 03 33 00 00 00 02 |.........".3....|
- 0020: ae e3 9c d1 00 00 00 02 d8 cb c6 1d 00 00 00 01 |................|
- 0030: 58 97 36 a2 00 00 00 03 10 ff 58 95 00 00 00 04 |X.6.......X.....|
- 0040: ee bb 94 44 00 00 00 02 5f 40 61 bb 00 00 00 02 |...D...._@a.....|
- 0050: bf be 84 1b 00 00 00 02 d3 f1 63 45 80 00 00 02 |..........cE....|
- 0060: e3 d4 9c 05 80 00 00 02 e2 3b 55 05 00 00 00 02 |.........;U.....|
- 0070: f8 94 c2 56 80 00 00 03 f3 44 76 37 00 00 00 05 |...V.....Dv7....|
- 0080: a5 8c a5 d3 00 00 00 05 df 34 3b 0d 00 00 00 05 |.........4;.....|
- 0090: c9 14 c9 9f 00 00 00 06 cd 21 a8 0b 80 00 00 05 |.........!......|
+
cache is updated/truncated when stripping - it is thus very hard to get in a
situation where the cache is out of sync and the hash check detects it
$ hg --config extensions.strip= strip -r tip --nob
@@ -902,38 +906,30 @@
$ hg log -r '5:&branch(.)' -T '{rev} ' --debug
referenced branch names not found - rebuilding revision branch cache from scratch
8 9 10 11 12 13 truncating cache/rbc-revs-v1 to 40
- $ f --size --hexdump .hg/cache/rbc-*
+ $ f --size .hg/cache/rbc-names-*
.hg/cache/rbc-names-v1: size=84
- 0000: 62 00 61 00 63 00 61 20 62 72 61 6e 63 68 20 6e |b.a.c.a branch n|
- 0010: 61 6d 65 20 6d 75 63 68 20 6c 6f 6e 67 65 72 20 |ame much longer |
- 0020: 74 68 61 6e 20 74 68 65 20 64 65 66 61 75 6c 74 |than the default|
- 0030: 20 6a 75 73 74 69 66 69 63 61 74 69 6f 6e 20 75 | justification u|
- 0040: 73 65 64 20 62 79 20 62 72 61 6e 63 68 65 73 00 |sed by branches.|
- 0050: 6d 00 6d 64 |m.md|
+ $ grep "i-will-regret-this" .hg/cache/rbc-names-* > /dev/null
+ [1]
+ $ f --size .hg/cache/rbc-revs-*
.hg/cache/rbc-revs-v1: size=152
- 0000: 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 |................|
- 0010: 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 |................|
- 0020: 00 00 00 00 00 00 00 00 d8 cb c6 1d 00 00 00 01 |................|
- 0030: 58 97 36 a2 00 00 00 02 10 ff 58 95 00 00 00 03 |X.6.......X.....|
- 0040: ee bb 94 44 00 00 00 00 5f 40 61 bb 00 00 00 00 |...D...._@a.....|
- 0050: bf be 84 1b 00 00 00 00 d3 f1 63 45 80 00 00 00 |..........cE....|
- 0060: e3 d4 9c 05 80 00 00 00 e2 3b 55 05 00 00 00 00 |.........;U.....|
- 0070: f8 94 c2 56 80 00 00 02 f3 44 76 37 00 00 00 04 |...V.....Dv7....|
- 0080: a5 8c a5 d3 00 00 00 04 df 34 3b 0d 00 00 00 04 |.........4;.....|
- 0090: c9 14 c9 9f 00 00 00 05 |........|
Test that cache files are created and grows correctly:
$ rm .hg/cache/rbc*
$ hg log -r "5 & branch(5)" -T "{rev}\n"
5
- $ f --size --hexdump .hg/cache/rbc-*
+
+(here v3 is querying branch info for heads so it warm much more of the cache)
+
+#if v2
+ $ f --size .hg/cache/rbc-*
.hg/cache/rbc-names-v1: size=1
- 0000: 61 |a|
.hg/cache/rbc-revs-v1: size=48
- 0000: 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 |................|
- 0010: 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 |................|
- 0020: 00 00 00 00 00 00 00 00 d8 cb c6 1d 00 00 00 00 |................|
+#else
+ $ f --size .hg/cache/rbc-*
+ .hg/cache/rbc-names-v1: size=84
+ .hg/cache/rbc-revs-v1: size=152
+#endif
$ cd ..
@@ -948,22 +944,20 @@
$ hg branch -q branch
$ hg ci -Amf
- $ f --size --hexdump .hg/cache/rbc-*
- .hg/cache/rbc-names-v1: size=14
- 0000: 64 65 66 61 75 6c 74 00 62 72 61 6e 63 68 |default.branch|
- .hg/cache/rbc-revs-v1: size=24
- 0000: 66 e5 f5 aa 00 00 00 00 fa 4c 04 e5 00 00 00 00 |f........L......|
- 0010: 56 46 78 69 00 00 00 01 |VFxi....|
+#if v2
+
+ $ f --size --sha256 .hg/cache/rbc-*
+ .hg/cache/rbc-names-v1: size=14, sha256=d376f7eea9a7e28fac6470e78dae753c81a5543c9ad436e96999590e004a281c
+ .hg/cache/rbc-revs-v1: size=24, sha256=ec89032fd4e66e7282cb6e403848c681a855a9c36c6b44d19179218553b78779
+
$ : > .hg/cache/rbc-revs-v1
No superfluous rebuilding of cache:
$ hg log -r "branch(null)&branch(branch)" --debug
- $ f --size --hexdump .hg/cache/rbc-*
- .hg/cache/rbc-names-v1: size=14
- 0000: 64 65 66 61 75 6c 74 00 62 72 61 6e 63 68 |default.branch|
- .hg/cache/rbc-revs-v1: size=24
- 0000: 66 e5 f5 aa 00 00 00 00 fa 4c 04 e5 00 00 00 00 |f........L......|
- 0010: 56 46 78 69 00 00 00 01 |VFxi....|
+ $ f --size --sha256 .hg/cache/rbc-*
+ .hg/cache/rbc-names-v1: size=14, sha256=d376f7eea9a7e28fac6470e78dae753c81a5543c9ad436e96999590e004a281c
+ .hg/cache/rbc-revs-v1: size=24, sha256=ec89032fd4e66e7282cb6e403848c681a855a9c36c6b44d19179218553b78779
+#endif
$ cd ..
@@ -1316,9 +1310,15 @@
new changesets 2ab8003a1750:99ba08759bc7
updating to branch A
0 files updated, 0 files merged, 0 files removed, 0 files unresolved
- $ cat branchmap-update-01/.hg/cache/branch2-served
+#if v3
+ $ cat branchmap-update-01/.hg/cache/branch3-base
+ tip-node=99ba08759bc7f6fdbe5304e83d0387f35c082479 tip-rev=1 topo-mode=pure
+ A
+#else
+ $ cat branchmap-update-01/.hg/cache/branch2-base
99ba08759bc7f6fdbe5304e83d0387f35c082479 1
99ba08759bc7f6fdbe5304e83d0387f35c082479 o A
+#endif
$ hg -R branchmap-update-01 unbundle bundle.hg
adding changesets
adding manifests
@@ -1326,9 +1326,15 @@
added 2 changesets with 0 changes to 0 files
new changesets a3b807b3ff0b:71ca9a6d524e (2 drafts)
(run 'hg update' to get a working copy)
+#if v3
+ $ cat branchmap-update-01/.hg/cache/branch3-served
+ tip-node=71ca9a6d524ed3c2a215119b2086ac3b8c4c8286 tip-rev=3 topo-mode=pure
+ A
+#else
$ cat branchmap-update-01/.hg/cache/branch2-served
71ca9a6d524ed3c2a215119b2086ac3b8c4c8286 3
71ca9a6d524ed3c2a215119b2086ac3b8c4c8286 o A
+#endif
aborted Unbundle should not update the on disk cache
@@ -1350,9 +1356,15 @@
updating to branch A
0 files updated, 0 files merged, 0 files removed, 0 files unresolved
- $ cat branchmap-update-02/.hg/cache/branch2-served
+#if v3
+ $ cat branchmap-update-02/.hg/cache/branch3-base
+ tip-node=99ba08759bc7f6fdbe5304e83d0387f35c082479 tip-rev=1 topo-mode=pure
+ A
+#else
+ $ cat branchmap-update-02/.hg/cache/branch2-base
99ba08759bc7f6fdbe5304e83d0387f35c082479 1
99ba08759bc7f6fdbe5304e83d0387f35c082479 o A
+#endif
$ hg -R branchmap-update-02 unbundle bundle.hg --config "hooks.pretxnclose=python:$TESTTMP/simplehook.py:hook"
adding changesets
adding manifests
@@ -1361,6 +1373,12 @@
rollback completed
abort: pretxnclose hook failed
[40]
- $ cat branchmap-update-02/.hg/cache/branch2-served
+#if v3
+ $ cat branchmap-update-02/.hg/cache/branch3-base
+ tip-node=99ba08759bc7f6fdbe5304e83d0387f35c082479 tip-rev=1 topo-mode=pure
+ A
+#else
+ $ cat branchmap-update-02/.hg/cache/branch2-base
99ba08759bc7f6fdbe5304e83d0387f35c082479 1
99ba08759bc7f6fdbe5304e83d0387f35c082479 o A
+#endif
--- a/tests/test-clone-stream.t Mon Mar 25 02:09:15 2024 +0100
+++ b/tests/test-clone-stream.t Mon Mar 25 16:27:48 2024 +0000
@@ -109,150 +109,18 @@
Check uncompressed
==================
-Cannot stream clone when server.uncompressed is set
+Cannot stream clone when server.uncompressed is set to false
+------------------------------------------------------------
+
+When `server.uncompressed` is disabled, the client should fallback to a bundle
+based clone with a warning.
+
$ get-with-headers.py $LOCALIP:$HGPORT '?cmd=stream_out'
200 Script output follows
1
-#if stream-legacy
- $ hg debugcapabilities http://localhost:$HGPORT
- Main capabilities:
- batch
- branchmap
- $USUAL_BUNDLE2_CAPS_SERVER$
- changegroupsubset
- compression=$BUNDLE2_COMPRESSIONS$
- getbundle
- httpheader=1024
- httpmediatype=0.1rx,0.1tx,0.2tx
- known
- lookup
- pushkey
- unbundle=HG10GZ,HG10BZ,HG10UN
- unbundlehash
- Bundle2 capabilities:
- HG20
- bookmarks
- changegroup
- 01
- 02
- 03
- checkheads
- related
- digests
- md5
- sha1
- sha512
- error
- abort
- unsupportedcontent
- pushraced
- pushkey
- hgtagsfnodes
- listkeys
- phases
- heads
- pushkey
- remote-changegroup
- http
- https
-
- $ hg clone --stream -U http://localhost:$HGPORT server-disabled
- warning: stream clone requested but server has them disabled
- requesting all changes
- adding changesets
- adding manifests
- adding file changes
- added 3 changesets with 1088 changes to 1088 files
- new changesets 96ee1d7354c4:5223b5e3265f
-
- $ get-with-headers.py $LOCALIP:$HGPORT '?cmd=getbundle' content-type --bodyfile body --hgproto 0.2 --requestheader "x-hgarg-1=bundlecaps=HG20%2Cbundle2%3DHG20%250Abookmarks%250Achangegroup%253D01%252C02%252C03%250Adigests%253Dmd5%252Csha1%252Csha512%250Aerror%253Dabort%252Cunsupportedcontent%252Cpushraced%252Cpushkey%250Ahgtagsfnodes%250Alistkeys%250Aphases%253Dheads%250Apushkey%250Aremote-changegroup%253Dhttp%252Chttps&cg=0&common=0000000000000000000000000000000000000000&heads=c17445101a72edac06facd130d14808dfbd5c7c2&stream=1"
- 200 Script output follows
- content-type: application/mercurial-0.2
-
-
- $ f --size body --hexdump --bytes 100
- body: size=140
- 0000: 04 6e 6f 6e 65 48 47 32 30 00 00 00 00 00 00 00 |.noneHG20.......|
- 0010: 73 0b 45 52 52 4f 52 3a 41 42 4f 52 54 00 00 00 |s.ERROR:ABORT...|
- 0020: 00 01 01 07 3c 04 16 6d 65 73 73 61 67 65 73 74 |....<..messagest|
- 0030: 72 65 61 6d 20 64 61 74 61 20 72 65 71 75 65 73 |ream data reques|
- 0040: 74 65 64 20 62 75 74 20 73 65 72 76 65 72 20 64 |ted but server d|
- 0050: 6f 65 73 20 6e 6f 74 20 61 6c 6c 6f 77 20 74 68 |oes not allow th|
- 0060: 69 73 20 66 |is f|
-
-#endif
-#if stream-bundle2-v2
- $ hg debugcapabilities http://localhost:$HGPORT
- Main capabilities:
- batch
- branchmap
- $USUAL_BUNDLE2_CAPS_SERVER$
- changegroupsubset
- compression=$BUNDLE2_COMPRESSIONS$
- getbundle
- httpheader=1024
- httpmediatype=0.1rx,0.1tx,0.2tx
- known
- lookup
- pushkey
- unbundle=HG10GZ,HG10BZ,HG10UN
- unbundlehash
- Bundle2 capabilities:
- HG20
- bookmarks
- changegroup
- 01
- 02
- 03
- checkheads
- related
- digests
- md5
- sha1
- sha512
- error
- abort
- unsupportedcontent
- pushraced
- pushkey
- hgtagsfnodes
- listkeys
- phases
- heads
- pushkey
- remote-changegroup
- http
- https
-
- $ hg clone --stream -U http://localhost:$HGPORT server-disabled
- warning: stream clone requested but server has them disabled
- requesting all changes
- adding changesets
- adding manifests
- adding file changes
- added 3 changesets with 1088 changes to 1088 files
- new changesets 96ee1d7354c4:5223b5e3265f
-
- $ get-with-headers.py $LOCALIP:$HGPORT '?cmd=getbundle' content-type --bodyfile body --hgproto 0.2 --requestheader "x-hgarg-1=bundlecaps=HG20%2Cbundle2%3DHG20%250Abookmarks%250Achangegroup%253D01%252C02%252C03%250Adigests%253Dmd5%252Csha1%252Csha512%250Aerror%253Dabort%252Cunsupportedcontent%252Cpushraced%252Cpushkey%250Ahgtagsfnodes%250Alistkeys%250Aphases%253Dheads%250Apushkey%250Aremote-changegroup%253Dhttp%252Chttps&cg=0&common=0000000000000000000000000000000000000000&heads=c17445101a72edac06facd130d14808dfbd5c7c2&stream=1"
- 200 Script output follows
- content-type: application/mercurial-0.2
-
-
- $ f --size body --hexdump --bytes 100
- body: size=140
- 0000: 04 6e 6f 6e 65 48 47 32 30 00 00 00 00 00 00 00 |.noneHG20.......|
- 0010: 73 0b 45 52 52 4f 52 3a 41 42 4f 52 54 00 00 00 |s.ERROR:ABORT...|
- 0020: 00 01 01 07 3c 04 16 6d 65 73 73 61 67 65 73 74 |....<..messagest|
- 0030: 72 65 61 6d 20 64 61 74 61 20 72 65 71 75 65 73 |ream data reques|
- 0040: 74 65 64 20 62 75 74 20 73 65 72 76 65 72 20 64 |ted but server d|
- 0050: 6f 65 73 20 6e 6f 74 20 61 6c 6c 6f 77 20 74 68 |oes not allow th|
- 0060: 69 73 20 66 |is f|
-
-#endif
-#if stream-bundle2-v3
$ hg debugcapabilities http://localhost:$HGPORT
Main capabilities:
batch
@@ -304,23 +172,6 @@
added 3 changesets with 1088 changes to 1088 files
new changesets 96ee1d7354c4:5223b5e3265f
- $ get-with-headers.py $LOCALIP:$HGPORT '?cmd=getbundle' content-type --bodyfile body --hgproto 0.2 --requestheader "x-hgarg-1=bundlecaps=HG20%2Cbundle2%3DHG20%250Abookmarks%250Achangegroup%253D01%252C02%252C03%250Adigests%253Dmd5%252Csha1%252Csha512%250Aerror%253Dabort%252Cunsupportedcontent%252Cpushraced%252Cpushkey%250Ahgtagsfnodes%250Alistkeys%250Aphases%253Dheads%250Apushkey%250Aremote-changegroup%253Dhttp%252Chttps&cg=0&common=0000000000000000000000000000000000000000&heads=c17445101a72edac06facd130d14808dfbd5c7c2&stream=1"
- 200 Script output follows
- content-type: application/mercurial-0.2
-
-
- $ f --size body --hexdump --bytes 100
- body: size=140
- 0000: 04 6e 6f 6e 65 48 47 32 30 00 00 00 00 00 00 00 |.noneHG20.......|
- 0010: 73 0b 45 52 52 4f 52 3a 41 42 4f 52 54 00 00 00 |s.ERROR:ABORT...|
- 0020: 00 01 01 07 3c 04 16 6d 65 73 73 61 67 65 73 74 |....<..messagest|
- 0030: 72 65 61 6d 20 64 61 74 61 20 72 65 71 75 65 73 |ream data reques|
- 0040: 74 65 64 20 62 75 74 20 73 65 72 76 65 72 20 64 |ted but server d|
- 0050: 6f 65 73 20 6e 6f 74 20 61 6c 6c 6f 77 20 74 68 |oes not allow th|
- 0060: 69 73 20 66 |is f|
-
-#endif
-
$ killdaemons.py
$ cd server
$ hg serve -p $HGPORT -d --pid-file=hg.pid --error errors.txt
@@ -328,6 +179,13 @@
$ cd ..
Basic clone
+-----------
+
+Check that --stream trigger a stream clone and result in a valid repositoty
+
+We check the associated output for exact bytes on file number as changes in
+these value implies changes in the data transfered and can detect unintended
+changes in the process.
#if stream-legacy
$ hg clone --stream -U http://localhost:$HGPORT clone1
@@ -338,7 +196,6 @@
transferred 98.8 KB in * seconds (* */sec) (glob) (zstd !)
searching for changes
no changes found
- $ cat server/errors.txt
#endif
#if stream-bundle2-v2
$ hg clone --stream -U http://localhost:$HGPORT clone1
@@ -349,20 +206,8 @@
transferred 98.9 KB in * seconds (* */sec) (glob) (zstd no-rust !)
1096 files to transfer, 99.0 KB of data (zstd rust !)
transferred 99.0 KB in * seconds (* */sec) (glob) (zstd rust !)
+#endif
- $ ls -1 clone1/.hg/cache
- branch2-base
- branch2-immutable
- branch2-served
- branch2-served.hidden
- branch2-visible
- branch2-visible-hidden
- rbc-names-v1
- rbc-revs-v1
- tags2
- tags2-served
- $ cat server/errors.txt
-#endif
#if stream-bundle2-v3
$ hg clone --stream -U http://localhost:$HGPORT clone1
streaming all changes
@@ -370,244 +215,68 @@
transferred 102 KB in * seconds (* */sec) (glob) (no-zstd !)
transferred 98.9 KB in * seconds (* */sec) (glob) (zstd no-rust !)
transferred 99.0 KB in * seconds (* */sec) (glob) (zstd rust !)
+#endif
+#if no-stream-legacy
$ ls -1 clone1/.hg/cache
branch2-base
- branch2-immutable
branch2-served
- branch2-served.hidden
- branch2-visible
- branch2-visible-hidden
rbc-names-v1
rbc-revs-v1
tags2
tags2-served
- $ cat server/errors.txt
#endif
+ $ hg -R clone1 verify --quiet
+ $ cat server/errors.txt
+
getbundle requests with stream=1 are uncompressed
+-------------------------------------------------
+
+We check that `getbundle` will return a stream bundle when requested.
+
+XXX manually building the --requestheader is fragile and will drift away from actual usage
$ get-with-headers.py $LOCALIP:$HGPORT '?cmd=getbundle' content-type --bodyfile body --hgproto '0.1 0.2 comp=zlib,none' --requestheader "x-hgarg-1=bundlecaps=HG20%2Cbundle2%3DHG20%250Abookmarks%250Achangegroup%253D01%252C02%252C03%250Adigests%253Dmd5%252Csha1%252Csha512%250Aerror%253Dabort%252Cunsupportedcontent%252Cpushraced%252Cpushkey%250Ahgtagsfnodes%250Alistkeys%250Aphases%253Dheads%250Apushkey%250Aremote-changegroup%253Dhttp%252Chttps%250Astream%253Dv2&cg=0&common=0000000000000000000000000000000000000000&heads=c17445101a72edac06facd130d14808dfbd5c7c2&stream=1"
200 Script output follows
content-type: application/mercurial-0.2
-#if no-zstd no-rust
- $ f --size --hex --bytes 256 body
- body: size=119140
- 0000: 04 6e 6f 6e 65 48 47 32 30 00 00 00 00 00 00 00 |.noneHG20.......|
- 0010: 62 07 53 54 52 45 41 4d 32 00 00 00 00 03 00 09 |b.STREAM2.......|
- 0020: 06 09 04 0c 26 62 79 74 65 63 6f 75 6e 74 31 30 |....&bytecount10|
- 0030: 34 31 31 35 66 69 6c 65 63 6f 75 6e 74 31 30 39 |4115filecount109|
- 0040: 34 72 65 71 75 69 72 65 6d 65 6e 74 73 67 65 6e |4requirementsgen|
- 0050: 65 72 61 6c 64 65 6c 74 61 25 32 43 72 65 76 6c |eraldelta%2Crevl|
- 0060: 6f 67 76 31 25 32 43 73 70 61 72 73 65 72 65 76 |ogv1%2Csparserev|
- 0070: 6c 6f 67 00 00 80 00 73 08 42 64 61 74 61 2f 30 |log....s.Bdata/0|
- 0080: 2e 69 00 03 00 01 00 00 00 00 00 00 00 02 00 00 |.i..............|
- 0090: 00 01 00 00 00 00 00 00 00 01 ff ff ff ff ff ff |................|
- 00a0: ff ff 80 29 63 a0 49 d3 23 87 bf ce fe 56 67 92 |...)c.I.#....Vg.|
- 00b0: 67 2c 69 d1 ec 39 00 00 00 00 00 00 00 00 00 00 |g,i..9..........|
- 00c0: 00 00 75 30 73 26 45 64 61 74 61 2f 30 30 63 68 |..u0s&Edata/00ch|
- 00d0: 61 6e 67 65 6c 6f 67 2d 61 62 33 34 39 31 38 30 |angelog-ab349180|
- 00e0: 61 30 34 30 35 30 31 30 2e 6e 64 2e 69 00 03 00 |a0405010.nd.i...|
- 00f0: 01 00 00 00 00 00 00 00 05 00 00 00 04 00 00 00 |................|
-#endif
-#if zstd no-rust
- $ f --size --hex --bytes 256 body
- body: size=116327 (no-bigendian !)
- body: size=116322 (bigendian !)
+ $ f --size --hex --bytes 48 body
+ body: size=* (glob)
0000: 04 6e 6f 6e 65 48 47 32 30 00 00 00 00 00 00 00 |.noneHG20.......|
- 0010: 7c 07 53 54 52 45 41 4d 32 00 00 00 00 03 00 09 ||.STREAM2.......|
- 0020: 06 09 04 0c 40 62 79 74 65 63 6f 75 6e 74 31 30 |....@bytecount10|
- 0030: 31 32 37 36 66 69 6c 65 63 6f 75 6e 74 31 30 39 |1276filecount109| (no-bigendian !)
- 0030: 31 32 37 31 66 69 6c 65 63 6f 75 6e 74 31 30 39 |1271filecount109| (bigendian !)
- 0040: 34 72 65 71 75 69 72 65 6d 65 6e 74 73 67 65 6e |4requirementsgen|
- 0050: 65 72 61 6c 64 65 6c 74 61 25 32 43 72 65 76 6c |eraldelta%2Crevl|
- 0060: 6f 67 2d 63 6f 6d 70 72 65 73 73 69 6f 6e 2d 7a |og-compression-z|
- 0070: 73 74 64 25 32 43 72 65 76 6c 6f 67 76 31 25 32 |std%2Crevlogv1%2|
- 0080: 43 73 70 61 72 73 65 72 65 76 6c 6f 67 00 00 80 |Csparserevlog...|
- 0090: 00 73 08 42 64 61 74 61 2f 30 2e 69 00 03 00 01 |.s.Bdata/0.i....|
- 00a0: 00 00 00 00 00 00 00 02 00 00 00 01 00 00 00 00 |................|
- 00b0: 00 00 00 01 ff ff ff ff ff ff ff ff 80 29 63 a0 |.............)c.|
- 00c0: 49 d3 23 87 bf ce fe 56 67 92 67 2c 69 d1 ec 39 |I.#....Vg.g,i..9|
- 00d0: 00 00 00 00 00 00 00 00 00 00 00 00 75 30 73 26 |............u0s&|
- 00e0: 45 64 61 74 61 2f 30 30 63 68 61 6e 67 65 6c 6f |Edata/00changelo|
- 00f0: 67 2d 61 62 33 34 39 31 38 30 61 30 34 30 35 30 |g-ab349180a04050|
-#endif
-#if zstd rust no-dirstate-v2
- $ f --size --hex --bytes 256 body
- body: size=116310 (no-rust !)
- body: size=116495 (rust no-stream-legacy no-bigendian !)
- body: size=116490 (rust no-stream-legacy bigendian !)
- body: size=116327 (rust stream-legacy no-bigendian !)
- body: size=116322 (rust stream-legacy bigendian !)
- 0000: 04 6e 6f 6e 65 48 47 32 30 00 00 00 00 00 00 00 |.noneHG20.......|
- 0010: 7c 07 53 54 52 45 41 4d 32 00 00 00 00 03 00 09 ||.STREAM2.......|
- 0020: 06 09 04 0c 40 62 79 74 65 63 6f 75 6e 74 31 30 |....@bytecount10|
- 0030: 31 32 37 36 66 69 6c 65 63 6f 75 6e 74 31 30 39 |1276filecount109| (no-rust !)
- 0040: 33 72 65 71 75 69 72 65 6d 65 6e 74 73 67 65 6e |3requirementsgen| (no-rust !)
- 0030: 31 34 30 32 66 69 6c 65 63 6f 75 6e 74 31 30 39 |1402filecount109| (rust no-stream-legacy no-bigendian !)
- 0030: 31 33 39 37 66 69 6c 65 63 6f 75 6e 74 31 30 39 |1397filecount109| (rust no-stream-legacy bigendian !)
- 0040: 36 72 65 71 75 69 72 65 6d 65 6e 74 73 67 65 6e |6requirementsgen| (rust no-stream-legacy !)
- 0030: 31 32 37 36 66 69 6c 65 63 6f 75 6e 74 31 30 39 |1276filecount109| (rust stream-legacy no-bigendian !)
- 0030: 31 32 37 31 66 69 6c 65 63 6f 75 6e 74 31 30 39 |1271filecount109| (rust stream-legacy bigendian !)
- 0040: 34 72 65 71 75 69 72 65 6d 65 6e 74 73 67 65 6e |4requirementsgen| (rust stream-legacy !)
- 0050: 65 72 61 6c 64 65 6c 74 61 25 32 43 72 65 76 6c |eraldelta%2Crevl|
- 0060: 6f 67 2d 63 6f 6d 70 72 65 73 73 69 6f 6e 2d 7a |og-compression-z|
- 0070: 73 74 64 25 32 43 72 65 76 6c 6f 67 76 31 25 32 |std%2Crevlogv1%2|
- 0080: 43 73 70 61 72 73 65 72 65 76 6c 6f 67 00 00 80 |Csparserevlog...|
- 0090: 00 73 08 42 64 61 74 61 2f 30 2e 69 00 03 00 01 |.s.Bdata/0.i....|
- 00a0: 00 00 00 00 00 00 00 02 00 00 00 01 00 00 00 00 |................|
- 00b0: 00 00 00 01 ff ff ff ff ff ff ff ff 80 29 63 a0 |.............)c.|
- 00c0: 49 d3 23 87 bf ce fe 56 67 92 67 2c 69 d1 ec 39 |I.#....Vg.g,i..9|
- 00d0: 00 00 00 00 00 00 00 00 00 00 00 00 75 30 73 26 |............u0s&|
- 00e0: 45 64 61 74 61 2f 30 30 63 68 61 6e 67 65 6c 6f |Edata/00changelo|
- 00f0: 67 2d 61 62 33 34 39 31 38 30 61 30 34 30 35 30 |g-ab349180a04050|
-#endif
-#if zstd dirstate-v2
- $ f --size --hex --bytes 256 body
- body: size=109549
- 0000: 04 6e 6f 6e 65 48 47 32 30 00 00 00 00 00 00 00 |.noneHG20.......|
- 0010: c0 07 53 54 52 45 41 4d 32 00 00 00 00 03 00 09 |..STREAM2.......|
- 0020: 05 09 04 0c 85 62 79 74 65 63 6f 75 6e 74 39 35 |.....bytecount95|
- 0030: 38 39 37 66 69 6c 65 63 6f 75 6e 74 31 30 33 30 |897filecount1030|
- 0040: 72 65 71 75 69 72 65 6d 65 6e 74 73 64 6f 74 65 |requirementsdote|
- 0050: 6e 63 6f 64 65 25 32 43 65 78 70 2d 64 69 72 73 |ncode%2Cexp-dirs|
- 0060: 74 61 74 65 2d 76 32 25 32 43 66 6e 63 61 63 68 |tate-v2%2Cfncach|
- 0070: 65 25 32 43 67 65 6e 65 72 61 6c 64 65 6c 74 61 |e%2Cgeneraldelta|
- 0080: 25 32 43 70 65 72 73 69 73 74 65 6e 74 2d 6e 6f |%2Cpersistent-no|
- 0090: 64 65 6d 61 70 25 32 43 72 65 76 6c 6f 67 2d 63 |demap%2Crevlog-c|
- 00a0: 6f 6d 70 72 65 73 73 69 6f 6e 2d 7a 73 74 64 25 |ompression-zstd%|
- 00b0: 32 43 72 65 76 6c 6f 67 76 31 25 32 43 73 70 61 |2Crevlogv1%2Cspa|
- 00c0: 72 73 65 72 65 76 6c 6f 67 25 32 43 73 74 6f 72 |rserevlog%2Cstor|
- 00d0: 65 00 00 80 00 73 08 42 64 61 74 61 2f 30 2e 69 |e....s.Bdata/0.i|
- 00e0: 00 03 00 01 00 00 00 00 00 00 00 02 00 00 00 01 |................|
- 00f0: 00 00 00 00 00 00 00 01 ff ff ff ff ff ff ff ff |................|
-#endif
+ 0010: ?? 07 53 54 52 45 41 4d 32 00 00 00 00 03 00 09 |?.STREAM2.......| (glob)
+ 0020: 06 09 04 0c ?? 62 79 74 65 63 6f 75 6e 74 31 30 |....?bytecount10| (glob)
--uncompressed is an alias to --stream
+---------------------------------------
-#if stream-legacy
- $ hg clone --uncompressed -U http://localhost:$HGPORT clone1-uncompressed
- streaming all changes
- 1091 files to transfer, 102 KB of data (no-zstd !)
- transferred 102 KB in * seconds (* */sec) (glob) (no-zstd !)
- 1091 files to transfer, 98.8 KB of data (zstd !)
- transferred 98.8 KB in * seconds (* */sec) (glob) (zstd !)
- searching for changes
- no changes found
-#endif
-#if stream-bundle2-v2
+The alias flag should trigger a stream clone too.
+
$ hg clone --uncompressed -U http://localhost:$HGPORT clone1-uncompressed
streaming all changes
- 1094 files to transfer, 102 KB of data (no-zstd !)
- transferred 102 KB in * seconds (* */sec) (glob) (no-zstd !)
- 1094 files to transfer, 98.9 KB of data (zstd no-rust !)
- transferred 98.9 KB in * seconds (* */sec) (glob) (zstd no-rust !)
- 1096 files to transfer, 99.0 KB of data (zstd rust !)
- transferred 99.0 KB in * seconds (* */sec) (glob) (zstd rust !)
-#endif
-#if stream-bundle2-v3
- $ hg clone --uncompressed -U http://localhost:$HGPORT clone1-uncompressed
- streaming all changes
- 1093 entries to transfer
- transferred 102 KB in * seconds (* */sec) (glob) (no-zstd !)
- transferred 98.9 KB in * seconds (* */sec) (glob) (zstd no-rust !)
- transferred 99.0 KB in * seconds (* */sec) (glob) (zstd rust !)
-#endif
+ * files to transfer* (glob) (no-stream-bundle2-v3 !)
+ * entries to transfer (glob) (stream-bundle2-v3 !)
+ transferred * KB in * seconds (* */sec) (glob)
+ searching for changes (stream-legacy !)
+ no changes found (stream-legacy !)
Clone with background file closing enabled
+-------------------------------------------
-#if stream-legacy
- $ hg --debug --config worker.backgroundclose=true --config worker.backgroundcloseminfilecount=1 clone --stream -U http://localhost:$HGPORT clone-background | grep -v adding
- using http://localhost:$HGPORT/
- sending capabilities command
- sending branchmap command
- streaming all changes
- sending stream_out command
- 1091 files to transfer, 102 KB of data (no-zstd !)
- 1091 files to transfer, 98.8 KB of data (zstd !)
- starting 4 threads for background file closing
- updating the branch cache
- transferred 102 KB in * seconds (* */sec) (glob) (no-zstd !)
- transferred 98.8 KB in * seconds (* */sec) (glob) (zstd !)
- query 1; heads
- sending batch command
- searching for changes
- all remote heads known locally
- no changes found
- sending getbundle command
- bundle2-input-bundle: with-transaction
- bundle2-input-part: "listkeys" (params: 1 mandatory) supported
- bundle2-input-part: "phase-heads" supported
- bundle2-input-part: total payload size 24
- bundle2-input-bundle: 2 parts total
- checking for updated bookmarks
- updating the branch cache
- (sent 5 HTTP requests and * bytes; received * bytes in responses) (glob)
-#endif
-#if stream-bundle2-v2
- $ hg --debug --config worker.backgroundclose=true --config worker.backgroundcloseminfilecount=1 clone --stream -U http://localhost:$HGPORT clone-background | grep -v adding
- using http://localhost:$HGPORT/
- sending capabilities command
- query 1; heads
- sending batch command
- streaming all changes
- sending getbundle command
- bundle2-input-bundle: with-transaction
- bundle2-input-part: "stream2" (params: 3 mandatory) supported
- applying stream bundle
- 1094 files to transfer, 102 KB of data (no-zstd !)
- 1094 files to transfer, 98.9 KB of data (zstd no-rust !)
- 1096 files to transfer, 99.0 KB of data (zstd rust !)
- starting 4 threads for background file closing
+The backgound file closing logic should trigger when configured to do so, and
+the result should be a valid repository.
+
+ $ hg --debug --config worker.backgroundclose=true --config worker.backgroundcloseminfilecount=1 clone --stream -U http://localhost:$HGPORT clone-background | grep "background file closing"
starting 4 threads for background file closing
- updating the branch cache
- transferred 102 KB in * seconds (* */sec) (glob) (no-zstd !)
- bundle2-input-part: total payload size 119001 (no-zstd !)
- transferred 98.9 KB in * seconds (* */sec) (glob) (zstd no-rust !)
- transferred 99.0 KB in * seconds (* */sec) (glob) (zstd rust !)
- bundle2-input-part: total payload size 116162 (zstd no-bigendian no-rust !)
- bundle2-input-part: total payload size 116330 (zstd no-bigendian rust !)
- bundle2-input-part: total payload size 116157 (zstd bigendian no-rust !)
- bundle2-input-part: total payload size 116325 (zstd bigendian rust !)
- bundle2-input-part: "listkeys" (params: 1 mandatory) supported
- bundle2-input-bundle: 2 parts total
- checking for updated bookmarks
- updating the branch cache
- (sent 3 HTTP requests and * bytes; received * bytes in responses) (glob)
-#endif
-#if stream-bundle2-v3
- $ hg --debug --config worker.backgroundclose=true --config worker.backgroundcloseminfilecount=1 clone --stream -U http://localhost:$HGPORT clone-background | grep -v adding
- using http://localhost:$HGPORT/
- sending capabilities command
- query 1; heads
- sending batch command
- streaming all changes
- sending getbundle command
- bundle2-input-bundle: with-transaction
- bundle2-input-part: "stream3-exp" (params: 1 mandatory) supported
- applying stream bundle
- 1093 entries to transfer
- starting 4 threads for background file closing
- starting 4 threads for background file closing
- updating the branch cache
- transferred 102 KB in * seconds (* */sec) (glob) (no-zstd !)
- bundle2-input-part: total payload size 120096 (no-zstd !)
- transferred 98.9 KB in * seconds (* */sec) (glob) (zstd no-rust !)
- transferred 99.0 KB in * seconds (* */sec) (glob) (zstd rust !)
- bundle2-input-part: total payload size 117257 (zstd no-rust no-bigendian !)
- bundle2-input-part: total payload size 117425 (zstd rust no-bigendian !)
- bundle2-input-part: total payload size 117252 (zstd bigendian no-rust !)
- bundle2-input-part: total payload size 117420 (zstd bigendian rust !)
- bundle2-input-part: "listkeys" (params: 1 mandatory) supported
- bundle2-input-bundle: 2 parts total
- checking for updated bookmarks
- updating the branch cache
- (sent 3 HTTP requests and * bytes; received * bytes in responses) (glob)
-#endif
+ starting 4 threads for background file closing (no-stream-legacy !)
+ $ hg verify -R clone-background --quiet
Cannot stream clone when there are secret changesets
+----------------------------------------------------
+
+If secret changeset are present the should not be cloned (by default) and the
+clone falls back to a bundle clone.
$ hg -R server phase --force --secret -r tip
$ hg clone --stream -U http://localhost:$HGPORT secret-denied
@@ -622,44 +291,30 @@
$ killdaemons.py
Streaming of secrets can be overridden by server config
+-------------------------------------------------------
+
+Secret changeset can still be streamed if the server is configured to do so.
$ cd server
$ hg serve --config server.uncompressedallowsecret=true -p $HGPORT -d --pid-file=hg.pid
$ cat hg.pid > $DAEMON_PIDS
$ cd ..
-#if stream-legacy
- $ hg clone --stream -U http://localhost:$HGPORT secret-allowed
- streaming all changes
- 1091 files to transfer, 102 KB of data (no-zstd !)
- transferred 102 KB in * seconds (* */sec) (glob) (no-zstd !)
- 1091 files to transfer, 98.8 KB of data (zstd !)
- transferred 98.8 KB in * seconds (* */sec) (glob) (zstd !)
- searching for changes
- no changes found
-#endif
-#if stream-bundle2-v2
$ hg clone --stream -U http://localhost:$HGPORT secret-allowed
streaming all changes
- 1094 files to transfer, 102 KB of data (no-zstd !)
- transferred 102 KB in * seconds (* */sec) (glob) (no-zstd !)
- 1094 files to transfer, 98.9 KB of data (zstd no-rust !)
- transferred 98.9 KB in * seconds (* */sec) (glob) (zstd no-rust !)
- 1096 files to transfer, 99.0 KB of data (zstd rust !)
- transferred 99.0 KB in * seconds (* */sec) (glob) (zstd rust !)
-#endif
-#if stream-bundle2-v3
- $ hg clone --stream -U http://localhost:$HGPORT secret-allowed
- streaming all changes
- 1093 entries to transfer
- transferred 102 KB in * seconds (* */sec) (glob) (no-zstd !)
- transferred 98.9 KB in * seconds (* */sec) (glob) (zstd no-rust !)
- transferred 99.0 KB in * seconds (* */sec) (glob) (zstd rust !)
-#endif
+ * files to transfer* (glob) (no-stream-bundle2-v3 !)
+ * entries to transfer (glob) (stream-bundle2-v3 !)
+ transferred * KB in * seconds (* */sec) (glob)
+ searching for changes (stream-legacy !)
+ no changes found (stream-legacy !)
$ killdaemons.py
Verify interaction between preferuncompressed and secret presence
+-----------------------------------------------------------------
+
+Secret presence will still make the clone falls back to a normal bundle even if
+the server prefers stream clone.
$ cd server
$ hg serve --config server.preferuncompressed=true -p $HGPORT -d --pid-file=hg.pid
@@ -677,6 +332,9 @@
$ killdaemons.py
Clone not allowed when full bundles disabled and can't serve secrets
+--------------------------------------------------------------------
+
+The clone should fail as no valid option is found.
$ cd server
$ hg serve --config server.disablefullbundle=true -p $HGPORT -d --pid-file=hg.pid
@@ -692,6 +350,8 @@
[100]
Local stream clone with secrets involved
+----------------------------------------
+
(This is just a test over behavior: if you have access to the repo's files,
there is no security so it isn't important to prevent a clone here.)
@@ -704,12 +364,20 @@
added 2 changesets with 1025 changes to 1025 files
new changesets 96ee1d7354c4:c17445101a72
+(revert introduction of secret changeset)
+
+ $ hg -R server phase --draft 'secret()'
+
Stream clone while repo is changing:
+------------------------------------
+
+We should send a repository in a valid state, ignoring the ongoing transaction.
$ mkdir changing
$ cd changing
prepare repo with small and big file to cover both code paths in emitrevlogdata
+(inlined revlog and non-inlined revlogs).
$ hg init repo
$ touch repo/f1
@@ -740,15 +408,14 @@
$ $RUNTESTDIR/testlib/wait-on-file 10 $HG_TEST_STREAM_WALKED_FILE_3
$ hg -R clone id
000000000000
+ $ hg -R clone verify --quiet
$ cat errors.log
$ cd ..
Stream repository with bookmarks
--------------------------------
-(revert introduction of secret changeset)
-
- $ hg -R server phase --draft 'secret()'
+The bookmark file should be send over in the stream bundle.
add a bookmark
@@ -756,40 +423,17 @@
clone it
-#if stream-legacy
- $ hg clone --stream http://localhost:$HGPORT with-bookmarks
- streaming all changes
- 1091 files to transfer, 102 KB of data (no-zstd !)
- transferred 102 KB in * seconds (* */sec) (glob) (no-zstd !)
- 1091 files to transfer, 98.8 KB of data (zstd !)
- transferred 98.8 KB in * seconds (* */sec) (glob) (zstd !)
- searching for changes
- no changes found
- updating to branch default
- 1088 files updated, 0 files merged, 0 files removed, 0 files unresolved
-#endif
-#if stream-bundle2-v2
$ hg clone --stream http://localhost:$HGPORT with-bookmarks
streaming all changes
- 1097 files to transfer, 102 KB of data (no-zstd !)
- transferred 102 KB in * seconds (* */sec) (glob) (no-zstd !)
- 1097 files to transfer, 99.1 KB of data (zstd no-rust !)
- transferred 99.1 KB in * seconds (* */sec) (glob) (zstd no-rust !)
- 1099 files to transfer, 99.2 KB of data (zstd rust !)
- transferred 99.2 KB in * seconds (* */sec) (glob) (zstd rust !)
+ 1091 files to transfer, * KB of data (glob) (stream-legacy !)
+ 1097 files to transfer, * KB of data (glob) (stream-bundle2-v2 no-rust !)
+ 1099 files to transfer, * KB of data (glob) (stream-bundle2-v2 rust !)
+ 1096 entries to transfer (stream-bundle2-v3 !)
+ transferred * KB in * seconds (* */sec) (glob)
+ searching for changes (stream-legacy !)
+ no changes found (stream-legacy !)
updating to branch default
1088 files updated, 0 files merged, 0 files removed, 0 files unresolved
-#endif
-#if stream-bundle2-v3
- $ hg clone --stream http://localhost:$HGPORT with-bookmarks
- streaming all changes
- 1096 entries to transfer
- transferred 102 KB in * seconds (* */sec) (glob) (no-zstd !)
- transferred 99.1 KB in * seconds (* */sec) (glob) (zstd no-rust !)
- transferred 99.2 KB in * seconds (* */sec) (glob) (zstd rust !)
- updating to branch default
- 1088 files updated, 0 files merged, 0 files removed, 0 files unresolved
-#endif
$ hg verify -R with-bookmarks -q
$ hg -R with-bookmarks bookmarks
some-bookmark 2:5223b5e3265f
@@ -797,6 +441,9 @@
Stream repository with phases
-----------------------------
+The file storing phases information (e.g. phaseroots) should be sent as part of
+the stream bundle.
+
Clone as publishing
$ hg -R server phase -r 'all()'
@@ -804,40 +451,17 @@
1: draft
2: draft
-#if stream-legacy
- $ hg clone --stream http://localhost:$HGPORT phase-publish
- streaming all changes
- 1091 files to transfer, 102 KB of data (no-zstd !)
- transferred 102 KB in * seconds (* */sec) (glob) (no-zstd !)
- 1091 files to transfer, 98.8 KB of data (zstd !)
- transferred 98.8 KB in * seconds (* */sec) (glob) (zstd !)
- searching for changes
- no changes found
- updating to branch default
- 1088 files updated, 0 files merged, 0 files removed, 0 files unresolved
-#endif
-#if stream-bundle2-v2
$ hg clone --stream http://localhost:$HGPORT phase-publish
streaming all changes
- 1097 files to transfer, 102 KB of data (no-zstd !)
- transferred 102 KB in * seconds (* */sec) (glob) (no-zstd !)
- 1097 files to transfer, 99.1 KB of data (zstd no-rust !)
- transferred 99.1 KB in * seconds (* */sec) (glob) (zstd no-rust !)
- 1099 files to transfer, 99.2 KB of data (zstd rust !)
- transferred 99.2 KB in * seconds (* */sec) (glob) (zstd rust !)
+ 1091 files to transfer, * KB of data (glob) (stream-legacy !)
+ 1097 files to transfer, * KB of data (glob) (stream-bundle2-v2 no-rust !)
+ 1099 files to transfer, * KB of data (glob) (stream-bundle2-v2 rust !)
+ 1096 entries to transfer (stream-bundle2-v3 !)
+ transferred * KB in * seconds (* */sec) (glob)
+ searching for changes (stream-legacy !)
+ no changes found (stream-legacy !)
updating to branch default
1088 files updated, 0 files merged, 0 files removed, 0 files unresolved
-#endif
-#if stream-bundle2-v3
- $ hg clone --stream http://localhost:$HGPORT phase-publish
- streaming all changes
- 1096 entries to transfer
- transferred 102 KB in * seconds (* */sec) (glob) (no-zstd !)
- transferred 99.1 KB in * seconds (* */sec) (glob) (zstd no-rust !)
- transferred 99.2 KB in * seconds (* */sec) (glob) (zstd rust !)
- updating to branch default
- 1088 files updated, 0 files merged, 0 files removed, 0 files unresolved
-#endif
$ hg verify -R phase-publish -q
$ hg -R phase-publish phase -r 'all()'
0: public
@@ -854,73 +478,47 @@
$ hg -R server serve -p $HGPORT -d --pid-file=hg.pid
$ cat hg.pid > $DAEMON_PIDS
-#if stream-legacy
-
-With v1 of the stream protocol, changeset are always cloned as public. It make
-stream v1 unsuitable for non-publishing repository.
-
- $ hg clone --stream http://localhost:$HGPORT phase-no-publish
- streaming all changes
- 1091 files to transfer, 102 KB of data (no-zstd !)
- transferred 102 KB in * seconds (* */sec) (glob) (no-zstd !)
- 1091 files to transfer, 98.8 KB of data (zstd !)
- transferred 98.8 KB in * seconds (* */sec) (glob) (zstd !)
- searching for changes
- no changes found
- updating to branch default
- 1088 files updated, 0 files merged, 0 files removed, 0 files unresolved
- $ hg -R phase-no-publish phase -r 'all()'
- 0: public
- 1: public
- 2: public
-#endif
-#if stream-bundle2-v2
$ hg clone --stream http://localhost:$HGPORT phase-no-publish
streaming all changes
- 1098 files to transfer, 102 KB of data (no-zstd !)
- transferred 102 KB in * seconds (* */sec) (glob) (no-zstd !)
- 1098 files to transfer, 99.1 KB of data (zstd no-rust !)
- transferred 99.1 KB in * seconds (* */sec) (glob) (zstd no-rust !)
- 1100 files to transfer, 99.2 KB of data (zstd rust !)
- transferred 99.2 KB in * seconds (* */sec) (glob) (zstd rust !)
+ 1091 files to transfer, * KB of data (glob) (stream-legacy !)
+ 1098 files to transfer, * KB of data (glob) (stream-bundle2-v2 no-rust !)
+ 1100 files to transfer, * KB of data (glob) (stream-bundle2-v2 rust !)
+ 1097 entries to transfer (stream-bundle2-v3 !)
+ transferred * KB in * seconds (* */sec) (glob)
+ searching for changes (stream-legacy !)
+ no changes found (stream-legacy !)
updating to branch default
1088 files updated, 0 files merged, 0 files removed, 0 files unresolved
+
+Note: With v1 of the stream protocol, changeset are always cloned as public. It
+make stream v1 unsuitable for non-publishing repository.
+
$ hg -R phase-no-publish phase -r 'all()'
- 0: draft
- 1: draft
- 2: draft
-#endif
-#if stream-bundle2-v3
- $ hg clone --stream http://localhost:$HGPORT phase-no-publish
- streaming all changes
- 1097 entries to transfer
- transferred 102 KB in * seconds (* */sec) (glob) (no-zstd !)
- transferred 99.1 KB in * seconds (* */sec) (glob) (zstd no-rust !)
- transferred 99.2 KB in * seconds (* */sec) (glob) (zstd rust !)
- updating to branch default
- 1088 files updated, 0 files merged, 0 files removed, 0 files unresolved
- $ hg -R phase-no-publish phase -r 'all()'
- 0: draft
- 1: draft
- 2: draft
-#endif
+ 0: public (stream-legacy !)
+ 1: public (stream-legacy !)
+ 2: public (stream-legacy !)
+ 0: draft (no-stream-legacy !)
+ 1: draft (no-stream-legacy !)
+ 2: draft (no-stream-legacy !)
$ hg verify -R phase-no-publish -q
$ killdaemons.py
+
+Stream repository with obsolescence
+-----------------------------------
+
#if stream-legacy
With v1 of the stream protocol, changeset are always cloned as public. There's
no obsolescence markers exchange in stream v1.
-#endif
-#if stream-bundle2-v2
-
-Stream repository with obsolescence
------------------------------------
+#else
Clone non-publishing with obsolescence
+The obsstore file should be send as part of the stream bundle
+
$ cat >> $HGRCPATH << EOF
> [experimental]
> evolution=all
@@ -943,62 +541,10 @@
$ hg clone -U --stream http://localhost:$HGPORT with-obsolescence
streaming all changes
- 1099 files to transfer, 102 KB of data (no-zstd !)
- transferred 102 KB in * seconds (* */sec) (glob) (no-zstd !)
- 1099 files to transfer, 99.5 KB of data (zstd no-rust !)
- transferred 99.5 KB in * seconds (* */sec) (glob) (zstd no-rust !)
- 1101 files to transfer, 99.6 KB of data (zstd rust !)
- transferred 99.6 KB in * seconds (* */sec) (glob) (zstd rust !)
- $ hg -R with-obsolescence log -T '{rev}: {phase}\n'
- 2: draft
- 1: draft
- 0: draft
- $ hg debugobsolete -R with-obsolescence
- 8c206a663911c1f97f2f9d7382e417ae55872cfa 0 {5223b5e3265f0df40bb743da62249413d74ac70f} (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
- $ hg verify -R with-obsolescence -q
-
- $ hg clone -U --stream --config experimental.evolution=0 http://localhost:$HGPORT with-obsolescence-no-evolution
- streaming all changes
- remote: abort: server has obsolescence markers, but client cannot receive them via stream clone
- abort: pull failed on remote
- [100]
-
- $ killdaemons.py
-
-#endif
-#if stream-bundle2-v3
-
-Stream repository with obsolescence
------------------------------------
-
-Clone non-publishing with obsolescence
-
- $ cat >> $HGRCPATH << EOF
- > [experimental]
- > evolution=all
- > EOF
-
- $ cd server
- $ echo foo > foo
- $ hg -q commit -m 'about to be pruned'
- $ hg debugobsolete `hg log -r . -T '{node}'` -d '0 0' -u test --record-parents
- 1 new obsolescence markers
- obsoleted 1 changesets
- $ hg up null -q
- $ hg log -T '{rev}: {phase}\n'
- 2: draft
- 1: draft
- 0: draft
- $ hg serve -p $HGPORT -d --pid-file=hg.pid
- $ cat hg.pid > $DAEMON_PIDS
- $ cd ..
-
- $ hg clone -U --stream http://localhost:$HGPORT with-obsolescence
- streaming all changes
- 1098 entries to transfer
- transferred 102 KB in * seconds (* */sec) (glob) (no-zstd !)
- transferred 99.5 KB in * seconds (* */sec) (glob) (zstd no-rust !)
- transferred 99.6 KB in * seconds (* */sec) (glob) (zstd rust !)
+ 1099 files to transfer, * KB of data (glob) (stream-bundle2-v2 no-rust !)
+ 1101 files to transfer, * KB of data (glob) (stream-bundle2-v2 rust !)
+ 1098 entries to transfer (no-stream-bundle2-v2 !)
+ transferred * KB in * seconds (* */sec) (glob)
$ hg -R with-obsolescence log -T '{rev}: {phase}\n'
2: draft
1: draft
@@ -1018,19 +564,16 @@
#endif
Cloning a repo with no requirements doesn't give some obscure error
+-------------------------------------------------------------------
$ mkdir -p empty-repo/.hg
$ hg clone -q --stream ssh://user@dummy/empty-repo empty-repo2
$ hg --cwd empty-repo2 verify -q
Cloning a repo with an empty manifestlog doesn't give some weird error
+----------------------------------------------------------------------
$ rm -r empty-repo; hg init empty-repo
$ (cd empty-repo; touch x; hg commit -Am empty; hg debugstrip -r 0) > /dev/null
$ hg clone -q --stream ssh://user@dummy/empty-repo empty-repo3
- $ hg --cwd empty-repo3 verify -q 2>&1 | grep -v warning
- [1]
-
-The warnings filtered out here are talking about zero-length 'orphan' data files.
-Those are harmless, so that's fine.
-
+ $ hg --cwd empty-repo3 verify -q
--- a/tests/test-clone.t Mon Mar 25 02:09:15 2024 +0100
+++ b/tests/test-clone.t Mon Mar 25 16:27:48 2024 +0000
@@ -47,11 +47,7 @@
$ ls .hg/cache
branch2-base
- branch2-immutable
branch2-served
- branch2-served.hidden
- branch2-visible
- branch2-visible-hidden
rbc-names-v1
rbc-revs-v1
tags2
@@ -71,42 +67,34 @@
#if hardlink
$ hg --debug clone -U . ../c --config progress.debug=true
- linking: 1/16 files (6.25%) (no-rust !)
- linking: 2/16 files (12.50%) (no-rust !)
- linking: 3/16 files (18.75%) (no-rust !)
- linking: 4/16 files (25.00%) (no-rust !)
- linking: 5/16 files (31.25%) (no-rust !)
- linking: 6/16 files (37.50%) (no-rust !)
- linking: 7/16 files (43.75%) (no-rust !)
- linking: 8/16 files (50.00%) (no-rust !)
- linking: 9/16 files (56.25%) (no-rust !)
- linking: 10/16 files (62.50%) (no-rust !)
- linking: 11/16 files (68.75%) (no-rust !)
- linking: 12/16 files (75.00%) (no-rust !)
- linking: 13/16 files (81.25%) (no-rust !)
- linking: 14/16 files (87.50%) (no-rust !)
- linking: 15/16 files (93.75%) (no-rust !)
- linking: 16/16 files (100.00%) (no-rust !)
- linked 16 files (no-rust !)
- linking: 1/18 files (5.56%) (rust !)
- linking: 2/18 files (11.11%) (rust !)
- linking: 3/18 files (16.67%) (rust !)
- linking: 4/18 files (22.22%) (rust !)
- linking: 5/18 files (27.78%) (rust !)
- linking: 6/18 files (33.33%) (rust !)
- linking: 7/18 files (38.89%) (rust !)
- linking: 8/18 files (44.44%) (rust !)
- linking: 9/18 files (50.00%) (rust !)
- linking: 10/18 files (55.56%) (rust !)
- linking: 11/18 files (61.11%) (rust !)
- linking: 12/18 files (66.67%) (rust !)
- linking: 13/18 files (72.22%) (rust !)
- linking: 14/18 files (77.78%) (rust !)
- linking: 15/18 files (83.33%) (rust !)
- linking: 16/18 files (88.89%) (rust !)
- linking: 17/18 files (94.44%) (rust !)
- linking: 18/18 files (100.00%) (rust !)
- linked 18 files (rust !)
+ linking: 1/12 files (8.33%) (no-rust !)
+ linking: 2/12 files (16.67%) (no-rust !)
+ linking: 3/12 files (25.00%) (no-rust !)
+ linking: 4/12 files (33.33%) (no-rust !)
+ linking: 5/12 files (41.67%) (no-rust !)
+ linking: 6/12 files (50.00%) (no-rust !)
+ linking: 7/12 files (58.33%) (no-rust !)
+ linking: 8/12 files (66.67%) (no-rust !)
+ linking: 9/12 files (75.00%) (no-rust !)
+ linking: 10/12 files (83.33%) (no-rust !)
+ linking: 11/12 files (91.67%) (no-rust !)
+ linking: 12/12 files (100.00%) (no-rust !)
+ linked 12 files (no-rust !)
+ linking: 1/14 files (7.14%) (rust !)
+ linking: 2/14 files (14.29%) (rust !)
+ linking: 3/14 files (21.43%) (rust !)
+ linking: 4/14 files (28.57%) (rust !)
+ linking: 5/14 files (35.71%) (rust !)
+ linking: 6/14 files (42.86%) (rust !)
+ linking: 7/14 files (50.00%) (rust !)
+ linking: 8/14 files (57.14%) (rust !)
+ linking: 9/14 files (64.29%) (rust !)
+ linking: 10/14 files (71.43%) (rust !)
+ linking: 11/14 files (78.57%) (rust !)
+ linking: 12/14 files (85.71%) (rust !)
+ linking: 13/14 files (92.86%) (rust !)
+ linking: 14/14 files (100.00%) (rust !)
+ linked 14 files (rust !)
updating the branch cache
#else
$ hg --debug clone -U . ../c --config progress.debug=true
@@ -125,11 +113,7 @@
$ ls .hg/cache
branch2-base
- branch2-immutable
branch2-served
- branch2-served.hidden
- branch2-visible
- branch2-visible-hidden
rbc-names-v1
rbc-revs-v1
tags2
--- a/tests/test-clonebundles.t Mon Mar 25 02:09:15 2024 +0100
+++ b/tests/test-clonebundles.t Mon Mar 25 16:27:48 2024 +0000
@@ -394,9 +394,9 @@
$ hg clone -U http://localhost:$HGPORT stream-clone-no-spec
applying clone bundle from http://localhost:$HGPORT1/packed.hg
5 files to transfer, 613 bytes of data (no-rust !)
- transferred 613 bytes in *.* seconds (*) (glob) (no-rust !)
+ transferred 613 bytes in * seconds (* */sec) (glob) (no-rust !)
7 files to transfer, 739 bytes of data (rust !)
- transferred 739 bytes in *.* seconds (*) (glob) (rust !)
+ transferred 739 bytes in * seconds (* */sec) (glob) (rust !)
finished applying clone bundle
searching for changes
no changes found
@@ -409,10 +409,8 @@
$ hg clone -U http://localhost:$HGPORT stream-clone-vanilla-spec
applying clone bundle from http://localhost:$HGPORT1/packed.hg
- 5 files to transfer, 613 bytes of data (no-rust !)
- transferred 613 bytes in *.* seconds (*) (glob) (no-rust !)
- 7 files to transfer, 739 bytes of data (rust !)
- transferred 739 bytes in *.* seconds (*) (glob) (rust !)
+ * files to transfer, * bytes of data (glob)
+ transferred * bytes in * seconds (* */sec) (glob)
finished applying clone bundle
searching for changes
no changes found
@@ -425,10 +423,8 @@
$ hg clone -U http://localhost:$HGPORT stream-clone-supported-requirements
applying clone bundle from http://localhost:$HGPORT1/packed.hg
- 5 files to transfer, 613 bytes of data (no-rust !)
- transferred 613 bytes in *.* seconds (*) (glob) (no-rust !)
- 7 files to transfer, 739 bytes of data (rust !)
- transferred 739 bytes in *.* seconds (*) (glob) (rust !)
+ * files to transfer, * bytes of data (glob)
+ transferred * bytes in * seconds (* */sec) (glob)
finished applying clone bundle
searching for changes
no changes found
@@ -574,10 +570,8 @@
no compatible clone bundles available on server; falling back to regular clone
(you may want to report this to the server operator)
streaming all changes
- 10 files to transfer, 816 bytes of data (no-rust !)
- transferred 816 bytes in * seconds (*) (glob) (no-rust !)
- 12 files to transfer, 942 bytes of data (rust !)
- transferred 942 bytes in *.* seconds (*) (glob) (rust !)
+ * files to transfer, * bytes of data (glob)
+ transferred * bytes in * seconds (* */sec) (glob)
A manifest with a stream clone but no BUNDLESPEC
@@ -589,10 +583,8 @@
no compatible clone bundles available on server; falling back to regular clone
(you may want to report this to the server operator)
streaming all changes
- 10 files to transfer, 816 bytes of data (no-rust !)
- transferred 816 bytes in * seconds (*) (glob) (no-rust !)
- 12 files to transfer, 942 bytes of data (rust !)
- transferred 942 bytes in *.* seconds (*) (glob) (rust !)
+ * files to transfer, * bytes of data (glob)
+ transferred * bytes in * seconds (* */sec) (glob)
A manifest with a gzip bundle and a stream clone
@@ -603,10 +595,8 @@
$ hg clone -U --stream http://localhost:$HGPORT uncompressed-gzip-packed
applying clone bundle from http://localhost:$HGPORT1/packed.hg
- 5 files to transfer, 613 bytes of data (no-rust !)
- transferred 613 bytes in *.* seconds (*) (glob) (no-rust !)
- 7 files to transfer, 739 bytes of data (rust !)
- transferred 739 bytes in *.* seconds (*) (glob) (rust !)
+ * files to transfer, * bytes of data (glob)
+ transferred * bytes in * seconds (* */sec) (glob)
finished applying clone bundle
searching for changes
no changes found
@@ -620,10 +610,8 @@
$ hg clone -U --stream http://localhost:$HGPORT uncompressed-gzip-packed-requirements
applying clone bundle from http://localhost:$HGPORT1/packed.hg
- 5 files to transfer, 613 bytes of data (no-rust !)
- transferred 613 bytes in *.* seconds (*) (glob) (no-rust !)
- 7 files to transfer, 739 bytes of data (rust !)
- transferred 739 bytes in *.* seconds (*) (glob) (rust !)
+ * files to transfer, * bytes of data (glob)
+ transferred * bytes in * seconds (* */sec) (glob)
finished applying clone bundle
searching for changes
no changes found
@@ -639,10 +627,8 @@
no compatible clone bundles available on server; falling back to regular clone
(you may want to report this to the server operator)
streaming all changes
- 10 files to transfer, 816 bytes of data (no-rust !)
- transferred 816 bytes in * seconds (*) (glob) (no-rust !)
- 12 files to transfer, 942 bytes of data (rust !)
- transferred 942 bytes in *.* seconds (*) (glob) (rust !)
+ * files to transfer, * bytes of data (glob)
+ transferred * bytes in * seconds (* */sec) (glob)
Test clone bundle retrieved through bundle2
--- a/tests/test-debugcommands.t Mon Mar 25 02:09:15 2024 +0100
+++ b/tests/test-debugcommands.t Mon Mar 25 16:27:48 2024 +0000
@@ -652,12 +652,7 @@
.hg/cache/rbc-revs-v1
.hg/cache/rbc-names-v1
.hg/cache/hgtagsfnodes1
- .hg/cache/branch2-visible-hidden
- .hg/cache/branch2-visible
- .hg/cache/branch2-served.hidden
.hg/cache/branch2-served
- .hg/cache/branch2-immutable
- .hg/cache/branch2-base
Test debug::unbundle
--- a/tests/test-hardlinks.t Mon Mar 25 02:09:15 2024 +0100
+++ b/tests/test-hardlinks.t Mon Mar 25 16:27:48 2024 +0000
@@ -263,11 +263,7 @@
2 r4/.hg/00changelog.i
[24] r4/.hg/branch (re)
2 r4/.hg/cache/branch2-base
- 2 r4/.hg/cache/branch2-immutable
2 r4/.hg/cache/branch2-served
- 2 r4/.hg/cache/branch2-served.hidden
- 2 r4/.hg/cache/branch2-visible
- 2 r4/.hg/cache/branch2-visible-hidden
2 r4/.hg/cache/rbc-names-v1
2 r4/.hg/cache/rbc-revs-v1
2 r4/.hg/cache/tags2
@@ -320,11 +316,7 @@
2 r4/.hg/00changelog.i
1 r4/.hg/branch
2 r4/.hg/cache/branch2-base
- 2 r4/.hg/cache/branch2-immutable
2 r4/.hg/cache/branch2-served
- 2 r4/.hg/cache/branch2-served.hidden
- 2 r4/.hg/cache/branch2-visible
- 2 r4/.hg/cache/branch2-visible-hidden
2 r4/.hg/cache/rbc-names-v1
2 r4/.hg/cache/rbc-revs-v1
2 r4/.hg/cache/tags2
--- a/tests/test-server-view.t Mon Mar 25 02:09:15 2024 +0100
+++ b/tests/test-server-view.t Mon Mar 25 16:27:48 2024 +0000
@@ -36,12 +36,7 @@
$ hg -R test --config experimental.extra-filter-revs='not public()' debugupdatecache
$ ls -1 test/.hg/cache/
branch2-base%89c45d2fa07e
- branch2-immutable%89c45d2fa07e
branch2-served
- branch2-served%89c45d2fa07e
- branch2-served.hidden%89c45d2fa07e
- branch2-visible%89c45d2fa07e
- branch2-visible-hidden%89c45d2fa07e
hgtagsfnodes1
rbc-names-v1
rbc-revs-v1
--- a/tests/test-share.t Mon Mar 25 02:09:15 2024 +0100
+++ b/tests/test-share.t Mon Mar 25 16:27:48 2024 +0000
@@ -63,11 +63,7 @@
1 files updated, 0 files merged, 0 files removed, 0 files unresolved
$ ls -1 ../repo2-clone/.hg/cache
branch2-base
- branch2-immutable
branch2-served
- branch2-served.hidden
- branch2-visible
- branch2-visible-hidden
rbc-names-v1
rbc-revs-v1
tags2
--- a/tests/test-ssh.t Mon Mar 25 02:09:15 2024 +0100
+++ b/tests/test-ssh.t Mon Mar 25 16:27:48 2024 +0000
@@ -72,8 +72,8 @@
$ hg -R local-stream book mybook
$ hg clone --stream ssh://user@dummy/local-stream stream2
streaming all changes
- 16 files to transfer, * of data (glob) (no-rust !)
- 18 files to transfer, * of data (glob) (rust !)
+ 12 files to transfer, * of data (glob) (no-rust !)
+ 14 files to transfer, * of data (glob) (rust !)
transferred * in * seconds (*) (glob)
updating to branch default
2 files updated, 0 files merged, 0 files removed, 0 files unresolved
--- a/tests/test-strip-branch-cache.t Mon Mar 25 02:09:15 2024 +0100
+++ b/tests/test-strip-branch-cache.t Mon Mar 25 16:27:48 2024 +0000
@@ -1,3 +1,5 @@
+This test cover a bug that no longer exist.
+
Define helpers.
$ hg_log () { hg log -G -T "{rev}:{node|short}"; }
@@ -18,7 +20,10 @@
$ hg pull -q ../repo
- $ cat .hg/cache/branch2-visible
+ $ ls -1 .hg/cache/branch?*
+ .hg/cache/branch2-base
+ .hg/cache/branch2-served
+ $ cat .hg/cache/branch?-served
222ae9789a75703f9836e44de7db179cbfd420ee 2
a3498d6e39376d2456425dd8c692367bdbf00fa2 o default
222ae9789a75703f9836e44de7db179cbfd420ee o default
@@ -33,24 +38,36 @@
$ strip '1:'
-The branchmap cache is not adjusted on strip.
-Now mentions a changelog entry that has been stripped.
+After the strip the "served" cache is now identical to the "base" one, and the
+older one have been actively deleted.
- $ cat .hg/cache/branch2-visible
- 222ae9789a75703f9836e44de7db179cbfd420ee 2
- a3498d6e39376d2456425dd8c692367bdbf00fa2 o default
- 222ae9789a75703f9836e44de7db179cbfd420ee o default
+ $ ls -1 .hg/cache/branch?*
+ .hg/cache/branch2-base
+ $ cat .hg/cache/branch?-base
+ 7ab0a3bd758a58b9f79557ce708533e627776cce 0
+ 7ab0a3bd758a58b9f79557ce708533e627776cce o default
+
+We do a new commit and we get a new valid branchmap for the served version
$ commit c
-
-Not adjusted on commit, either.
+ $ ls -1 .hg/cache/branch?*
+ .hg/cache/branch2-base
+ .hg/cache/branch2-served
+ $ cat .hg/cache/branch?-served
+ a1602b357cfca067600406eb19060c7128804d72 1
+ a1602b357cfca067600406eb19060c7128804d72 o default
- $ cat .hg/cache/branch2-visible
- 222ae9789a75703f9836e44de7db179cbfd420ee 2
- a3498d6e39376d2456425dd8c692367bdbf00fa2 o default
- 222ae9789a75703f9836e44de7db179cbfd420ee o default
On pull we end up with the same tip, and so wrongly reuse the invalid cache and crash.
- $ hg pull ../repo 2>&1 | grep 'ValueError:'
- ValueError: node a3498d6e39376d2456425dd8c692367bdbf00fa2 does not exist (known-bad-output !)
+ $ hg pull ../repo --quiet
+ $ hg heads -T '{rev} {node} {branch}\n'
+ 2 222ae9789a75703f9836e44de7db179cbfd420ee default
+ 1 a1602b357cfca067600406eb19060c7128804d72 default
+ $ ls -1 .hg/cache/branch?*
+ .hg/cache/branch2-base
+ .hg/cache/branch2-served
+ $ cat .hg/cache/branch?-served
+ 222ae9789a75703f9836e44de7db179cbfd420ee 2
+ a1602b357cfca067600406eb19060c7128804d72 o default
+ 222ae9789a75703f9836e44de7db179cbfd420ee o default
--- a/tests/test-tags.t Mon Mar 25 02:09:15 2024 +0100
+++ b/tests/test-tags.t Mon Mar 25 16:27:48 2024 +0000
@@ -792,11 +792,6 @@
$ ls tagsclient/.hg/cache
branch2-base
- branch2-immutable
- branch2-served
- branch2-served.hidden
- branch2-visible
- branch2-visible-hidden
hgtagsfnodes1
rbc-names-v1
rbc-revs-v1
@@ -823,11 +818,6 @@
$ ls tagsclient/.hg/cache
branch2-base
- branch2-immutable
- branch2-served
- branch2-served.hidden
- branch2-visible
- branch2-visible-hidden
hgtagsfnodes1
rbc-names-v1
rbc-revs-v1
--- a/tests/test-treemanifest.t Mon Mar 25 02:09:15 2024 +0100
+++ b/tests/test-treemanifest.t Mon Mar 25 16:27:48 2024 +0000
@@ -761,8 +761,8 @@
$ hg clone --config experimental.changegroup3=True --stream -U \
> http://localhost:$HGPORT1 stream-clone-basicstore
streaming all changes
- 29 files to transfer, * of data (glob) (no-rust !)
- 31 files to transfer, * of data (glob) (rust !)
+ 24 files to transfer, * of data (glob) (no-rust !)
+ 26 files to transfer, * of data (glob) (rust !)
transferred * in * seconds (*) (glob)
$ hg -R stream-clone-basicstore verify -q
$ cat port-1-errors.log
@@ -771,8 +771,8 @@
$ hg clone --config experimental.changegroup3=True --stream -U \
> http://localhost:$HGPORT2 stream-clone-encodedstore
streaming all changes
- 29 files to transfer, * of data (glob) (no-rust !)
- 31 files to transfer, * of data (glob) (rust !)
+ 24 files to transfer, * of data (glob) (no-rust !)
+ 26 files to transfer, * of data (glob) (rust !)
transferred * in * seconds (*) (glob)
$ hg -R stream-clone-encodedstore verify -q
$ cat port-2-errors.log