--- a/contrib/chg/chg.c Sun May 02 16:56:20 2021 -0400
+++ b/contrib/chg/chg.c Fri May 07 22:06:25 2021 -0400
@@ -240,13 +240,8 @@
const char *hgcmd = gethgcmd();
const char *baseargv[] = {
- hgcmd,
- "serve",
- "--cmdserver",
- "chgunix",
- "--address",
- opts->initsockname,
- "--daemon-postexec",
+ hgcmd, "serve", "--no-profile", "--cmdserver",
+ "chgunix", "--address", opts->initsockname, "--daemon-postexec",
"chdir:/",
};
size_t baseargvsize = sizeof(baseargv) / sizeof(baseargv[0]);
--- a/contrib/dumprevlog Sun May 02 16:56:20 2021 -0400
+++ b/contrib/dumprevlog Fri May 07 22:06:25 2021 -0400
@@ -13,6 +13,10 @@
)
from mercurial.utils import procutil
+from mercurial.revlogutils import (
+ constants as revlog_constants,
+)
+
for fp in (sys.stdin, sys.stdout, sys.stderr):
procutil.setbinary(fp)
@@ -32,7 +36,11 @@
for f in sys.argv[1:]:
- r = revlog.revlog(binopen, encoding.strtolocal(f))
+ r = revlog.revlog(
+ binopen,
+ target=(revlog_constants.KIND_OTHER, b'dump-revlog'),
+ indexfile=encoding.strtolocal(f),
+ )
print("file:", f)
for i in r:
n = r.node(i)
--- a/contrib/perf.py Sun May 02 16:56:20 2021 -0400
+++ b/contrib/perf.py Fri May 07 22:06:25 2021 -0400
@@ -66,6 +66,8 @@
import tempfile
import threading
import time
+
+import mercurial.revlog
from mercurial import (
changegroup,
cmdutil,
@@ -76,7 +78,6 @@
hg,
mdiff,
merge,
- revlog,
util,
)
@@ -119,6 +120,21 @@
except ImportError:
profiling = None
+try:
+ from mercurial.revlogutils import constants as revlog_constants
+
+ perf_rl_kind = (revlog_constants.KIND_OTHER, b'created-by-perf')
+
+ def revlog(opener, *args, **kwargs):
+ return mercurial.revlog.revlog(opener, perf_rl_kind, *args, **kwargs)
+
+
+except (ImportError, AttributeError):
+ perf_rl_kind = None
+
+ def revlog(opener, *args, **kwargs):
+ return mercurial.revlog.revlog(opener, *args, **kwargs)
+
def identity(a):
return a
@@ -1809,7 +1825,8 @@
mercurial.revlog._prereadsize = 2 ** 24 # disable lazy parser in old hg
n = scmutil.revsingle(repo, rev).node()
- cl = mercurial.revlog.revlog(getsvfs(repo), b"00changelog.i")
+
+ cl = revlog(getsvfs(repo), indexfile=b"00changelog.i")
def d():
cl.rev(n)
@@ -2598,11 +2615,14 @@
header = struct.unpack(b'>I', data[0:4])[0]
version = header & 0xFFFF
if version == 1:
- revlogio = revlog.revlogio()
inline = header & (1 << 16)
else:
raise error.Abort(b'unsupported revlog version: %d' % version)
+ parse_index_v1 = getattr(mercurial.revlog, 'parse_index_v1', None)
+ if parse_index_v1 is None:
+ parse_index_v1 = mercurial.revlog.revlogio().parseindex
+
rllen = len(rl)
node0 = rl.node(0)
@@ -2617,33 +2637,31 @@
allnodesrev = list(reversed(allnodes))
def constructor():
- revlog.revlog(opener, indexfile)
+ revlog(opener, indexfile=indexfile)
def read():
with opener(indexfile) as fh:
fh.read()
def parseindex():
- revlogio.parseindex(data, inline)
+ parse_index_v1(data, inline)
def getentry(revornode):
- index = revlogio.parseindex(data, inline)[0]
+ index = parse_index_v1(data, inline)[0]
index[revornode]
def getentries(revs, count=1):
- index = revlogio.parseindex(data, inline)[0]
+ index = parse_index_v1(data, inline)[0]
for i in range(count):
for rev in revs:
index[rev]
def resolvenode(node):
- index = revlogio.parseindex(data, inline)[0]
+ index = parse_index_v1(data, inline)[0]
rev = getattr(index, 'rev', None)
if rev is None:
- nodemap = getattr(
- revlogio.parseindex(data, inline)[0], 'nodemap', None
- )
+ nodemap = getattr(parse_index_v1(data, inline)[0], 'nodemap', None)
# This only works for the C code.
if nodemap is None:
return
@@ -2655,12 +2673,10 @@
pass
def resolvenodes(nodes, count=1):
- index = revlogio.parseindex(data, inline)[0]
+ index = parse_index_v1(data, inline)[0]
rev = getattr(index, 'rev', None)
if rev is None:
- nodemap = getattr(
- revlogio.parseindex(data, inline)[0], 'nodemap', None
- )
+ nodemap = getattr(parse_index_v1(data, inline)[0], 'nodemap', None)
# This only works for the C code.
if nodemap is None:
return
@@ -3043,7 +3059,7 @@
vfs = vfsmod.vfs(tmpdir)
vfs.options = getattr(orig.opener, 'options', None)
- dest = revlog.revlog(
+ dest = revlog(
vfs, indexfile=indexname, datafile=dataname, **revlogkwargs
)
if dest._inline:
--- a/contrib/undumprevlog Sun May 02 16:56:20 2021 -0400
+++ b/contrib/undumprevlog Fri May 07 22:06:25 2021 -0400
@@ -15,6 +15,10 @@
)
from mercurial.utils import procutil
+from mercurial.revlogutils import (
+ constants as revlog_constants,
+)
+
for fp in (sys.stdin, sys.stdout, sys.stderr):
procutil.setbinary(fp)
@@ -28,7 +32,11 @@
break
if l.startswith("file:"):
f = encoding.strtolocal(l[6:-1])
- r = revlog.revlog(opener, f)
+ r = revlog.revlog(
+ opener,
+ target=(revlog_constants.KIND_OTHER, b'undump-revlog'),
+ indexfile=f,
+ )
procutil.stdout.write(b'%s\n' % f)
elif l.startswith("node:"):
n = bin(l[6:-1])
--- a/hgext/absorb.py Sun May 02 16:56:20 2021 -0400
+++ b/hgext/absorb.py Fri May 07 22:06:25 2021 -0400
@@ -38,7 +38,6 @@
from mercurial.i18n import _
from mercurial.node import (
hex,
- nullid,
short,
)
from mercurial import (
@@ -109,7 +108,7 @@
return b''
def node(self):
- return nullid
+ return self._repo.nullid
def uniq(lst):
@@ -927,7 +926,7 @@
the commit is a clone from ctx, with a (optionally) different p1, and
different file contents replaced by memworkingcopy.
"""
- parents = p1 and (p1, nullid)
+ parents = p1 and (p1, self.repo.nullid)
extra = ctx.extra()
if self._useobsolete and self.ui.configbool(b'absorb', b'add-noise'):
extra[b'absorb_source'] = ctx.hex()
--- a/hgext/convert/git.py Sun May 02 16:56:20 2021 -0400
+++ b/hgext/convert/git.py Fri May 07 22:06:25 2021 -0400
@@ -9,7 +9,7 @@
import os
from mercurial.i18n import _
-from mercurial.node import nullhex
+from mercurial.node import sha1nodeconstants
from mercurial import (
config,
error,
@@ -192,7 +192,7 @@
return heads
def catfile(self, rev, ftype):
- if rev == nullhex:
+ if rev == sha1nodeconstants.nullhex:
raise IOError
self.catfilepipe[0].write(rev + b'\n')
self.catfilepipe[0].flush()
@@ -214,7 +214,7 @@
return data
def getfile(self, name, rev):
- if rev == nullhex:
+ if rev == sha1nodeconstants.nullhex:
return None, None
if name == b'.hgsub':
data = b'\n'.join([m.hgsub() for m in self.submoditer()])
@@ -228,7 +228,7 @@
return data, mode
def submoditer(self):
- null = nullhex
+ null = sha1nodeconstants.nullhex
for m in sorted(self.submodules, key=lambda p: p.path):
if m.node != null:
yield m
@@ -317,7 +317,7 @@
subexists[0] = True
if entry[4] == b'D' or renamesource:
subdeleted[0] = True
- changes.append((b'.hgsub', nullhex))
+ changes.append((b'.hgsub', sha1nodeconstants.nullhex))
else:
changes.append((b'.hgsub', b''))
elif entry[1] == b'160000' or entry[0] == b':160000':
@@ -325,7 +325,7 @@
subexists[0] = True
else:
if renamesource:
- h = nullhex
+ h = sha1nodeconstants.nullhex
self.modecache[(f, h)] = (p and b"x") or (s and b"l") or b""
changes.append((f, h))
@@ -362,7 +362,7 @@
if subexists[0]:
if subdeleted[0]:
- changes.append((b'.hgsubstate', nullhex))
+ changes.append((b'.hgsubstate', sha1nodeconstants.nullhex))
else:
self.retrievegitmodules(version)
changes.append((b'.hgsubstate', b''))
--- a/hgext/convert/hg.py Sun May 02 16:56:20 2021 -0400
+++ b/hgext/convert/hg.py Fri May 07 22:06:25 2021 -0400
@@ -27,8 +27,7 @@
from mercurial.node import (
bin,
hex,
- nullhex,
- nullid,
+ sha1nodeconstants,
)
from mercurial import (
bookmarks,
@@ -160,7 +159,7 @@
continue
revid = revmap.get(source.lookuprev(s[0]))
if not revid:
- if s[0] == nullhex:
+ if s[0] == sha1nodeconstants.nullhex:
revid = s[0]
else:
# missing, but keep for hash stability
@@ -179,7 +178,7 @@
revid = s[0]
subpath = s[1]
- if revid != nullhex:
+ if revid != sha1nodeconstants.nullhex:
revmap = self.subrevmaps.get(subpath)
if revmap is None:
revmap = mapfile(
@@ -304,9 +303,9 @@
parent = parents[0]
if len(parents) < 2:
- parents.append(nullid)
+ parents.append(self.repo.nullid)
if len(parents) < 2:
- parents.append(nullid)
+ parents.append(self.repo.nullid)
p2 = parents.pop(0)
text = commit.desc
@@ -356,7 +355,7 @@
p2 = parents.pop(0)
p1ctx = self.repo[p1]
p2ctx = None
- if p2 != nullid:
+ if p2 != self.repo.nullid:
p2ctx = self.repo[p2]
fileset = set(files)
if full:
@@ -421,7 +420,7 @@
def puttags(self, tags):
tagparent = self.repo.branchtip(self.tagsbranch, ignoremissing=True)
- tagparent = tagparent or nullid
+ tagparent = tagparent or self.repo.nullid
oldlines = set()
for branch, heads in pycompat.iteritems(self.repo.branchmap()):
--- a/hgext/git/dirstate.py Sun May 02 16:56:20 2021 -0400
+++ b/hgext/git/dirstate.py Fri May 07 22:06:25 2021 -0400
@@ -4,7 +4,7 @@
import errno
import os
-from mercurial.node import nullid
+from mercurial.node import sha1nodeconstants
from mercurial import (
error,
extensions,
@@ -81,14 +81,16 @@
except pygit2.GitError:
# Typically happens when peeling HEAD fails, as in an
# empty repository.
- return nullid
+ return sha1nodeconstants.nullid
def p2(self):
# TODO: MERGE_HEAD? something like that, right?
- return nullid
+ return sha1nodeconstants.nullid
- def setparents(self, p1, p2=nullid):
- assert p2 == nullid, b'TODO merging support'
+ def setparents(self, p1, p2=None):
+ if p2 is None:
+ p2 = sha1nodeconstants.nullid
+ assert p2 == sha1nodeconstants.nullid, b'TODO merging support'
self.git.head.set_target(gitutil.togitnode(p1))
@util.propertycache
@@ -102,7 +104,7 @@
def parents(self):
# TODO how on earth do we find p2 if a merge is in flight?
- return self.p1(), nullid
+ return self.p1(), sha1nodeconstants.nullid
def __iter__(self):
return (pycompat.fsencode(f.path) for f in self.git.index)
--- a/hgext/git/gitlog.py Sun May 02 16:56:20 2021 -0400
+++ b/hgext/git/gitlog.py Fri May 07 22:06:25 2021 -0400
@@ -5,11 +5,8 @@
from mercurial.node import (
bin,
hex,
- nullhex,
- nullid,
nullrev,
sha1nodeconstants,
- wdirhex,
)
from mercurial import (
ancestor,
@@ -47,7 +44,7 @@
)
def rev(self, n):
- if n == nullid:
+ if n == sha1nodeconstants.nullid:
return -1
t = self._db.execute(
'SELECT rev FROM changelog WHERE node = ?', (gitutil.togitnode(n),)
@@ -58,7 +55,7 @@
def node(self, r):
if r == nullrev:
- return nullid
+ return sha1nodeconstants.nullid
t = self._db.execute(
'SELECT node FROM changelog WHERE rev = ?', (r,)
).fetchone()
@@ -135,7 +132,7 @@
bin(v[0]): v[1]
for v in self._db.execute('SELECT node, rev FROM changelog')
}
- r[nullid] = nullrev
+ r[sha1nodeconstants.nullid] = nullrev
return r
def tip(self):
@@ -144,7 +141,7 @@
).fetchone()
if t:
return bin(t[0])
- return nullid
+ return sha1nodeconstants.nullid
def revs(self, start=0, stop=None):
if stop is None:
@@ -167,7 +164,7 @@
return -1
def _partialmatch(self, id):
- if wdirhex.startswith(id):
+ if sha1nodeconstants.wdirhex.startswith(id):
raise error.WdirUnsupported
candidates = [
bin(x[0])
@@ -176,8 +173,8 @@
(pycompat.sysstr(id + b'%'),),
)
]
- if nullhex.startswith(id):
- candidates.append(nullid)
+ if sha1nodeconstants.nullhex.startswith(id):
+ candidates.append(sha1nodeconstants.nullid)
if len(candidates) > 1:
raise error.AmbiguousPrefixLookupError(
id, b'00changelog.i', _(b'ambiguous identifier')
@@ -223,8 +220,10 @@
n = nodeorrev
extra = {b'branch': b'default'}
# handle looking up nullid
- if n == nullid:
- return hgchangelog._changelogrevision(extra=extra, manifest=nullid)
+ if n == sha1nodeconstants.nullid:
+ return hgchangelog._changelogrevision(
+ extra=extra, manifest=sha1nodeconstants.nullid
+ )
hn = gitutil.togitnode(n)
# We've got a real commit!
files = [
@@ -301,7 +300,7 @@
not supplied, uses all of the revlog's heads. If common is not
supplied, uses nullid."""
if common is None:
- common = [nullid]
+ common = [sha1nodeconstants.nullid]
if heads is None:
heads = self.heads()
@@ -400,9 +399,9 @@
):
parents = []
hp1, hp2 = gitutil.togitnode(p1), gitutil.togitnode(p2)
- if p1 != nullid:
+ if p1 != sha1nodeconstants.nullid:
parents.append(hp1)
- if p2 and p2 != nullid:
+ if p2 and p2 != sha1nodeconstants.nullid:
parents.append(hp2)
assert date is not None
timestamp, tz = date
@@ -435,7 +434,7 @@
return self.get(b'', node)
def get(self, relpath, node):
- if node == nullid:
+ if node == sha1nodeconstants.nullid:
# TODO: this should almost certainly be a memgittreemanifestctx
return manifest.memtreemanifestctx(self, relpath)
commit = self.gitrepo[gitutil.togitnode(node)]
@@ -454,9 +453,10 @@
super(filelog, self).__init__(gr, db)
assert isinstance(path, bytes)
self.path = path
+ self.nullid = sha1nodeconstants.nullid
def read(self, node):
- if node == nullid:
+ if node == sha1nodeconstants.nullid:
return b''
return self.gitrepo[gitutil.togitnode(node)].data
--- a/hgext/git/gitutil.py Sun May 02 16:56:20 2021 -0400
+++ b/hgext/git/gitutil.py Fri May 07 22:06:25 2021 -0400
@@ -1,7 +1,7 @@
"""utilities to assist in working with pygit2"""
from __future__ import absolute_import
-from mercurial.node import bin, hex, nullid
+from mercurial.node import bin, hex, sha1nodeconstants
from mercurial import pycompat
@@ -50,4 +50,4 @@
return bin(n)
-nullgit = togitnode(nullid)
+nullgit = togitnode(sha1nodeconstants.nullid)
--- a/hgext/git/index.py Sun May 02 16:56:20 2021 -0400
+++ b/hgext/git/index.py Fri May 07 22:06:25 2021 -0400
@@ -5,9 +5,7 @@
import sqlite3
from mercurial.i18n import _
-from mercurial.node import (
- nullid,
-)
+from mercurial.node import sha1nodeconstants
from mercurial import (
encoding,
@@ -317,7 +315,9 @@
)
new_files = (p.delta.new_file for p in patchgen)
files = {
- nf.path: nf.id.hex for nf in new_files if nf.id.raw != nullid
+ nf.path: nf.id.hex
+ for nf in new_files
+ if nf.id.raw != sha1nodeconstants.nullid
}
for p, n in files.items():
# We intentionally set NULLs for any file parentage
--- a/hgext/gpg.py Sun May 02 16:56:20 2021 -0400
+++ b/hgext/gpg.py Fri May 07 22:06:25 2021 -0400
@@ -14,7 +14,6 @@
from mercurial.node import (
bin,
hex,
- nullid,
short,
)
from mercurial import (
@@ -314,7 +313,9 @@
if revs:
nodes = [repo.lookup(n) for n in revs]
else:
- nodes = [node for node in repo.dirstate.parents() if node != nullid]
+ nodes = [
+ node for node in repo.dirstate.parents() if node != repo.nullid
+ ]
if len(nodes) > 1:
raise error.Abort(
_(b'uncommitted merge - please provide a specific revision')
--- a/hgext/hgk.py Sun May 02 16:56:20 2021 -0400
+++ b/hgext/hgk.py Fri May 07 22:06:25 2021 -0400
@@ -40,7 +40,6 @@
from mercurial.i18n import _
from mercurial.node import (
- nullid,
nullrev,
short,
)
@@ -95,7 +94,7 @@
mmap2 = repo[node2].manifest()
m = scmutil.match(repo[node1], files)
st = repo.status(node1, node2, m)
- empty = short(nullid)
+ empty = short(repo.nullid)
for f in st.modified:
# TODO get file permissions
@@ -317,9 +316,9 @@
parentstr = b""
if parents:
pp = repo.changelog.parents(n)
- if pp[0] != nullid:
+ if pp[0] != repo.nullid:
parentstr += b" " + short(pp[0])
- if pp[1] != nullid:
+ if pp[1] != repo.nullid:
parentstr += b" " + short(pp[1])
if not full:
ui.write(b"%s%s\n" % (short(n), parentstr))
--- a/hgext/journal.py Sun May 02 16:56:20 2021 -0400
+++ b/hgext/journal.py Fri May 07 22:06:25 2021 -0400
@@ -22,7 +22,6 @@
from mercurial.node import (
bin,
hex,
- nullid,
)
from mercurial import (
@@ -117,8 +116,8 @@
new = list(new)
if util.safehasattr(dirstate, 'journalstorage'):
# only record two hashes if there was a merge
- oldhashes = old[:1] if old[1] == nullid else old
- newhashes = new[:1] if new[1] == nullid else new
+ oldhashes = old[:1] if old[1] == dirstate._nodeconstants.nullid else old
+ newhashes = new[:1] if new[1] == dirstate._nodeconstants.nullid else new
dirstate.journalstorage.record(
wdirparenttype, b'.', oldhashes, newhashes
)
@@ -131,7 +130,7 @@
if util.safehasattr(repo, 'journal'):
oldmarks = bookmarks.bmstore(repo)
for mark, value in pycompat.iteritems(store):
- oldvalue = oldmarks.get(mark, nullid)
+ oldvalue = oldmarks.get(mark, repo.nullid)
if value != oldvalue:
repo.journal.record(bookmarktype, mark, oldvalue, value)
return orig(store, fp)
--- a/hgext/largefiles/basestore.py Sun May 02 16:56:20 2021 -0400
+++ b/hgext/largefiles/basestore.py Fri May 07 22:06:25 2021 -0400
@@ -11,7 +11,8 @@
from mercurial.i18n import _
-from mercurial import node, util
+from mercurial.node import short
+from mercurial import util
from mercurial.utils import (
urlutil,
)
@@ -137,7 +138,7 @@
filestocheck = [] # list of (cset, filename, expectedhash)
for rev in revs:
cctx = self.repo[rev]
- cset = b"%d:%s" % (cctx.rev(), node.short(cctx.node()))
+ cset = b"%d:%s" % (cctx.rev(), short(cctx.node()))
for standin in cctx:
filename = lfutil.splitstandin(standin)
--- a/hgext/largefiles/lfcommands.py Sun May 02 16:56:20 2021 -0400
+++ b/hgext/largefiles/lfcommands.py Fri May 07 22:06:25 2021 -0400
@@ -17,7 +17,6 @@
from mercurial.node import (
bin,
hex,
- nullid,
)
from mercurial import (
@@ -115,7 +114,7 @@
rsrc[ctx]
for ctx in rsrc.changelog.nodesbetween(None, rsrc.heads())[0]
)
- revmap = {nullid: nullid}
+ revmap = {rsrc.nullid: rdst.nullid}
if tolfile:
# Lock destination to prevent modification while it is converted to.
# Don't need to lock src because we are just reading from its
@@ -340,7 +339,7 @@
# Generate list of changed files
def _getchangedfiles(ctx, parents):
files = set(ctx.files())
- if nullid not in parents:
+ if ctx.repo().nullid not in parents:
mc = ctx.manifest()
for pctx in ctx.parents():
for fn in pctx.manifest().diff(mc):
@@ -354,7 +353,7 @@
for p in ctx.parents():
parents.append(revmap[p.node()])
while len(parents) < 2:
- parents.append(nullid)
+ parents.append(ctx.repo().nullid)
return parents
--- a/hgext/largefiles/lfutil.py Sun May 02 16:56:20 2021 -0400
+++ b/hgext/largefiles/lfutil.py Fri May 07 22:06:25 2021 -0400
@@ -15,10 +15,7 @@
import stat
from mercurial.i18n import _
-from mercurial.node import (
- hex,
- nullid,
-)
+from mercurial.node import hex
from mercurial.pycompat import open
from mercurial import (
@@ -613,7 +610,7 @@
) as progress:
for i, n in enumerate(missing):
progress.update(i)
- parents = [p for p in repo[n].parents() if p != nullid]
+ parents = [p for p in repo[n].parents() if p != repo.nullid]
with lfstatus(repo, value=False):
ctx = repo[n]
--- a/hgext/lfs/wrapper.py Sun May 02 16:56:20 2021 -0400
+++ b/hgext/lfs/wrapper.py Fri May 07 22:06:25 2021 -0400
@@ -10,7 +10,7 @@
import hashlib
from mercurial.i18n import _
-from mercurial.node import bin, hex, nullid, short
+from mercurial.node import bin, hex, short
from mercurial.pycompat import (
getattr,
setattr,
@@ -158,7 +158,7 @@
rev = rlog.rev(node)
else:
node = rlog.node(rev)
- if node == nullid:
+ if node == rlog.nullid:
return False
flags = rlog.flags(rev)
return bool(flags & revlog.REVIDX_EXTSTORED)
--- a/hgext/mq.py Sun May 02 16:56:20 2021 -0400
+++ b/hgext/mq.py Fri May 07 22:06:25 2021 -0400
@@ -73,7 +73,6 @@
from mercurial.node import (
bin,
hex,
- nullid,
nullrev,
short,
)
@@ -908,13 +907,13 @@
"""
if rev is None:
(p1, p2) = repo.dirstate.parents()
- if p2 == nullid:
+ if p2 == repo.nullid:
return p1
if not self.applied:
return None
return self.applied[-1].node
p1, p2 = repo.changelog.parents(rev)
- if p2 != nullid and p2 in [x.node for x in self.applied]:
+ if p2 != repo.nullid and p2 in [x.node for x in self.applied]:
return p2
return p1
@@ -1591,7 +1590,7 @@
for hs in repo.branchmap().iterheads():
heads.extend(hs)
if not heads:
- heads = [nullid]
+ heads = [repo.nullid]
if repo.dirstate.p1() not in heads and not exact:
self.ui.status(_(b"(working directory not at a head)\n"))
@@ -1857,7 +1856,7 @@
fctx = ctx[f]
repo.wwrite(f, fctx.data(), fctx.flags())
repo.dirstate.normal(f)
- repo.setparents(qp, nullid)
+ repo.setparents(qp, repo.nullid)
for patch in reversed(self.applied[start:end]):
self.ui.status(_(b"popping %s\n") % patch.name)
del self.applied[start:end]
--- a/hgext/narrow/narrowbundle2.py Sun May 02 16:56:20 2021 -0400
+++ b/hgext/narrow/narrowbundle2.py Fri May 07 22:06:25 2021 -0400
@@ -11,7 +11,6 @@
import struct
from mercurial.i18n import _
-from mercurial.node import nullid
from mercurial import (
bundle2,
changegroup,
@@ -94,7 +93,7 @@
raise error.Abort(_(b'depth must be positive, got %d') % depth)
heads = set(heads or repo.heads())
- common = set(common or [nullid])
+ common = set(common or [repo.nullid])
visitnodes, relevant_nodes, ellipsisroots = exchange._computeellipsis(
repo, common, heads, set(), match, depth=depth
@@ -128,7 +127,7 @@
common,
known,
):
- common = set(common or [nullid])
+ common = set(common or [repo.nullid])
# Steps:
# 1. Send kill for "$known & ::common"
#
--- a/hgext/narrow/narrowcommands.py Sun May 02 16:56:20 2021 -0400
+++ b/hgext/narrow/narrowcommands.py Fri May 07 22:06:25 2021 -0400
@@ -12,7 +12,6 @@
from mercurial.i18n import _
from mercurial.node import (
hex,
- nullid,
short,
)
from mercurial import (
@@ -193,7 +192,7 @@
kwargs[b'known'] = [
hex(ctx.node())
for ctx in repo.set(b'::%ln', pullop.common)
- if ctx.node() != nullid
+ if ctx.node() != repo.nullid
]
if not kwargs[b'known']:
# Mercurial serializes an empty list as '' and deserializes it as
@@ -228,10 +227,17 @@
unfi = repo.unfiltered()
outgoing = discovery.findcommonoutgoing(unfi, remote, commoninc=commoninc)
ui.status(_(b'looking for local changes to affected paths\n'))
+ progress = ui.makeprogress(
+ topic=_(b'changesets'),
+ unit=_(b'changesets'),
+ total=len(outgoing.missing) + len(outgoing.excluded),
+ )
localnodes = []
- for n in itertools.chain(outgoing.missing, outgoing.excluded):
- if any(oldmatch(f) and not newmatch(f) for f in unfi[n].files()):
- localnodes.append(n)
+ with progress:
+ for n in itertools.chain(outgoing.missing, outgoing.excluded):
+ progress.increment()
+ if any(oldmatch(f) and not newmatch(f) for f in unfi[n].files()):
+ localnodes.append(n)
revstostrip = unfi.revs(b'descendants(%ln)', localnodes)
hiddenrevs = repoview.filterrevs(repo, b'visible')
visibletostrip = list(
@@ -275,6 +281,10 @@
)
hg.clean(repo, urev)
overrides = {(b'devel', b'strip-obsmarkers'): False}
+ if backup:
+ ui.status(_(b'moving unwanted changesets to backup\n'))
+ else:
+ ui.status(_(b'deleting unwanted changesets\n'))
with ui.configoverride(overrides, b'narrow'):
repair.strip(ui, unfi, tostrip, topic=b'narrow', backup=backup)
@@ -310,6 +320,7 @@
util.unlinkpath(repo.svfs.join(f))
repo.store.markremoved(f)
+ ui.status(_(b'deleting unwanted files from working copy\n'))
narrowspec.updateworkingcopy(repo, assumeclean=True)
narrowspec.copytoworkingcopy(repo)
@@ -370,7 +381,7 @@
ds = repo.dirstate
p1, p2 = ds.p1(), ds.p2()
with ds.parentchange():
- ds.setparents(nullid, nullid)
+ ds.setparents(repo.nullid, repo.nullid)
if isoldellipses:
with wrappedextraprepare:
exchange.pull(repo, remote, heads=common)
@@ -380,7 +391,7 @@
known = [
ctx.node()
for ctx in repo.set(b'::%ln', common)
- if ctx.node() != nullid
+ if ctx.node() != repo.nullid
]
with remote.commandexecutor() as e:
bundle = e.callcommand(
--- a/hgext/phabricator.py Sun May 02 16:56:20 2021 -0400
+++ b/hgext/phabricator.py Fri May 07 22:06:25 2021 -0400
@@ -69,7 +69,7 @@
import re
import time
-from mercurial.node import bin, nullid, short
+from mercurial.node import bin, short
from mercurial.i18n import _
from mercurial.pycompat import getattr
from mercurial.thirdparty import attr
@@ -586,7 +586,7 @@
tags.tag(
repo,
tagname,
- nullid,
+ repo.nullid,
message=None,
user=None,
date=None,
@@ -1606,7 +1606,7 @@
tags.tag(
repo,
tagname,
- nullid,
+ repo.nullid,
message=None,
user=None,
date=None,
--- a/hgext/rebase.py Sun May 02 16:56:20 2021 -0400
+++ b/hgext/rebase.py Fri May 07 22:06:25 2021 -0400
@@ -446,8 +446,15 @@
rebaseset = set(destmap.keys())
rebaseset -= set(self.obsolete_with_successor_in_destination)
rebaseset -= self.obsolete_with_successor_in_rebase_set
+ # We have our own divergence-checking in the rebase extension
+ overrides = {}
+ if obsolete.isenabled(self.repo, obsolete.createmarkersopt):
+ overrides = {
+ (b'experimental', b'evolution.allowdivergence'): b'true'
+ }
try:
- rewriteutil.precheck(self.repo, rebaseset, action=b'rebase')
+ with self.ui.configoverride(overrides):
+ rewriteutil.precheck(self.repo, rebaseset, action=b'rebase')
except error.Abort as e:
if e.hint is None:
e.hint = _(b'use --keep to keep original changesets')
--- a/hgext/remotefilelog/contentstore.py Sun May 02 16:56:20 2021 -0400
+++ b/hgext/remotefilelog/contentstore.py Fri May 07 22:06:25 2021 -0400
@@ -2,7 +2,10 @@
import threading
-from mercurial.node import hex, nullid
+from mercurial.node import (
+ hex,
+ sha1nodeconstants,
+)
from mercurial.pycompat import getattr
from mercurial import (
mdiff,
@@ -55,7 +58,7 @@
"""
chain = self.getdeltachain(name, node)
- if chain[-1][ChainIndicies.BASENODE] != nullid:
+ if chain[-1][ChainIndicies.BASENODE] != sha1nodeconstants.nullid:
# If we didn't receive a full chain, throw
raise KeyError((name, hex(node)))
@@ -92,7 +95,7 @@
deltabasenode.
"""
chain = self._getpartialchain(name, node)
- while chain[-1][ChainIndicies.BASENODE] != nullid:
+ while chain[-1][ChainIndicies.BASENODE] != sha1nodeconstants.nullid:
x, x, deltabasename, deltabasenode, x = chain[-1]
try:
morechain = self._getpartialchain(deltabasename, deltabasenode)
@@ -187,7 +190,12 @@
# Since remotefilelog content stores only contain full texts, just
# return that.
revision = self.get(name, node)
- return revision, name, nullid, self.getmeta(name, node)
+ return (
+ revision,
+ name,
+ sha1nodeconstants.nullid,
+ self.getmeta(name, node),
+ )
def getdeltachain(self, name, node):
# Since remotefilelog content stores just contain full texts, we return
@@ -195,7 +203,7 @@
# The nullid in the deltabasenode slot indicates that the revision is a
# fulltext.
revision = self.get(name, node)
- return [(name, node, None, nullid, revision)]
+ return [(name, node, None, sha1nodeconstants.nullid, revision)]
def getmeta(self, name, node):
self._sanitizemetacache()
@@ -237,7 +245,12 @@
def getdelta(self, name, node):
revision = self.get(name, node)
- return revision, name, nullid, self._shared.getmeta(name, node)
+ return (
+ revision,
+ name,
+ sha1nodeconstants.nullid,
+ self._shared.getmeta(name, node),
+ )
def getdeltachain(self, name, node):
# Since our remote content stores just contain full texts, we return a
@@ -245,7 +258,7 @@
# The nullid in the deltabasenode slot indicates that the revision is a
# fulltext.
revision = self.get(name, node)
- return [(name, node, None, nullid, revision)]
+ return [(name, node, None, sha1nodeconstants.nullid, revision)]
def getmeta(self, name, node):
self._fileservice.prefetch(
@@ -276,11 +289,11 @@
def getdelta(self, name, node):
revision = self.get(name, node)
- return revision, name, nullid, self.getmeta(name, node)
+ return revision, name, self._cl.nullid, self.getmeta(name, node)
def getdeltachain(self, name, node):
revision = self.get(name, node)
- return [(name, node, None, nullid, revision)]
+ return [(name, node, None, self._cl.nullid, revision)]
def getmeta(self, name, node):
rl = self._revlog(name)
@@ -304,9 +317,9 @@
missing.discard(ancnode)
p1, p2 = rl.parents(ancnode)
- if p1 != nullid and p1 not in known:
+ if p1 != self._cl.nullid and p1 not in known:
missing.add(p1)
- if p2 != nullid and p2 not in known:
+ if p2 != self._cl.nullid and p2 not in known:
missing.add(p2)
linknode = self._cl.node(rl.linkrev(ancrev))
--- a/hgext/remotefilelog/datapack.py Sun May 02 16:56:20 2021 -0400
+++ b/hgext/remotefilelog/datapack.py Fri May 07 22:06:25 2021 -0400
@@ -3,7 +3,10 @@
import struct
import zlib
-from mercurial.node import hex, nullid
+from mercurial.node import (
+ hex,
+ sha1nodeconstants,
+)
from mercurial.i18n import _
from mercurial import (
pycompat,
@@ -458,7 +461,7 @@
rawindex = b''
fmt = self.INDEXFORMAT
for node, deltabase, offset, size in entries:
- if deltabase == nullid:
+ if deltabase == sha1nodeconstants.nullid:
deltabaselocation = FULLTEXTINDEXMARK
else:
# Instead of storing the deltabase node in the index, let's
--- a/hgext/remotefilelog/debugcommands.py Sun May 02 16:56:20 2021 -0400
+++ b/hgext/remotefilelog/debugcommands.py Fri May 07 22:06:25 2021 -0400
@@ -12,7 +12,7 @@
from mercurial.node import (
bin,
hex,
- nullid,
+ sha1nodeconstants,
short,
)
from mercurial.i18n import _
@@ -57,9 +57,9 @@
_(b"%s => %s %s %s %s\n")
% (short(node), short(p1), short(p2), short(linknode), copyfrom)
)
- if p1 != nullid:
+ if p1 != sha1nodeconstants.nullid:
queue.append(p1)
- if p2 != nullid:
+ if p2 != sha1nodeconstants.nullid:
queue.append(p2)
@@ -152,7 +152,7 @@
try:
pp = r.parents(node)
except Exception:
- pp = [nullid, nullid]
+ pp = [repo.nullid, repo.nullid]
ui.write(
b"% 6d % 9d % 7d % 6d % 7d %s %s %s\n"
% (
@@ -197,7 +197,7 @@
node = r.node(i)
pp = r.parents(node)
ui.write(b"\t%d -> %d\n" % (r.rev(pp[0]), i))
- if pp[1] != nullid:
+ if pp[1] != repo.nullid:
ui.write(b"\t%d -> %d\n" % (r.rev(pp[1]), i))
ui.write(b"}\n")
@@ -212,7 +212,7 @@
filepath = os.path.join(root, file)
size, firstnode, mapping = parsefileblob(filepath, decompress)
for p1, p2, linknode, copyfrom in pycompat.itervalues(mapping):
- if linknode == nullid:
+ if linknode == sha1nodeconstants.nullid:
actualpath = os.path.relpath(root, path)
key = fileserverclient.getcachekey(
b"reponame", actualpath, file
@@ -371,7 +371,7 @@
current = node
deltabase = bases[current]
- while deltabase != nullid:
+ while deltabase != sha1nodeconstants.nullid:
if deltabase not in nodes:
ui.warn(
(
@@ -397,7 +397,7 @@
deltabase = bases[current]
# Since ``node`` begins a valid chain, reset/memoize its base to nullid
# so we don't traverse it again.
- bases[node] = nullid
+ bases[node] = sha1nodeconstants.nullid
return failures
--- a/hgext/remotefilelog/fileserverclient.py Sun May 02 16:56:20 2021 -0400
+++ b/hgext/remotefilelog/fileserverclient.py Fri May 07 22:06:25 2021 -0400
@@ -14,7 +14,7 @@
import zlib
from mercurial.i18n import _
-from mercurial.node import bin, hex, nullid
+from mercurial.node import bin, hex
from mercurial import (
error,
pycompat,
@@ -272,7 +272,7 @@
def _getfiles_threaded(
remote, receivemissing, progresstick, missed, idmap, step
):
- remote._callstream(b"getfiles")
+ remote._callstream(b"x_rfl_getfiles")
pipeo = remote._pipeo
pipei = remote._pipei
@@ -599,9 +599,13 @@
# partition missing nodes into nullid and not-nullid so we can
# warn about this filtering potentially shadowing bugs.
- nullids = len([None for unused, id in missingids if id == nullid])
+ nullids = len(
+ [None for unused, id in missingids if id == self.repo.nullid]
+ )
if nullids:
- missingids = [(f, id) for f, id in missingids if id != nullid]
+ missingids = [
+ (f, id) for f, id in missingids if id != self.repo.nullid
+ ]
repo.ui.develwarn(
(
b'remotefilelog not fetching %d null revs'
--- a/hgext/remotefilelog/historypack.py Sun May 02 16:56:20 2021 -0400
+++ b/hgext/remotefilelog/historypack.py Fri May 07 22:06:25 2021 -0400
@@ -2,7 +2,10 @@
import struct
-from mercurial.node import hex, nullid
+from mercurial.node import (
+ hex,
+ sha1nodeconstants,
+)
from mercurial import (
pycompat,
util,
@@ -147,9 +150,9 @@
pending.remove(ancnode)
p1node = entry[ANC_P1NODE]
p2node = entry[ANC_P2NODE]
- if p1node != nullid and p1node not in known:
+ if p1node != sha1nodeconstants.nullid and p1node not in known:
pending.add(p1node)
- if p2node != nullid and p2node not in known:
+ if p2node != sha1nodeconstants.nullid and p2node not in known:
pending.add(p2node)
yield (ancnode, p1node, p2node, entry[ANC_LINKNODE], copyfrom)
@@ -457,9 +460,9 @@
def parentfunc(node):
x, p1, p2, x, x, x = entrymap[node]
parents = []
- if p1 != nullid:
+ if p1 != sha1nodeconstants.nullid:
parents.append(p1)
- if p2 != nullid:
+ if p2 != sha1nodeconstants.nullid:
parents.append(p2)
return parents
--- a/hgext/remotefilelog/metadatastore.py Sun May 02 16:56:20 2021 -0400
+++ b/hgext/remotefilelog/metadatastore.py Fri May 07 22:06:25 2021 -0400
@@ -1,6 +1,9 @@
from __future__ import absolute_import
-from mercurial.node import hex, nullid
+from mercurial.node import (
+ hex,
+ sha1nodeconstants,
+)
from . import (
basestore,
shallowutil,
@@ -51,9 +54,9 @@
missing.append((name, node))
continue
p1, p2, linknode, copyfrom = value
- if p1 != nullid and p1 not in known:
+ if p1 != sha1nodeconstants.nullid and p1 not in known:
queue.append((copyfrom or curname, p1))
- if p2 != nullid and p2 not in known:
+ if p2 != sha1nodeconstants.nullid and p2 not in known:
queue.append((curname, p2))
return missing
--- a/hgext/remotefilelog/remotefilectx.py Sun May 02 16:56:20 2021 -0400
+++ b/hgext/remotefilelog/remotefilectx.py Fri May 07 22:06:25 2021 -0400
@@ -9,7 +9,7 @@
import collections
import time
-from mercurial.node import bin, hex, nullid, nullrev
+from mercurial.node import bin, hex, nullrev
from mercurial import (
ancestor,
context,
@@ -35,7 +35,7 @@
ancestormap=None,
):
if fileid == nullrev:
- fileid = nullid
+ fileid = repo.nullid
if fileid and len(fileid) == 40:
fileid = bin(fileid)
super(remotefilectx, self).__init__(
@@ -78,7 +78,7 @@
@propertycache
def _linkrev(self):
- if self._filenode == nullid:
+ if self._filenode == self._repo.nullid:
return nullrev
ancestormap = self.ancestormap()
@@ -174,7 +174,7 @@
p1, p2, linknode, copyfrom = ancestormap[self._filenode]
results = []
- if p1 != nullid:
+ if p1 != repo.nullid:
path = copyfrom or self._path
flog = repo.file(path)
p1ctx = remotefilectx(
@@ -183,7 +183,7 @@
p1ctx._descendantrev = self.rev()
results.append(p1ctx)
- if p2 != nullid:
+ if p2 != repo.nullid:
path = self._path
flog = repo.file(path)
p2ctx = remotefilectx(
@@ -504,25 +504,25 @@
if renamed:
p1 = renamed
else:
- p1 = (path, pcl[0]._manifest.get(path, nullid))
+ p1 = (path, pcl[0]._manifest.get(path, self._repo.nullid))
- p2 = (path, nullid)
+ p2 = (path, self._repo.nullid)
if len(pcl) > 1:
- p2 = (path, pcl[1]._manifest.get(path, nullid))
+ p2 = (path, pcl[1]._manifest.get(path, self._repo.nullid))
m = {}
- if p1[1] != nullid:
+ if p1[1] != self._repo.nullid:
p1ctx = self._repo.filectx(p1[0], fileid=p1[1])
m.update(p1ctx.filelog().ancestormap(p1[1]))
- if p2[1] != nullid:
+ if p2[1] != self._repo.nullid:
p2ctx = self._repo.filectx(p2[0], fileid=p2[1])
m.update(p2ctx.filelog().ancestormap(p2[1]))
copyfrom = b''
if renamed:
copyfrom = renamed[0]
- m[None] = (p1[1], p2[1], nullid, copyfrom)
+ m[None] = (p1[1], p2[1], self._repo.nullid, copyfrom)
self._ancestormap = m
return self._ancestormap
--- a/hgext/remotefilelog/remotefilelog.py Sun May 02 16:56:20 2021 -0400
+++ b/hgext/remotefilelog/remotefilelog.py Fri May 07 22:06:25 2021 -0400
@@ -10,12 +10,7 @@
import collections
import os
-from mercurial.node import (
- bin,
- nullid,
- wdirfilenodeids,
- wdirid,
-)
+from mercurial.node import bin
from mercurial.i18n import _
from mercurial import (
ancestor,
@@ -100,7 +95,7 @@
pancestors = {}
queue = []
- if realp1 != nullid:
+ if realp1 != self.repo.nullid:
p1flog = self
if copyfrom:
p1flog = remotefilelog(self.opener, copyfrom, self.repo)
@@ -108,7 +103,7 @@
pancestors.update(p1flog.ancestormap(realp1))
queue.append(realp1)
visited.add(realp1)
- if p2 != nullid:
+ if p2 != self.repo.nullid:
pancestors.update(self.ancestormap(p2))
queue.append(p2)
visited.add(p2)
@@ -129,10 +124,10 @@
pacopyfrom,
)
- if pa1 != nullid and pa1 not in visited:
+ if pa1 != self.repo.nullid and pa1 not in visited:
queue.append(pa1)
visited.add(pa1)
- if pa2 != nullid and pa2 not in visited:
+ if pa2 != self.repo.nullid and pa2 not in visited:
queue.append(pa2)
visited.add(pa2)
@@ -238,7 +233,7 @@
returns True if text is different than what is stored.
"""
- if node == nullid:
+ if node == self.repo.nullid:
return True
nodetext = self.read(node)
@@ -275,13 +270,13 @@
return store.getmeta(self.filename, node).get(constants.METAKEYFLAG, 0)
def parents(self, node):
- if node == nullid:
- return nullid, nullid
+ if node == self.repo.nullid:
+ return self.repo.nullid, self.repo.nullid
ancestormap = self.repo.metadatastore.getancestors(self.filename, node)
p1, p2, linknode, copyfrom = ancestormap[node]
if copyfrom:
- p1 = nullid
+ p1 = self.repo.nullid
return p1, p2
@@ -317,8 +312,8 @@
if prevnode is None:
basenode = prevnode = p1
if basenode == node:
- basenode = nullid
- if basenode != nullid:
+ basenode = self.repo.nullid
+ if basenode != self.repo.nullid:
revision = None
delta = self.revdiff(basenode, node)
else:
@@ -336,6 +331,8 @@
delta=delta,
# Sidedata is not supported yet
sidedata=None,
+ # Protocol flags are not used yet
+ protocol_flags=0,
)
def revdiff(self, node1, node2):
@@ -380,13 +377,16 @@
this is generally only used for bundling and communicating with vanilla
hg clients.
"""
- if node == nullid:
+ if node == self.repo.nullid:
return b""
if len(node) != 20:
raise error.LookupError(
node, self.filename, _(b'invalid revision input')
)
- if node == wdirid or node in wdirfilenodeids:
+ if (
+ node == self.repo.nodeconstants.wdirid
+ or node in self.repo.nodeconstants.wdirfilenodeids
+ ):
raise error.WdirUnsupported
store = self.repo.contentstore
@@ -432,8 +432,8 @@
return self.repo.metadatastore.getancestors(self.filename, node)
def ancestor(self, a, b):
- if a == nullid or b == nullid:
- return nullid
+ if a == self.repo.nullid or b == self.repo.nullid:
+ return self.repo.nullid
revmap, parentfunc = self._buildrevgraph(a, b)
nodemap = {v: k for (k, v) in pycompat.iteritems(revmap)}
@@ -442,13 +442,13 @@
if ancs:
# choose a consistent winner when there's a tie
return min(map(nodemap.__getitem__, ancs))
- return nullid
+ return self.repo.nullid
def commonancestorsheads(self, a, b):
"""calculate all the heads of the common ancestors of nodes a and b"""
- if a == nullid or b == nullid:
- return nullid
+ if a == self.repo.nullid or b == self.repo.nullid:
+ return self.repo.nullid
revmap, parentfunc = self._buildrevgraph(a, b)
nodemap = {v: k for (k, v) in pycompat.iteritems(revmap)}
@@ -472,10 +472,10 @@
p1, p2, linknode, copyfrom = pdata
# Don't follow renames (copyfrom).
# remotefilectx.ancestor does that.
- if p1 != nullid and not copyfrom:
+ if p1 != self.repo.nullid and not copyfrom:
parents.append(p1)
allparents.add(p1)
- if p2 != nullid:
+ if p2 != self.repo.nullid:
parents.append(p2)
allparents.add(p2)
--- a/hgext/remotefilelog/remotefilelogserver.py Sun May 02 16:56:20 2021 -0400
+++ b/hgext/remotefilelog/remotefilelogserver.py Fri May 07 22:06:25 2021 -0400
@@ -13,7 +13,7 @@
import zlib
from mercurial.i18n import _
-from mercurial.node import bin, hex, nullid
+from mercurial.node import bin, hex
from mercurial.pycompat import open
from mercurial import (
changegroup,
@@ -242,7 +242,7 @@
filecachepath = os.path.join(cachepath, path, hex(node))
if not os.path.exists(filecachepath) or os.path.getsize(filecachepath) == 0:
filectx = repo.filectx(path, fileid=node)
- if filectx.node() == nullid:
+ if filectx.node() == repo.nullid:
repo.changelog = changelog.changelog(repo.svfs)
filectx = repo.filectx(path, fileid=node)
@@ -284,7 +284,7 @@
"""A server api for requesting a filelog's heads"""
flog = repo.file(path)
heads = flog.heads()
- return b'\n'.join((hex(head) for head in heads if head != nullid))
+ return b'\n'.join((hex(head) for head in heads if head != repo.nullid))
def getfile(repo, proto, file, node):
@@ -302,7 +302,7 @@
if not cachepath:
cachepath = os.path.join(repo.path, b"remotefilelogcache")
node = bin(node.strip())
- if node == nullid:
+ if node == repo.nullid:
return b'0\0'
return b'0\0' + _loadfileblob(repo, cachepath, file, node)
@@ -327,7 +327,7 @@
break
node = bin(request[:40])
- if node == nullid:
+ if node == repo.nullid:
yield b'0\n'
continue
@@ -380,8 +380,8 @@
ancestortext = b""
for ancestorctx in ancestors:
parents = ancestorctx.parents()
- p1 = nullid
- p2 = nullid
+ p1 = repo.nullid
+ p2 = repo.nullid
if len(parents) > 0:
p1 = parents[0].filenode()
if len(parents) > 1:
--- a/hgext/remotefilelog/repack.py Sun May 02 16:56:20 2021 -0400
+++ b/hgext/remotefilelog/repack.py Fri May 07 22:06:25 2021 -0400
@@ -4,10 +4,7 @@
import time
from mercurial.i18n import _
-from mercurial.node import (
- nullid,
- short,
-)
+from mercurial.node import short
from mercurial import (
encoding,
error,
@@ -586,7 +583,7 @@
# Create one contiguous chain and reassign deltabases.
for i, node in enumerate(orphans):
if i == 0:
- deltabases[node] = (nullid, 0)
+ deltabases[node] = (self.repo.nullid, 0)
else:
parent = orphans[i - 1]
deltabases[node] = (parent, deltabases[parent][1] + 1)
@@ -676,8 +673,8 @@
# of immediate child
deltatuple = deltabases.get(node, None)
if deltatuple is None:
- deltabase, chainlen = nullid, 0
- deltabases[node] = (nullid, 0)
+ deltabase, chainlen = self.repo.nullid, 0
+ deltabases[node] = (self.repo.nullid, 0)
nobase.add(node)
else:
deltabase, chainlen = deltatuple
@@ -692,7 +689,7 @@
# file was copied from elsewhere. So don't attempt to do any
# deltas with the other file.
if copyfrom:
- p1 = nullid
+ p1 = self.repo.nullid
if chainlen < maxchainlen:
# Record this child as the delta base for its parents.
@@ -700,9 +697,9 @@
# many children, and this will only choose the last one.
# TODO: record all children and try all deltas to find
# best
- if p1 != nullid:
+ if p1 != self.repo.nullid:
deltabases[p1] = (node, chainlen + 1)
- if p2 != nullid:
+ if p2 != self.repo.nullid:
deltabases[p2] = (node, chainlen + 1)
# experimental config: repack.chainorphansbysize
@@ -719,7 +716,7 @@
# TODO: Optimize the deltachain fetching. Since we're
                 # iterating over the different versions of the file, we may
# be fetching the same deltachain over and over again.
- if deltabase != nullid:
+ if deltabase != self.repo.nullid:
deltaentry = self.data.getdelta(filename, node)
delta, deltabasename, origdeltabase, meta = deltaentry
size = meta.get(constants.METAKEYSIZE)
@@ -791,9 +788,9 @@
# If copyfrom == filename, it means the copy history
                     # went to some other file, then came back to this one, so we
# should continue processing it.
- if p1 != nullid and copyfrom != filename:
+ if p1 != self.repo.nullid and copyfrom != filename:
dontprocess.add(p1)
- if p2 != nullid:
+ if p2 != self.repo.nullid:
dontprocess.add(p2)
continue
@@ -814,9 +811,9 @@
def parentfunc(node):
p1, p2, linknode, copyfrom = ancestors[node]
parents = []
- if p1 != nullid:
+ if p1 != self.repo.nullid:
parents.append(p1)
- if p2 != nullid:
+ if p2 != self.repo.nullid:
parents.append(p2)
return parents
--- a/hgext/remotefilelog/shallowbundle.py Sun May 02 16:56:20 2021 -0400
+++ b/hgext/remotefilelog/shallowbundle.py Fri May 07 22:06:25 2021 -0400
@@ -7,7 +7,7 @@
from __future__ import absolute_import
from mercurial.i18n import _
-from mercurial.node import bin, hex, nullid
+from mercurial.node import bin, hex
from mercurial import (
bundlerepo,
changegroup,
@@ -143,7 +143,7 @@
def nodechunk(self, revlog, node, prevnode, linknode):
prefix = b''
- if prevnode == nullid:
+ if prevnode == revlog.nullid:
delta = revlog.rawdata(node)
prefix = mdiff.trivialdiffheader(len(delta))
else:
@@ -245,7 +245,7 @@
processed = set()
def available(f, node, depf, depnode):
- if depnode != nullid and (depf, depnode) not in processed:
+ if depnode != repo.nullid and (depf, depnode) not in processed:
if not (depf, depnode) in revisiondatas:
# It's not in the changegroup, assume it's already
# in the repo
@@ -267,7 +267,7 @@
dependents = [revisiondata[1], revisiondata[2], revisiondata[4]]
for dependent in dependents:
- if dependent == nullid or (f, dependent) in revisiondatas:
+ if dependent == repo.nullid or (f, dependent) in revisiondatas:
continue
prefetchfiles.append((f, hex(dependent)))
@@ -306,7 +306,7 @@
continue
for p in [p1, p2]:
- if p != nullid:
+ if p != repo.nullid:
if not available(f, node, f, p):
continue
--- a/hgext/remotefilelog/shallowrepo.py Sun May 02 16:56:20 2021 -0400
+++ b/hgext/remotefilelog/shallowrepo.py Fri May 07 22:06:25 2021 -0400
@@ -9,7 +9,7 @@
import os
from mercurial.i18n import _
-from mercurial.node import hex, nullid, nullrev
+from mercurial.node import hex, nullrev
from mercurial import (
encoding,
error,
@@ -206,8 +206,8 @@
m1 = ctx.p1().manifest()
files = []
for f in ctx.modified() + ctx.added():
- fparent1 = m1.get(f, nullid)
- if fparent1 != nullid:
+ fparent1 = m1.get(f, self.nullid)
+ if fparent1 != self.nullid:
files.append((f, hex(fparent1)))
self.fileservice.prefetch(files)
return super(shallowrepository, self).commitctx(
--- a/hgext/sqlitestore.py Sun May 02 16:56:20 2021 -0400
+++ b/hgext/sqlitestore.py Fri May 07 22:06:25 2021 -0400
@@ -52,7 +52,6 @@
from mercurial.i18n import _
from mercurial.node import (
- nullid,
nullrev,
sha1nodeconstants,
short,
@@ -290,6 +289,7 @@
revision = attr.ib()
delta = attr.ib()
sidedata = attr.ib()
+ protocol_flags = attr.ib()
linknode = attr.ib(default=None)
@@ -366,12 +366,12 @@
)
if p1rev == nullrev:
- p1node = nullid
+ p1node = sha1nodeconstants.nullid
else:
p1node = self._revtonode[p1rev]
if p2rev == nullrev:
- p2node = nullid
+ p2node = sha1nodeconstants.nullid
else:
p2node = self._revtonode[p2rev]
@@ -400,7 +400,7 @@
return iter(pycompat.xrange(len(self._revisions)))
def hasnode(self, node):
- if node == nullid:
+ if node == sha1nodeconstants.nullid:
return False
return node in self._nodetorev
@@ -411,8 +411,8 @@
)
def parents(self, node):
- if node == nullid:
- return nullid, nullid
+ if node == sha1nodeconstants.nullid:
+ return sha1nodeconstants.nullid, sha1nodeconstants.nullid
if node not in self._revisions:
raise error.LookupError(node, self._path, _(b'no node'))
@@ -431,7 +431,7 @@
return entry.p1rev, entry.p2rev
def rev(self, node):
- if node == nullid:
+ if node == sha1nodeconstants.nullid:
return nullrev
if node not in self._nodetorev:
@@ -441,7 +441,7 @@
def node(self, rev):
if rev == nullrev:
- return nullid
+ return sha1nodeconstants.nullid
if rev not in self._revtonode:
raise IndexError(rev)
@@ -485,7 +485,7 @@
def heads(self, start=None, stop=None):
if start is None and stop is None:
if not len(self):
- return [nullid]
+ return [sha1nodeconstants.nullid]
startrev = self.rev(start) if start is not None else nullrev
stoprevs = {self.rev(n) for n in stop or []}
@@ -529,7 +529,7 @@
return len(self.revision(node))
def revision(self, node, raw=False, _verifyhash=True):
- if node in (nullid, nullrev):
+ if node in (sha1nodeconstants.nullid, nullrev):
return b''
if isinstance(node, int):
@@ -596,7 +596,7 @@
b'unhandled value for nodesorder: %s' % nodesorder
)
- nodes = [n for n in nodes if n != nullid]
+ nodes = [n for n in nodes if n != sha1nodeconstants.nullid]
if not nodes:
return
@@ -705,12 +705,12 @@
raise SQLiteStoreError(b'unhandled revision flag')
if maybemissingparents:
- if p1 != nullid and not self.hasnode(p1):
- p1 = nullid
+ if p1 != sha1nodeconstants.nullid and not self.hasnode(p1):
+ p1 = sha1nodeconstants.nullid
storeflags |= FLAG_MISSING_P1
- if p2 != nullid and not self.hasnode(p2):
- p2 = nullid
+ if p2 != sha1nodeconstants.nullid and not self.hasnode(p2):
+ p2 = sha1nodeconstants.nullid
storeflags |= FLAG_MISSING_P2
baserev = self.rev(deltabase)
@@ -736,7 +736,10 @@
# Possibly reset parents to make them proper.
entry = self._revisions[node]
- if entry.flags & FLAG_MISSING_P1 and p1 != nullid:
+ if (
+ entry.flags & FLAG_MISSING_P1
+ and p1 != sha1nodeconstants.nullid
+ ):
entry.p1node = p1
entry.p1rev = self._nodetorev[p1]
entry.flags &= ~FLAG_MISSING_P1
@@ -746,7 +749,10 @@
(self._nodetorev[p1], entry.flags, entry.rid),
)
- if entry.flags & FLAG_MISSING_P2 and p2 != nullid:
+ if (
+ entry.flags & FLAG_MISSING_P2
+ and p2 != sha1nodeconstants.nullid
+ ):
entry.p2node = p2
entry.p2rev = self._nodetorev[p2]
entry.flags &= ~FLAG_MISSING_P2
@@ -761,7 +767,7 @@
empty = False
continue
- if deltabase == nullid:
+ if deltabase == sha1nodeconstants.nullid:
text = mdiff.patch(b'', delta)
storedelta = None
else:
@@ -1012,7 +1018,7 @@
assert revisiondata is not None
deltabase = p1
- if deltabase == nullid:
+ if deltabase == sha1nodeconstants.nullid:
delta = revisiondata
else:
delta = mdiff.textdiff(
@@ -1021,7 +1027,7 @@
# File index stores a pointer to its delta and the parent delta.
# The parent delta is stored via a pointer to the fileindex PK.
- if deltabase == nullid:
+ if deltabase == sha1nodeconstants.nullid:
baseid = None
else:
baseid = self._revisions[deltabase].rid
@@ -1055,12 +1061,12 @@
rev = len(self)
- if p1 == nullid:
+ if p1 == sha1nodeconstants.nullid:
p1rev = nullrev
else:
p1rev = self._nodetorev[p1]
- if p2 == nullid:
+ if p2 == sha1nodeconstants.nullid:
p2rev = nullrev
else:
p2rev = self._nodetorev[p2]
--- a/hgext/transplant.py Sun May 02 16:56:20 2021 -0400
+++ b/hgext/transplant.py Fri May 07 22:06:25 2021 -0400
@@ -22,7 +22,6 @@
from mercurial.node import (
bin,
hex,
- nullid,
short,
)
from mercurial import (
@@ -134,6 +133,7 @@
class transplanter(object):
def __init__(self, ui, repo, opts):
self.ui = ui
+ self.repo = repo
self.path = repo.vfs.join(b'transplant')
self.opener = vfsmod.vfs(self.path)
self.transplants = transplants(
@@ -221,7 +221,7 @@
exchange.pull(repo, source.peer(), heads=[node])
skipmerge = False
- if parents[1] != nullid:
+ if parents[1] != repo.nullid:
if not opts.get(b'parent'):
self.ui.note(
_(b'skipping merge changeset %d:%s\n')
@@ -516,7 +516,7 @@
def parselog(self, fp):
parents = []
message = []
- node = nullid
+ node = self.repo.nullid
inmsg = False
user = None
date = None
@@ -568,7 +568,7 @@
def matchfn(node):
if self.applied(repo, node, root):
return False
- if source.changelog.parents(node)[1] != nullid:
+ if source.changelog.parents(node)[1] != repo.nullid:
return False
extra = source.changelog.read(node)[5]
cnode = extra.get(b'transplant_source')
@@ -804,7 +804,7 @@
tp = transplanter(ui, repo, opts)
p1 = repo.dirstate.p1()
- if len(repo) > 0 and p1 == nullid:
+ if len(repo) > 0 and p1 == repo.nullid:
raise error.Abort(_(b'no revision checked out'))
if opts.get(b'continue'):
if not tp.canresume():
--- a/hgext/uncommit.py Sun May 02 16:56:20 2021 -0400
+++ b/hgext/uncommit.py Fri May 07 22:06:25 2021 -0400
@@ -20,7 +20,6 @@
from __future__ import absolute_import
from mercurial.i18n import _
-from mercurial.node import nullid
from mercurial import (
cmdutil,
@@ -113,7 +112,7 @@
new = context.memctx(
repo,
- parents=[base.node(), nullid],
+ parents=[base.node(), repo.nullid],
text=message,
files=files,
filectxfn=filectxfn,
--- a/mercurial/bookmarks.py Sun May 02 16:56:20 2021 -0400
+++ b/mercurial/bookmarks.py Fri May 07 22:06:25 2021 -0400
@@ -15,7 +15,6 @@
bin,
hex,
short,
- wdirid,
)
from .pycompat import getattr
from . import (
@@ -642,7 +641,7 @@
binarydata = []
for book, node in bookmarks:
if not node: # None or ''
- node = wdirid
+ node = repo.nodeconstants.wdirid
binarydata.append(_binaryentry.pack(node, len(book)))
binarydata.append(book)
return b''.join(binarydata)
@@ -674,7 +673,7 @@
if len(bookmark) < length:
if entry:
raise error.Abort(_(b'bad bookmark stream'))
- if node == wdirid:
+ if node == repo.nodeconstants.wdirid:
node = None
books.append((bookmark, node))
return books
--- a/mercurial/branchmap.py Sun May 02 16:56:20 2021 -0400
+++ b/mercurial/branchmap.py Fri May 07 22:06:25 2021 -0400
@@ -12,7 +12,6 @@
from .node import (
bin,
hex,
- nullid,
nullrev,
)
from . import (
@@ -189,7 +188,7 @@
self,
repo,
entries=(),
- tipnode=nullid,
+ tipnode=None,
tiprev=nullrev,
filteredhash=None,
closednodes=None,
@@ -200,7 +199,10 @@
has a given node or not. If it's not provided, we assume that every node
we have exists in changelog"""
self._repo = repo
- self.tipnode = tipnode
+ if tipnode is None:
+ self.tipnode = repo.nullid
+ else:
+ self.tipnode = tipnode
self.tiprev = tiprev
self.filteredhash = filteredhash
# closednodes is a set of nodes that close their branch. If the branch
@@ -536,7 +538,7 @@
if not self.validfor(repo):
# cache key are not valid anymore
- self.tipnode = nullid
+ self.tipnode = repo.nullid
self.tiprev = nullrev
for heads in self.iterheads():
tiprev = max(cl.rev(node) for node in heads)
--- a/mercurial/bundle2.py Sun May 02 16:56:20 2021 -0400
+++ b/mercurial/bundle2.py Fri May 07 22:06:25 2021 -0400
@@ -158,7 +158,6 @@
from .i18n import _
from .node import (
hex,
- nullid,
short,
)
from . import (
@@ -2014,13 +2013,6 @@
)
scmutil.writereporequirements(op.repo)
- bundlesidedata = bool(b'exp-sidedata' in inpart.params)
- reposidedata = bool(b'exp-sidedata-flag' in op.repo.requirements)
- if reposidedata and not bundlesidedata:
- msg = b"repository is using sidedata but the bundle source do not"
- hint = b'this is currently unsupported'
- raise error.Abort(msg, hint=hint)
-
extrakwargs = {}
targetphase = inpart.params.get(b'targetphase')
if targetphase is not None:
@@ -2576,7 +2568,7 @@
fullnodes=commonnodes,
)
cgdata = packer.generate(
- {nullid},
+ {repo.nullid},
list(commonnodes),
False,
b'narrow_widen',
--- a/mercurial/bundlerepo.py Sun May 02 16:56:20 2021 -0400
+++ b/mercurial/bundlerepo.py Fri May 07 22:06:25 2021 -0400
@@ -19,7 +19,6 @@
from .i18n import _
from .node import (
hex,
- nullid,
nullrev,
)
@@ -47,9 +46,13 @@
urlutil,
)
+from .revlogutils import (
+ constants as revlog_constants,
+)
+
class bundlerevlog(revlog.revlog):
- def __init__(self, opener, indexfile, cgunpacker, linkmapper):
+ def __init__(self, opener, target, indexfile, cgunpacker, linkmapper):
# How it works:
# To retrieve a revision, we need to know the offset of the revision in
# the bundle (an unbundle object). We store this offset in the index
@@ -58,7 +61,7 @@
# To differentiate a rev in the bundle from a rev in the revlog, we
# check revision against repotiprev.
opener = vfsmod.readonlyvfs(opener)
- revlog.revlog.__init__(self, opener, indexfile)
+ revlog.revlog.__init__(self, opener, target=target, indexfile=indexfile)
self.bundle = cgunpacker
n = len(self)
self.repotiprev = n - 1
@@ -172,7 +175,12 @@
changelog.changelog.__init__(self, opener)
linkmapper = lambda x: x
bundlerevlog.__init__(
- self, opener, self.indexfile, cgunpacker, linkmapper
+ self,
+ opener,
+ (revlog_constants.KIND_CHANGELOG, None),
+ self.indexfile,
+ cgunpacker,
+ linkmapper,
)
@@ -188,7 +196,12 @@
):
manifest.manifestrevlog.__init__(self, nodeconstants, opener, tree=dir)
bundlerevlog.__init__(
- self, opener, self.indexfile, cgunpacker, linkmapper
+ self,
+ opener,
+ (revlog_constants.KIND_MANIFESTLOG, dir),
+ self.indexfile,
+ cgunpacker,
+ linkmapper,
)
if dirlogstarts is None:
dirlogstarts = {}
@@ -215,7 +228,12 @@
def __init__(self, opener, path, cgunpacker, linkmapper):
filelog.filelog.__init__(self, opener, path)
self._revlog = bundlerevlog(
- opener, self.indexfile, cgunpacker, linkmapper
+ opener,
+ # XXX should use the unencoded path
+ target=(revlog_constants.KIND_FILELOG, path),
+ indexfile=self.indexfile,
+ cgunpacker=cgunpacker,
+ linkmapper=linkmapper,
)
@@ -447,7 +465,9 @@
return encoding.getcwd() # always outside the repo
# Check if parents exist in localrepo before setting
- def setparents(self, p1, p2=nullid):
+ def setparents(self, p1, p2=None):
+ if p2 is None:
+ p2 = self.nullid
p1rev = self.changelog.rev(p1)
p2rev = self.changelog.rev(p2)
msg = _(b"setting parent to node %s that only exists in the bundle\n")
--- a/mercurial/cext/manifest.c Sun May 02 16:56:20 2021 -0400
+++ b/mercurial/cext/manifest.c Fri May 07 22:06:25 2021 -0400
@@ -28,6 +28,7 @@
typedef struct {
PyObject_HEAD
PyObject *pydata;
+ Py_ssize_t nodelen;
line *lines;
int numlines; /* number of line entries */
int livelines; /* number of non-deleted lines */
@@ -49,12 +50,11 @@
}
/* get the node value of a single line */
-static PyObject *nodeof(line *l, char *flag)
+static PyObject *nodeof(Py_ssize_t nodelen, line *l, char *flag)
{
char *s = l->start;
Py_ssize_t llen = pathlen(l);
Py_ssize_t hlen = l->len - llen - 2;
- Py_ssize_t hlen_raw;
PyObject *hash;
if (llen + 1 + 40 + 1 > l->len) { /* path '\0' hash '\n' */
PyErr_SetString(PyExc_ValueError, "manifest line too short");
@@ -73,36 +73,29 @@
break;
}
- switch (hlen) {
- case 40: /* sha1 */
- hlen_raw = 20;
- break;
- case 64: /* new hash */
- hlen_raw = 32;
- break;
- default:
+ if (hlen != 2 * nodelen) {
PyErr_SetString(PyExc_ValueError, "invalid node length in manifest");
return NULL;
}
- hash = unhexlify(s + llen + 1, hlen_raw * 2);
+ hash = unhexlify(s + llen + 1, nodelen * 2);
if (!hash) {
return NULL;
}
if (l->hash_suffix != '\0') {
char newhash[33];
- memcpy(newhash, PyBytes_AsString(hash), hlen_raw);
+ memcpy(newhash, PyBytes_AsString(hash), nodelen);
Py_DECREF(hash);
- newhash[hlen_raw] = l->hash_suffix;
- hash = PyBytes_FromStringAndSize(newhash, hlen_raw+1);
+ newhash[nodelen] = l->hash_suffix;
+ hash = PyBytes_FromStringAndSize(newhash, nodelen + 1);
}
return hash;
}
/* get the node hash and flags of a line as a tuple */
-static PyObject *hashflags(line *l)
+static PyObject *hashflags(Py_ssize_t nodelen, line *l)
{
char flag;
- PyObject *hash = nodeof(l, &flag);
+ PyObject *hash = nodeof(nodelen, l, &flag);
PyObject *flags;
PyObject *tup;
@@ -190,17 +183,23 @@
static int lazymanifest_init(lazymanifest *self, PyObject *args)
{
char *data;
- Py_ssize_t len;
+ Py_ssize_t nodelen, len;
int err, ret;
PyObject *pydata;
lazymanifest_init_early(self);
- if (!PyArg_ParseTuple(args, "S", &pydata)) {
+ if (!PyArg_ParseTuple(args, "nS", &nodelen, &pydata)) {
return -1;
}
- err = PyBytes_AsStringAndSize(pydata, &data, &len);
+ if (nodelen != 20 && nodelen != 32) {
+ /* See fixed buffer in nodeof */
+ PyErr_Format(PyExc_ValueError, "Unsupported node length");
+ return -1;
+ }
+ self->nodelen = nodelen;
+ self->dirty = false;
- self->dirty = false;
+ err = PyBytes_AsStringAndSize(pydata, &data, &len);
if (err == -1)
return -1;
self->pydata = pydata;
@@ -291,17 +290,18 @@
static PyObject *lmiter_iterentriesnext(PyObject *o)
{
+ lmIter *self = (lmIter *)o;
Py_ssize_t pl;
line *l;
char flag;
PyObject *ret = NULL, *path = NULL, *hash = NULL, *flags = NULL;
- l = lmiter_nextline((lmIter *)o);
+ l = lmiter_nextline(self);
if (!l) {
goto done;
}
pl = pathlen(l);
path = PyBytes_FromStringAndSize(l->start, pl);
- hash = nodeof(l, &flag);
+ hash = nodeof(self->m->nodelen, l, &flag);
if (!path || !hash) {
goto done;
}
@@ -471,7 +471,7 @@
PyErr_Format(PyExc_KeyError, "No such manifest entry.");
return NULL;
}
- return hashflags(hit);
+ return hashflags(self->nodelen, hit);
}
static int lazymanifest_delitem(lazymanifest *self, PyObject *key)
@@ -568,13 +568,13 @@
pyhash = PyTuple_GetItem(value, 0);
if (!PyBytes_Check(pyhash)) {
PyErr_Format(PyExc_TypeError,
- "node must be a 20 or 32 bytes string");
+ "node must be a %zi bytes string", self->nodelen);
return -1;
}
hlen = PyBytes_Size(pyhash);
- if (hlen != 20 && hlen != 32) {
+ if (hlen != self->nodelen) {
PyErr_Format(PyExc_TypeError,
- "node must be a 20 or 32 bytes string");
+ "node must be a %zi bytes string", self->nodelen);
return -1;
}
hash = PyBytes_AsString(pyhash);
@@ -739,6 +739,7 @@
goto nomem;
}
lazymanifest_init_early(copy);
+ copy->nodelen = self->nodelen;
copy->numlines = self->numlines;
copy->livelines = self->livelines;
copy->dirty = false;
@@ -777,6 +778,7 @@
goto nomem;
}
lazymanifest_init_early(copy);
+ copy->nodelen = self->nodelen;
copy->dirty = true;
copy->lines = malloc(self->maxlines * sizeof(line));
if (!copy->lines) {
@@ -872,7 +874,7 @@
if (!key)
goto nomem;
if (result < 0) {
- PyObject *l = hashflags(left);
+ PyObject *l = hashflags(self->nodelen, left);
if (!l) {
goto nomem;
}
@@ -885,7 +887,7 @@
Py_DECREF(outer);
sneedle++;
} else if (result > 0) {
- PyObject *r = hashflags(right);
+ PyObject *r = hashflags(self->nodelen, right);
if (!r) {
goto nomem;
}
@@ -902,12 +904,12 @@
if (left->len != right->len
|| memcmp(left->start, right->start, left->len)
|| left->hash_suffix != right->hash_suffix) {
- PyObject *l = hashflags(left);
+ PyObject *l = hashflags(self->nodelen, left);
PyObject *r;
if (!l) {
goto nomem;
}
- r = hashflags(right);
+ r = hashflags(self->nodelen, right);
if (!r) {
Py_DECREF(l);
goto nomem;
--- a/mercurial/cext/parsers.c Sun May 02 16:56:20 2021 -0400
+++ b/mercurial/cext/parsers.c Fri May 07 22:06:25 2021 -0400
@@ -668,7 +668,7 @@
void manifest_module_init(PyObject *mod);
void revlog_module_init(PyObject *mod);
-static const int version = 17;
+static const int version = 18;
static void module_init(PyObject *mod)
{
--- a/mercurial/cext/parsers.pyi Sun May 02 16:56:20 2021 -0400
+++ b/mercurial/cext/parsers.pyi Fri May 07 22:06:25 2021 -0400
@@ -29,7 +29,7 @@
# From manifest.c
class lazymanifest:
- def __init__(self, data: bytes): ...
+ def __init__(self, nodelen: int, data: bytes): ...
def __iter__(self) -> Iterator[bytes]: ...
def __len__(self) -> int: ...
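
The updated stub above captures the new constructor contract: the node length is now passed ahead of the manifest data. A minimal sketch of the new calling convention, assuming the C extension is built (illustrative only, not part of this patch):

    from mercurial.cext import parsers

    # One manifest line: path, NUL, 40 hex digits (a 20-byte node), newline.
    data = b"foo\x00" + b"a1" * 20 + b"\n"

    # nodelen must be 20 (SHA-1) or 32 (the larger hash); anything else raises
    # ValueError per the lazymanifest_init() check added above.
    lm = parsers.lazymanifest(20, data)   # previously: parsers.lazymanifest(data)
    node, flags = lm[b"foo"]              # hashflags() now validates against nodelen
    assert len(node) == 20 and flags == b""
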
--- a/mercurial/cext/revlog.c Sun May 02 16:56:20 2021 -0400
+++ b/mercurial/cext/revlog.c Fri May 07 22:06:25 2021 -0400
@@ -342,6 +342,46 @@
sidedata_offset, sidedata_comp_len);
}
}
+/*
+ * Pack header information in binary
+ */
+static PyObject *index_pack_header(indexObject *self, PyObject *args)
+{
+ int header;
+ char out[4];
+ if (!PyArg_ParseTuple(args, "I", &header)) {
+ return NULL;
+ }
+ putbe32(header, out);
+ return PyBytes_FromStringAndSize(out, 4);
+}
+/*
+ * Return the raw binary string representing a revision
+ */
+static PyObject *index_entry_binary(indexObject *self, PyObject *value)
+{
+ long rev;
+ const char *data;
+ Py_ssize_t length = index_length(self);
+
+ if (!pylong_to_long(value, &rev)) {
+ return NULL;
+ }
+ if (rev < 0 || rev >= length) {
+ PyErr_Format(PyExc_ValueError, "revlog index out of range: %ld",
+ rev);
+ return NULL;
+ };
+
+ data = index_deref(self, rev);
+ if (data == NULL)
+ return NULL;
+ if (rev == 0) {
+ /* the header is eating the start of the first entry */
+ return PyBytes_FromStringAndSize(data + 4, self->hdrsize - 4);
+ }
+ return PyBytes_FromStringAndSize(data, self->hdrsize);
+}
/*
* Return the hash of node corresponding to the given rev.
@@ -463,14 +503,14 @@
inside the transaction that creates the given revision. */
static PyObject *index_replace_sidedata_info(indexObject *self, PyObject *args)
{
- uint64_t sidedata_offset;
+ uint64_t offset_flags, sidedata_offset;
int rev;
Py_ssize_t sidedata_comp_len;
char *data;
#if LONG_MAX == 0x7fffffffL
- const char *const sidedata_format = PY23("nKi", "nKi");
+ const char *const sidedata_format = PY23("nKiK", "nKiK");
#else
- const char *const sidedata_format = PY23("nki", "nki");
+ const char *const sidedata_format = PY23("nkik", "nkik");
#endif
if (self->hdrsize == v1_hdrsize || self->inlined) {
@@ -485,7 +525,7 @@
}
if (!PyArg_ParseTuple(args, sidedata_format, &rev, &sidedata_offset,
- &sidedata_comp_len))
+ &sidedata_comp_len, &offset_flags))
return NULL;
if (rev < 0 || rev >= index_length(self)) {
@@ -502,6 +542,7 @@
/* Find the newly added node, offset from the "already on-disk" length
*/
data = self->added + self->hdrsize * (rev - self->length);
+ putbe64(offset_flags, data);
putbe64(sidedata_offset, data + 64);
putbe32(sidedata_comp_len, data + 72);
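
With the extra offset/flags value parsed above, a Python caller now passes four values instead of three. The argument order below follows the PyArg_ParseTuple format string; the variable names are illustrative, not taken from the patch:

    # Sketch only: update the sidedata location of a freshly added revision while
    # also refreshing its offset/flags field.
    index.replace_sidedata_info(rev, sidedata_offset, sidedata_comp_len, offset_flags)
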
@@ -2859,6 +2900,10 @@
{"shortest", (PyCFunction)index_shortest, METH_VARARGS,
"find length of shortest hex nodeid of a binary ID"},
{"stats", (PyCFunction)index_stats, METH_NOARGS, "stats for the index"},
+ {"entry_binary", (PyCFunction)index_entry_binary, METH_O,
+ "return an entry in binary form"},
+ {"pack_header", (PyCFunction)index_pack_header, METH_VARARGS,
+ "pack the revlog header information into binary"},
{NULL} /* Sentinel */
};
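
Taken together, the two new methods let the Python layer re-emit raw index data without knowing the C entry layout. A hedged sketch of how they compose, where index is assumed to be a parsed C revlog index (for example repo.changelog.index) and the helper name is hypothetical:

    def rebuild_index_bytes(index, header, rllen):
        # pack_header() renders the integer version/flags header as 4 big-endian bytes.
        chunks = [index.pack_header(header)]
        for rev in range(rllen):
            # entry_binary() returns one entry's raw bytes; for rev 0 the first
            # 4 bytes are omitted because the header overlays them on disk.
            chunks.append(index.entry_binary(rev))
        return b"".join(chunks)
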
--- a/mercurial/changegroup.py Sun May 02 16:56:20 2021 -0400
+++ b/mercurial/changegroup.py Fri May 07 22:06:25 2021 -0400
@@ -7,7 +7,6 @@
from __future__ import absolute_import
-import collections
import os
import struct
import weakref
@@ -15,7 +14,6 @@
from .i18n import _
from .node import (
hex,
- nullid,
nullrev,
short,
)
@@ -34,10 +32,13 @@
from .interfaces import repository
from .revlogutils import sidedata as sidedatamod
+from .revlogutils import constants as revlog_constants
+from .utils import storageutil
_CHANGEGROUPV1_DELTA_HEADER = struct.Struct(b"20s20s20s20s")
_CHANGEGROUPV2_DELTA_HEADER = struct.Struct(b"20s20s20s20s20s")
_CHANGEGROUPV3_DELTA_HEADER = struct.Struct(b">20s20s20s20s20sH")
+_CHANGEGROUPV4_DELTA_HEADER = struct.Struct(b">B20s20s20s20s20sH")
LFS_REQUIREMENT = b'lfs'
@@ -194,7 +195,8 @@
else:
deltabase = prevnode
flags = 0
- return node, p1, p2, deltabase, cs, flags
+ protocol_flags = 0
+ return node, p1, p2, deltabase, cs, flags, protocol_flags
def deltachunk(self, prevnode):
l = self._chunklength()
@@ -203,10 +205,9 @@
headerdata = readexactly(self._stream, self.deltaheadersize)
header = self.deltaheader.unpack(headerdata)
delta = readexactly(self._stream, l - self.deltaheadersize)
- node, p1, p2, deltabase, cs, flags = self._deltaheader(header, prevnode)
- # cg4 forward-compat
- sidedata = {}
- return (node, p1, p2, cs, deltabase, delta, flags, sidedata)
+ header = self._deltaheader(header, prevnode)
+ node, p1, p2, deltabase, cs, flags, protocol_flags = header
+ return node, p1, p2, cs, deltabase, delta, flags, protocol_flags
def getchunks(self):
"""returns all the chunks contains in the bundle
@@ -293,8 +294,13 @@
# Only useful if we're adding sidedata categories. If both peers have
# the same categories, then we simply don't do anything.
- if self.version == b'04' and srctype == b'pull':
- sidedata_helpers = get_sidedata_helpers(
+ adding_sidedata = (
+ requirements.REVLOGV2_REQUIREMENT in repo.requirements
+ and self.version == b'04'
+ and srctype == b'pull'
+ )
+ if adding_sidedata:
+ sidedata_helpers = sidedatamod.get_sidedata_helpers(
repo,
sidedata_categories or set(),
pull=True,
@@ -386,15 +392,16 @@
_(b'manifests'), unit=_(b'chunks'), total=changesets
)
on_manifest_rev = None
- if sidedata_helpers and b'manifest' in sidedata_helpers[1]:
+ if sidedata_helpers:
+ if revlog_constants.KIND_MANIFESTLOG in sidedata_helpers[1]:
- def on_manifest_rev(manifest, rev):
- range = touched_manifests.get(manifest)
- if not range:
- touched_manifests[manifest] = (rev, rev)
- else:
- assert rev == range[1] + 1
- touched_manifests[manifest] = (range[0], rev)
+ def on_manifest_rev(manifest, rev):
+ range = touched_manifests.get(manifest)
+ if not range:
+ touched_manifests[manifest] = (rev, rev)
+ else:
+ assert rev == range[1] + 1
+ touched_manifests[manifest] = (range[0], rev)
self._unpackmanifests(
repo,
@@ -417,15 +424,16 @@
needfiles.setdefault(f, set()).add(n)
on_filelog_rev = None
- if sidedata_helpers and b'filelog' in sidedata_helpers[1]:
+ if sidedata_helpers:
+ if revlog_constants.KIND_FILELOG in sidedata_helpers[1]:
- def on_filelog_rev(filelog, rev):
- range = touched_filelogs.get(filelog)
- if not range:
- touched_filelogs[filelog] = (rev, rev)
- else:
- assert rev == range[1] + 1
- touched_filelogs[filelog] = (range[0], rev)
+ def on_filelog_rev(filelog, rev):
+ range = touched_filelogs.get(filelog)
+ if not range:
+ touched_filelogs[filelog] = (rev, rev)
+ else:
+ assert rev == range[1] + 1
+ touched_filelogs[filelog] = (range[0], rev)
# process the files
repo.ui.status(_(b"adding file changes\n"))
@@ -440,7 +448,7 @@
)
if sidedata_helpers:
- if b'changelog' in sidedata_helpers[1]:
+ if revlog_constants.KIND_CHANGELOG in sidedata_helpers[1]:
cl.rewrite_sidedata(sidedata_helpers, clstart, clend - 1)
for mf, (startrev, endrev) in touched_manifests.items():
mf.rewrite_sidedata(sidedata_helpers, startrev, endrev)
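
The membership tests above now key off the revlog kind constants instead of the old bytestring names (b'changelog', b'manifest', b'filelog'). A small sketch of the helper tuple's shape as this code consumes it, with the computer callables elided (illustrative only, not part of the patch):

    from mercurial.revlogutils import constants as revlog_constants

    def summarize_sidedata_helpers(sidedata_helpers):
        # Per the relocated get_sidedata_helpers(), the tuple is
        # (repo, computers-by-kind, removers-by-kind).
        repo, sd_computers, sd_removers = sidedata_helpers
        for kind in (
            revlog_constants.KIND_CHANGELOG,
            revlog_constants.KIND_MANIFESTLOG,
            revlog_constants.KIND_FILELOG,
        ):
            yield kind, len(sd_computers.get(kind, ())), len(sd_removers.get(kind, ()))
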
@@ -590,7 +598,8 @@
def _deltaheader(self, headertuple, prevnode):
node, p1, p2, deltabase, cs = headertuple
flags = 0
- return node, p1, p2, deltabase, cs, flags
+ protocol_flags = 0
+ return node, p1, p2, deltabase, cs, flags, protocol_flags
class cg3unpacker(cg2unpacker):
@@ -608,7 +617,8 @@
def _deltaheader(self, headertuple, prevnode):
node, p1, p2, deltabase, cs, flags = headertuple
- return node, p1, p2, deltabase, cs, flags
+ protocol_flags = 0
+ return node, p1, p2, deltabase, cs, flags, protocol_flags
def _unpackmanifests(self, repo, revmap, trp, prog, addrevisioncb=None):
super(cg3unpacker, self)._unpackmanifests(
@@ -631,18 +641,24 @@
cg4 streams add support for exchanging sidedata.
"""
+ deltaheader = _CHANGEGROUPV4_DELTA_HEADER
+ deltaheadersize = deltaheader.size
version = b'04'
+ def _deltaheader(self, headertuple, prevnode):
+ protocol_flags, node, p1, p2, deltabase, cs, flags = headertuple
+ return node, p1, p2, deltabase, cs, flags, protocol_flags
+
def deltachunk(self, prevnode):
res = super(cg4unpacker, self).deltachunk(prevnode)
if not res:
return res
- (node, p1, p2, cs, deltabase, delta, flags, _sidedata) = res
+ (node, p1, p2, cs, deltabase, delta, flags, protocol_flags) = res
- sidedata_raw = getchunk(self._stream)
sidedata = {}
- if len(sidedata_raw) > 0:
+ if protocol_flags & storageutil.CG_FLAG_SIDEDATA:
+ sidedata_raw = getchunk(self._stream)
sidedata = sidedatamod.deserialize_sidedata(sidedata_raw)
return node, p1, p2, cs, deltabase, delta, flags, sidedata
@@ -673,7 +689,7 @@
if delta.delta is not None:
prefix, data = b'', delta.delta
- elif delta.basenode == nullid:
+ elif delta.basenode == repo.nullid:
data = delta.revision
prefix = mdiff.trivialdiffheader(len(data))
else:
@@ -688,10 +704,10 @@
yield prefix
yield data
- sidedata = delta.sidedata
- if sidedata is not None:
+ if delta.protocol_flags & storageutil.CG_FLAG_SIDEDATA:
# Need a separate chunk for sidedata to be able to differentiate
# "raw delta" length and sidedata length
+ sidedata = delta.sidedata
yield chunkheader(len(sidedata))
yield sidedata
@@ -828,7 +844,8 @@
If topic is not None, progress detail will be generated using this
topic name (e.g. changesets, manifests, etc).
- See `storageutil.emitrevisions` for the doc on `sidedata_helpers`.
+ See `revlogutils.sidedata.get_sidedata_helpers` for the doc on
+ `sidedata_helpers`.
"""
if not nodes:
return
@@ -1056,7 +1073,9 @@
# TODO a better approach would be for the strip bundle to
# correctly advertise its sidedata categories directly.
remote_sidedata = repo._wanted_sidedata
- sidedata_helpers = get_sidedata_helpers(repo, remote_sidedata)
+ sidedata_helpers = sidedatamod.get_sidedata_helpers(
+ repo, remote_sidedata
+ )
clstate, deltas = self._generatechangelog(
cl,
@@ -1194,7 +1213,8 @@
if generate is False, the state will be fully populated and no chunk
stream will be yielded
- See `storageutil.emitrevisions` for the doc on `sidedata_helpers`.
+ See `revlogutils.sidedata.get_sidedata_helpers` for the doc on
+ `sidedata_helpers`.
"""
clrevorder = {}
manifests = {}
@@ -1299,7 +1319,8 @@
`source` is unused here, but is used by extensions like remotefilelog to
change what is sent based in pulls vs pushes, etc.
- See `storageutil.emitrevisions` for the doc on `sidedata_helpers`.
+ See `revlogutils.sidedata.get_sidedata_helpers` for the doc on
+ `sidedata_helpers`.
"""
repo = self._repo
mfl = repo.manifestlog
@@ -1633,11 +1654,18 @@
fullnodes=None,
remote_sidedata=None,
):
- # Same header func as cg3. Sidedata is in a separate chunk from the delta to
- # differenciate "raw delta" and sidedata.
- builddeltaheader = lambda d: _CHANGEGROUPV3_DELTA_HEADER.pack(
- d.node, d.p1node, d.p2node, d.basenode, d.linknode, d.flags
- )
+ # Sidedata is in a separate chunk from the delta to differentiate
+ # "raw delta" and sidedata.
+ def builddeltaheader(d):
+ return _CHANGEGROUPV4_DELTA_HEADER.pack(
+ d.protocol_flags,
+ d.node,
+ d.p1node,
+ d.p2node,
+ d.basenode,
+ d.linknode,
+ d.flags,
+ )
return cgpacker(
repo,
@@ -1682,11 +1710,14 @@
#
# (or even to push subset of history)
needv03 = True
- has_revlogv2 = requirements.REVLOGV2_REQUIREMENT in repo.requirements
- if not has_revlogv2:
- versions.discard(b'04')
if not needv03:
versions.discard(b'03')
+ want_v4 = (
+ repo.ui.configbool(b'experimental', b'changegroup4')
+ or requirements.REVLOGV2_REQUIREMENT in repo.requirements
+ )
+ if not want_v4:
+ versions.discard(b'04')
return versions
@@ -1913,25 +1944,3 @@
)
return revisions, files
-
-
-def get_sidedata_helpers(repo, remote_sd_categories, pull=False):
- # Computers for computing sidedata on-the-fly
- sd_computers = collections.defaultdict(list)
- # Computers for categories to remove from sidedata
- sd_removers = collections.defaultdict(list)
-
- to_generate = remote_sd_categories - repo._wanted_sidedata
- to_remove = repo._wanted_sidedata - remote_sd_categories
- if pull:
- to_generate, to_remove = to_remove, to_generate
-
- for revlog_kind, computers in repo._sidedata_computers.items():
- for category, computer in computers.items():
- if category in to_generate:
- sd_computers[revlog_kind].append(computer)
- if category in to_remove:
- sd_removers[revlog_kind].append(computer)
-
- sidedata_helpers = (repo, sd_computers, sd_removers)
- return sidedata_helpers
--- a/mercurial/changelog.py Sun May 02 16:56:20 2021 -0400
+++ b/mercurial/changelog.py Fri May 07 22:06:25 2021 -0400
@@ -11,7 +11,6 @@
from .node import (
bin,
hex,
- nullid,
)
from .thirdparty import attr
@@ -26,7 +25,10 @@
dateutil,
stringutil,
)
-from .revlogutils import flagutil
+from .revlogutils import (
+ constants as revlog_constants,
+ flagutil,
+)
_defaultextra = {b'branch': b'default'}
@@ -221,7 +223,7 @@
def __new__(cls, cl, text, sidedata, cpsd):
if not text:
- return _changelogrevision(extra=_defaultextra, manifest=nullid)
+ return _changelogrevision(extra=_defaultextra, manifest=cl.nullid)
self = super(changelogrevision, cls).__new__(cls)
# We could return here and implement the following as an __init__.
@@ -402,7 +404,8 @@
revlog.revlog.__init__(
self,
opener,
- indexfile,
+ target=(revlog_constants.KIND_CHANGELOG, None),
+ indexfile=indexfile,
datafile=datafile,
checkambig=True,
mmaplargeindex=True,
@@ -428,7 +431,6 @@
self._filteredrevs = frozenset()
self._filteredrevs_hashcache = {}
self._copiesstorage = opener.options.get(b'copies-storage')
- self.revlog_kind = b'changelog'
@property
def filteredrevs(self):
--- a/mercurial/cmdutil.py Sun May 02 16:56:20 2021 -0400
+++ b/mercurial/cmdutil.py Fri May 07 22:06:25 2021 -0400
@@ -15,7 +15,6 @@
from .i18n import _
from .node import (
hex,
- nullid,
nullrev,
short,
)
@@ -62,6 +61,10 @@
stringutil,
)
+from .revlogutils import (
+ constants as revlog_constants,
+)
+
if pycompat.TYPE_CHECKING:
from typing import (
Any,
@@ -998,11 +1001,6 @@
_(b"a branch of the same name already exists")
)
- if repo.revs(b'obsolete() and %ld', revs):
- raise error.InputError(
- _(b"cannot change branch of a obsolete changeset")
- )
-
# make sure only topological heads
if repo.revs(b'heads(%ld) - head()', revs):
raise error.InputError(
@@ -1097,7 +1095,7 @@
'hint' is the usual hint given to Abort exception.
"""
- if merge and repo.dirstate.p2() != nullid:
+ if merge and repo.dirstate.p2() != repo.nullid:
raise error.StateError(_(b'outstanding uncommitted merge'), hint=hint)
st = repo.status()
if st.modified or st.added or st.removed or st.deleted:
@@ -1434,8 +1432,12 @@
raise error.CommandError(cmd, _(b'invalid arguments'))
if not os.path.isfile(file_):
raise error.InputError(_(b"revlog '%s' not found") % file_)
+
+ target = (revlog_constants.KIND_OTHER, b'free-form:%s' % file_)
r = revlog.revlog(
- vfsmod.vfs(encoding.getcwd(), audit=False), file_[:-2] + b".i"
+ vfsmod.vfs(encoding.getcwd(), audit=False),
+ target=target,
+ indexfile=file_[:-2] + b".i",
)
return r
@@ -1849,7 +1851,10 @@
continue
copylist.append((tfn(pat, dest, srcs), srcs))
if not copylist:
- raise error.InputError(_(b'no files to copy'))
+ hint = None
+ if rename:
+ hint = _(b'maybe you meant to use --after --at-rev=.')
+ raise error.InputError(_(b'no files to copy'), hint=hint)
errors = 0
for targetpath, srcs in copylist:
@@ -2104,7 +2109,7 @@
if parents:
prev = parents[0]
else:
- prev = nullid
+ prev = repo.nullid
fm.context(ctx=ctx)
fm.plain(b'# HG changeset patch\n')
@@ -2967,7 +2972,7 @@
ms.reset()
# Reroute the working copy parent to the new changeset
- repo.setparents(newid, nullid)
+ repo.setparents(newid, repo.nullid)
# Fixing the dirstate because localrepo.commitctx does not update
# it. This is rather convenient because we did not need to update
@@ -3322,7 +3327,7 @@
# in case of merge, files that are actually added can be reported as
# modified, we need to post process the result
- if p2 != nullid:
+ if p2 != repo.nullid:
mergeadd = set(dsmodified)
for path in dsmodified:
if path in mf:
@@ -3593,7 +3598,7 @@
# We're reverting to our parent. If possible, we'd like status
# to report the file as clean. We have to use normallookup for
# merges to avoid losing information about merged/dirty files.
- if p2 != nullid:
+ if p2 != repo.nullid:
normal = repo.dirstate.normallookup
else:
normal = repo.dirstate.normal
@@ -3690,7 +3695,7 @@
repo.dirstate.add(f)
normal = repo.dirstate.normallookup
- if node == parent and p2 == nullid:
+ if node == parent and p2 == repo.nullid:
normal = repo.dirstate.normal
for f in actions[b'undelete'][0]:
if interactive:
--- a/mercurial/commands.py Sun May 02 16:56:20 2021 -0400
+++ b/mercurial/commands.py Fri May 07 22:06:25 2021 -0400
@@ -15,10 +15,8 @@
from .i18n import _
from .node import (
hex,
- nullid,
nullrev,
short,
- wdirhex,
wdirrev,
)
from .pycompat import open
@@ -486,7 +484,7 @@
return b'%d ' % rev
def formathex(h):
- if h == wdirhex:
+ if h == repo.nodeconstants.wdirhex:
return b'%s+' % shorthex(hex(ctx.p1().node()))
else:
return b'%s ' % shorthex(h)
@@ -809,9 +807,9 @@
)
p1, p2 = repo.changelog.parents(node)
- if p1 == nullid:
+ if p1 == repo.nullid:
raise error.InputError(_(b'cannot backout a change with no parents'))
- if p2 != nullid:
+ if p2 != repo.nullid:
if not opts.get(b'parent'):
raise error.InputError(_(b'cannot backout a merge changeset'))
p = repo.lookup(opts[b'parent'])
@@ -1085,7 +1083,7 @@
)
else:
node, p2 = repo.dirstate.parents()
- if p2 != nullid:
+ if p2 != repo.nullid:
raise error.StateError(_(b'current bisect revision is a merge'))
if rev:
if not nodes:
@@ -2204,6 +2202,7 @@
(b'u', b'untrusted', None, _(b'show untrusted configuration options')),
(b'e', b'edit', None, _(b'edit user config')),
(b'l', b'local', None, _(b'edit repository config')),
+ (b'', b'source', None, _(b'show source of configuration value')),
(
b'',
b'shared',
@@ -2234,7 +2233,7 @@
--global, edit the system-wide config file. With --local, edit the
repository-level config file.
- With --debug, the source (filename and line number) is printed
+ With --source, the source (filename and line number) is printed
for each config item.
See :hg:`help config` for more information about config files.
@@ -2337,6 +2336,7 @@
selentries = set(selentries)
matched = False
+ show_source = ui.debugflag or opts.get(b'source')
for section, name, value in ui.walkconfig(untrusted=untrusted):
source = ui.configsource(section, name, untrusted)
value = pycompat.bytestr(value)
@@ -2348,7 +2348,7 @@
if values and not (section in selsections or entryname in selentries):
continue
fm.startitem()
- fm.condwrite(ui.debugflag, b'source', b'%s: ', source)
+ fm.condwrite(show_source, b'source', b'%s: ', source)
if uniquesel:
fm.data(name=entryname)
fm.write(b'value', b'%s\n', value)
@@ -4847,7 +4847,7 @@
opts = pycompat.byteskwargs(opts)
abort = opts.get(b'abort')
- if abort and repo.dirstate.p2() == nullid:
+ if abort and repo.dirstate.p2() == repo.nullid:
cmdutil.wrongtooltocontinue(repo, _(b'merge'))
cmdutil.check_incompatible_arguments(opts, b'abort', [b'rev', b'preview'])
if abort:
@@ -5072,7 +5072,7 @@
displayer = logcmdutil.changesetdisplayer(ui, repo, opts)
for n in p:
- if n != nullid:
+ if n != repo.nullid:
displayer.show(repo[n])
displayer.close()
@@ -5128,15 +5128,9 @@
"""
opts = pycompat.byteskwargs(opts)
+
+ pathitems = urlutil.list_paths(ui, search)
ui.pager(b'paths')
- if search:
- pathitems = [
- (name, path)
- for name, path in pycompat.iteritems(ui.paths)
- if name == search
- ]
- else:
- pathitems = sorted(pycompat.iteritems(ui.paths))
fm = ui.formatter(b'paths', opts)
if fm.isplain():
@@ -6105,7 +6099,7 @@
with repo.wlock():
ms = mergestatemod.mergestate.read(repo)
- if not (ms.active() or repo.dirstate.p2() != nullid):
+ if not (ms.active() or repo.dirstate.p2() != repo.nullid):
raise error.StateError(
_(b'resolve command not applicable when not merging')
)
@@ -6223,7 +6217,7 @@
raise
ms.commit()
- branchmerge = repo.dirstate.p2() != nullid
+ branchmerge = repo.dirstate.p2() != repo.nullid
mergestatemod.recordupdates(repo, ms.actions(), branchmerge, None)
if not didwork and pats:
@@ -6315,7 +6309,7 @@
opts[b"rev"] = cmdutil.finddate(ui, repo, opts[b"date"])
parent, p2 = repo.dirstate.parents()
- if not opts.get(b'rev') and p2 != nullid:
+ if not opts.get(b'rev') and p2 != repo.nullid:
# revert after merge is a trap for new users (issue2915)
raise error.InputError(
_(b'uncommitted merge with no revision specified'),
@@ -6335,7 +6329,7 @@
or opts.get(b'interactive')
):
msg = _(b"no files or directories specified")
- if p2 != nullid:
+ if p2 != repo.nullid:
hint = _(
b"uncommitted merge, use --all to discard all changes,"
b" or 'hg update -C .' to abort the merge"
@@ -7396,7 +7390,7 @@
for n in names:
if repo.tagtype(n) == b'global':
alltags = tagsmod.findglobaltags(ui, repo)
- if alltags[n][0] == nullid:
+ if alltags[n][0] == repo.nullid:
raise error.InputError(
_(b"tag '%s' is already removed") % n
)
@@ -7423,7 +7417,7 @@
)
if not opts.get(b'local'):
p1, p2 = repo.dirstate.parents()
- if p2 != nullid:
+ if p2 != repo.nullid:
raise error.StateError(_(b'uncommitted merge'))
bheads = repo.branchheads()
if not opts.get(b'force') and bheads and p1 not in bheads:
--- a/mercurial/commit.py Sun May 02 16:56:20 2021 -0400
+++ b/mercurial/commit.py Fri May 07 22:06:25 2021 -0400
@@ -10,7 +10,6 @@
from .i18n import _
from .node import (
hex,
- nullid,
nullrev,
)
@@ -277,10 +276,10 @@
"""
fname = fctx.path()
- fparent1 = manifest1.get(fname, nullid)
- fparent2 = manifest2.get(fname, nullid)
+ fparent1 = manifest1.get(fname, repo.nullid)
+ fparent2 = manifest2.get(fname, repo.nullid)
touched = None
- if fparent1 == fparent2 == nullid:
+ if fparent1 == fparent2 == repo.nullid:
touched = 'added'
if isinstance(fctx, context.filectx):
@@ -291,9 +290,11 @@
if node in [fparent1, fparent2]:
repo.ui.debug(b'reusing %s filelog entry\n' % fname)
if (
- fparent1 != nullid and manifest1.flags(fname) != fctx.flags()
+ fparent1 != repo.nullid
+ and manifest1.flags(fname) != fctx.flags()
) or (
- fparent2 != nullid and manifest2.flags(fname) != fctx.flags()
+ fparent2 != repo.nullid
+ and manifest2.flags(fname) != fctx.flags()
):
touched = 'modified'
return node, touched
@@ -327,7 +328,9 @@
newfparent = fparent2
if manifest2: # branch merge
- if fparent2 == nullid or cnode is None: # copied on remote side
+ if (
+ fparent2 == repo.nullid or cnode is None
+ ): # copied on remote side
if cfname in manifest2:
cnode = manifest2[cfname]
newfparent = fparent1
@@ -346,7 +349,7 @@
if includecopymeta:
meta[b"copy"] = cfname
meta[b"copyrev"] = hex(cnode)
- fparent1, fparent2 = nullid, newfparent
+ fparent1, fparent2 = repo.nullid, newfparent
else:
repo.ui.warn(
_(
@@ -356,20 +359,20 @@
% (fname, cfname)
)
- elif fparent1 == nullid:
- fparent1, fparent2 = fparent2, nullid
- elif fparent2 != nullid:
+ elif fparent1 == repo.nullid:
+ fparent1, fparent2 = fparent2, repo.nullid
+ elif fparent2 != repo.nullid:
if ms.active() and ms.extras(fname).get(b'filenode-source') == b'other':
- fparent1, fparent2 = fparent2, nullid
+ fparent1, fparent2 = fparent2, repo.nullid
elif ms.active() and ms.extras(fname).get(b'merged') != b'yes':
- fparent1, fparent2 = fparent1, nullid
+ fparent1, fparent2 = fparent1, repo.nullid
# is one parent an ancestor of the other?
else:
fparentancestors = flog.commonancestorsheads(fparent1, fparent2)
if fparent1 in fparentancestors:
- fparent1, fparent2 = fparent2, nullid
+ fparent1, fparent2 = fparent2, repo.nullid
elif fparent2 in fparentancestors:
- fparent2 = nullid
+ fparent2 = repo.nullid
force_new_node = False
# The file might have been deleted by merge code and user explicitly choose
@@ -384,9 +387,14 @@
force_new_node = True
# is the file changed?
text = fctx.data()
- if fparent2 != nullid or meta or flog.cmp(fparent1, text) or force_new_node:
+ if (
+ fparent2 != repo.nullid
+ or meta
+ or flog.cmp(fparent1, text)
+ or force_new_node
+ ):
if touched is None: # do not overwrite added
- if fparent2 == nullid:
+ if fparent2 == repo.nullid:
touched = 'modified'
else:
touched = 'merged'
--- a/mercurial/configitems.py Sun May 02 16:56:20 2021 -0400
+++ b/mercurial/configitems.py Fri May 07 22:06:25 2021 -0400
@@ -904,6 +904,11 @@
)
coreconfigitem(
b'experimental',
+ b'changegroup4',
+ default=False,
+)
+coreconfigitem(
+ b'experimental',
b'cleanup-as-archived',
default=False,
)
@@ -954,6 +959,11 @@
)
coreconfigitem(
b'experimental',
+ b'dirstate-tree.in-memory',
+ default=False,
+)
+coreconfigitem(
+ b'experimental',
b'editortmpinhg',
default=False,
)
--- a/mercurial/context.py Sun May 02 16:56:20 2021 -0400
+++ b/mercurial/context.py Fri May 07 22:06:25 2021 -0400
@@ -14,14 +14,9 @@
from .i18n import _
from .node import (
- addednodeid,
hex,
- modifiednodeid,
- nullid,
nullrev,
short,
- wdirfilenodeids,
- wdirhex,
)
from .pycompat import (
getattr,
@@ -140,7 +135,7 @@
removed.append(fn)
elif flag1 != flag2:
modified.append(fn)
- elif node2 not in wdirfilenodeids:
+ elif node2 not in self._repo.nodeconstants.wdirfilenodeids:
# When comparing files between two commits, we save time by
# not comparing the file contents when the nodeids differ.
# Note that this means we incorrectly report a reverted change
@@ -737,7 +732,7 @@
n2 = c2._parents[0]._node
cahs = self._repo.changelog.commonancestorsheads(self._node, n2)
if not cahs:
- anc = nullid
+ anc = self._repo.nodeconstants.nullid
elif len(cahs) == 1:
anc = cahs[0]
else:
@@ -1132,7 +1127,11 @@
_path = self._path
fl = self._filelog
parents = self._filelog.parents(self._filenode)
- pl = [(_path, node, fl) for node in parents if node != nullid]
+ pl = [
+ (_path, node, fl)
+ for node in parents
+ if node != self._repo.nodeconstants.nullid
+ ]
r = fl.renamed(self._filenode)
if r:
@@ -1556,12 +1555,12 @@
return self._repo.dirstate[key] not in b"?r"
def hex(self):
- return wdirhex
+ return self._repo.nodeconstants.wdirhex
@propertycache
def _parents(self):
p = self._repo.dirstate.parents()
- if p[1] == nullid:
+ if p[1] == self._repo.nodeconstants.nullid:
p = p[:-1]
# use unfiltered repo to delay/avoid loading obsmarkers
unfi = self._repo.unfiltered()
@@ -1572,7 +1571,9 @@
for n in p
]
- def setparents(self, p1node, p2node=nullid):
+ def setparents(self, p1node, p2node=None):
+ if p2node is None:
+ p2node = self._repo.nodeconstants.nullid
dirstate = self._repo.dirstate
with dirstate.parentchange():
copies = dirstate.setparents(p1node, p2node)
@@ -1584,7 +1585,7 @@
for f in copies:
if f not in pctx and copies[f] in pctx:
dirstate.copy(copies[f], f)
- if p2node == nullid:
+ if p2node == self._repo.nodeconstants.nullid:
for f, s in sorted(dirstate.copies().items()):
if f not in pctx and s not in pctx:
dirstate.copy(None, f)
@@ -1944,8 +1945,8 @@
ff = self._flagfunc
for i, l in (
- (addednodeid, status.added),
- (modifiednodeid, status.modified),
+ (self._repo.nodeconstants.addednodeid, status.added),
+ (self._repo.nodeconstants.modifiednodeid, status.modified),
):
for f in l:
man[f] = i
@@ -2070,13 +2071,18 @@
path = self.copysource()
if not path:
return None
- return path, self._changectx._parents[0]._manifest.get(path, nullid)
+ return (
+ path,
+ self._changectx._parents[0]._manifest.get(
+ path, self._repo.nodeconstants.nullid
+ ),
+ )
def parents(self):
'''return parent filectxs, following copies if necessary'''
def filenode(ctx, path):
- return ctx._manifest.get(path, nullid)
+ return ctx._manifest.get(path, self._repo.nodeconstants.nullid)
path = self._path
fl = self._filelog
@@ -2094,7 +2100,7 @@
return [
self._parentfilectx(p, fileid=n, filelog=l)
for p, n, l in pl
- if n != nullid
+ if n != self._repo.nodeconstants.nullid
]
def children(self):
@@ -2222,7 +2228,9 @@
# ``overlayworkingctx`` (e.g. with --collapse).
util.clearcachedproperty(self, b'_manifest')
- def setparents(self, p1node, p2node=nullid):
+ def setparents(self, p1node, p2node=None):
+ if p2node is None:
+ p2node = self._repo.nodeconstants.nullid
assert p1node == self._wrappedctx.node()
self._parents = [self._wrappedctx, self._repo.unfiltered()[p2node]]
@@ -2248,10 +2256,10 @@
flag = self._flagfunc
for path in self.added():
- man[path] = addednodeid
+ man[path] = self._repo.nodeconstants.addednodeid
man.setflag(path, flag(path))
for path in self.modified():
- man[path] = modifiednodeid
+ man[path] = self._repo.nodeconstants.modifiednodeid
man.setflag(path, flag(path))
for path in self.removed():
del man[path]
@@ -2827,7 +2835,7 @@
)
self._rev = None
self._node = None
- parents = [(p or nullid) for p in parents]
+ parents = [(p or self._repo.nodeconstants.nullid) for p in parents]
p1, p2 = parents
self._parents = [self._repo[p] for p in (p1, p2)]
files = sorted(set(files))
@@ -2866,10 +2874,10 @@
man = pctx.manifest().copy()
for f in self._status.modified:
- man[f] = modifiednodeid
+ man[f] = self._repo.nodeconstants.modifiednodeid
for f in self._status.added:
- man[f] = addednodeid
+ man[f] = self._repo.nodeconstants.addednodeid
for f in self._status.removed:
if f in man:
@@ -3006,12 +3014,12 @@
# sanity check to ensure that the reused manifest parents are
# manifests of our commit parents
mp1, mp2 = self.manifestctx().parents
- if p1 != nullid and p1.manifestnode() != mp1:
+ if p1 != self._repo.nodeconstants.nullid and p1.manifestnode() != mp1:
raise RuntimeError(
r"can't reuse the manifest: its p1 "
r"doesn't match the new ctx p1"
)
- if p2 != nullid and p2.manifestnode() != mp2:
+ if p2 != self._repo.nodeconstants.nullid and p2.manifestnode() != mp2:
raise RuntimeError(
r"can't reuse the manifest: "
r"its p2 doesn't match the new ctx p2"
--- a/mercurial/copies.py Sun May 02 16:56:20 2021 -0400
+++ b/mercurial/copies.py Fri May 07 22:06:25 2021 -0400
@@ -12,10 +12,7 @@
import os
from .i18n import _
-from .node import (
- nullid,
- nullrev,
-)
+from .node import nullrev
from . import (
match as matchmod,
@@ -579,7 +576,7 @@
parents = fctx._filelog.parents(fctx._filenode)
nb_parents = 0
for n in parents:
- if n != nullid:
+ if n != repo.nullid:
nb_parents += 1
return nb_parents >= 2
--- a/mercurial/debugcommands.py Sun May 02 16:56:20 2021 -0400
+++ b/mercurial/debugcommands.py Fri May 07 22:06:25 2021 -0400
@@ -30,7 +30,6 @@
from .node import (
bin,
hex,
- nullid,
nullrev,
short,
)
@@ -1667,7 +1666,7 @@
node = r.node(i)
pp = r.parents(node)
ui.write(b"\t%d -> %d\n" % (r.rev(pp[0]), i))
- if pp[1] != nullid:
+ if pp[1] != repo.nullid:
ui.write(b"\t%d -> %d\n" % (r.rev(pp[1]), i))
ui.write(b"}\n")
@@ -1675,7 +1674,7 @@
@command(b'debugindexstats', [])
def debugindexstats(ui, repo):
"""show stats related to the changelog index"""
- repo.changelog.shortest(nullid, 1)
+ repo.changelog.shortest(repo.nullid, 1)
index = repo.changelog.index
if not util.safehasattr(index, b'stats'):
raise error.Abort(_(b'debugindexstats only works with native code'))
@@ -2425,7 +2424,7 @@
# arbitrary node identifiers, possibly not present in the
# local repository.
n = bin(s)
- if len(n) != len(nullid):
+ if len(n) != repo.nodeconstants.nodelen:
raise TypeError()
return n
except TypeError:
@@ -3328,7 +3327,7 @@
try:
pp = r.parents(node)
except Exception:
- pp = [nullid, nullid]
+ pp = [repo.nullid, repo.nullid]
if ui.verbose:
ui.write(
b"% 6d % 9d % 7d % 7d %s %s %s\n"
@@ -3742,7 +3741,9 @@
for n in chlist:
if limit is not None and count >= limit:
break
- parents = [True for p in other.changelog.parents(n) if p != nullid]
+ parents = [
+ True for p in other.changelog.parents(n) if p != repo.nullid
+ ]
if opts.get(b"no_merges") and len(parents) == 2:
continue
count += 1
--- a/mercurial/dirstate.py Sun May 02 16:56:20 2021 -0400
+++ b/mercurial/dirstate.py Fri May 07 22:06:25 2021 -0400
@@ -14,7 +14,6 @@
import stat
from .i18n import _
-from .node import nullid
from .pycompat import delattr
from hgdemandimport import tracing
@@ -314,7 +313,7 @@
def branch(self):
return encoding.tolocal(self._branch)
- def setparents(self, p1, p2=nullid):
+ def setparents(self, p1, p2=None):
"""Set dirstate parents to p1 and p2.
When moving from two parents to one, 'm' merged entries a
@@ -323,6 +322,8 @@
See localrepo.setparents()
"""
+ if p2 is None:
+ p2 = self._nodeconstants.nullid
if self._parentwriters == 0:
raise ValueError(
b"cannot set dirstate parent outside of "
@@ -335,10 +336,12 @@
self._origpl = self._pl
self._map.setparents(p1, p2)
copies = {}
- if oldp2 != nullid and p2 == nullid:
- candidatefiles = self._map.nonnormalset.union(
- self._map.otherparentset
- )
+ if (
+ oldp2 != self._nodeconstants.nullid
+ and p2 == self._nodeconstants.nullid
+ ):
+ candidatefiles = self._map.non_normal_or_other_parent_paths()
+
for f in candidatefiles:
s = self._map.get(f)
if s is None:
@@ -459,7 +462,7 @@
def normallookup(self, f):
'''Mark a file normal, but possibly dirty.'''
- if self._pl[1] != nullid:
+ if self._pl[1] != self._nodeconstants.nullid:
# if there is a merge going on and the file was either
# in state 'm' (-1) or coming from other parent (-2) before
# being removed, restore that state.
@@ -481,7 +484,7 @@
def otherparent(self, f):
'''Mark as coming from the other parent, always dirty.'''
- if self._pl[1] == nullid:
+ if self._pl[1] == self._nodeconstants.nullid:
raise error.Abort(
_(b"setting %r to other parent only allowed in merges") % f
)
@@ -503,7 +506,7 @@
self._dirty = True
oldstate = self[f]
size = 0
- if self._pl[1] != nullid:
+ if self._pl[1] != self._nodeconstants.nullid:
entry = self._map.get(f)
if entry is not None:
# backup the previous state
@@ -519,7 +522,7 @@
def merge(self, f):
'''Mark a file merged.'''
- if self._pl[1] == nullid:
+ if self._pl[1] == self._nodeconstants.nullid:
return self.normallookup(f)
return self.otherparent(f)
@@ -638,7 +641,7 @@
if self._origpl is None:
self._origpl = self._pl
- self._map.setparents(parent, nullid)
+ self._map.setparents(parent, self._nodeconstants.nullid)
for f in to_lookup:
self.normallookup(f)
@@ -1459,7 +1462,7 @@
def clear(self):
self._map.clear()
self.copymap.clear()
- self.setparents(nullid, nullid)
+ self.setparents(self._nodeconstants.nullid, self._nodeconstants.nullid)
util.clearcachedproperty(self, b"_dirs")
util.clearcachedproperty(self, b"_alldirs")
util.clearcachedproperty(self, b"filefoldmap")
@@ -1636,7 +1639,10 @@
st[self._nodelen : 2 * self._nodelen],
)
elif l == 0:
- self._parents = (nullid, nullid)
+ self._parents = (
+ self._nodeconstants.nullid,
+ self._nodeconstants.nullid,
+ )
else:
raise error.Abort(
_(b'working directory state appears damaged!')
@@ -1718,6 +1724,9 @@
self.nonnormalset = nonnorm
return otherparents
+ def non_normal_or_other_parent_paths(self):
+ return self.nonnormalset.union(self.otherparentset)
+
@propertycache
def identity(self):
self._map
@@ -1741,6 +1750,7 @@
self._opener = opener
self._root = root
self._filename = b'dirstate'
+ self._nodelen = 20
self._parents = None
self._dirtyparents = False
@@ -1765,25 +1775,6 @@
def get(self, *args, **kwargs):
return self._rustmap.get(*args, **kwargs)
- @propertycache
- def _rustmap(self):
- """
- Fills the Dirstatemap when called.
- Use `self._inner_rustmap` if reading the dirstate is not necessary.
- """
- self._rustmap = self._inner_rustmap
- self.read()
- return self._rustmap
-
- @propertycache
- def _inner_rustmap(self):
- """
- Does not fill the Dirstatemap when called. This allows for
- optimizations where only setting/getting the parents is needed.
- """
- self._inner_rustmap = rustmod.DirstateMap(self._root)
- return self._inner_rustmap
-
@property
def copymap(self):
return self._rustmap.copymap()
@@ -1793,8 +1784,9 @@
def clear(self):
self._rustmap.clear()
- self._inner_rustmap.clear()
- self.setparents(nullid, nullid)
+ self.setparents(
+ self._nodeconstants.nullid, self._nodeconstants.nullid
+ )
util.clearcachedproperty(self, b"_dirs")
util.clearcachedproperty(self, b"_alldirs")
util.clearcachedproperty(self, b"dirfoldmap")
@@ -1833,7 +1825,6 @@
return fp
def setparents(self, p1, p2):
- self._rustmap.setparents(p1, p2)
self._parents = (p1, p2)
self._dirtyparents = True
@@ -1849,16 +1840,29 @@
# File doesn't exist, so the current state is empty
st = b''
- try:
- self._parents = self._inner_rustmap.parents(st)
- except ValueError:
+ l = len(st)
+ if l == self._nodelen * 2:
+ self._parents = (
+ st[: self._nodelen],
+ st[self._nodelen : 2 * self._nodelen],
+ )
+ elif l == 0:
+ self._parents = (
+ self._nodeconstants.nullid,
+ self._nodeconstants.nullid,
+ )
+ else:
raise error.Abort(
_(b'working directory state appears damaged!')
)
return self._parents
- def read(self):
+ @propertycache
+ def _rustmap(self):
+ """
+ Fills the Dirstatemap when called.
+ """
# ignore HG_PENDING because identity is used only for writing
self.identity = util.filestat.frompath(
self._opener.join(self._filename)
@@ -1873,18 +1877,24 @@
except IOError as err:
if err.errno != errno.ENOENT:
raise
- return
- if not st:
- return
+ st = b''
- parse_dirstate = util.nogc(self._rustmap.read)
- parents = parse_dirstate(st)
+ use_dirstate_tree = self._ui.configbool(
+ b"experimental",
+ b"dirstate-tree.in-memory",
+ False,
+ )
+ self._rustmap, parents = rustmod.DirstateMap.new(
+ use_dirstate_tree, st
+ )
+
if parents and not self._dirtyparents:
self.setparents(*parents)
self.__contains__ = self._rustmap.__contains__
self.__getitem__ = self._rustmap.__getitem__
self.get = self._rustmap.get
+ return self._rustmap
def write(self, st, now):
parents = self.parents()
@@ -1930,6 +1940,9 @@
otherparents = self._rustmap.other_parent_entries()
return otherparents
+ def non_normal_or_other_parent_paths(self):
+ return self._rustmap.non_normal_or_other_parent_paths()
+
@propertycache
def dirfoldmap(self):
f = {}
--- a/mercurial/discovery.py Sun May 02 16:56:20 2021 -0400
+++ b/mercurial/discovery.py Fri May 07 22:06:25 2021 -0400
@@ -12,7 +12,6 @@
from .i18n import _
from .node import (
hex,
- nullid,
short,
)
@@ -107,7 +106,7 @@
if missingroots:
discbases = []
for n in missingroots:
- discbases.extend([p for p in cl.parents(n) if p != nullid])
+ discbases.extend([p for p in cl.parents(n) if p != repo.nullid])
# TODO remove call to nodesbetween.
# TODO populate attributes on outgoing instance instead of setting
# discbases.
@@ -116,7 +115,7 @@
ancestorsof = heads
commonheads = [n for n in discbases if n not in included]
elif not commonheads:
- commonheads = [nullid]
+ commonheads = [repo.nullid]
self.commonheads = commonheads
self.ancestorsof = ancestorsof
self._revlog = cl
@@ -381,7 +380,7 @@
# - a local outgoing head descended from update
# - a remote head that's known locally and not
# ancestral to an outgoing head
- if remoteheads == [nullid]:
+ if remoteheads == [repo.nullid]:
# remote is empty, nothing to check.
return
--- a/mercurial/dispatch.py Sun May 02 16:56:20 2021 -0400
+++ b/mercurial/dispatch.py Fri May 07 22:06:25 2021 -0400
@@ -1064,6 +1064,16 @@
if req.earlyoptions[b'profile']:
for ui_ in uis:
ui_.setconfig(b'profiling', b'enabled', b'true', b'--profile')
+ elif req.earlyoptions[b'profile'] is False:
+ # Check for it being set already, so that we don't pollute the config
+ # with this when using chg in the very common case that it's not
+ # enabled.
+ if lui.configbool(b'profiling', b'enabled'):
+ # Only do this on lui so that `chg foo` with a user config setting
+ # profiling.enabled=1 still shows profiling information (chg will
+ # specify `--no-profile` when `hg serve` is starting up, we don't
+ # want that to propagate to every later invocation).
+ lui.setconfig(b'profiling', b'enabled', b'false', b'--no-profile')
profile = lui.configbool(b'profiling', b'enabled')
with profiling.profile(lui, enabled=profile) as profiler:
--- a/mercurial/exchange.py Sun May 02 16:56:20 2021 -0400
+++ b/mercurial/exchange.py Fri May 07 22:06:25 2021 -0400
@@ -13,7 +13,6 @@
from .i18n import _
from .node import (
hex,
- nullid,
nullrev,
)
from . import (
@@ -164,7 +163,7 @@
hasnode = cl.hasnode
common = [n for n in common if hasnode(n)]
else:
- common = [nullid]
+ common = [repo.nullid]
if not heads:
heads = cl.heads()
return discovery.outgoing(repo, common, heads)
@@ -1839,7 +1838,7 @@
if (
pullop.remote.capable(b'clonebundles')
and pullop.heads is None
- and list(pullop.common) == [nullid]
+ and list(pullop.common) == [pullop.repo.nullid]
):
kwargs[b'cbattempted'] = pullop.clonebundleattempted
@@ -1849,7 +1848,7 @@
pullop.repo.ui.status(_(b"no changes found\n"))
pullop.cgresult = 0
else:
- if pullop.heads is None and list(pullop.common) == [nullid]:
+ if pullop.heads is None and list(pullop.common) == [pullop.repo.nullid]:
pullop.repo.ui.status(_(b"requesting all changes\n"))
if obsolete.isenabled(pullop.repo, obsolete.exchangeopt):
remoteversions = bundle2.obsmarkersversion(pullop.remotebundle2caps)
@@ -1920,7 +1919,7 @@
pullop.cgresult = 0
return
tr = pullop.gettransaction()
- if pullop.heads is None and list(pullop.common) == [nullid]:
+ if pullop.heads is None and list(pullop.common) == [pullop.repo.nullid]:
pullop.repo.ui.status(_(b"requesting all changes\n"))
elif pullop.heads is None and pullop.remote.capable(b'changegroupsubset'):
# issue1320, avoid a race if remote changed after discovery
--- a/mercurial/exchangev2.py Sun May 02 16:56:20 2021 -0400
+++ b/mercurial/exchangev2.py Fri May 07 22:06:25 2021 -0400
@@ -11,10 +11,7 @@
import weakref
from .i18n import _
-from .node import (
- nullid,
- short,
-)
+from .node import short
from . import (
bookmarks,
error,
@@ -304,7 +301,7 @@
if set(remoteheads).issubset(common):
fetch = []
- common.discard(nullid)
+ common.discard(repo.nullid)
return common, fetch, remoteheads
@@ -413,7 +410,7 @@
# Linknode is always itself for changesets.
cset[b'node'],
# We always send full revisions. So delta base is not set.
- nullid,
+ repo.nullid,
mdiff.trivialdiffheader(len(data)) + data,
# Flags not yet supported.
0,
@@ -478,7 +475,7 @@
basenode = manifest[b'deltabasenode']
delta = extrafields[b'delta']
elif b'revision' in extrafields:
- basenode = nullid
+ basenode = repo.nullid
revision = extrafields[b'revision']
delta = mdiff.trivialdiffheader(len(revision)) + revision
else:
@@ -610,7 +607,7 @@
basenode = filerevision[b'deltabasenode']
delta = extrafields[b'delta']
elif b'revision' in extrafields:
- basenode = nullid
+ basenode = repo.nullid
revision = extrafields[b'revision']
delta = mdiff.trivialdiffheader(len(revision)) + revision
else:
@@ -705,7 +702,7 @@
basenode = filerevision[b'deltabasenode']
delta = extrafields[b'delta']
elif b'revision' in extrafields:
- basenode = nullid
+ basenode = repo.nullid
revision = extrafields[b'revision']
delta = mdiff.trivialdiffheader(len(revision)) + revision
else:
--- a/mercurial/filelog.py Sun May 02 16:56:20 2021 -0400
+++ b/mercurial/filelog.py Fri May 07 22:06:25 2021 -0400
@@ -8,10 +8,7 @@
from __future__ import absolute_import
from .i18n import _
-from .node import (
- nullid,
- nullrev,
-)
+from .node import nullrev
from . import (
error,
revlog,
@@ -21,18 +18,24 @@
util as interfaceutil,
)
from .utils import storageutil
+from .revlogutils import (
+ constants as revlog_constants,
+)
@interfaceutil.implementer(repository.ifilestorage)
class filelog(object):
def __init__(self, opener, path):
self._revlog = revlog.revlog(
- opener, b'/'.join((b'data', path + b'.i')), censorable=True
+ opener,
+ # XXX should use the unencoded path
+ target=(revlog_constants.KIND_FILELOG, path),
+ indexfile=b'/'.join((b'data', path + b'.i')),
+ censorable=True,
)
# Full name of the user visible file, relative to the repository root.
# Used by LFS.
self._revlog.filename = path
- self._revlog.revlog_kind = b'filelog'
self.nullid = self._revlog.nullid
def __len__(self):
@@ -42,7 +45,7 @@
return self._revlog.__iter__()
def hasnode(self, node):
- if node in (nullid, nullrev):
+ if node in (self.nullid, nullrev):
return False
try:
--- a/mercurial/filemerge.py Sun May 02 16:56:20 2021 -0400
+++ b/mercurial/filemerge.py Fri May 07 22:06:25 2021 -0400
@@ -15,7 +15,6 @@
from .i18n import _
from .node import (
hex,
- nullid,
short,
)
from .pycompat import (
@@ -111,7 +110,7 @@
return None
def filenode(self):
- return nullid
+ return self._ctx.repo().nullid
_customcmp = True
--- a/mercurial/help.py Sun May 02 16:56:20 2021 -0400
+++ b/mercurial/help.py Fri May 07 22:06:25 2021 -0400
@@ -540,6 +540,12 @@
TOPIC_CATEGORY_CONCEPTS,
),
(
+ [b"evolution"],
+ _(b"Safely rewriting history (EXPERIMENTAL)"),
+ loaddoc(b'evolution'),
+ TOPIC_CATEGORY_CONCEPTS,
+ ),
+ (
[b'scripting'],
_(b'Using Mercurial from scripts and automation'),
loaddoc(b'scripting'),
--- a/mercurial/helptext/config.txt Sun May 02 16:56:20 2021 -0400
+++ b/mercurial/helptext/config.txt Fri May 07 22:06:25 2021 -0400
@@ -5,7 +5,7 @@
===============
If you're having problems with your configuration,
-:hg:`config --debug` can help you understand what is introducing
+:hg:`config --source` can help you understand what is introducing
a setting into your environment.
See :hg:`help config.syntax` and :hg:`help config.files`
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/mercurial/helptext/evolution.txt Fri May 07 22:06:25 2021 -0400
@@ -0,0 +1,56 @@
+Obsolescence markers make it possible to mark changesets that have been
+deleted or superseded in a new version of the changeset.
+
+Unlike the previous way of handling such changes, by stripping the old
+changesets from the repository, obsolescence markers can be propagated
+between repositories. This allows for a safe and simple way of exchanging
+mutable history and altering it after the fact. Changeset phases are
+respected, such that only draft and secret changesets can be altered (see
+:hg:`help phases` for details).
+
+Obsolescence is tracked using "obsolescence markers", a piece of metadata
+tracking which changesets have been made obsolete, potential successors for
+a given changeset, the moment the changeset was marked as obsolete, and the
+user who performed the rewriting operation. The markers are stored
+separately from standard changeset data and can be exchanged without any of the
+precursor changesets, preventing unnecessary exchange of obsolescence data.
+
+The complete set of obsolescence markers describes a history of changeset
+modifications that is orthogonal to the repository history of file
+modifications. This changeset history allows for detection and automatic
+resolution of edge cases arising from multiple users rewriting the same part
+of history concurrently.
+
+Current feature status
+======================
+
+This feature is still in development.
+
+Instability
+===========
+
+Rewriting changesets might introduce instability.
+
+There are two main kinds of instability: orphaning and diverging.
+
+Orphans are changesets left behind when their ancestors are rewritten.
+Divergence has two variants:
+
+* Content-divergence occurs when independent rewrites of the same changesets
+ lead to different results.
+
+* Phase-divergence occurs when the old (obsolete) version of a changeset
+ becomes public.
+
+It is possible to prevent local creation of orphans by using the following config::
+
+ [experimental]
+ evolution.createmarkers = true
+ evolution.exchange = true
+
+To also allow rewriting changesets even when that creates orphans, enable
+``evolution.allowunstable`` explicitly::
+
+ [experimental]
+ evolution.createmarkers = true
+ evolution.exchange = true
+ evolution.allowunstable = true
--- a/mercurial/helptext/internals/changegroups.txt Sun May 02 16:56:20 2021 -0400
+++ b/mercurial/helptext/internals/changegroups.txt Fri May 07 22:06:25 2021 -0400
@@ -2,12 +2,13 @@
the changelog data, root/flat manifest data, treemanifest data, and
filelogs.
-There are 3 versions of changegroups: ``1``, ``2``, and ``3``. From a
+There are 4 versions of changegroups: ``1``, ``2``, ``3`` and ``4``. From a
high-level, versions ``1`` and ``2`` are almost exactly the same, with the
only difference being an additional item in the *delta header*. Version
``3`` adds support for storage flags in the *delta header* and optionally
exchanging treemanifests (enabled by setting an option on the
-``changegroup`` part in the bundle2).
+``changegroup`` part in the bundle2). Version ``4`` adds support for exchanging
+sidedata (additional revision metadata not part of the digest).
Changegroups when not exchanging treemanifests consist of 3 logical
segments::
@@ -74,8 +75,8 @@
entry (either that the recipient already has, or previously specified in the
bundle/changegroup).
-The *delta header* is different between versions ``1``, ``2``, and
-``3`` of the changegroup format.
+The *delta header* is different between versions ``1``, ``2``, ``3`` and ``4``
+of the changegroup format.
Version 1 (headerlen=80)::
@@ -104,6 +105,15 @@
| | | | | | |
+------------------------------------------------------------------------------+
+Version 4 (headerlen=103)::
+
+ +----------+------------------------------------------------------------------------------+
+ | | | | | | | |
+ | pflags | node | p1 node | p2 node | base node | link node | flags |
+ | (1 byte) | (20 bytes) | (20 bytes) | (20 bytes) | (20 bytes) | (20 bytes) | (2 bytes) |
+ | | | | | | | |
+ +----------+------------------------------------------------------------------------------+
+
The *delta data* consists of ``chunklen - 4 - headerlen`` bytes, which contain a
series of *delta*s, densely packed (no separators). These deltas describe a diff
from an existing entry (either that the recipient already has, or previously
@@ -140,12 +150,24 @@
Externally stored. The revision fulltext contains ``key:value`` ``\n``
delimited metadata defining an object stored elsewhere. Used by the LFS
extension.
+4096
+ Contains copy information. This revision changes files in a way that could
+ affect copy tracing. This does *not* affect changegroup handling, but is
+ relevant for other parts of Mercurial.
For historical reasons, the integer values are identical to revlog version 1
per-revision storage flags and correspond to bits being set in this 2-byte
field. Bits were allocated starting from the most-significant bit, hence the
reverse ordering and allocation of these flags.
+The *pflags* (protocol flags) field holds bitwise flags affecting the protocol
+itself. They are first in the header since they may affect the handling of the
+rest of the fields in a future version. They are defined as such:
+
+1 indicates whether to read a chunk of sidedata (of variable length) right
+ after the revision's delta data.
+
+
Changeset Segment
=================
@@ -166,9 +188,9 @@
Treemanifests Segment
---------------------
-The *treemanifests segment* only exists in changegroup version ``3``, and
-only if the 'treemanifest' param is part of the bundle2 changegroup part
-(it is not possible to use changegroup version 3 outside of bundle2).
+The *treemanifests segment* only exists in changegroup version ``3`` and ``4``,
+and only if the 'treemanifest' param is part of the bundle2 changegroup part
+(it is not possible to use changegroup version 3 or 4 outside of bundle2).
Aside from the filenames in the *treemanifests segment* containing a
trailing ``/`` character, it behaves identically to the *filelogs segment*
(see below). The final sub-segment is followed by an *empty chunk* (logically,
--- a/mercurial/hg.py Sun May 02 16:56:20 2021 -0400
+++ b/mercurial/hg.py Fri May 07 22:06:25 2021 -0400
@@ -16,8 +16,7 @@
from .i18n import _
from .node import (
hex,
- nullhex,
- nullid,
+ sha1nodeconstants,
short,
)
from .pycompat import getattr
@@ -772,7 +771,7 @@
},
).result()
- if rootnode != nullid:
+ if rootnode != sha1nodeconstants.nullid:
sharepath = os.path.join(sharepool, hex(rootnode))
else:
ui.status(
@@ -883,7 +882,9 @@
# we need to re-init the repo after manually copying the data
# into it
destpeer = peer(srcrepo, peeropts, dest)
- srcrepo.hook(b'outgoing', source=b'clone', node=nullhex)
+ srcrepo.hook(
+ b'outgoing', source=b'clone', node=srcrepo.nodeconstants.nullhex
+ )
else:
try:
# only pass ui when no srcrepo
@@ -1329,7 +1330,9 @@
for n in chlist:
if limit is not None and count >= limit:
break
- parents = [p for p in other.changelog.parents(n) if p != nullid]
+ parents = [
+ p for p in other.changelog.parents(n) if p != repo.nullid
+ ]
if opts.get(b'no_merges') and len(parents) == 2:
continue
count += 1
@@ -1406,7 +1409,7 @@
for n in revs:
if limit is not None and count >= limit:
break
- parents = [p for p in cl.parents(n) if p != nullid]
+ parents = [p for p in cl.parents(n) if p != repo.nullid]
if no_merges and len(parents) == 2:
continue
count += 1
--- a/mercurial/hgweb/webutil.py Sun May 02 16:56:20 2021 -0400
+++ b/mercurial/hgweb/webutil.py Fri May 07 22:06:25 2021 -0400
@@ -14,7 +14,7 @@
import re
from ..i18n import _
-from ..node import hex, nullid, short
+from ..node import hex, short
from ..pycompat import setattr
from .common import (
@@ -220,7 +220,7 @@
def _siblings(siblings=None, hiderev=None):
if siblings is None:
siblings = []
- siblings = [s for s in siblings if s.node() != nullid]
+ siblings = [s for s in siblings if s.node() != s.repo().nullid]
if len(siblings) == 1 and siblings[0].rev() == hiderev:
siblings = []
return templateutil.mappinggenerator(_ctxsgen, args=(siblings,))
@@ -316,12 +316,16 @@
yield {name: t}
-def showtag(repo, t1, node=nullid):
+def showtag(repo, t1, node=None):
+ if node is None:
+ node = repo.nullid
args = (repo.nodetags, node, b'tag')
return templateutil.mappinggenerator(_nodenamesgen, args=args, name=t1)
-def showbookmark(repo, t1, node=nullid):
+def showbookmark(repo, t1, node=None):
+ if node is None:
+ node = repo.nullid
args = (repo.nodebookmarks, node, b'bookmark')
return templateutil.mappinggenerator(_nodenamesgen, args=args, name=t1)
--- a/mercurial/interfaces/dirstate.py Sun May 02 16:56:20 2021 -0400
+++ b/mercurial/interfaces/dirstate.py Fri May 07 22:06:25 2021 -0400
@@ -2,8 +2,6 @@
import contextlib
-from .. import node as nodemod
-
from . import util as interfaceutil
@@ -97,7 +95,7 @@
def branch():
pass
- def setparents(p1, p2=nodemod.nullid):
+ def setparents(p1, p2=None):
"""Set dirstate parents to p1 and p2.
When moving from two parents to one, 'm' merged entries a
--- a/mercurial/interfaces/repository.py Sun May 02 16:56:20 2021 -0400
+++ b/mercurial/interfaces/repository.py Fri May 07 22:06:25 2021 -0400
@@ -27,14 +27,12 @@
REVISION_FLAG_CENSORED = 1 << 15
REVISION_FLAG_ELLIPSIS = 1 << 14
REVISION_FLAG_EXTSTORED = 1 << 13
-REVISION_FLAG_SIDEDATA = 1 << 12
-REVISION_FLAG_HASCOPIESINFO = 1 << 11
+REVISION_FLAG_HASCOPIESINFO = 1 << 12
REVISION_FLAGS_KNOWN = (
REVISION_FLAG_CENSORED
| REVISION_FLAG_ELLIPSIS
| REVISION_FLAG_EXTSTORED
- | REVISION_FLAG_SIDEDATA
| REVISION_FLAG_HASCOPIESINFO
)
@@ -457,6 +455,13 @@
"""Raw sidedata bytes for the given revision."""
)
+ protocol_flags = interfaceutil.Attribute(
+ """Single byte of integer flags that can influence the protocol.
+
+ This is a bitwise composition of the ``storageutil.CG_FLAG*`` constants.
+ """
+ )
+
class ifilerevisionssequence(interfaceutil.Interface):
"""Contains index data for all revisions of a file.
@@ -1851,7 +1856,9 @@
def savecommitmessage(text):
pass
- def register_sidedata_computer(kind, category, keys, computer):
+ def register_sidedata_computer(
+ kind, category, keys, computer, flags, replace=False
+ ):
pass
def register_wanted_sidedata(category):
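
The renumbering above is easy to miss: with `REVISION_FLAG_SIDEDATA` deleted, `REVISION_FLAG_HASCOPIESINFO` moves from bit 11 to bit 12, and the known-flags mask shrinks accordingly. A quick sanity check of the resulting values:

REVISION_FLAG_CENSORED = 1 << 15
REVISION_FLAG_ELLIPSIS = 1 << 14
REVISION_FLAG_EXTSTORED = 1 << 13
REVISION_FLAG_HASCOPIESINFO = 1 << 12  # previously 1 << 11

REVISION_FLAGS_KNOWN = (
    REVISION_FLAG_CENSORED
    | REVISION_FLAG_ELLIPSIS
    | REVISION_FLAG_EXTSTORED
    | REVISION_FLAG_HASCOPIESINFO
)
assert REVISION_FLAGS_KNOWN == 0xF000  # bits 12..15; bit 11 is free again
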
--- a/mercurial/localrepo.py Sun May 02 16:56:20 2021 -0400
+++ b/mercurial/localrepo.py Fri May 07 22:06:25 2021 -0400
@@ -19,7 +19,6 @@
from .node import (
bin,
hex,
- nullid,
nullrev,
sha1nodeconstants,
short,
@@ -50,7 +49,6 @@
match as matchmod,
mergestate as mergestatemod,
mergeutil,
- metadata as metadatamod,
namespaces,
narrowspec,
obsolete,
@@ -91,6 +89,7 @@
from .revlogutils import (
concurrency_checker as revlogchecker,
constants as revlogconst,
+ sidedata as sidedatamod,
)
release = lockmod.release
@@ -1408,7 +1407,7 @@
self._wanted_sidedata = set()
self._sidedata_computers = {}
- metadatamod.set_sidedata_spec_for_repo(self)
+ sidedatamod.set_sidedata_spec_for_repo(self)
def _getvfsward(self, origfunc):
"""build a ward for self.vfs"""
@@ -1702,7 +1701,7 @@
_(b"warning: ignoring unknown working parent %s!\n")
% short(node)
)
- return nullid
+ return self.nullid
@storecache(narrowspec.FILENAME)
def narrowpats(self):
@@ -1753,9 +1752,9 @@
@unfilteredpropertycache
def _quick_access_changeid_null(self):
return {
- b'null': (nullrev, nullid),
- nullrev: (nullrev, nullid),
- nullid: (nullrev, nullid),
+ b'null': (nullrev, self.nodeconstants.nullid),
+ nullrev: (nullrev, self.nodeconstants.nullid),
+ self.nullid: (nullrev, self.nullid),
}
@unfilteredpropertycache
@@ -1765,7 +1764,7 @@
quick = self._quick_access_changeid_null.copy()
cl = self.unfiltered().changelog
for node in self.dirstate.parents():
- if node == nullid:
+ if node == self.nullid:
continue
rev = cl.index.get_rev(node)
if rev is None:
@@ -1785,7 +1784,7 @@
quick[r] = pair
quick[n] = pair
p1node = self.dirstate.p1()
- if p1node != nullid:
+ if p1node != self.nullid:
quick[b'.'] = quick[p1node]
return quick
@@ -1841,7 +1840,7 @@
# when we know that '.' won't be hidden
node = self.dirstate.p1()
rev = self.unfiltered().changelog.rev(node)
- elif len(changeid) == 20:
+ elif len(changeid) == self.nodeconstants.nodelen:
try:
node = changeid
rev = self.changelog.rev(changeid)
@@ -1862,7 +1861,7 @@
changeid = hex(changeid) # for the error message
raise
- elif len(changeid) == 40:
+ elif len(changeid) == 2 * self.nodeconstants.nodelen:
node = bin(changeid)
rev = self.changelog.rev(node)
else:
@@ -2037,7 +2036,7 @@
# local encoding.
tags = {}
for (name, (node, hist)) in pycompat.iteritems(alltags):
- if node != nullid:
+ if node != self.nullid:
tags[encoding.tolocal(name)] = node
tags[b'tip'] = self.changelog.tip()
tagtypes = {
@@ -2161,7 +2160,9 @@
def wjoin(self, f, *insidef):
return self.vfs.reljoin(self.root, f, *insidef)
- def setparents(self, p1, p2=nullid):
+ def setparents(self, p1, p2=None):
+ if p2 is None:
+ p2 = self.nullid
self[None].setparents(p1, p2)
self._quick_access_changeid_invalidate()
@@ -3094,7 +3095,7 @@
subrepoutil.writestate(self, newstate)
p1, p2 = self.dirstate.parents()
- hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or b'')
+ hookp1, hookp2 = hex(p1), (p2 != self.nullid and hex(p2) or b'')
try:
self.hook(
b"precommit", throw=True, parent1=hookp1, parent2=hookp2
@@ -3267,7 +3268,7 @@
t = n
while True:
p = self.changelog.parents(n)
- if p[1] != nullid or p[0] == nullid:
+ if p[1] != self.nullid or p[0] == self.nullid:
b.append((t, n, p[0], p[1]))
break
n = p[0]
@@ -3280,7 +3281,7 @@
n, l, i = top, [], 0
f = 1
- while n != bottom and n != nullid:
+ while n != bottom and n != self.nullid:
p = self.changelog.parents(n)[0]
if i == f:
l.append(n)
@@ -3364,20 +3365,32 @@
return self.pathto(fp.name[len(self.root) + 1 :])
def register_wanted_sidedata(self, category):
+ if requirementsmod.REVLOGV2_REQUIREMENT not in self.requirements:
+ # Only revlogv2 repos can want sidedata.
+ return
self._wanted_sidedata.add(pycompat.bytestr(category))
- def register_sidedata_computer(self, kind, category, keys, computer):
- if kind not in (b"changelog", b"manifest", b"filelog"):
+ def register_sidedata_computer(
+ self, kind, category, keys, computer, flags, replace=False
+ ):
+ if kind not in revlogconst.ALL_KINDS:
msg = _(b"unexpected revlog kind '%s'.")
raise error.ProgrammingError(msg % kind)
category = pycompat.bytestr(category)
- if category in self._sidedata_computers.get(kind, []):
+ already_registered = category in self._sidedata_computers.get(kind, [])
+ if already_registered and not replace:
msg = _(
b"cannot register a sidedata computer twice for category '%s'."
)
raise error.ProgrammingError(msg % category)
+ if replace and not already_registered:
+ msg = _(
+ b"cannot replace a sidedata computer that isn't registered "
+ b"for category '%s'."
+ )
+ raise error.ProgrammingError(msg % category)
self._sidedata_computers.setdefault(kind, {})
- self._sidedata_computers[kind][category] = (keys, computer)
+ self._sidedata_computers[kind][category] = (keys, computer, flags)
# used to avoid circular references so destructors work
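
The registration rules above (reject a duplicate unless `replace=True`, reject `replace=True` when nothing is registered) can be exercised in isolation. A self-contained sketch mirroring `register_sidedata_computer`; all names are hypothetical:

KIND_CHANGELOG = 1001
ALL_KINDS = {KIND_CHANGELOG}

class SidedataRegistry(object):
    def __init__(self):
        self._computers = {}

    def register(self, kind, category, keys, computer, flags, replace=False):
        if kind not in ALL_KINDS:
            raise RuntimeError("unexpected revlog kind %r" % kind)
        already_registered = category in self._computers.get(kind, {})
        if already_registered and not replace:
            raise RuntimeError("computer registered twice for %r" % category)
        if replace and not already_registered:
            raise RuntimeError("no computer to replace for %r" % category)
        self._computers.setdefault(kind, {})[category] = (keys, computer, flags)

def computer(repo, store, rev, old_sidedata):
    # contract: return (sidedata dict, (flags to add, flags to remove))
    return {}, (0, 0)

reg = SidedataRegistry()
reg.register(KIND_CHANGELOG, b'cat', (b'cat',), computer, flags=0)
reg.register(KIND_CHANGELOG, b'cat', (b'cat',), computer, flags=0, replace=True)
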
--- a/mercurial/logcmdutil.py Sun May 02 16:56:20 2021 -0400
+++ b/mercurial/logcmdutil.py Fri May 07 22:06:25 2021 -0400
@@ -12,12 +12,7 @@
import posixpath
from .i18n import _
-from .node import (
- nullid,
- nullrev,
- wdirid,
- wdirrev,
-)
+from .node import nullrev, wdirrev
from .thirdparty import attr
@@ -357,7 +352,7 @@
if self.ui.debugflag:
mnode = ctx.manifestnode()
if mnode is None:
- mnode = wdirid
+ mnode = self.repo.nodeconstants.wdirid
mrev = wdirrev
else:
mrev = self.repo.manifestlog.rev(mnode)
@@ -505,7 +500,11 @@
)
if self.ui.debugflag or b'manifest' in datahint:
- fm.data(manifest=fm.hexfunc(ctx.manifestnode() or wdirid))
+ fm.data(
+ manifest=fm.hexfunc(
+ ctx.manifestnode() or self.repo.nodeconstants.wdirid
+ )
+ )
if self.ui.debugflag or b'extra' in datahint:
fm.data(extra=fm.formatdict(ctx.extra()))
@@ -991,7 +990,7 @@
"""Return the initial set of revisions to be filtered or followed"""
if wopts.revspec:
revs = scmutil.revrange(repo, wopts.revspec)
- elif wopts.follow and repo.dirstate.p1() == nullid:
+ elif wopts.follow and repo.dirstate.p1() == repo.nullid:
revs = smartset.baseset()
elif wopts.follow:
revs = repo.revs(b'.')
--- a/mercurial/manifest.py Sun May 02 16:56:20 2021 -0400
+++ b/mercurial/manifest.py Fri May 07 22:06:25 2021 -0400
@@ -16,7 +16,6 @@
from .node import (
bin,
hex,
- nullid,
nullrev,
)
from .pycompat import getattr
@@ -35,6 +34,9 @@
repository,
util as interfaceutil,
)
+from .revlogutils import (
+ constants as revlog_constants,
+)
parsers = policy.importmod('parsers')
propertycache = util.propertycache
@@ -43,7 +45,7 @@
FASTDELTA_TEXTDIFF_THRESHOLD = 1000
-def _parse(data):
+def _parse(nodelen, data):
# This method does a little bit of excessive-looking
# precondition checking. This is so that the behavior of this
# class exactly matches its C counterpart to try and help
@@ -64,7 +66,7 @@
nl -= 1
else:
flags = b''
- if nl not in (40, 64):
+ if nl != 2 * nodelen:
raise ValueError(b'Invalid manifest line')
yield f, bin(n), flags
@@ -132,7 +134,7 @@
else:
hlen = nlpos - zeropos - 1
flags = b''
- if hlen not in (40, 64):
+ if hlen != 2 * self.lm._nodelen:
raise error.StorageError(b'Invalid manifest line')
hashval = unhexlify(
data, self.lm.extrainfo[self.pos], zeropos + 1, hlen
@@ -177,12 +179,14 @@
def __init__(
self,
+ nodelen,
data,
positions=None,
extrainfo=None,
extradata=None,
hasremovals=False,
):
+ self._nodelen = nodelen
if positions is None:
self.positions = self.findlines(data)
self.extrainfo = [0] * len(self.positions)
@@ -289,7 +293,7 @@
hlen -= 1
else:
flags = b''
- if hlen not in (40, 64):
+ if hlen != 2 * self._nodelen:
raise error.StorageError(b'Invalid manifest line')
hashval = unhexlify(data, self.extrainfo[needle], zeropos + 1, hlen)
return (hashval, flags)
@@ -345,6 +349,7 @@
def copy(self):
# XXX call _compact like in C?
return _lazymanifest(
+ self._nodelen,
self.data,
self.positions,
self.extrainfo,
@@ -455,7 +460,7 @@
def filtercopy(self, filterfn):
# XXX should be optimized
- c = _lazymanifest(b'')
+ c = _lazymanifest(self._nodelen, b'')
for f, n, fl in self.iterentries():
if filterfn(f):
c[f] = n, fl
@@ -470,8 +475,9 @@
@interfaceutil.implementer(repository.imanifestdict)
class manifestdict(object):
- def __init__(self, data=b''):
- self._lm = _lazymanifest(data)
+ def __init__(self, nodelen, data=b''):
+ self._nodelen = nodelen
+ self._lm = _lazymanifest(nodelen, data)
def __getitem__(self, key):
return self._lm[key][0]
@@ -579,14 +585,14 @@
return self.copy()
if self._filesfastpath(match):
- m = manifestdict()
+ m = manifestdict(self._nodelen)
lm = self._lm
for fn in match.files():
if fn in lm:
m._lm[fn] = lm[fn]
return m
- m = manifestdict()
+ m = manifestdict(self._nodelen)
m._lm = self._lm.filtercopy(match)
return m
@@ -629,7 +635,7 @@
return b''
def copy(self):
- c = manifestdict()
+ c = manifestdict(self._nodelen)
c._lm = self._lm.copy()
return c
@@ -795,7 +801,8 @@
def __init__(self, nodeconstants, dir=b'', text=b''):
self._dir = dir
self.nodeconstants = nodeconstants
- self._node = nullid
+ self._node = self.nodeconstants.nullid
+ self._nodelen = self.nodeconstants.nodelen
self._loadfunc = _noop
self._copyfunc = _noop
self._dirty = False
@@ -1323,7 +1330,7 @@
def parse(self, text, readsubtree):
selflazy = self._lazydirs
- for f, n, fl in _parse(text):
+ for f, n, fl in _parse(self._nodelen, text):
if fl == b't':
f = f + b'/'
# False below means "doesn't need to be copied" and can use the
@@ -1391,7 +1398,7 @@
continue
subp1 = getnode(m1, d)
subp2 = getnode(m2, d)
- if subp1 == nullid:
+ if subp1 == self.nodeconstants.nullid:
subp1, subp2 = subp2, subp1
writesubtree(subm, subp1, subp2, match)
@@ -1606,7 +1613,8 @@
self._revlog = revlog.revlog(
opener,
- indexfile,
+ target=(revlog_constants.KIND_MANIFESTLOG, self.tree),
+ indexfile=indexfile,
# only root indexfile is cached
checkambig=not bool(tree),
mmaplargeindex=True,
@@ -1617,7 +1625,6 @@
self.index = self._revlog.index
self.version = self._revlog.version
self._generaldelta = self._revlog._generaldelta
- self._revlog.revlog_kind = b'manifest'
def _setupmanifestcachehooks(self, repo):
"""Persist the manifestfulltextcache on lock release"""
@@ -1994,7 +2001,7 @@
else:
m = manifestctx(self, node)
- if node != nullid:
+ if node != self.nodeconstants.nullid:
mancache = self._dirmancache.get(tree)
if not mancache:
mancache = util.lrucachedict(self._cachesize)
@@ -2020,7 +2027,7 @@
class memmanifestctx(object):
def __init__(self, manifestlog):
self._manifestlog = manifestlog
- self._manifestdict = manifestdict()
+ self._manifestdict = manifestdict(manifestlog.nodeconstants.nodelen)
def _storage(self):
return self._manifestlog.getstorage(b'')
@@ -2082,8 +2089,9 @@
def read(self):
if self._data is None:
- if self._node == nullid:
- self._data = manifestdict()
+ nc = self._manifestlog.nodeconstants
+ if self._node == nc.nullid:
+ self._data = manifestdict(nc.nodelen)
else:
store = self._storage()
if self._node in store.fulltextcache:
@@ -2092,7 +2100,7 @@
text = store.revision(self._node)
arraytext = bytearray(text)
store.fulltextcache[self._node] = arraytext
- self._data = manifestdict(text)
+ self._data = manifestdict(nc.nodelen, text)
return self._data
def readfast(self, shallow=False):
@@ -2119,7 +2127,7 @@
store = self._storage()
r = store.rev(self._node)
d = mdiff.patchtext(store.revdiff(store.deltaparent(r), r))
- return manifestdict(d)
+ return manifestdict(store.nodeconstants.nodelen, d)
def find(self, key):
return self.read().find(key)
@@ -2188,7 +2196,7 @@
def read(self):
if self._data is None:
store = self._storage()
- if self._node == nullid:
+ if self._node == self._manifestlog.nodeconstants.nullid:
self._data = treemanifest(self._manifestlog.nodeconstants)
# TODO accessing non-public API
elif store._treeondisk:
@@ -2245,7 +2253,7 @@
if shallow:
r = store.rev(self._node)
d = mdiff.patchtext(store.revdiff(store.deltaparent(r), r))
- return manifestdict(d)
+ return manifestdict(store.nodeconstants.nodelen, d)
else:
# Need to perform a slow delta
r0 = store.deltaparent(store.rev(self._node))
@@ -2274,7 +2282,9 @@
return self.readdelta(shallow=shallow)
if shallow:
- return manifestdict(store.revision(self._node))
+ return manifestdict(
+ store.nodeconstants.nodelen, store.revision(self._node)
+ )
else:
return self.read()
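
Threading `nodelen` through `_parse` and `_lazymanifest` turns the hard-coded `(40, 64)` hash-length check into `2 * nodelen`. A rough standalone approximation of the line format being validated, assuming the usual `path\0hex(node)[flag]` layout, one entry per line:

import binascii

def parse_manifest(nodelen, data):
    # each line: b'<path>\x00<hex node>[flag]\n'; the hex node must be
    # exactly 2 * nodelen characters (40 for SHA-1, 64 for a 256-bit hash)
    for line in data.splitlines():
        path, rest = line.split(b'\x00', 1)
        hexnode, flags = rest[: 2 * nodelen], rest[2 * nodelen :]
        if len(hexnode) != 2 * nodelen or flags not in (b'', b'l', b'x', b't'):
            raise ValueError('Invalid manifest line')
        yield path, binascii.unhexlify(hexnode), flags

text = b'foo\x00' + b'11' * 20 + b'\nbar\x00' + b'22' * 20 + b'x\n'
entries = list(parse_manifest(20, text))
assert entries[0][2] == b'' and entries[1][2] == b'x'
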
--- a/mercurial/merge.py Sun May 02 16:56:20 2021 -0400
+++ b/mercurial/merge.py Fri May 07 22:06:25 2021 -0400
@@ -13,12 +13,7 @@
import struct
from .i18n import _
-from .node import (
- addednodeid,
- modifiednodeid,
- nullid,
- nullrev,
-)
+from .node import nullrev
from .thirdparty import attr
from .utils import stringutil
from . import (
@@ -779,7 +774,7 @@
# to flag the change. If wctx is a committed revision, we shouldn't
# care for the dirty state of the working directory.
if any(wctx.sub(s).dirty() for s in wctx.substate):
- m1[b'.hgsubstate'] = modifiednodeid
+ m1[b'.hgsubstate'] = repo.nodeconstants.modifiednodeid
# Don't use m2-vs-ma optimization if:
# - ma is the same as m1 or m2, which we're just going to diff again later
@@ -944,7 +939,7 @@
mresult.addcommitinfo(
f, b'merge-removal-candidate', b'yes'
)
- elif n1 == addednodeid:
+ elif n1 == repo.nodeconstants.addednodeid:
# This file was locally added. We should forget it instead of
# deleting it.
mresult.addfile(
@@ -1785,7 +1780,7 @@
if (
fsmonitorwarning
and not fsmonitorenabled
- and p1node == nullid
+ and p1node == repo.nullid
and num_gets >= fsmonitorthreshold
and pycompat.sysplatform.startswith((b'linux', b'darwin'))
):
@@ -1913,7 +1908,7 @@
else:
if repo.ui.configlist(b'merge', b'preferancestor') == [b'*']:
cahs = repo.changelog.commonancestorsheads(p1.node(), p2.node())
- pas = [repo[anc] for anc in (sorted(cahs) or [nullid])]
+ pas = [repo[anc] for anc in (sorted(cahs) or [repo.nullid])]
else:
pas = [p1.ancestor(p2, warn=branchmerge)]
@@ -2112,7 +2107,7 @@
### apply phase
if not branchmerge: # just jump to the new rev
- fp1, fp2, xp1, xp2 = fp2, nullid, xp2, b''
+ fp1, fp2, xp1, xp2 = fp2, repo.nullid, xp2, b''
# If we're doing a partial update, we need to skip updating
# the dirstate.
always = matcher is None or matcher.always()
@@ -2281,14 +2276,14 @@
if keepconflictparent and stats.unresolvedcount:
pother = ctx.node()
else:
- pother = nullid
+ pother = repo.nullid
parents = ctx.parents()
if keepparent and len(parents) == 2 and base in parents:
parents.remove(base)
pother = parents[0].node()
# Never set both parents equal to each other
if pother == pctx.node():
- pother = nullid
+ pother = repo.nullid
if wctx.isinmemory():
wctx.setparents(pctx.node(), pother)
--- a/mercurial/mergestate.py Sun May 02 16:56:20 2021 -0400
+++ b/mercurial/mergestate.py Fri May 07 22:06:25 2021 -0400
@@ -9,7 +9,6 @@
from .node import (
bin,
hex,
- nullhex,
nullrev,
)
from . import (
@@ -32,7 +31,7 @@
def _filectxorabsent(hexnode, ctx, f):
- if hexnode == nullhex:
+ if hexnode == ctx.repo().nodeconstants.nullhex:
return filemerge.absentfilectx(ctx, f)
else:
return ctx[f]
@@ -248,7 +247,7 @@
note: also write the local version to the `.hg/merge` directory.
"""
if fcl.isabsent():
- localkey = nullhex
+ localkey = self._repo.nodeconstants.nullhex
else:
localkey = mergestate.getlocalkey(fcl.path())
self._make_backup(fcl, localkey)
@@ -354,7 +353,7 @@
flags = flo
if preresolve:
# restore local
- if localkey != nullhex:
+ if localkey != self._repo.nodeconstants.nullhex:
self._restore_backup(wctx[dfile], localkey, flags)
else:
wctx[dfile].remove(ignoremissing=True)
@@ -658,7 +657,10 @@
records.append(
(RECORD_PATH_CONFLICT, b'\0'.join([filename] + v))
)
- elif v[1] == nullhex or v[6] == nullhex:
+ elif (
+ v[1] == self._repo.nodeconstants.nullhex
+ or v[6] == self._repo.nodeconstants.nullhex
+ ):
# Change/Delete or Delete/Change conflicts. These are stored in
# 'C' records. v[1] is the local file, and is nullhex when the
# file is deleted locally ('dc'). v[6] is the remote file, and
--- a/mercurial/metadata.py Sun May 02 16:56:20 2021 -0400
+++ b/mercurial/metadata.py Fri May 07 22:06:25 2021 -0400
@@ -11,14 +11,9 @@
import multiprocessing
import struct
-from .node import (
- nullid,
- nullrev,
-)
+from .node import nullrev
from . import (
error,
- pycompat,
- requirements as requirementsmod,
util,
)
@@ -617,7 +612,7 @@
if f in ctx:
fctx = ctx[f]
parents = fctx._filelog.parents(fctx._filenode)
- if parents[1] != nullid:
+ if parents[1] != ctx.repo().nullid:
merged.append(f)
return merged
@@ -822,26 +817,9 @@
def copies_sidedata_computer(repo, revlog, rev, existing_sidedata):
- return _getsidedata(repo, rev)[0]
-
-
-def set_sidedata_spec_for_repo(repo):
- if requirementsmod.COPIESSDC_REQUIREMENT in repo.requirements:
- repo.register_wanted_sidedata(sidedatamod.SD_FILES)
- repo.register_sidedata_computer(
- b"changelog",
- sidedatamod.SD_FILES,
- (sidedatamod.SD_FILES,),
- copies_sidedata_computer,
- )
-
-
-def getsidedataadder(srcrepo, destrepo):
- use_w = srcrepo.ui.configbool(b'experimental', b'worker.repository-upgrade')
- if pycompat.iswindows or not use_w:
- return _get_simple_sidedata_adder(srcrepo, destrepo)
- else:
- return _get_worker_sidedata_adder(srcrepo, destrepo)
+ sidedata, has_copies_info = _getsidedata(repo, rev)
+ flags_to_add = sidedataflag.REVIDX_HASCOPIESINFO if has_copies_info else 0
+ return sidedata, (flags_to_add, 0)
def _sidedata_worker(srcrepo, revs_queue, sidedata_queue, tokens):
@@ -910,57 +888,21 @@
# received, we shelve 43 for later use.
staging = {}
- def sidedata_companion(revlog, rev):
- data = {}, False
- if util.safehasattr(revlog, b'filteredrevs'): # this is a changelog
- # Is the data previously shelved ?
- data = staging.pop(rev, None)
- if data is None:
- # look at the queued result until we find the one we are lookig
- # for (shelve the other ones)
+ def sidedata_companion(repo, revlog, rev, old_sidedata):
+ # Is the data previously shelved?
+ data = staging.pop(rev, None)
+ if data is None:
+ # look at the queued result until we find the one we are looking
+ # for (shelve the other ones)
+ r, data = sidedataq.get()
+ while r != rev:
+ staging[r] = data
r, data = sidedataq.get()
- while r != rev:
- staging[r] = data
- r, data = sidedataq.get()
- tokens.release()
+ tokens.release()
sidedata, has_copies_info = data
new_flag = 0
if has_copies_info:
new_flag = sidedataflag.REVIDX_HASCOPIESINFO
- return False, (), sidedata, new_flag, 0
+ return sidedata, (new_flag, 0)
return sidedata_companion
-
-
-def _get_simple_sidedata_adder(srcrepo, destrepo):
- """The simple version of the sidedata computation
-
- It just compute it in the same thread on request"""
-
- def sidedatacompanion(revlog, rev):
- sidedata, has_copies_info = {}, False
- if util.safehasattr(revlog, 'filteredrevs'): # this is a changelog
- sidedata, has_copies_info = _getsidedata(srcrepo, rev)
- new_flag = 0
- if has_copies_info:
- new_flag = sidedataflag.REVIDX_HASCOPIESINFO
-
- return False, (), sidedata, new_flag, 0
-
- return sidedatacompanion
-
-
-def getsidedataremover(srcrepo, destrepo):
- def sidedatacompanion(revlog, rev):
- f = ()
- if util.safehasattr(revlog, 'filteredrevs'): # this is a changelog
- if revlog.flags(rev) & sidedataflag.REVIDX_SIDEDATA:
- f = (
- sidedatamod.SD_P1COPIES,
- sidedatamod.SD_P2COPIES,
- sidedatamod.SD_FILESADDED,
- sidedatamod.SD_FILESREMOVED,
- )
- return False, f, {}, 0, sidedataflag.REVIDX_HASCOPIESINFO
-
- return sidedatacompanion
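
Both metadata.py hunks converge on the computer contract used throughout this patch: a callable `(repo, store, rev, old_sidedata)` returning `(sidedata, (flags_to_add, flags_to_remove))`. A minimal conforming sketch, with the actual copy detection stubbed out:

REVIDX_HASCOPIESINFO = 1 << 12  # value after the renumbering in this patch

def copies_sidedata_computer(repo, store, rev, old_sidedata):
    # stand-in for _getsidedata(repo, rev): (sidedata dict, bool)
    sidedata, has_copies_info = {}, False
    flags_to_add = REVIDX_HASCOPIESINFO if has_copies_info else 0
    return sidedata, (flags_to_add, 0)

assert copies_sidedata_computer(None, None, 0, {}) == ({}, (0, 0))
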
--- a/mercurial/obsolete.py Sun May 02 16:56:20 2021 -0400
+++ b/mercurial/obsolete.py Fri May 07 22:06:25 2021 -0400
@@ -73,11 +73,14 @@
import struct
from .i18n import _
+from .node import (
+ bin,
+ hex,
+)
from .pycompat import getattr
-from .node import (
-    bin,
-    hex,
-    nullid,
-)
from . import (
encoding,
@@ -103,6 +106,7 @@
# Options for obsolescence
createmarkersopt = b'createmarkers'
allowunstableopt = b'allowunstable'
+allowdivergenceopt = b'allowdivergence'
exchangeopt = b'exchange'
@@ -141,10 +145,13 @@
createmarkersvalue = _getoptionvalue(repo, createmarkersopt)
unstablevalue = _getoptionvalue(repo, allowunstableopt)
+ divergencevalue = _getoptionvalue(repo, allowdivergenceopt)
exchangevalue = _getoptionvalue(repo, exchangeopt)
# createmarkers must be enabled if other options are enabled
- if (unstablevalue or exchangevalue) and not createmarkersvalue:
+ if (
+ unstablevalue or divergencevalue or exchangevalue
+ ) and not createmarkersvalue:
raise error.Abort(
_(
b"'createmarkers' obsolete option must be enabled "
@@ -155,6 +162,7 @@
return {
createmarkersopt: createmarkersvalue,
allowunstableopt: unstablevalue,
+ allowdivergenceopt: divergencevalue,
exchangeopt: exchangevalue,
}
@@ -526,14 +534,14 @@
children.setdefault(p, set()).add(mark)
-def _checkinvalidmarkers(markers):
+def _checkinvalidmarkers(repo, markers):
"""search for marker with invalid data and raise error if needed
Exists as a separate function to allow the evolve extension a more
subtle handling.
"""
for mark in markers:
- if nullid in mark[1]:
+ if repo.nullid in mark[1]:
raise error.Abort(
_(
b'bad obsolescence marker detected: '
@@ -727,7 +735,7 @@
return []
self._version, markers = _readmarkers(data)
markers = list(markers)
- _checkinvalidmarkers(markers)
+ _checkinvalidmarkers(self.repo, markers)
return markers
@propertycache
@@ -761,7 +769,7 @@
_addpredecessors(self.predecessors, markers)
if self._cached('children'):
_addchildren(self.children, markers)
- _checkinvalidmarkers(markers)
+ _checkinvalidmarkers(self.repo, markers)
def relevantmarkers(self, nodes):
"""return a set of all obsolescence markers relevant to a set of nodes.
--- a/mercurial/patch.py Sun May 02 16:56:20 2021 -0400
+++ b/mercurial/patch.py Fri May 07 22:06:25 2021 -0400
@@ -20,7 +20,7 @@
from .i18n import _
from .node import (
hex,
- nullhex,
+ sha1nodeconstants,
short,
)
from .pycompat import open
@@ -3100,8 +3100,8 @@
ctx1, fctx1, path1, flag1, content1, date1 = data1
ctx2, fctx2, path2, flag2, content2, date2 = data2
- index1 = _gitindex(content1) if path1 in ctx1 else nullhex
- index2 = _gitindex(content2) if path2 in ctx2 else nullhex
+ index1 = _gitindex(content1) if path1 in ctx1 else sha1nodeconstants.nullhex
+ index2 = _gitindex(content2) if path2 in ctx2 else sha1nodeconstants.nullhex
if binary and opts.git and not opts.nobinary:
text = mdiff.b85diff(content1, content2)
if text:
--- a/mercurial/phases.py Sun May 02 16:56:20 2021 -0400
+++ b/mercurial/phases.py Fri May 07 22:06:25 2021 -0400
@@ -109,7 +109,6 @@
from .node import (
bin,
hex,
- nullid,
nullrev,
short,
wdirrev,
@@ -862,7 +861,7 @@
node = bin(nhex)
phase = int(phase)
if phase == public:
- if node != nullid:
+ if node != repo.nullid:
repo.ui.warn(
_(
b'ignoring inconsistent public root'
@@ -919,10 +918,10 @@
rev = cl.index.get_rev
if not roots:
return heads
- if not heads or heads == [nullid]:
+ if not heads or heads == [repo.nullid]:
return []
# The logic operated on revisions, convert arguments early for convenience
- new_heads = {rev(n) for n in heads if n != nullid}
+ new_heads = {rev(n) for n in heads if n != repo.nullid}
roots = [rev(n) for n in roots]
# compute the area we need to remove
affected_zone = repo.revs(b"(%ld::%ld)", roots, new_heads)
--- a/mercurial/policy.py Sun May 02 16:56:20 2021 -0400
+++ b/mercurial/policy.py Fri May 07 22:06:25 2021 -0400
@@ -80,7 +80,7 @@
('cext', 'bdiff'): 3,
('cext', 'mpatch'): 1,
('cext', 'osutil'): 4,
- ('cext', 'parsers'): 17,
+ ('cext', 'parsers'): 18,
}
# map import request to other package or module
--- a/mercurial/pure/parsers.py Sun May 02 16:56:20 2021 -0400
+++ b/mercurial/pure/parsers.py Fri May 07 22:06:25 2021 -0400
@@ -10,7 +10,10 @@
import struct
import zlib
-from ..node import nullid, nullrev
+from ..node import (
+ nullrev,
+ sha1nodeconstants,
+)
from .. import (
pycompat,
util,
@@ -50,7 +53,7 @@
# Size of a C long int, platform independent
int_size = struct.calcsize(b'>i')
# An empty index entry, used as a default value to be overridden, or nullrev
- null_item = (0, 0, 0, -1, -1, -1, -1, nullid)
+ null_item = (0, 0, 0, -1, -1, -1, -1, sha1nodeconstants.nullid)
@util.propertycache
def entry_size(self):
@@ -64,7 +67,7 @@
@util.propertycache
def _nodemap(self):
- nodemap = nodemaputil.NodeMap({nullid: nullrev})
+ nodemap = nodemaputil.NodeMap({sha1nodeconstants.nullid: nullrev})
for r in range(0, len(self)):
n = self[r][7]
nodemap[n] = r
@@ -124,10 +127,27 @@
r = (offset_type(0, gettype(r[0])),) + r[1:]
return r
+ def pack_header(self, header):
+ """pack header information as binary"""
+ v_fmt = revlog_constants.INDEX_HEADER
+ return v_fmt.pack(header)
+
+ def entry_binary(self, rev):
+ """return the raw binary string representing a revision"""
+ entry = self[rev]
+ p = revlog_constants.INDEX_ENTRY_V1.pack(*entry)
+ if rev == 0:
+ p = p[revlog_constants.INDEX_HEADER.size :]
+ return p
+
class IndexObject(BaseIndexObject):
def __init__(self, data):
- assert len(data) % self.entry_size == 0
+ assert len(data) % self.entry_size == 0, (
+ len(data),
+ self.entry_size,
+ len(data) % self.entry_size,
+ )
self._data = data
self._lgt = len(data) // self.entry_size
self._extra = []
@@ -246,9 +266,11 @@
class Index2Mixin(object):
index_format = revlog_constants.INDEX_ENTRY_V2
- null_item = (0, 0, 0, -1, -1, -1, -1, nullid, 0, 0)
+ null_item = (0, 0, 0, -1, -1, -1, -1, sha1nodeconstants.nullid, 0, 0)
- def replace_sidedata_info(self, i, sidedata_offset, sidedata_length):
+ def replace_sidedata_info(
+ self, i, sidedata_offset, sidedata_length, offset_flags
+ ):
"""
Replace an existing index entry's sidedata offset and length with new
ones.
@@ -263,12 +285,21 @@
if i >= self._lgt:
packed = _pack(sidedata_format, sidedata_offset, sidedata_length)
old = self._extra[i - self._lgt]
- new = old[:64] + packed + old[64 + packed_size :]
+ offset_flags = struct.pack(b">Q", offset_flags)
+ new = offset_flags + old[8:64] + packed + old[64 + packed_size :]
self._extra[i - self._lgt] = new
else:
msg = b"cannot rewrite entries outside of this transaction"
raise KeyError(msg)
+ def entry_binary(self, rev):
+ """return the raw binary string representing a revision"""
+ entry = self[rev]
+ p = revlog_constants.INDEX_ENTRY_V2.pack(*entry)
+ if rev == 0:
+ p = p[revlog_constants.INDEX_HEADER.size :]
+ return p
+
class IndexObject2(Index2Mixin, IndexObject):
pass
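
`pack_header` and `entry_binary` move on-disk packing onto the index objects themselves; the subtle part is that revision 0's entry loses the bytes that the revlog header overlays. A standalone sketch, assuming the v1 layout (4-byte `>I` header, 64-byte `>Qiiiiii20s12x` entries) used elsewhere in this patch:

import struct

INDEX_HEADER = struct.Struct(">I")
# v1 entry: offset|flags, compressed len, uncompressed len, delta base,
# link rev, parent 1, parent 2, node (20 bytes, padded to 32)
INDEX_ENTRY_V1 = struct.Struct(">Qiiiiii20s12x")

def pack_header(version):
    return INDEX_HEADER.pack(version)

def entry_binary(entry, rev):
    p = INDEX_ENTRY_V1.pack(*entry)
    if rev == 0:
        # the header overlays the first bytes of revision 0's entry
        p = p[INDEX_HEADER.size:]
    return p

entry = (0, 11, 11, 0, 0, -1, -1, b"\x01" * 20)
# 0x10001: inline flag (1 << 16) combined with REVLOGV1
data = pack_header(0x10001) + entry_binary(entry, 0)
assert len(data) == INDEX_ENTRY_V1.size  # still one 64-byte slot on disk
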
--- a/mercurial/revlog.py Sun May 02 16:56:20 2021 -0400
+++ b/mercurial/revlog.py Fri May 07 22:06:25 2021 -0400
@@ -26,24 +26,17 @@
from .node import (
bin,
hex,
- nullhex,
- nullid,
nullrev,
sha1nodeconstants,
short,
- wdirfilenodeids,
- wdirhex,
- wdirid,
wdirrev,
)
from .i18n import _
from .pycompat import getattr
from .revlogutils.constants import (
+ ALL_KINDS,
FLAG_GENERALDELTA,
FLAG_INLINE_DATA,
- INDEX_ENTRY_V0,
- INDEX_ENTRY_V1,
- INDEX_ENTRY_V2,
INDEX_HEADER,
REVLOGV0,
REVLOGV1,
@@ -62,7 +55,6 @@
REVIDX_HASCOPIESINFO,
REVIDX_ISCENSORED,
REVIDX_RAWTEXT_CHANGING_FLAGS,
- REVIDX_SIDEDATA,
)
from .thirdparty import attr
from . import (
@@ -83,6 +75,7 @@
deltas as deltautil,
flagutil,
nodemap as nodemaputil,
+ revlogv0,
sidedata as sidedatautil,
)
from .utils import (
@@ -92,6 +85,7 @@
# blanked usage of all the name to prevent pyflakes constraints
# We need these name available in the module for extensions.
+
REVLOGV0
REVLOGV1
REVLOGV2
@@ -104,7 +98,6 @@
REVLOGV2_FLAGS
REVIDX_ISCENSORED
REVIDX_ELLIPSIS
-REVIDX_SIDEDATA
REVIDX_HASCOPIESINFO
REVIDX_EXTSTORED
REVIDX_DEFAULT_FLAGS
@@ -143,14 +136,6 @@
)
-def getoffset(q):
- return int(q >> 16)
-
-
-def gettype(q):
- return int(q & 0xFFFF)
-
-
def offset_type(offset, type):
if (type & ~flagutil.REVIDX_KNOWN_FLAGS) != 0:
raise ValueError(b'unknown revlog index flags')
@@ -210,6 +195,7 @@
revision = attr.ib()
delta = attr.ib()
sidedata = attr.ib()
+ protocol_flags = attr.ib()
linknode = attr.ib(default=None)
@@ -221,110 +207,32 @@
node = attr.ib(default=None)
-class revlogoldindex(list):
- entry_size = INDEX_ENTRY_V0.size
-
- @property
- def nodemap(self):
- msg = b"index.nodemap is deprecated, use index.[has_node|rev|get_rev]"
- util.nouideprecwarn(msg, b'5.3', stacklevel=2)
- return self._nodemap
-
- @util.propertycache
- def _nodemap(self):
- nodemap = nodemaputil.NodeMap({nullid: nullrev})
- for r in range(0, len(self)):
- n = self[r][7]
- nodemap[n] = r
- return nodemap
-
- def has_node(self, node):
- """return True if the node exist in the index"""
- return node in self._nodemap
-
- def rev(self, node):
- """return a revision for a node
-
- If the node is unknown, raise a RevlogError"""
- return self._nodemap[node]
-
- def get_rev(self, node):
- """return a revision for a node
-
- If the node is unknown, return None"""
- return self._nodemap.get(node)
-
- def append(self, tup):
- self._nodemap[tup[7]] = len(self)
- super(revlogoldindex, self).append(tup)
-
- def __delitem__(self, i):
- if not isinstance(i, slice) or not i.stop == -1 or i.step is not None:
- raise ValueError(b"deleting slices only supports a:-1 with step 1")
- for r in pycompat.xrange(i.start, len(self)):
- del self._nodemap[self[r][7]]
- super(revlogoldindex, self).__delitem__(i)
-
- def clearcaches(self):
- self.__dict__.pop('_nodemap', None)
-
- def __getitem__(self, i):
- if i == -1:
- return (0, 0, 0, -1, -1, -1, -1, nullid)
- return list.__getitem__(self, i)
-
-
-class revlogoldio(object):
- def parseindex(self, data, inline):
- s = INDEX_ENTRY_V0.size
- index = []
- nodemap = nodemaputil.NodeMap({nullid: nullrev})
- n = off = 0
- l = len(data)
- while off + s <= l:
- cur = data[off : off + s]
- off += s
- e = INDEX_ENTRY_V0.unpack(cur)
- # transform to revlogv1 format
- e2 = (
- offset_type(e[0], 0),
- e[1],
- -1,
- e[2],
- e[3],
- nodemap.get(e[4], nullrev),
- nodemap.get(e[5], nullrev),
- e[6],
- )
- index.append(e2)
- nodemap[e[6]] = n
- n += 1
-
- index = revlogoldindex(index)
- return index, None
-
- def packentry(self, entry, node, version, rev):
- """return the binary representation of an entry
-
- entry: a tuple containing all the values (see index.__getitem__)
- node: a callback to convert a revision to nodeid
- version: the changelog version
- rev: the revision number
- """
- if gettype(entry[0]):
- raise error.RevlogError(
- _(b'index entry flags need revlog version 1')
- )
- e2 = (
- getoffset(entry[0]),
- entry[1],
- entry[3],
- entry[4],
- node(entry[5]),
- node(entry[6]),
- entry[7],
- )
- return INDEX_ENTRY_V0.pack(*e2)
+def parse_index_v1(data, inline):
+ # call the C implementation to parse the index data
+ index, cache = parsers.parse_index2(data, inline)
+ return index, cache
+
+
+def parse_index_v2(data, inline):
+ # call the C implementation to parse the index data
+ index, cache = parsers.parse_index2(data, inline, revlogv2=True)
+ return index, cache
+
+
+if util.safehasattr(parsers, 'parse_index_devel_nodemap'):
+
+ def parse_index_v1_nodemap(data, inline):
+ index, cache = parsers.parse_index_devel_nodemap(data, inline)
+ return index, cache
+
+
+else:
+ parse_index_v1_nodemap = None
+
+
+def parse_index_v1_mixed(data, inline):
+ index, cache = parse_index_v1(data, inline)
+ return rustrevlog.MixedIndex(index), cache
# corresponds to uncompressed length of indexformatng (2 gigs, 4-byte
@@ -332,52 +240,6 @@
_maxentrysize = 0x7FFFFFFF
-class revlogio(object):
- def parseindex(self, data, inline):
- # call the C implementation to parse the index data
- index, cache = parsers.parse_index2(data, inline)
- return index, cache
-
- def packentry(self, entry, node, version, rev):
- p = INDEX_ENTRY_V1.pack(*entry)
- if rev == 0:
- p = INDEX_HEADER.pack(version) + p[4:]
- return p
-
-
-class revlogv2io(object):
- def parseindex(self, data, inline):
- index, cache = parsers.parse_index2(data, inline, revlogv2=True)
- return index, cache
-
- def packentry(self, entry, node, version, rev):
- p = INDEX_ENTRY_V2.pack(*entry)
- if rev == 0:
- p = INDEX_HEADER.pack(version) + p[4:]
- return p
-
-
-NodemapRevlogIO = None
-
-if util.safehasattr(parsers, 'parse_index_devel_nodemap'):
-
- class NodemapRevlogIO(revlogio):
- """A debug oriented IO class that return a PersistentNodeMapIndexObject
-
- The PersistentNodeMapIndexObject object is meant to test the persistent nodemap feature.
- """
-
- def parseindex(self, data, inline):
- index, cache = parsers.parse_index_devel_nodemap(data, inline)
- return index, cache
-
-
-class rustrevlogio(revlogio):
- def parseindex(self, data, inline):
- index, cache = super(rustrevlogio, self).parseindex(data, inline)
- return rustrevlog.MixedIndex(index), cache
-
-
class revlog(object):
"""
the underlying revision storage object
@@ -426,7 +288,8 @@
def __init__(
self,
opener,
- indexfile,
+ target,
+ indexfile=None,
datafile=None,
checkambig=False,
mmaplargeindex=False,
@@ -441,6 +304,12 @@
opener is a function that abstracts the file opening operation
and can be used to implement COW semantics or the like.
+ `target`: a (KIND, ID) tuple that identifies the content stored in
+ this revlog. It helps the rest of the code understand what the revlog
+ is about without resorting to heuristics or index filename
+ analysis. Note that this must be reliably set by normal code, but
+ test, debug, or performance measurement code might not set it to an
+ accurate value.
"""
self.upperboundcomp = upperboundcomp
self.indexfile = indexfile
@@ -452,6 +321,9 @@
)
self.opener = opener
+ assert target[0] in ALL_KINDS
+ assert len(target) == 2
+ self.target = target
# When True, indexfile is opened with checkambig=True at writing, to
# avoid file stat ambiguity.
self._checkambig = checkambig
@@ -636,7 +508,7 @@
devel_nodemap = (
self.nodemap_file
and opts.get(b'devel-force-nodemap', False)
- and NodemapRevlogIO is not None
+ and parse_index_v1_nodemap is not None
)
use_rust_index = False
@@ -646,17 +518,17 @@
else:
use_rust_index = self.opener.options.get(b'rust.index')
- self._io = revlogio()
+ self._parse_index = parse_index_v1
if self.version == REVLOGV0:
- self._io = revlogoldio()
+ self._parse_index = revlogv0.parse_index_v0
elif fmt == REVLOGV2:
- self._io = revlogv2io()
+ self._parse_index = parse_index_v2
elif devel_nodemap:
- self._io = NodemapRevlogIO()
+ self._parse_index = parse_index_v1_nodemap
elif use_rust_index:
- self._io = rustrevlogio()
+ self._parse_index = parse_index_v1_mixed
try:
- d = self._io.parseindex(indexdata, self._inline)
+ d = self._parse_index(indexdata, self._inline)
index, _chunkcache = d
use_nodemap = (
not self._inline
@@ -687,6 +559,10 @@
self._decompressors = {}
@util.propertycache
+ def revlog_kind(self):
+ return self.target[0]
+
+ @util.propertycache
def _compressor(self):
engine = util.compengines[self._compengine]
return engine.revlogcompressor(self._compengineopts)
@@ -818,7 +694,10 @@
raise
except error.RevlogError:
# parsers.c radix tree lookup failed
- if node == wdirid or node in wdirfilenodeids:
+ if (
+ node == self.nodeconstants.wdirid
+ or node in self.nodeconstants.wdirfilenodeids
+ ):
raise error.WdirUnsupported
raise error.LookupError(node, self.indexfile, _(b'no node'))
@@ -909,7 +788,7 @@
i = self.index
d = i[self.rev(node)]
# inline node() to avoid function call overhead
- if d[5] == nullid:
+ if d[5] == self.nullid:
return i[d[6]][7], i[d[5]][7]
else:
return i[d[5]][7], i[d[6]][7]
@@ -1027,7 +906,7 @@
not supplied, uses all of the revlog's heads. If common is not
supplied, uses nullid."""
if common is None:
- common = [nullid]
+ common = [self.nullid]
if heads is None:
heads = self.heads()
@@ -1133,7 +1012,7 @@
not supplied, uses all of the revlog's heads. If common is not
supplied, uses nullid."""
if common is None:
- common = [nullid]
+ common = [self.nullid]
if heads is None:
heads = self.heads()
@@ -1171,11 +1050,15 @@
return nonodes
lowestrev = min([self.rev(n) for n in roots])
else:
- roots = [nullid] # Everybody's a descendant of nullid
+ roots = [self.nullid] # Everybody's a descendant of nullid
lowestrev = nullrev
if (lowestrev == nullrev) and (heads is None):
# We want _all_ the nodes!
- return ([self.node(r) for r in self], [nullid], list(self.heads()))
+ return (
+ [self.node(r) for r in self],
+ [self.nullid],
+ list(self.heads()),
+ )
if heads is None:
# All nodes are ancestors, so the latest ancestor is the last
# node.
@@ -1201,7 +1084,7 @@
# grab a node to tag
n = nodestotag.pop()
# Never tag nullid
- if n == nullid:
+ if n == self.nullid:
continue
# A node's revision number represents its place in a
# topologically sorted list of nodes.
@@ -1213,7 +1096,7 @@
ancestors.add(n) # Mark as ancestor
# Add non-nullid parents to list of nodes to tag.
nodestotag.update(
- [p for p in self.parents(n) if p != nullid]
+ [p for p in self.parents(n) if p != self.nullid]
)
elif n in heads: # We've seen it before, is it a fake head?
# So it is, real heads should not be the ancestors of
@@ -1241,7 +1124,7 @@
# We are descending from nullid, and don't need to care about
# any other roots.
lowestrev = nullrev
- roots = [nullid]
+ roots = [self.nullid]
# Transform our roots list into a set.
descendants = set(roots)
# Also, keep the original roots so we can filter out roots that aren't
@@ -1335,7 +1218,7 @@
"""
if start is None and stop is None:
if not len(self):
- return [nullid]
+ return [self.nullid]
return [self.node(r) for r in self.headrevs()]
if start is None:
@@ -1425,13 +1308,13 @@
if ancs:
# choose a consistent winner when there's a tie
return min(map(self.node, ancs))
- return nullid
+ return self.nullid
def _match(self, id):
if isinstance(id, int):
# rev
return self.node(id)
- if len(id) == 20:
+ if len(id) == self.nodeconstants.nodelen:
# possibly a binary node
# odds of a binary node being all hex in ASCII are 1 in 10**25
try:
@@ -1452,7 +1335,7 @@
return self.node(rev)
except (ValueError, OverflowError):
pass
- if len(id) == 40:
+ if len(id) == 2 * self.nodeconstants.nodelen:
try:
# a full hex nodeid?
node = bin(id)
@@ -1463,7 +1346,7 @@
def _partialmatch(self, id):
# we don't care wdirfilenodeids as they should be always full hash
- maybewdir = wdirhex.startswith(id)
+ maybewdir = self.nodeconstants.wdirhex.startswith(id)
try:
partial = self.index.partialmatch(id)
if partial and self.hasnode(partial):
@@ -1499,8 +1382,8 @@
nl = [
n for n in nl if hex(n).startswith(id) and self.hasnode(n)
]
- if nullhex.startswith(id):
- nl.append(nullid)
+ if self.nodeconstants.nullhex.startswith(id):
+ nl.append(self.nullid)
if len(nl) > 0:
if len(nl) == 1 and not maybewdir:
self._pcache[id] = nl[0]
@@ -1560,13 +1443,13 @@
length = max(self.index.shortest(node), minlength)
return disambiguate(hexnode, length)
except error.RevlogError:
- if node != wdirid:
+ if node != self.nodeconstants.wdirid:
raise error.LookupError(node, self.indexfile, _(b'no node'))
except AttributeError:
# Fall through to pure code
pass
- if node == wdirid:
+ if node == self.nodeconstants.wdirid:
for length in range(minlength, len(hexnode) + 1):
prefix = hexnode[:length]
if isvalid(prefix):
@@ -1881,7 +1764,7 @@
rev = None
# fast path the special `nullid` rev
- if node == nullid:
+ if node == self.nullid:
return b"", {}
# ``rawtext`` is the text as stored inside the revlog. Might be the
@@ -2064,9 +1947,11 @@
with self._indexfp(b'w') as fp:
self.version &= ~FLAG_INLINE_DATA
self._inline = False
- io = self._io
for i in self:
- e = io.packentry(self.index[i], self.node, self.version, i)
+ e = self.index.entry_binary(i)
+ if i == 0:
+ header = self.index.pack_header(self.version)
+ e = header + e
fp.write(e)
# the temp file replace the real index when we exit the context
@@ -2113,7 +1998,7 @@
if sidedata is None:
sidedata = {}
- elif not self.hassidedata:
+ elif sidedata and not self.hassidedata:
raise error.ProgrammingError(
_(b"trying to add sidedata to a revlog who don't support them")
)
@@ -2302,11 +2187,14 @@
- rawtext is optional (can be None); if not set, cachedelta must be set.
if both are set, they must correspond to each other.
"""
- if node == nullid:
+ if node == self.nullid:
raise error.RevlogError(
_(b"%s: attempt to add null revision") % self.indexfile
)
- if node == wdirid or node in wdirfilenodeids:
+ if (
+ node == self.nodeconstants.wdirid
+ or node in self.nodeconstants.wdirfilenodeids
+ ):
raise error.RevlogError(
_(b"%s: attempt to add wdir revision") % self.indexfile
)
@@ -2358,7 +2246,7 @@
deltainfo = deltacomputer.finddeltainfo(revinfo, fh)
- if sidedata:
+ if sidedata and self.version & 0xFFFF == REVLOGV2:
serialized_sidedata = sidedatautil.serialize_sidedata(sidedata)
sidedata_offset = offset + deltainfo.deltalen
else:
@@ -2385,7 +2273,10 @@
e = e[:8]
self.index.append(e)
- entry = self._io.packentry(e, self.node, self.version, curr)
+ entry = self.index.entry_binary(curr)
+ if curr == 0:
+ header = self.index.pack_header(self.version)
+ entry = header + entry
self._writeentry(
transaction,
ifh,
@@ -2768,7 +2659,7 @@
addrevisioncb=None,
deltareuse=DELTAREUSESAMEREVS,
forcedeltabothparents=None,
- sidedatacompanion=None,
+ sidedata_helpers=None,
):
"""Copy this revlog to another, possibly with format changes.
@@ -2811,21 +2702,8 @@
argument controls whether to force compute deltas against both parents
for merges. By default, the current default is used.
- If not None, the `sidedatacompanion` is callable that accept two
- arguments:
-
- (srcrevlog, rev)
-
- and return a quintet that control changes to sidedata content from the
- old revision to the new clone result:
-
- (dropall, filterout, update, new_flags, dropped_flags)
-
- * if `dropall` is True, all sidedata should be dropped
- * `filterout` is a set of sidedata keys that should be dropped
- * `update` is a mapping of additionnal/new key -> value
- * new_flags is a bitfields of new flags that the revision should get
- * dropped_flags is a bitfields of new flags that the revision shoudl not longer have
+ See `revlogutil.sidedata.get_sidedata_helpers` for the doc on
+ `sidedata_helpers`.
"""
if deltareuse not in self.DELTAREUSEALL:
raise ValueError(
@@ -2865,7 +2743,7 @@
addrevisioncb,
deltareuse,
forcedeltabothparents,
- sidedatacompanion,
+ sidedata_helpers,
)
finally:
@@ -2880,7 +2758,7 @@
addrevisioncb,
deltareuse,
forcedeltabothparents,
- sidedatacompanion,
+ sidedata_helpers,
):
"""perform the core duty of `revlog.clone` after parameter processing"""
deltacomputer = deltautil.deltacomputer(destrevlog)
@@ -2896,31 +2774,18 @@
p2 = index[entry[6]][7]
node = entry[7]
- sidedataactions = (False, [], {}, 0, 0)
- if sidedatacompanion is not None:
- sidedataactions = sidedatacompanion(self, rev)
-
# (Possibly) reuse the delta from the revlog if allowed and
# the revlog chunk is a delta.
cachedelta = None
rawtext = None
- if any(sidedataactions) or deltareuse == self.DELTAREUSEFULLADD:
- dropall = sidedataactions[0]
- filterout = sidedataactions[1]
- update = sidedataactions[2]
- new_flags = sidedataactions[3]
- dropped_flags = sidedataactions[4]
+ if deltareuse == self.DELTAREUSEFULLADD:
text, sidedata = self._revisiondata(rev)
- if dropall:
- sidedata = {}
- for key in filterout:
- sidedata.pop(key, None)
- sidedata.update(update)
- if not sidedata:
- sidedata = None
-
- flags |= new_flags
- flags &= ~dropped_flags
+
+ if sidedata_helpers is not None:
+ (sidedata, new_flags) = sidedatautil.run_sidedata_helpers(
+ self, sidedata_helpers, sidedata, rev
+ )
+ flags = flags | new_flags[0] & ~new_flags[1]
destrevlog.addrevision(
text,
@@ -2940,8 +2805,17 @@
if dp != nullrev:
cachedelta = (dp, bytes(self._chunk(rev)))
+ sidedata = None
if not cachedelta:
- rawtext = self.rawdata(rev)
+ rawtext, sidedata = self._revisiondata(rev)
+ if sidedata is None:
+ sidedata = self.sidedata(rev)
+
+ if sidedata_helpers is not None:
+ (sidedata, new_flags) = sidedatautil.run_sidedata_helpers(
+ self, sidedata_helpers, sidedata, rev
+ )
+ flags = flags | new_flags[0] & ~new_flags[1]
ifh = destrevlog.opener(
destrevlog.indexfile, b'a+', checkambig=False
@@ -2962,6 +2836,7 @@
ifh,
dfh,
deltacomputer=deltacomputer,
+ sidedata=sidedata,
)
finally:
if dfh:
@@ -2993,10 +2868,16 @@
newdatafile = self.datafile + b'.tmpcensored'
# This is a bit dangerous. We could easily have a mismatch of state.
- newrl = revlog(self.opener, newindexfile, newdatafile, censorable=True)
+ newrl = revlog(
+ self.opener,
+ target=self.target,
+ indexfile=newindexfile,
+ datafile=newdatafile,
+ censorable=True,
+ )
newrl.version = self.version
newrl._generaldelta = self._generaldelta
- newrl._io = self._io
+ newrl._parse_index = self._parse_index
for rev in self.revs():
node = self.node(rev)
@@ -3209,7 +3090,7 @@
current_offset = fp.tell()
for rev in range(startrev, endrev + 1):
entry = self.index[rev]
- new_sidedata = storageutil.run_sidedata_helpers(
+ new_sidedata, flags = sidedatautil.run_sidedata_helpers(
store=self,
sidedata_helpers=helpers,
sidedata={},
@@ -3225,7 +3106,11 @@
# revlog.
msg = b"Rewriting existing sidedata is not supported yet"
raise error.Abort(msg)
- entry = entry[:8]
+
+ # Apply (potential) flags to add and to remove after running
+ # the sidedata helpers
+ new_offset_flags = entry[0] | flags[0] & ~flags[1]
+ entry = (new_offset_flags,) + entry[1:8]
entry += (current_offset, len(serialized_sidedata))
fp.write(serialized_sidedata)
@@ -3235,8 +3120,11 @@
# rewrite the new index entries
with self._indexfp(b'w+') as fp:
fp.seek(startrev * self.index.entry_size)
- for i, entry in enumerate(new_entries):
+ for i, e in enumerate(new_entries):
rev = startrev + i
- self.index.replace_sidedata_info(rev, entry[8], entry[9])
- packed = self._io.packentry(entry, self.node, self.version, rev)
+ self.index.replace_sidedata_info(rev, e[8], e[9], e[0])
+ packed = self.index.entry_binary(rev)
+ if rev == 0:
+ header = self.index.pack_header(self.version)
+ packed = header + packed
fp.write(packed)
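
The net effect of the revlog.py hunks is that the `_io` objects (classes bundling `parseindex` and `packentry`) disappear: parsing becomes a plain function stored as `self._parse_index`, while packing now lives on the index (`entry_binary`/`pack_header`). A condensed sketch of the new parser selection, with the parsers stubbed and version values as in `revlogutils.constants`:

def parse_index_v0(data, inline):  # revlogutils.revlogv0 in this patch
    return [], None

def parse_index_v1(data, inline):  # the C parser in the real code
    return [], None

def parse_index_v2(data, inline):
    return [], None

REVLOGV0, REVLOGV1, REVLOGV2 = 0, 1, 0xDEAD  # v2 is the experimental marker

def select_parse_index(version, devel_nodemap=False, use_rust_index=False):
    parse_index = parse_index_v1
    if version == REVLOGV0:
        parse_index = parse_index_v0
    elif version == REVLOGV2:
        parse_index = parse_index_v2
    # the nodemap / rust variants would be picked here when available
    return parse_index

assert select_parse_index(REVLOGV0) is parse_index_v0
assert select_parse_index(REVLOGV2) is parse_index_v2
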
--- a/mercurial/revlogutils/constants.py Sun May 02 16:56:20 2021 -0400
+++ b/mercurial/revlogutils/constants.py Fri May 07 22:06:25 2021 -0400
@@ -13,6 +13,20 @@
from ..interfaces import repository
+### Internal utility constants
+
+KIND_CHANGELOG = 1001  # over 256 so it is never comparable with a bytes value
+KIND_MANIFESTLOG = 1002
+KIND_FILELOG = 1003
+KIND_OTHER = 1004
+
+ALL_KINDS = {
+ KIND_CHANGELOG,
+ KIND_MANIFESTLOG,
+ KIND_FILELOG,
+ KIND_OTHER,
+}
+
### main revlog header
INDEX_HEADER = struct.Struct(b">I")
@@ -85,8 +99,6 @@
REVIDX_ELLIPSIS = repository.REVISION_FLAG_ELLIPSIS
# revision data is stored externally
REVIDX_EXTSTORED = repository.REVISION_FLAG_EXTSTORED
-# revision data contains extra metadata not part of the official digest
-REVIDX_SIDEDATA = repository.REVISION_FLAG_SIDEDATA
# revision changes files in a way that could affect copy tracing.
REVIDX_HASCOPIESINFO = repository.REVISION_FLAG_HASCOPIESINFO
REVIDX_DEFAULT_FLAGS = 0
@@ -95,13 +107,10 @@
REVIDX_ISCENSORED,
REVIDX_ELLIPSIS,
REVIDX_EXTSTORED,
- REVIDX_SIDEDATA,
REVIDX_HASCOPIESINFO,
]
# bitmark for flags that could cause rawdata content change
-REVIDX_RAWTEXT_CHANGING_FLAGS = (
- REVIDX_ISCENSORED | REVIDX_EXTSTORED | REVIDX_SIDEDATA
-)
+REVIDX_RAWTEXT_CHANGING_FLAGS = REVIDX_ISCENSORED | REVIDX_EXTSTORED
SPARSE_REVLOG_MAX_CHAIN_LENGTH = 1000
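
With the `KIND_*` vocabulary above, plus the assertions added in `revlog.__init__`, a revlog's `target` can be validated and queried without any filename heuristics. A toy illustration:

KIND_CHANGELOG = 1001
KIND_MANIFESTLOG = 1002
KIND_FILELOG = 1003
KIND_OTHER = 1004
ALL_KINDS = {KIND_CHANGELOG, KIND_MANIFESTLOG, KIND_FILELOG, KIND_OTHER}

class MiniRevlog(object):
    def __init__(self, target):
        # mirrors the checks added in revlog.__init__
        assert len(target) == 2 and target[0] in ALL_KINDS
        self.target = target

    @property
    def revlog_kind(self):
        # mirrors the revlog_kind property added in this patch
        return self.target[0]

rl = MiniRevlog((KIND_FILELOG, b'path/to/file.txt'))
assert rl.revlog_kind == KIND_FILELOG
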
--- a/mercurial/revlogutils/flagutil.py Sun May 02 16:56:20 2021 -0400
+++ b/mercurial/revlogutils/flagutil.py Fri May 07 22:06:25 2021 -0400
@@ -18,7 +18,6 @@
REVIDX_HASCOPIESINFO,
REVIDX_ISCENSORED,
REVIDX_RAWTEXT_CHANGING_FLAGS,
- REVIDX_SIDEDATA,
)
from .. import error, util
@@ -28,7 +27,6 @@
REVIDX_ISCENSORED
REVIDX_ELLIPSIS
REVIDX_EXTSTORED
-REVIDX_SIDEDATA
REVIDX_HASCOPIESINFO,
REVIDX_DEFAULT_FLAGS
REVIDX_FLAGS_ORDER
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/mercurial/revlogutils/revlogv0.py Fri May 07 22:06:25 2021 -0400
@@ -0,0 +1,144 @@
+# revlogv0 - code related to revlog format "V0"
+#
+# Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2 or any later version.
+from __future__ import absolute_import
+
+
+from ..node import sha1nodeconstants
+from .constants import (
+ INDEX_ENTRY_V0,
+)
+from ..i18n import _
+
+from .. import (
+ error,
+ node,
+ pycompat,
+ util,
+)
+
+from . import (
+ flagutil,
+ nodemap as nodemaputil,
+)
+
+
+def getoffset(q):
+ return int(q >> 16)
+
+
+def gettype(q):
+ return int(q & 0xFFFF)
+
+
+def offset_type(offset, type):
+ if (type & ~flagutil.REVIDX_KNOWN_FLAGS) != 0:
+ raise ValueError(b'unknown revlog index flags')
+ return int(int(offset) << 16 | type)
+
+
+class revlogoldindex(list):
+ entry_size = INDEX_ENTRY_V0.size
+
+ @property
+ def nodemap(self):
+ msg = b"index.nodemap is deprecated, use index.[has_node|rev|get_rev]"
+ util.nouideprecwarn(msg, b'5.3', stacklevel=2)
+ return self._nodemap
+
+ @util.propertycache
+ def _nodemap(self):
+ nodemap = nodemaputil.NodeMap({sha1nodeconstants.nullid: node.nullrev})
+ for r in range(0, len(self)):
+ n = self[r][7]
+ nodemap[n] = r
+ return nodemap
+
+ def has_node(self, node):
+ """return True if the node exist in the index"""
+ return node in self._nodemap
+
+ def rev(self, node):
+ """return a revision for a node
+
+ If the node is unknown, raise a RevlogError"""
+ return self._nodemap[node]
+
+ def get_rev(self, node):
+ """return a revision for a node
+
+ If the node is unknown, return None"""
+ return self._nodemap.get(node)
+
+ def append(self, tup):
+ self._nodemap[tup[7]] = len(self)
+ super(revlogoldindex, self).append(tup)
+
+ def __delitem__(self, i):
+ if not isinstance(i, slice) or not i.stop == -1 or i.step is not None:
+ raise ValueError(b"deleting slices only supports a:-1 with step 1")
+ for r in pycompat.xrange(i.start, len(self)):
+ del self._nodemap[self[r][7]]
+ super(revlogoldindex, self).__delitem__(i)
+
+ def clearcaches(self):
+ self.__dict__.pop('_nodemap', None)
+
+ def __getitem__(self, i):
+ if i == -1:
+ return (0, 0, 0, -1, -1, -1, -1, node.nullid)
+ return list.__getitem__(self, i)
+
+ def pack_header(self, header):
+ """pack header information in binary"""
+ return b''
+
+ def entry_binary(self, rev):
+ """return the raw binary string representing a revision"""
+ entry = self[rev]
+ if gettype(entry[0]):
+ raise error.RevlogError(
+ _(b'index entry flags need revlog version 1')
+ )
+ e2 = (
+ getoffset(entry[0]),
+ entry[1],
+ entry[3],
+ entry[4],
+ self[entry[5]][7],
+ self[entry[6]][7],
+ entry[7],
+ )
+ return INDEX_ENTRY_V0.pack(*e2)
+
+
+def parse_index_v0(data, inline):
+ s = INDEX_ENTRY_V0.size
+ index = []
+ nodemap = nodemaputil.NodeMap({node.nullid: node.nullrev})
+ n = off = 0
+ l = len(data)
+ while off + s <= l:
+ cur = data[off : off + s]
+ off += s
+ e = INDEX_ENTRY_V0.unpack(cur)
+ # transform to revlogv1 format
+ e2 = (
+ offset_type(e[0], 0),
+ e[1],
+ -1,
+ e[2],
+ e[3],
+ nodemap.get(e[4], node.nullrev),
+ nodemap.get(e[5], node.nullrev),
+ e[6],
+ )
+ index.append(e2)
+ nodemap[e[6]] = n
+ n += 1
+
+ index = revlogoldindex(index)
+ return index, None
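
`getoffset`, `gettype`, and `offset_type` now live here; they pack a 48-bit file offset and a 16-bit flag field into a single integer. A quick round trip (the real `offset_type` also rejects flags outside `REVIDX_KNOWN_FLAGS`, which all sit in the high bits):

def getoffset(q):
    return int(q >> 16)

def gettype(q):
    return int(q & 0xFFFF)

def offset_type(offset, type):
    # flag validation elided; see the version above
    return int(int(offset) << 16 | type)

packed = offset_type(1024, 1 << 15)
assert (getoffset(packed), gettype(packed)) == (1024, 1 << 15)
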
--- a/mercurial/revlogutils/sidedata.py Sun May 02 16:56:20 2021 -0400
+++ b/mercurial/revlogutils/sidedata.py Fri May 07 22:06:25 2021 -0400
@@ -32,9 +32,11 @@
from __future__ import absolute_import
+import collections
import struct
-from .. import error
+from .. import error, requirements as requirementsmod
+from ..revlogutils import constants, flagutil
from ..utils import hashutil
## sidedata type constant
@@ -91,3 +93,83 @@
sidedata[key] = entrytext
dataoffset = nextdataoffset
return sidedata
+
+
+def get_sidedata_helpers(repo, remote_sd_categories, pull=False):
+ """
+ Returns a `(repo, computers, removers)` tuple:
+ * `repo` is used as an argument for the computers
+ * `computers` maps each revlog kind to a list of `(keys, computer, flags)`
+ tuples, one per registered sidedata `category`, that compute the
+ missing sidedata categories that were asked for:
+ * `keys` are the sidedata keys to be affected
+ * `flags` is a bitmask (an integer) of flags to remove when
+ removing the category.
+ * `computer` is the function `(repo, store, rev, sidedata)` that
+ returns a tuple of
+ `(new sidedata dict, (flags to add, flags to remove))`.
+ For example, it will return `({}, (0, 1 << 15))` to return no
+ sidedata, with no flags to add and one flag to remove.
+ * `removers` will remove the keys corresponding to the categories
+ that are present, but not needed.
+ If both `computers` and `removers` are empty, sidedata will simply not
+ be transformed.
+ """
+ # Computers for computing sidedata on-the-fly
+ sd_computers = collections.defaultdict(list)
+ # Computers for categories to remove from sidedata
+ sd_removers = collections.defaultdict(list)
+ to_generate = remote_sd_categories - repo._wanted_sidedata
+ to_remove = repo._wanted_sidedata - remote_sd_categories
+ if pull:
+ to_generate, to_remove = to_remove, to_generate
+
+ for revlog_kind, computers in repo._sidedata_computers.items():
+ for category, computer in computers.items():
+ if category in to_generate:
+ sd_computers[revlog_kind].append(computer)
+ if category in to_remove:
+ sd_removers[revlog_kind].append(computer)
+
+ sidedata_helpers = (repo, sd_computers, sd_removers)
+ return sidedata_helpers
+
+
+def run_sidedata_helpers(store, sidedata_helpers, sidedata, rev):
+ """Returns the sidedata for the given revision after running through
+ the given helpers.
+ - `store`: the revlog this applies to (changelog, manifest, or filelog
+ instance)
+ - `sidedata_helpers`: see `get_sidedata_helpers`
+ - `sidedata`: previous sidedata at the given rev, if any
+ - `rev`: affected rev of `store`
+ """
+ repo, sd_computers, sd_removers = sidedata_helpers
+ kind = store.revlog_kind
+ flags_to_add = 0
+ flags_to_remove = 0
+ for _keys, sd_computer, _flags in sd_computers.get(kind, []):
+ sidedata, flags = sd_computer(repo, store, rev, sidedata)
+ flags_to_add |= flags[0]
+ flags_to_remove |= flags[1]
+ for keys, _computer, flags in sd_removers.get(kind, []):
+ for key in keys:
+ sidedata.pop(key, None)
+ flags_to_remove |= flags
+ return sidedata, (flags_to_add, flags_to_remove)
+
+
+def set_sidedata_spec_for_repo(repo):
+ # prevent cycle metadata -> revlogutils.sidedata -> metadata
+ from .. import metadata
+
+ if requirementsmod.COPIESSDC_REQUIREMENT in repo.requirements:
+ repo.register_wanted_sidedata(SD_FILES)
+ repo.register_sidedata_computer(
+ constants.KIND_CHANGELOG,
+ SD_FILES,
+ (SD_FILES,),
+ metadata.copies_sidedata_computer,
+ flagutil.REVIDX_HASCOPIESINFO,
+ )
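
To see the whole pipeline end to end, here is a self-contained toy run: one computer adds a category and requests a flag, one remover strips a stale category and flags its bit for removal. The helper body is condensed from `run_sidedata_helpers` above; the store and categories are hypothetical.

KIND_CHANGELOG = 1001

def run_sidedata_helpers(store, sidedata_helpers, sidedata, rev):
    # condensed copy of the helper added above
    repo, sd_computers, sd_removers = sidedata_helpers
    kind = store.revlog_kind
    flags_to_add = flags_to_remove = 0
    for _keys, computer, _flags in sd_computers.get(kind, []):
        sidedata, flags = computer(repo, store, rev, sidedata)
        flags_to_add |= flags[0]
        flags_to_remove |= flags[1]
    for keys, _computer, flags in sd_removers.get(kind, []):
        for key in keys:
            sidedata.pop(key, None)
        flags_to_remove |= flags
    return sidedata, (flags_to_add, flags_to_remove)

class ToyStore(object):
    revlog_kind = KIND_CHANGELOG

def add_files(repo, store, rev, sidedata):
    sidedata = dict(sidedata)
    sidedata[b'files'] = b'a.txt'
    return sidedata, (1 << 12, 0)  # request HASCOPIESINFO

computers = {KIND_CHANGELOG: [((b'files',), add_files, 1 << 12)]}
removers = {KIND_CHANGELOG: [((b'stale',), None, 1 << 13)]}
helpers = (None, computers, removers)  # (repo, computers, removers)

sidedata, flags = run_sidedata_helpers(ToyStore(), helpers, {b'stale': b'x'}, 0)
assert sidedata == {b'files': b'a.txt'} and flags == (1 << 12, 1 << 13)
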
--- a/mercurial/revset.py Sun May 02 16:56:20 2021 -0400
+++ b/mercurial/revset.py Fri May 07 22:06:25 2021 -0400
@@ -1724,7 +1724,7 @@
def _node(repo, n):
"""process a node input"""
rn = None
- if len(n) == 40:
+ if len(n) == 2 * repo.nodeconstants.nodelen:
try:
rn = repo.changelog.rev(bin(n))
except error.WdirUnsupported:
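
The same substitution appears in localrepo, revset, and scmutil: the literals `20` and `40` become `nodelen` and `2 * nodelen`, since a binary node is `nodelen` bytes and its hex form is twice that. Trivially:

from binascii import hexlify

nodelen = 20                 # sha1nodeconstants.nodelen
node = b'\x0f' * nodelen     # binary node id
assert len(node) == nodelen
assert len(hexlify(node)) == 2 * nodelen
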
--- a/mercurial/rewriteutil.py Sun May 02 16:56:20 2021 -0400
+++ b/mercurial/rewriteutil.py Fri May 07 22:06:25 2021 -0400
@@ -17,16 +17,38 @@
from . import (
error,
+ node,
obsolete,
obsutil,
revset,
scmutil,
+ util,
)
NODE_RE = re.compile(br'\b[0-9a-f]{6,64}\b')
+def _formatrevs(repo, revs, maxrevs=4):
+ """returns a string summarizing revisions in a decent size
+
+ If there are few enough revisions, we list them all. Otherwise we display a
+ summary of the form:
+
+ 1ea73414a91b and 5 others
+ """
+ tonode = repo.changelog.node
+ numrevs = len(revs)
+ if numrevs < maxrevs:
+ shorts = [node.short(tonode(r)) for r in revs]
+ summary = b', '.join(shorts)
+ else:
+ first = revs.first()
+ summary = _(b'%s and %d others')
+ summary %= (node.short(tonode(first)), numrevs - 1)
+ return summary
+
+
def precheck(repo, revs, action=b'rewrite'):
"""check if revs can be rewritten
action is used to control the error message.
@@ -34,22 +56,66 @@
Make sure this function is called after taking the lock.
"""
if nullrev in revs:
- msg = _(b"cannot %s null changeset") % action
+ msg = _(b"cannot %s the null revision") % action
hint = _(b"no changeset checked out")
raise error.InputError(msg, hint=hint)
+ if any(util.safehasattr(r, 'rev') for r in revs):
+ repo.ui.develwarn(b"rewriteutil.precheck called with ctx not revs")
+ revs = (r.rev() for r in revs)
+
if len(repo[None].parents()) > 1:
- raise error.StateError(_(b"cannot %s while merging") % action)
+ raise error.StateError(
+ _(b"cannot %s changesets while merging") % action
+ )
publicrevs = repo.revs(b'%ld and public()', revs)
if publicrevs:
- msg = _(b"cannot %s public changesets") % action
+ summary = _formatrevs(repo, publicrevs)
+ msg = _(b"cannot %s public changesets: %s") % (action, summary)
hint = _(b"see 'hg help phases' for details")
raise error.InputError(msg, hint=hint)
newunstable = disallowednewunstable(repo, revs)
if newunstable:
- raise error.InputError(_(b"cannot %s changeset with children") % action)
+ hint = _(b"see 'hg help evolution.instability'")
+ raise error.InputError(
+ _(b"cannot %s changeset, as that will orphan %d descendants")
+ % (action, len(newunstable)),
+ hint=hint,
+ )
+
+ if not obsolete.isenabled(repo, obsolete.allowdivergenceopt):
+ new_divergence = _find_new_divergence(repo, revs)
+ if new_divergence:
+ local_ctx, other_ctx, base_ctx = new_divergence
+ msg = _(
+ b'cannot %s %s, as that creates content-divergence with %s'
+ ) % (
+ action,
+ local_ctx,
+ other_ctx,
+ )
+ if local_ctx.rev() != base_ctx.rev():
+ msg += _(b', from %s') % base_ctx
+ if repo.ui.verbose:
+ if local_ctx.rev() != base_ctx.rev():
+ msg += _(
+                    b'\n changeset %s is a successor of changeset %s'
+ ) % (local_ctx, base_ctx)
+ msg += _(
+ b'\n changeset %s already has a successor in '
+ b'changeset %s\n'
+ b' rewriting changeset %s would create '
+ b'"content-divergence"\n'
+ b' set experimental.evolution.allowdivergence=True to '
+ b'skip this check'
+ ) % (base_ctx, other_ctx, local_ctx)
+ raise error.InputError(msg)
+ else:
+ raise error.InputError(
+ msg, hint=_(b"add --verbose for details")
+ )
def disallowednewunstable(repo, revs):
@@ -65,6 +131,40 @@
return repo.revs(b"(%ld::) - %ld", revs, revs)
+def _find_new_divergence(repo, revs):
+ obsrevs = repo.revs(b'%ld and obsolete()', revs)
+ for r in obsrevs:
+ div = find_new_divergence_from(repo, repo[r])
+ if div:
+ return (repo[r], repo[div[0]], repo[div[1]])
+ return None
+
+
+def find_new_divergence_from(repo, ctx):
+ """return divergent revision if rewriting an obsolete cset (ctx) will
+ create divergence
+
+ Returns (<other node>, <common ancestor node>) or None
+ """
+ if not ctx.obsolete():
+ return None
+ # We need to check two cases that can cause divergence:
+ # case 1: the rev being rewritten has a non-obsolete successor (easily
+ # detected by successorssets)
+ sset = obsutil.successorssets(repo, ctx.node())
+ if sset:
+ return (sset[0][0], ctx.node())
+ else:
+ # case 2: one of the precursors of the rev being revived has a
+ # non-obsolete successor (we need divergentsets for this)
+ divsets = obsutil.divergentsets(repo, ctx)
+ if divsets:
+ nsuccset = divsets[0][b'divergentnodes']
+ prec = divsets[0][b'commonpredecessor']
+ return (nsuccset[0], prec)
+ return None
+
+
def skip_empty_successor(ui, command):
empty_successor = ui.config(b'rewrite', b'empty-successor')
if empty_successor == b'skip':
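
A hedged re-creation of the `_formatrevs` summarization rule added above, decoupled from a real repo (the byte strings stand in for `node.short` output):

    def format_revs(shorts, maxrevs=4):
        # list everything below the threshold, else "<first> and N others"
        if len(shorts) < maxrevs:
            return b', '.join(shorts)
        return b'%s and %d others' % (shorts[0], len(shorts) - 1)

    assert format_revs([b'1ea73414a91b']) == b'1ea73414a91b'
    assert format_revs([b'aaa', b'bbb', b'ccc', b'ddd']) == b'aaa and 3 others'
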
--- a/mercurial/scmutil.py Sun May 02 16:56:20 2021 -0400
+++ b/mercurial/scmutil.py Fri May 07 22:06:25 2021 -0400
@@ -19,10 +19,8 @@
from .node import (
bin,
hex,
- nullid,
nullrev,
short,
- wdirid,
wdirrev,
)
from .pycompat import getattr
@@ -450,7 +448,7 @@
"""Return binary node id for a given basectx"""
node = ctx.node()
if node is None:
- return wdirid
+ return ctx.repo().nodeconstants.wdirid
return node
@@ -645,7 +643,7 @@
except (ValueError, OverflowError, IndexError):
pass
- if len(symbol) == 40:
+ if len(symbol) == 2 * repo.nodeconstants.nodelen:
try:
node = bin(symbol)
rev = repo.changelog.rev(node)
@@ -1108,7 +1106,7 @@
if roots:
newnode = roots[0].node()
else:
- newnode = nullid
+ newnode = repo.nullid
else:
newnode = newnodes[0]
moves[oldnode] = newnode
@@ -1506,7 +1504,7 @@
oldctx = repo[b'.']
ds = repo.dirstate
copies = dict(ds.copies())
- ds.setparents(newctx.node(), nullid)
+ ds.setparents(newctx.node(), repo.nullid)
s = newctx.status(oldctx, match=match)
for f in s.modified:
if ds[f] == b'r':
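
Several hunks in this patch replace the literal 40 with `2 * repo.nodeconstants.nodelen`. The factor of two is plain binary-to-hex expansion; a quick standalone check (20 matches sha1, 32 is an assumed wider hash):

    from binascii import hexlify, unhexlify

    for nodelen in (20, 32):
        node = b'\xaa' * nodelen
        symbol = hexlify(node)
        assert len(symbol) == 2 * nodelen   # the length test the hunks rely on
        assert unhexlify(symbol) == node    # hex()/bin() round-trip
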
--- a/mercurial/setdiscovery.py Sun May 02 16:56:20 2021 -0400
+++ b/mercurial/setdiscovery.py Fri May 07 22:06:25 2021 -0400
@@ -46,10 +46,7 @@
import random
from .i18n import _
-from .node import (
- nullid,
- nullrev,
-)
+from .node import nullrev
from . import (
error,
policy,
@@ -391,9 +388,9 @@
audit[b'total-roundtrips'] = 1
if cl.tiprev() == nullrev:
- if srvheadhashes != [nullid]:
- return [nullid], True, srvheadhashes
- return [nullid], False, []
+ if srvheadhashes != [cl.nullid]:
+ return [cl.nullid], True, srvheadhashes
+ return [cl.nullid], False, []
else:
# we still need the remote head for the function return
with remote.commandexecutor() as e:
@@ -406,7 +403,7 @@
knownsrvheads = [] # revnos of remote heads that are known locally
for node in srvheadhashes:
- if node == nullid:
+ if node == cl.nullid:
continue
try:
@@ -503,17 +500,17 @@
if audit is not None:
audit[b'total-roundtrips'] = roundtrips
- if not result and srvheadhashes != [nullid]:
+ if not result and srvheadhashes != [cl.nullid]:
if abortwhenunrelated:
raise error.Abort(_(b"repository is unrelated"))
else:
ui.warn(_(b"warning: repository is unrelated\n"))
return (
- {nullid},
+ {cl.nullid},
True,
srvheadhashes,
)
- anyincoming = srvheadhashes != [nullid]
+ anyincoming = srvheadhashes != [cl.nullid]
result = {clnode(r) for r in result}
return result, anyincoming, srvheadhashes
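
The recurring `nullid` to `cl.nullid`/`repo.nullid` substitution has a single motive: the null node's width must track the repo's hash function, so a module-level constant no longer suffices. A toy illustration with an assumed 32-byte width for a newer hash:

    SHA1_NULLID = b'\x00' * 20    # what the old global nullid was
    WIDE_NULLID = b'\x00' * 32    # assumed width of a newer hash

    def server_is_empty(srvheadhashes, nullid):
        # the "server has nothing" test from the discovery hunks above
        return srvheadhashes == [nullid]

    assert server_is_empty([SHA1_NULLID], SHA1_NULLID)
    assert not server_is_empty([SHA1_NULLID], WIDE_NULLID)
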
--- a/mercurial/shelve.py Sun May 02 16:56:20 2021 -0400
+++ b/mercurial/shelve.py Fri May 07 22:06:25 2021 -0400
@@ -31,7 +31,6 @@
from .node import (
bin,
hex,
- nullid,
nullrev,
)
from . import (
@@ -822,7 +821,7 @@
pendingctx = state.pendingctx
with repo.dirstate.parentchange():
- repo.setparents(state.pendingctx.node(), nullid)
+ repo.setparents(state.pendingctx.node(), repo.nullid)
repo.dirstate.write(repo.currenttransaction())
targetphase = phases.internal
@@ -831,7 +830,7 @@
overrides = {(b'phases', b'new-commit'): targetphase}
with repo.ui.configoverride(overrides, b'unshelve'):
with repo.dirstate.parentchange():
- repo.setparents(state.parents[0], nullid)
+ repo.setparents(state.parents[0], repo.nullid)
newnode, ispartialunshelve = _createunshelvectx(
ui, repo, shelvectx, basename, interactive, opts
)
@@ -1027,7 +1026,7 @@
raise error.ConflictResolutionRequired(b'unshelve')
with repo.dirstate.parentchange():
- repo.setparents(tmpwctx.node(), nullid)
+ repo.setparents(tmpwctx.node(), repo.nullid)
newnode, ispartialunshelve = _createunshelvectx(
ui, repo, shelvectx, basename, interactive, opts
)
--- a/mercurial/sparse.py Sun May 02 16:56:20 2021 -0400
+++ b/mercurial/sparse.py Fri May 07 22:06:25 2021 -0400
@@ -10,10 +10,7 @@
import os
from .i18n import _
-from .node import (
- hex,
- nullid,
-)
+from .node import hex
from . import (
error,
match as matchmod,
@@ -177,7 +174,7 @@
revs = [
repo.changelog.rev(node)
for node in repo.dirstate.parents()
- if node != nullid
+ if node != repo.nullid
]
allincludes = set()
@@ -321,7 +318,7 @@
revs = [
repo.changelog.rev(node)
for node in repo.dirstate.parents()
- if node != nullid
+ if node != repo.nullid
]
signature = configsignature(repo, includetemp=includetemp)
--- a/mercurial/strip.py Sun May 02 16:56:20 2021 -0400
+++ b/mercurial/strip.py Fri May 07 22:06:25 2021 -0400
@@ -2,7 +2,6 @@
from .i18n import _
from .pycompat import getattr
-from .node import nullid
from . import (
bookmarks as bookmarksmod,
cmdutil,
@@ -39,7 +38,7 @@
if (
util.safehasattr(repo, b'mq')
- and p2 != nullid
+ and p2 != repo.nullid
and p2 in [x.node for x in repo.mq.applied]
):
unode = p2
@@ -218,7 +217,7 @@
# if one of the wdir parents is stripped we'll need
# to update away to an earlier revision
update = any(
- p != nullid and cl.rev(p) in strippedrevs
+ p != repo.nullid and cl.rev(p) in strippedrevs
for p in repo.dirstate.parents()
)
--- a/mercurial/subrepo.py Sun May 02 16:56:20 2021 -0400
+++ b/mercurial/subrepo.py Fri May 07 22:06:25 2021 -0400
@@ -21,7 +21,6 @@
from .node import (
bin,
hex,
- nullid,
short,
)
from . import (
@@ -686,7 +685,7 @@
# we can't fully delete the repository as it may contain
# local-only history
self.ui.note(_(b'removing subrepo %s\n') % subrelpath(self))
- hg.clean(self._repo, nullid, False)
+ hg.clean(self._repo, self._repo.nullid, False)
def _get(self, state):
source, revision, kind = state
--- a/mercurial/tagmerge.py Sun May 02 16:56:20 2021 -0400
+++ b/mercurial/tagmerge.py Fri May 07 22:06:25 2021 -0400
@@ -74,9 +74,6 @@
from __future__ import absolute_import
from .i18n import _
-from .node import (
- nullhex,
-)
from . import (
tags as tagsmod,
util,
@@ -243,8 +240,8 @@
pnlosttagset = basetagset - pntagset
for t in pnlosttagset:
pntags[t] = basetags[t]
- if pntags[t][-1][0] != nullhex:
- pntags[t].append([nullhex, None])
+ if pntags[t][-1][0] != repo.nodeconstants.nullhex:
+ pntags[t].append([repo.nodeconstants.nullhex, None])
conflictedtags = [] # for reporting purposes
mergedtags = util.sortdict(p1tags)
--- a/mercurial/tags.py Sun May 02 16:56:20 2021 -0400
+++ b/mercurial/tags.py Fri May 07 22:06:25 2021 -0400
@@ -18,7 +18,6 @@
from .node import (
bin,
hex,
- nullid,
nullrev,
short,
)
@@ -96,12 +95,12 @@
return fnodes
-def _nulltonone(value):
+def _nulltonone(repo, value):
"""convert nullid to None
For tag value, nullid means "deleted". This small utility function helps
translate that to None."""
- if value == nullid:
+ if value == repo.nullid:
return None
return value
@@ -123,14 +122,14 @@
# list of (tag, old, new): None means missing
entries = []
for tag, (new, __) in newtags.items():
- new = _nulltonone(new)
+ new = _nulltonone(repo, new)
old, __ = oldtags.pop(tag, (None, None))
- old = _nulltonone(old)
+ old = _nulltonone(repo, old)
if old != new:
entries.append((tag, old, new))
# handle deleted tags
for tag, (old, __) in oldtags.items():
- old = _nulltonone(old)
+ old = _nulltonone(repo, old)
if old is not None:
entries.append((tag, old, None))
entries.sort()
@@ -452,7 +451,7 @@
repoheads = repo.heads()
# Case 2 (uncommon): empty repo; get out quickly and don't bother
# writing an empty cache.
- if repoheads == [nullid]:
+ if repoheads == [repo.nullid]:
return ([], {}, valid, {}, False)
# Case 3 (uncommon): cache file missing or empty.
@@ -499,7 +498,7 @@
for node in nodes:
fnode = fnodescache.getfnode(node)
flog = repo.file(b'.hgtags')
- if fnode != nullid:
+ if fnode != repo.nullid:
if fnode not in validated_fnodes:
if flog.hasnode(fnode):
validated_fnodes.add(fnode)
@@ -510,7 +509,7 @@
if unknown_entries:
fixed_nodemap = fnodescache.refresh_invalid_nodes(unknown_entries)
for node, fnode in pycompat.iteritems(fixed_nodemap):
- if fnode != nullid:
+ if fnode != repo.nullid:
cachefnode[node] = fnode
fnodescache.write()
@@ -632,7 +631,7 @@
m = name
if repo._tagscache.tagtypes and name in repo._tagscache.tagtypes:
- old = repo.tags().get(name, nullid)
+ old = repo.tags().get(name, repo.nullid)
fp.write(b'%s %s\n' % (hex(old), m))
fp.write(b'%s %s\n' % (hex(node), m))
fp.close()
@@ -762,8 +761,8 @@
If an .hgtags does not exist at the specified revision, nullid is
returned.
"""
- if node == nullid:
- return nullid
+ if node == self._repo.nullid:
+ return node
ctx = self._repo[node]
rev = ctx.rev()
@@ -826,7 +825,7 @@
fnode = ctx.filenode(b'.hgtags')
except error.LookupError:
# No .hgtags file on this revision.
- fnode = nullid
+ fnode = self._repo.nullid
return fnode
def setfnode(self, node, fnode):
--- a/mercurial/templatefuncs.py Sun May 02 16:56:20 2021 -0400
+++ b/mercurial/templatefuncs.py Fri May 07 22:06:25 2021 -0400
@@ -10,10 +10,7 @@
import re
from .i18n import _
-from .node import (
- bin,
- wdirid,
-)
+from .node import bin
from . import (
color,
dagop,
@@ -767,9 +764,10 @@
)
repo = context.resource(mapping, b'repo')
- if len(hexnode) > 40:
+ hexnodelen = 2 * repo.nodeconstants.nodelen
+ if len(hexnode) > hexnodelen:
return hexnode
- elif len(hexnode) == 40:
+ elif len(hexnode) == hexnodelen:
try:
node = bin(hexnode)
except TypeError:
@@ -778,7 +776,7 @@
try:
node = scmutil.resolvehexnodeidprefix(repo, hexnode)
except error.WdirUnsupported:
- node = wdirid
+ node = repo.nodeconstants.wdirid
except error.LookupError:
return hexnode
if not node:
--- a/mercurial/templatekw.py Sun May 02 16:56:20 2021 -0400
+++ b/mercurial/templatekw.py Fri May 07 22:06:25 2021 -0400
@@ -10,8 +10,6 @@
from .i18n import _
from .node import (
hex,
- nullid,
- wdirid,
wdirrev,
)
@@ -29,7 +27,10 @@
templateutil,
util,
)
-from .utils import stringutil
+from .utils import (
+ stringutil,
+ urlutil,
+)
_hybrid = templateutil.hybrid
hybriddict = templateutil.hybriddict
@@ -412,7 +413,7 @@
def getgraphnodecurrent(repo, ctx, cache):
wpnodes = repo.dirstate.parents()
- if wpnodes[1] == nullid:
+ if wpnodes[1] == repo.nullid:
wpnodes = wpnodes[:1]
if ctx.node() in wpnodes:
return b'@'
@@ -525,11 +526,12 @@
ctx = context.resource(mapping, b'ctx')
mnode = ctx.manifestnode()
if mnode is None:
- mnode = wdirid
+ mnode = repo.nodeconstants.wdirid
mrev = wdirrev
+ mhex = repo.nodeconstants.wdirhex
else:
mrev = repo.manifestlog.rev(mnode)
-    mhex = hex(mnode)
+        mhex = hex(mnode)
mapping = context.overlaymap(mapping, {b'rev': mrev, b'node': mhex})
f = context.process(b'manifest', mapping)
return templateutil.hybriditem(
@@ -661,9 +663,8 @@
repo = context.resource(mapping, b'repo')
# see commands.paths() for naming of dictionary keys
paths = repo.ui.paths
- urls = util.sortdict(
- (k, p.rawloc) for k, p in sorted(pycompat.iteritems(paths))
- )
+ all_paths = urlutil.list_paths(repo.ui)
+ urls = util.sortdict((k, p.rawloc) for k, p in all_paths)
def makemap(k):
p = paths[k]
@@ -671,7 +672,10 @@
d.update((o, v) for o, v in sorted(pycompat.iteritems(p.suboptions)))
return d
- return _hybrid(None, urls, makemap, lambda k: b'%s=%s' % (k, urls[k]))
+ def format_one(k):
+ return b'%s=%s' % (k, urls[k])
+
+ return _hybrid(None, urls, makemap, format_one)
@templatekeyword(b"predecessors", requires={b'repo', b'ctx'})
--- a/mercurial/testing/storage.py Sun May 02 16:56:20 2021 -0400
+++ b/mercurial/testing/storage.py Fri May 07 22:06:25 2021 -0400
@@ -11,7 +11,6 @@
from ..node import (
hex,
- nullid,
nullrev,
)
from ..pycompat import getattr
@@ -51,7 +50,7 @@
self.assertFalse(f.hasnode(None))
self.assertFalse(f.hasnode(0))
self.assertFalse(f.hasnode(nullrev))
- self.assertFalse(f.hasnode(nullid))
+ self.assertFalse(f.hasnode(f.nullid))
self.assertFalse(f.hasnode(b'0'))
self.assertFalse(f.hasnode(b'a' * 20))
@@ -64,8 +63,8 @@
self.assertEqual(list(f.revs(start=20)), [])
- # parents() and parentrevs() work with nullid/nullrev.
- self.assertEqual(f.parents(nullid), (nullid, nullid))
+ # parents() and parentrevs() work with f.nullid/nullrev.
+ self.assertEqual(f.parents(f.nullid), (f.nullid, f.nullid))
self.assertEqual(f.parentrevs(nullrev), (nullrev, nullrev))
with self.assertRaises(error.LookupError):
@@ -78,9 +77,9 @@
with self.assertRaises(IndexError):
f.parentrevs(i)
- # nullid/nullrev lookup always works.
- self.assertEqual(f.rev(nullid), nullrev)
- self.assertEqual(f.node(nullrev), nullid)
+ # f.nullid/nullrev lookup always works.
+ self.assertEqual(f.rev(f.nullid), nullrev)
+ self.assertEqual(f.node(nullrev), f.nullid)
with self.assertRaises(error.LookupError):
f.rev(b'\x01' * 20)
@@ -92,16 +91,16 @@
with self.assertRaises(IndexError):
f.node(i)
- self.assertEqual(f.lookup(nullid), nullid)
- self.assertEqual(f.lookup(nullrev), nullid)
- self.assertEqual(f.lookup(hex(nullid)), nullid)
- self.assertEqual(f.lookup(b'%d' % nullrev), nullid)
+ self.assertEqual(f.lookup(f.nullid), f.nullid)
+ self.assertEqual(f.lookup(nullrev), f.nullid)
+ self.assertEqual(f.lookup(hex(f.nullid)), f.nullid)
+ self.assertEqual(f.lookup(b'%d' % nullrev), f.nullid)
with self.assertRaises(error.LookupError):
f.lookup(b'badvalue')
with self.assertRaises(error.LookupError):
- f.lookup(hex(nullid)[0:12])
+ f.lookup(hex(f.nullid)[0:12])
with self.assertRaises(error.LookupError):
f.lookup(b'-2')
@@ -140,19 +139,19 @@
with self.assertRaises(IndexError):
f.iscensored(i)
- self.assertEqual(list(f.commonancestorsheads(nullid, nullid)), [])
+ self.assertEqual(list(f.commonancestorsheads(f.nullid, f.nullid)), [])
with self.assertRaises(ValueError):
self.assertEqual(list(f.descendants([])), [])
self.assertEqual(list(f.descendants([nullrev])), [])
- self.assertEqual(f.heads(), [nullid])
- self.assertEqual(f.heads(nullid), [nullid])
- self.assertEqual(f.heads(None, [nullid]), [nullid])
- self.assertEqual(f.heads(nullid, [nullid]), [nullid])
+ self.assertEqual(f.heads(), [f.nullid])
+ self.assertEqual(f.heads(f.nullid), [f.nullid])
+ self.assertEqual(f.heads(None, [f.nullid]), [f.nullid])
+ self.assertEqual(f.heads(f.nullid, [f.nullid]), [f.nullid])
- self.assertEqual(f.children(nullid), [])
+ self.assertEqual(f.children(f.nullid), [])
with self.assertRaises(error.LookupError):
f.children(b'\x01' * 20)
@@ -160,7 +159,7 @@
def testsinglerevision(self):
f = self._makefilefn()
with self._maketransactionfn() as tr:
- node = f.add(b'initial', None, tr, 0, nullid, nullid)
+ node = f.add(b'initial', None, tr, 0, f.nullid, f.nullid)
self.assertEqual(len(f), 1)
self.assertEqual(list(f), [0])
@@ -174,7 +173,7 @@
self.assertTrue(f.hasnode(node))
self.assertFalse(f.hasnode(hex(node)))
self.assertFalse(f.hasnode(nullrev))
- self.assertFalse(f.hasnode(nullid))
+ self.assertFalse(f.hasnode(f.nullid))
self.assertFalse(f.hasnode(node[0:12]))
self.assertFalse(f.hasnode(hex(node)[0:20]))
@@ -188,7 +187,7 @@
self.assertEqual(list(f.revs(1, 0)), [1, 0])
self.assertEqual(list(f.revs(2, 0)), [2, 1, 0])
- self.assertEqual(f.parents(node), (nullid, nullid))
+ self.assertEqual(f.parents(node), (f.nullid, f.nullid))
self.assertEqual(f.parentrevs(0), (nullrev, nullrev))
with self.assertRaises(error.LookupError):
@@ -209,7 +208,7 @@
self.assertEqual(f.lookup(node), node)
self.assertEqual(f.lookup(0), node)
- self.assertEqual(f.lookup(-1), nullid)
+ self.assertEqual(f.lookup(-1), f.nullid)
self.assertEqual(f.lookup(b'0'), node)
self.assertEqual(f.lookup(hex(node)), node)
@@ -256,9 +255,9 @@
f = self._makefilefn()
with self._maketransactionfn() as tr:
- node0 = f.add(fulltext0, None, tr, 0, nullid, nullid)
- node1 = f.add(fulltext1, None, tr, 1, node0, nullid)
- node2 = f.add(fulltext2, None, tr, 3, node1, nullid)
+ node0 = f.add(fulltext0, None, tr, 0, f.nullid, f.nullid)
+ node1 = f.add(fulltext1, None, tr, 1, node0, f.nullid)
+ node2 = f.add(fulltext2, None, tr, 3, node1, f.nullid)
self.assertEqual(len(f), 3)
self.assertEqual(list(f), [0, 1, 2])
@@ -284,9 +283,9 @@
# TODO this is wrong
self.assertEqual(list(f.revs(3, 2)), [3, 2])
- self.assertEqual(f.parents(node0), (nullid, nullid))
- self.assertEqual(f.parents(node1), (node0, nullid))
- self.assertEqual(f.parents(node2), (node1, nullid))
+ self.assertEqual(f.parents(node0), (f.nullid, f.nullid))
+ self.assertEqual(f.parents(node1), (node0, f.nullid))
+ self.assertEqual(f.parents(node2), (node1, f.nullid))
self.assertEqual(f.parentrevs(0), (nullrev, nullrev))
self.assertEqual(f.parentrevs(1), (0, nullrev))
@@ -330,7 +329,7 @@
with self.assertRaises(IndexError):
f.iscensored(3)
- self.assertEqual(f.commonancestorsheads(node1, nullid), [])
+ self.assertEqual(f.commonancestorsheads(node1, f.nullid), [])
self.assertEqual(f.commonancestorsheads(node1, node0), [node0])
self.assertEqual(f.commonancestorsheads(node1, node1), [node1])
self.assertEqual(f.commonancestorsheads(node0, node1), [node0])
@@ -364,12 +363,12 @@
f = self._makefilefn()
with self._maketransactionfn() as tr:
- node0 = f.add(b'0', None, tr, 0, nullid, nullid)
- node1 = f.add(b'1', None, tr, 1, node0, nullid)
- node2 = f.add(b'2', None, tr, 2, node1, nullid)
- node3 = f.add(b'3', None, tr, 3, node0, nullid)
- node4 = f.add(b'4', None, tr, 4, node3, nullid)
- node5 = f.add(b'5', None, tr, 5, node0, nullid)
+ node0 = f.add(b'0', None, tr, 0, f.nullid, f.nullid)
+ node1 = f.add(b'1', None, tr, 1, node0, f.nullid)
+ node2 = f.add(b'2', None, tr, 2, node1, f.nullid)
+ node3 = f.add(b'3', None, tr, 3, node0, f.nullid)
+ node4 = f.add(b'4', None, tr, 4, node3, f.nullid)
+ node5 = f.add(b'5', None, tr, 5, node0, f.nullid)
self.assertEqual(len(f), 6)
@@ -427,24 +426,24 @@
with self.assertRaises(IndexError):
f.size(i)
- self.assertEqual(f.revision(nullid), b'')
- self.assertEqual(f.rawdata(nullid), b'')
+ self.assertEqual(f.revision(f.nullid), b'')
+ self.assertEqual(f.rawdata(f.nullid), b'')
with self.assertRaises(error.LookupError):
f.revision(b'\x01' * 20)
- self.assertEqual(f.read(nullid), b'')
+ self.assertEqual(f.read(f.nullid), b'')
with self.assertRaises(error.LookupError):
f.read(b'\x01' * 20)
- self.assertFalse(f.renamed(nullid))
+ self.assertFalse(f.renamed(f.nullid))
with self.assertRaises(error.LookupError):
f.read(b'\x01' * 20)
- self.assertTrue(f.cmp(nullid, b''))
- self.assertTrue(f.cmp(nullid, b'foo'))
+ self.assertTrue(f.cmp(f.nullid, b''))
+ self.assertTrue(f.cmp(f.nullid, b'foo'))
with self.assertRaises(error.LookupError):
f.cmp(b'\x01' * 20, b'irrelevant')
@@ -455,7 +454,7 @@
next(gen)
# Emitting null node yields nothing.
- gen = f.emitrevisions([nullid])
+ gen = f.emitrevisions([f.nullid])
with self.assertRaises(StopIteration):
next(gen)
@@ -468,7 +467,7 @@
f = self._makefilefn()
with self._maketransactionfn() as tr:
- node = f.add(fulltext, None, tr, 0, nullid, nullid)
+ node = f.add(fulltext, None, tr, 0, f.nullid, f.nullid)
self.assertEqual(f.storageinfo(), {})
self.assertEqual(
@@ -496,10 +495,10 @@
rev = next(gen)
self.assertEqual(rev.node, node)
- self.assertEqual(rev.p1node, nullid)
- self.assertEqual(rev.p2node, nullid)
+ self.assertEqual(rev.p1node, f.nullid)
+ self.assertEqual(rev.p2node, f.nullid)
self.assertIsNone(rev.linknode)
- self.assertEqual(rev.basenode, nullid)
+ self.assertEqual(rev.basenode, f.nullid)
self.assertIsNone(rev.baserevisionsize)
self.assertIsNone(rev.revision)
self.assertIsNone(rev.delta)
@@ -512,10 +511,10 @@
rev = next(gen)
self.assertEqual(rev.node, node)
- self.assertEqual(rev.p1node, nullid)
- self.assertEqual(rev.p2node, nullid)
+ self.assertEqual(rev.p1node, f.nullid)
+ self.assertEqual(rev.p2node, f.nullid)
self.assertIsNone(rev.linknode)
- self.assertEqual(rev.basenode, nullid)
+ self.assertEqual(rev.basenode, f.nullid)
self.assertIsNone(rev.baserevisionsize)
self.assertEqual(rev.revision, fulltext)
self.assertIsNone(rev.delta)
@@ -534,9 +533,9 @@
f = self._makefilefn()
with self._maketransactionfn() as tr:
- node0 = f.add(fulltext0, None, tr, 0, nullid, nullid)
- node1 = f.add(fulltext1, None, tr, 1, node0, nullid)
- node2 = f.add(fulltext2, None, tr, 3, node1, nullid)
+ node0 = f.add(fulltext0, None, tr, 0, f.nullid, f.nullid)
+ node1 = f.add(fulltext1, None, tr, 1, node0, f.nullid)
+ node2 = f.add(fulltext2, None, tr, 3, node1, f.nullid)
self.assertEqual(f.storageinfo(), {})
self.assertEqual(
@@ -596,10 +595,10 @@
rev = next(gen)
self.assertEqual(rev.node, node0)
- self.assertEqual(rev.p1node, nullid)
- self.assertEqual(rev.p2node, nullid)
+ self.assertEqual(rev.p1node, f.nullid)
+ self.assertEqual(rev.p2node, f.nullid)
self.assertIsNone(rev.linknode)
- self.assertEqual(rev.basenode, nullid)
+ self.assertEqual(rev.basenode, f.nullid)
self.assertIsNone(rev.baserevisionsize)
self.assertEqual(rev.revision, fulltext0)
self.assertIsNone(rev.delta)
@@ -608,7 +607,7 @@
self.assertEqual(rev.node, node1)
self.assertEqual(rev.p1node, node0)
- self.assertEqual(rev.p2node, nullid)
+ self.assertEqual(rev.p2node, f.nullid)
self.assertIsNone(rev.linknode)
self.assertEqual(rev.basenode, node0)
self.assertIsNone(rev.baserevisionsize)
@@ -622,7 +621,7 @@
self.assertEqual(rev.node, node2)
self.assertEqual(rev.p1node, node1)
- self.assertEqual(rev.p2node, nullid)
+ self.assertEqual(rev.p2node, f.nullid)
self.assertIsNone(rev.linknode)
self.assertEqual(rev.basenode, node1)
self.assertIsNone(rev.baserevisionsize)
@@ -641,10 +640,10 @@
rev = next(gen)
self.assertEqual(rev.node, node0)
- self.assertEqual(rev.p1node, nullid)
- self.assertEqual(rev.p2node, nullid)
+ self.assertEqual(rev.p1node, f.nullid)
+ self.assertEqual(rev.p2node, f.nullid)
self.assertIsNone(rev.linknode)
- self.assertEqual(rev.basenode, nullid)
+ self.assertEqual(rev.basenode, f.nullid)
self.assertIsNone(rev.baserevisionsize)
self.assertEqual(rev.revision, fulltext0)
self.assertIsNone(rev.delta)
@@ -653,7 +652,7 @@
self.assertEqual(rev.node, node1)
self.assertEqual(rev.p1node, node0)
- self.assertEqual(rev.p2node, nullid)
+ self.assertEqual(rev.p2node, f.nullid)
self.assertIsNone(rev.linknode)
self.assertEqual(rev.basenode, node0)
self.assertIsNone(rev.baserevisionsize)
@@ -667,7 +666,7 @@
self.assertEqual(rev.node, node2)
self.assertEqual(rev.p1node, node1)
- self.assertEqual(rev.p2node, nullid)
+ self.assertEqual(rev.p2node, f.nullid)
self.assertIsNone(rev.linknode)
self.assertEqual(rev.basenode, node1)
self.assertIsNone(rev.baserevisionsize)
@@ -700,16 +699,16 @@
rev = next(gen)
self.assertEqual(rev.node, node2)
self.assertEqual(rev.p1node, node1)
- self.assertEqual(rev.p2node, nullid)
- self.assertEqual(rev.basenode, nullid)
+ self.assertEqual(rev.p2node, f.nullid)
+ self.assertEqual(rev.basenode, f.nullid)
self.assertIsNone(rev.baserevisionsize)
self.assertEqual(rev.revision, fulltext2)
self.assertIsNone(rev.delta)
rev = next(gen)
self.assertEqual(rev.node, node0)
- self.assertEqual(rev.p1node, nullid)
- self.assertEqual(rev.p2node, nullid)
+ self.assertEqual(rev.p1node, f.nullid)
+ self.assertEqual(rev.p2node, f.nullid)
# Delta behavior is storage dependent, so we can't easily test it.
with self.assertRaises(StopIteration):
@@ -722,8 +721,8 @@
rev = next(gen)
self.assertEqual(rev.node, node1)
self.assertEqual(rev.p1node, node0)
- self.assertEqual(rev.p2node, nullid)
- self.assertEqual(rev.basenode, nullid)
+ self.assertEqual(rev.p2node, f.nullid)
+ self.assertEqual(rev.basenode, f.nullid)
self.assertIsNone(rev.baserevisionsize)
self.assertEqual(rev.revision, fulltext1)
self.assertIsNone(rev.delta)
@@ -731,7 +730,7 @@
rev = next(gen)
self.assertEqual(rev.node, node2)
self.assertEqual(rev.p1node, node1)
- self.assertEqual(rev.p2node, nullid)
+ self.assertEqual(rev.p2node, f.nullid)
self.assertEqual(rev.basenode, node1)
self.assertIsNone(rev.baserevisionsize)
self.assertIsNone(rev.revision)
@@ -751,7 +750,7 @@
rev = next(gen)
self.assertEqual(rev.node, node1)
self.assertEqual(rev.p1node, node0)
- self.assertEqual(rev.p2node, nullid)
+ self.assertEqual(rev.p2node, f.nullid)
self.assertEqual(rev.basenode, node0)
self.assertIsNone(rev.baserevisionsize)
self.assertIsNone(rev.revision)
@@ -768,9 +767,9 @@
rev = next(gen)
self.assertEqual(rev.node, node0)
- self.assertEqual(rev.p1node, nullid)
- self.assertEqual(rev.p2node, nullid)
- self.assertEqual(rev.basenode, nullid)
+ self.assertEqual(rev.p1node, f.nullid)
+ self.assertEqual(rev.p2node, f.nullid)
+ self.assertEqual(rev.basenode, f.nullid)
self.assertIsNone(rev.baserevisionsize)
self.assertIsNone(rev.revision)
self.assertEqual(
@@ -789,9 +788,9 @@
rev = next(gen)
self.assertEqual(rev.node, node0)
- self.assertEqual(rev.p1node, nullid)
- self.assertEqual(rev.p2node, nullid)
- self.assertEqual(rev.basenode, nullid)
+ self.assertEqual(rev.p1node, f.nullid)
+ self.assertEqual(rev.p2node, f.nullid)
+ self.assertEqual(rev.basenode, f.nullid)
self.assertIsNone(rev.baserevisionsize)
self.assertIsNone(rev.revision)
self.assertEqual(
@@ -802,7 +801,7 @@
rev = next(gen)
self.assertEqual(rev.node, node2)
self.assertEqual(rev.p1node, node1)
- self.assertEqual(rev.p2node, nullid)
+ self.assertEqual(rev.p2node, f.nullid)
self.assertEqual(rev.basenode, node0)
with self.assertRaises(StopIteration):
@@ -841,11 +840,11 @@
f = self._makefilefn()
with self._maketransactionfn() as tr:
- node0 = f.add(fulltext0, None, tr, 0, nullid, nullid)
- node1 = f.add(fulltext1, meta1, tr, 1, node0, nullid)
- node2 = f.add(fulltext2, meta2, tr, 2, nullid, nullid)
+ node0 = f.add(fulltext0, None, tr, 0, f.nullid, f.nullid)
+ node1 = f.add(fulltext1, meta1, tr, 1, node0, f.nullid)
+ node2 = f.add(fulltext2, meta2, tr, 2, f.nullid, f.nullid)
- # Metadata header isn't recognized when parent isn't nullid.
+ # Metadata header isn't recognized when parent isn't f.nullid.
self.assertEqual(f.size(1), len(stored1))
self.assertEqual(f.size(2), len(fulltext2))
@@ -886,8 +885,8 @@
f = self._makefilefn()
with self._maketransactionfn() as tr:
- node0 = f.add(fulltext0, {}, tr, 0, nullid, nullid)
- node1 = f.add(fulltext1, meta1, tr, 1, nullid, nullid)
+ node0 = f.add(fulltext0, {}, tr, 0, f.nullid, f.nullid)
+ node1 = f.add(fulltext1, meta1, tr, 1, f.nullid, f.nullid)
# TODO this is buggy.
self.assertEqual(f.size(0), len(fulltext0) + 4)
@@ -916,15 +915,15 @@
fulltext1 = fulltext0 + b'bar\n'
with self._maketransactionfn() as tr:
- node0 = f.add(fulltext0, None, tr, 0, nullid, nullid)
+ node0 = f.add(fulltext0, None, tr, 0, f.nullid, f.nullid)
node1 = b'\xaa' * 20
self._addrawrevisionfn(
- f, tr, node1, node0, nullid, 1, rawtext=fulltext1
+ f, tr, node1, node0, f.nullid, 1, rawtext=fulltext1
)
self.assertEqual(len(f), 2)
- self.assertEqual(f.parents(node1), (node0, nullid))
+ self.assertEqual(f.parents(node1), (node0, f.nullid))
# revision() raises since it performs hash verification.
with self.assertRaises(error.StorageError):
@@ -951,11 +950,11 @@
fulltext1 = fulltext0 + b'bar\n'
with self._maketransactionfn() as tr:
- node0 = f.add(fulltext0, None, tr, 0, nullid, nullid)
+ node0 = f.add(fulltext0, None, tr, 0, f.nullid, f.nullid)
node1 = b'\xaa' * 20
self._addrawrevisionfn(
- f, tr, node1, node0, nullid, 1, rawtext=fulltext1
+ f, tr, node1, node0, f.nullid, 1, rawtext=fulltext1
)
with self.assertRaises(error.StorageError):
@@ -973,11 +972,11 @@
fulltext1 = fulltext0 + b'bar\n'
with self._maketransactionfn() as tr:
- node0 = f.add(fulltext0, None, tr, 0, nullid, nullid)
+ node0 = f.add(fulltext0, None, tr, 0, f.nullid, f.nullid)
node1 = b'\xaa' * 20
self._addrawrevisionfn(
- f, tr, node1, node0, nullid, 1, rawtext=fulltext1
+ f, tr, node1, node0, f.nullid, 1, rawtext=fulltext1
)
with self.assertRaises(error.StorageError):
@@ -994,22 +993,22 @@
fulltext2 = fulltext1 + b'baz\n'
with self._maketransactionfn() as tr:
- node0 = f.add(fulltext0, None, tr, 0, nullid, nullid)
+ node0 = f.add(fulltext0, None, tr, 0, f.nullid, f.nullid)
node1 = b'\xaa' * 20
self._addrawrevisionfn(
- f, tr, node1, node0, nullid, 1, rawtext=fulltext1
+ f, tr, node1, node0, f.nullid, 1, rawtext=fulltext1
)
with self.assertRaises(error.StorageError):
f.read(node1)
- node2 = storageutil.hashrevisionsha1(fulltext2, node1, nullid)
+ node2 = storageutil.hashrevisionsha1(fulltext2, node1, f.nullid)
with self._maketransactionfn() as tr:
delta = mdiff.textdiff(fulltext1, fulltext2)
self._addrawrevisionfn(
- f, tr, node2, node1, nullid, 2, delta=(1, delta)
+ f, tr, node2, node1, f.nullid, 2, delta=(1, delta)
)
self.assertEqual(len(f), 3)
@@ -1029,13 +1028,13 @@
)
with self._maketransactionfn() as tr:
- node0 = f.add(b'foo', None, tr, 0, nullid, nullid)
+ node0 = f.add(b'foo', None, tr, 0, f.nullid, f.nullid)
# The node value doesn't matter since we can't verify it.
node1 = b'\xbb' * 20
self._addrawrevisionfn(
- f, tr, node1, node0, nullid, 1, stored1, censored=True
+ f, tr, node1, node0, f.nullid, 1, stored1, censored=True
)
self.assertTrue(f.iscensored(1))
@@ -1063,13 +1062,13 @@
)
with self._maketransactionfn() as tr:
- node0 = f.add(b'foo', None, tr, 0, nullid, nullid)
+ node0 = f.add(b'foo', None, tr, 0, f.nullid, f.nullid)
# The node value doesn't matter since we can't verify it.
node1 = b'\xbb' * 20
self._addrawrevisionfn(
- f, tr, node1, node0, nullid, 1, stored1, censored=True
+ f, tr, node1, node0, f.nullid, 1, stored1, censored=True
)
with self.assertRaises(error.CensoredNodeError):
@@ -1088,10 +1087,10 @@
def testaddnoop(self):
f = self._makefilefn()
with self._maketransactionfn() as tr:
- node0 = f.add(b'foo', None, tr, 0, nullid, nullid)
- node1 = f.add(b'foo', None, tr, 0, nullid, nullid)
+ node0 = f.add(b'foo', None, tr, 0, f.nullid, f.nullid)
+ node1 = f.add(b'foo', None, tr, 0, f.nullid, f.nullid)
# Varying by linkrev shouldn't impact hash.
- node2 = f.add(b'foo', None, tr, 1, nullid, nullid)
+ node2 = f.add(b'foo', None, tr, 1, f.nullid, f.nullid)
self.assertEqual(node1, node0)
self.assertEqual(node2, node0)
@@ -1102,7 +1101,9 @@
with self._maketransactionfn() as tr:
# Adding a revision with bad node value fails.
with self.assertRaises(error.StorageError):
- f.addrevision(b'foo', tr, 0, nullid, nullid, node=b'\x01' * 20)
+ f.addrevision(
+ b'foo', tr, 0, f.nullid, f.nullid, node=b'\x01' * 20
+ )
def testaddrevisionunknownflag(self):
f = self._makefilefn()
@@ -1113,7 +1114,7 @@
break
with self.assertRaises(error.StorageError):
- f.addrevision(b'foo', tr, 0, nullid, nullid, flags=flags)
+ f.addrevision(b'foo', tr, 0, f.nullid, f.nullid, flags=flags)
def testaddgroupsimple(self):
f = self._makefilefn()
@@ -1153,12 +1154,12 @@
delta0 = mdiff.trivialdiffheader(len(fulltext0)) + fulltext0
with self._maketransactionfn() as tr:
- node0 = f.add(fulltext0, None, tr, 0, nullid, nullid)
+ node0 = f.add(fulltext0, None, tr, 0, f.nullid, f.nullid)
f = self._makefilefn()
deltas = [
- (node0, nullid, nullid, nullid, nullid, delta0, 0, {}),
+ (node0, f.nullid, f.nullid, f.nullid, f.nullid, delta0, 0, {}),
]
with self._maketransactionfn() as tr:
@@ -1207,7 +1208,7 @@
nodes = []
with self._maketransactionfn() as tr:
for fulltext in fulltexts:
- nodes.append(f.add(fulltext, None, tr, 0, nullid, nullid))
+ nodes.append(f.add(fulltext, None, tr, 0, f.nullid, f.nullid))
f = self._makefilefn()
deltas = []
@@ -1215,7 +1216,7 @@
delta = mdiff.trivialdiffheader(len(fulltext)) + fulltext
deltas.append(
- (nodes[i], nullid, nullid, nullid, nullid, delta, 0, {})
+ (nodes[i], f.nullid, f.nullid, f.nullid, f.nullid, delta, 0, {})
)
with self._maketransactionfn() as tr:
@@ -1254,18 +1255,18 @@
)
with self._maketransactionfn() as tr:
- node0 = f.add(b'foo\n' * 30, None, tr, 0, nullid, nullid)
+ node0 = f.add(b'foo\n' * 30, None, tr, 0, f.nullid, f.nullid)
# The node value doesn't matter since we can't verify it.
node1 = b'\xbb' * 20
self._addrawrevisionfn(
- f, tr, node1, node0, nullid, 1, stored1, censored=True
+ f, tr, node1, node0, f.nullid, 1, stored1, censored=True
)
delta = mdiff.textdiff(b'bar\n' * 30, (b'bar\n' * 30) + b'baz\n')
deltas = [
- (b'\xcc' * 20, node1, nullid, b'\x01' * 20, node1, delta, 0, {})
+ (b'\xcc' * 20, node1, f.nullid, b'\x01' * 20, node1, delta, 0, {})
]
with self._maketransactionfn() as tr:
@@ -1276,9 +1277,9 @@
f = self._makefilefn()
with self._maketransactionfn() as tr:
- node0 = f.add(b'foo\n' * 30, None, tr, 0, nullid, nullid)
- node1 = f.add(b'foo\n' * 31, None, tr, 1, node0, nullid)
- node2 = f.add(b'foo\n' * 32, None, tr, 2, node1, nullid)
+ node0 = f.add(b'foo\n' * 30, None, tr, 0, f.nullid, f.nullid)
+ node1 = f.add(b'foo\n' * 31, None, tr, 1, node0, f.nullid)
+ node2 = f.add(b'foo\n' * 32, None, tr, 2, node1, f.nullid)
with self._maketransactionfn() as tr:
f.censorrevision(tr, node1)
@@ -1298,7 +1299,7 @@
with self._maketransactionfn() as tr:
for rev in range(10):
- f.add(b'%d' % rev, None, tr, rev, nullid, nullid)
+ f.add(b'%d' % rev, None, tr, rev, f.nullid, f.nullid)
for rev in range(10):
self.assertEqual(f.getstrippoint(rev), (rev, set()))
@@ -1308,10 +1309,10 @@
f = self._makefilefn()
with self._maketransactionfn() as tr:
- p1 = nullid
+ p1 = f.nullid
for rev in range(10):
- f.add(b'%d' % rev, None, tr, rev, p1, nullid)
+ f.add(b'%d' % rev, None, tr, rev, p1, f.nullid)
for rev in range(10):
self.assertEqual(f.getstrippoint(rev), (rev, set()))
@@ -1320,11 +1321,11 @@
f = self._makefilefn()
with self._maketransactionfn() as tr:
- node0 = f.add(b'0', None, tr, 0, nullid, nullid)
- node1 = f.add(b'1', None, tr, 1, node0, nullid)
- f.add(b'2', None, tr, 2, node1, nullid)
- f.add(b'3', None, tr, 3, node0, nullid)
- f.add(b'4', None, tr, 4, node0, nullid)
+ node0 = f.add(b'0', None, tr, 0, f.nullid, f.nullid)
+ node1 = f.add(b'1', None, tr, 1, node0, f.nullid)
+ f.add(b'2', None, tr, 2, node1, f.nullid)
+ f.add(b'3', None, tr, 3, node0, f.nullid)
+ f.add(b'4', None, tr, 4, node0, f.nullid)
for rev in range(5):
self.assertEqual(f.getstrippoint(rev), (rev, set()))
@@ -1333,9 +1334,9 @@
f = self._makefilefn()
with self._maketransactionfn() as tr:
- node0 = f.add(b'0', None, tr, 0, nullid, nullid)
- f.add(b'1', None, tr, 10, node0, nullid)
- f.add(b'2', None, tr, 5, node0, nullid)
+ node0 = f.add(b'0', None, tr, 0, f.nullid, f.nullid)
+ f.add(b'1', None, tr, 10, node0, f.nullid)
+ f.add(b'2', None, tr, 5, node0, f.nullid)
self.assertEqual(f.getstrippoint(0), (0, set()))
self.assertEqual(f.getstrippoint(1), (1, set()))
@@ -1362,9 +1363,9 @@
f = self._makefilefn()
with self._maketransactionfn() as tr:
- p1 = nullid
+ p1 = f.nullid
for rev in range(10):
- p1 = f.add(b'%d' % rev, None, tr, rev, p1, nullid)
+ p1 = f.add(b'%d' % rev, None, tr, rev, p1, f.nullid)
self.assertEqual(len(f), 10)
@@ -1377,9 +1378,9 @@
f = self._makefilefn()
with self._maketransactionfn() as tr:
- f.add(b'0', None, tr, 0, nullid, nullid)
- node1 = f.add(b'1', None, tr, 5, nullid, nullid)
- node2 = f.add(b'2', None, tr, 10, nullid, nullid)
+ f.add(b'0', None, tr, 0, f.nullid, f.nullid)
+ node1 = f.add(b'1', None, tr, 5, f.nullid, f.nullid)
+ node2 = f.add(b'2', None, tr, 10, f.nullid, f.nullid)
self.assertEqual(len(f), 3)
--- a/mercurial/treediscovery.py Sun May 02 16:56:20 2021 -0400
+++ b/mercurial/treediscovery.py Fri May 07 22:06:25 2021 -0400
@@ -10,10 +10,7 @@
import collections
from .i18n import _
-from .node import (
- nullid,
- short,
-)
+from .node import short
from . import (
error,
pycompat,
@@ -44,11 +41,11 @@
if audit is not None:
audit[b'total-roundtrips'] = 1
- if repo.changelog.tip() == nullid:
- base.add(nullid)
- if heads != [nullid]:
- return [nullid], [nullid], list(heads)
- return [nullid], [], heads
+ if repo.changelog.tip() == repo.nullid:
+ base.add(repo.nullid)
+ if heads != [repo.nullid]:
+ return [repo.nullid], [repo.nullid], list(heads)
+ return [repo.nullid], [], heads
# assume we're closer to the tip than the root
# and start by examining the heads
@@ -84,7 +81,7 @@
continue
repo.ui.debug(b"examining %s:%s\n" % (short(n[0]), short(n[1])))
- if n[0] == nullid: # found the end of the branch
+ if n[0] == repo.nullid: # found the end of the branch
pass
elif n in seenbranch:
repo.ui.debug(b"branch already found\n")
@@ -170,7 +167,7 @@
raise error.RepoError(_(b"already have changeset ") + short(f[:4]))
base = list(base)
- if base == [nullid]:
+ if base == [repo.nullid]:
if force:
repo.ui.warn(_(b"warning: repository is unrelated\n"))
else:
--- a/mercurial/ui.py Sun May 02 16:56:20 2021 -0400
+++ b/mercurial/ui.py Fri May 07 22:06:25 2021 -0400
@@ -1058,6 +1058,8 @@
This method exists as `getpath` needs a ui for potential warning messages.
"""
+ msg = b'ui.getpath is deprecated, use `get_*` functions from urlutil'
+ self.deprecwarn(msg, '6.0')
return self.paths.getpath(self, *args, **kwargs)
@property
--- a/mercurial/unionrepo.py Sun May 02 16:56:20 2021 -0400
+++ b/mercurial/unionrepo.py Fri May 07 22:06:25 2021 -0400
@@ -41,7 +41,11 @@
# To differentiate a rev in the second revlog from a rev in the revlog,
# we check revision against repotiprev.
opener = vfsmod.readonlyvfs(opener)
- revlog.revlog.__init__(self, opener, indexfile)
+ target = getattr(revlog2, 'target', None)
+ if target is None:
+        # a revlog wrapper, e.g. a manifestlog that is not an actual revlog
+ target = revlog2._revlog.target
+ revlog.revlog.__init__(self, opener, target=target, indexfile=indexfile)
self.revlog2 = revlog2
n = len(self)
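
The `getattr` fallback added to unionrepo above, isolated: wrapper objects keep the wrapped revlog on `_revlog`, so `target` is read from whichever level defines it. A minimal sketch with stand-in classes:

    class PlainRevlog:
        target = (b'kind', b'name')

    class ManifestLikeWrapper:
        # a revlog wrapper exposes the real revlog as _revlog
        _revlog = PlainRevlog()

    def revlog_target(rl):
        target = getattr(rl, 'target', None)
        if target is None:
            target = rl._revlog.target
        return target

    assert revlog_target(PlainRevlog()) == revlog_target(ManifestLikeWrapper())
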
--- a/mercurial/upgrade_utils/engine.py Sun May 02 16:56:20 2021 -0400
+++ b/mercurial/upgrade_utils/engine.py Fri May 07 22:06:25 2021 -0400
@@ -19,13 +19,32 @@
metadata,
pycompat,
requirements,
- revlog,
scmutil,
store,
util,
vfs as vfsmod,
)
-from ..revlogutils import nodemap
+from ..revlogutils import (
+ constants as revlogconst,
+ flagutil,
+ nodemap,
+ sidedata as sidedatamod,
+)
+
+
+def get_sidedata_helpers(srcrepo, dstrepo):
+ use_w = srcrepo.ui.configbool(b'experimental', b'worker.repository-upgrade')
+ sequential = pycompat.iswindows or not use_w
+ if not sequential:
+ srcrepo.register_sidedata_computer(
+ revlogconst.KIND_CHANGELOG,
+ sidedatamod.SD_FILES,
+ (sidedatamod.SD_FILES,),
+ metadata._get_worker_sidedata_adder(srcrepo, dstrepo),
+ flagutil.REVIDX_HASCOPIESINFO,
+ replace=True,
+ )
+ return sidedatamod.get_sidedata_helpers(srcrepo, dstrepo._wanted_sidedata)
def _revlogfrompath(repo, rl_type, path):
@@ -89,25 +108,6 @@
)
-def getsidedatacompanion(srcrepo, dstrepo):
- sidedatacompanion = None
- removedreqs = srcrepo.requirements - dstrepo.requirements
- addedreqs = dstrepo.requirements - srcrepo.requirements
- if requirements.SIDEDATA_REQUIREMENT in removedreqs:
-
- def sidedatacompanion(rl, rev):
- rl = getattr(rl, '_revlog', rl)
- if rl.flags(rev) & revlog.REVIDX_SIDEDATA:
- return True, (), {}, 0, 0
- return False, (), {}, 0, 0
-
- elif requirements.COPIESSDC_REQUIREMENT in addedreqs:
- sidedatacompanion = metadata.getsidedataadder(srcrepo, dstrepo)
- elif requirements.COPIESSDC_REQUIREMENT in removedreqs:
- sidedatacompanion = metadata.getsidedataremover(srcrepo, dstrepo)
- return sidedatacompanion
-
-
def matchrevlog(revlogfilter, rl_type):
"""check if a revlog is selected for cloning.
@@ -131,7 +131,7 @@
rl_type,
unencoded,
upgrade_op,
- sidedatacompanion,
+ sidedata_helpers,
oncopiedrevision,
):
"""returns the new revlog object created"""
@@ -147,7 +147,7 @@
addrevisioncb=oncopiedrevision,
deltareuse=upgrade_op.delta_reuse_mode,
forcedeltabothparents=upgrade_op.force_re_delta_both_parents,
- sidedatacompanion=sidedatacompanion,
+ sidedata_helpers=sidedata_helpers,
)
else:
msg = _(b'blindly copying %s containing %i revisions\n')
@@ -257,7 +257,7 @@
def oncopiedrevision(rl, rev, node):
progress.increment()
- sidedatacompanion = getsidedatacompanion(srcrepo, dstrepo)
+ sidedata_helpers = get_sidedata_helpers(srcrepo, dstrepo)
# Migrating filelogs
ui.status(
@@ -282,7 +282,7 @@
rl_type,
unencoded,
upgrade_op,
- sidedatacompanion,
+ sidedata_helpers,
oncopiedrevision,
)
info = newrl.storageinfo(storedsize=True)
@@ -322,7 +322,7 @@
rl_type,
unencoded,
upgrade_op,
- sidedatacompanion,
+ sidedata_helpers,
oncopiedrevision,
)
info = newrl.storageinfo(storedsize=True)
@@ -361,7 +361,7 @@
rl_type,
unencoded,
upgrade_op,
- sidedatacompanion,
+ sidedata_helpers,
oncopiedrevision,
)
info = newrl.storageinfo(storedsize=True)
--- a/mercurial/util.py Sun May 02 16:56:20 2021 -0400
+++ b/mercurial/util.py Fri May 07 22:06:25 2021 -0400
@@ -34,6 +34,7 @@
import traceback
import warnings
+from .node import hex
from .thirdparty import attr
from .pycompat import (
delattr,
--- a/mercurial/utils/storageutil.py Sun May 02 16:56:20 2021 -0400
+++ b/mercurial/utils/storageutil.py Fri May 07 22:06:25 2021 -0400
@@ -13,8 +13,8 @@
from ..i18n import _
from ..node import (
bin,
- nullid,
nullrev,
+ sha1nodeconstants,
)
from .. import (
dagop,
@@ -26,7 +26,11 @@
from ..revlogutils import sidedata as sidedatamod
from ..utils import hashutil
-_nullhash = hashutil.sha1(nullid)
+_nullhash = hashutil.sha1(sha1nodeconstants.nullid)
+
+# revision data contains extra metadata not part of the official digest
+# Only used in changegroup >= v4.
+CG_FLAG_SIDEDATA = 1
def hashrevisionsha1(text, p1, p2):
@@ -37,7 +41,7 @@
content in the revision graph.
"""
# As of now, if one of the parent nodes is null, p2 is null
- if p2 == nullid:
+ if p2 == sha1nodeconstants.nullid:
# deep copy of a hash is faster than creating one
s = _nullhash.copy()
s.update(p1)
@@ -107,7 +111,7 @@
Returns ``False`` if the file has no copy metadata. Otherwise a
2-tuple of the source filename and node.
"""
- if store.parents(node)[0] != nullid:
+ if store.parents(node)[0] != sha1nodeconstants.nullid:
return False
meta = parsemeta(store.revision(node))[0]
@@ -360,19 +364,7 @@
``assumehaveparentrevisions``
``sidedata_helpers`` (optional)
If not None, means that sidedata should be included.
- A dictionary of revlog type to tuples of `(repo, computers, removers)`:
- * `repo` is used as an argument for computers
- * `computers` is a list of `(category, (keys, computer)` that
- compute the missing sidedata categories that were asked:
- * `category` is the sidedata category
- * `keys` are the sidedata keys to be affected
- * `computer` is the function `(repo, store, rev, sidedata)` that
- returns a new sidedata dict.
- * `removers` will remove the keys corresponding to the categories
- that are present, but not needed.
- If both `computers` and `removers` are empty, sidedata are simply not
- transformed.
- Revlog types are `changelog`, `manifest` or `filelog`.
+    See `revlogutils.sidedata.get_sidedata_helpers`.
"""
fnode = store.node
@@ -486,51 +478,43 @@
available.add(rev)
- sidedata = None
+ serialized_sidedata = None
+ sidedata_flags = (0, 0)
if sidedata_helpers:
- sidedata = store.sidedata(rev)
- sidedata = run_sidedata_helpers(
+ old_sidedata = store.sidedata(rev)
+ sidedata, sidedata_flags = sidedatamod.run_sidedata_helpers(
store=store,
sidedata_helpers=sidedata_helpers,
- sidedata=sidedata,
+ sidedata=old_sidedata,
rev=rev,
)
- sidedata = sidedatamod.serialize_sidedata(sidedata)
+ if sidedata:
+ serialized_sidedata = sidedatamod.serialize_sidedata(sidedata)
+
+ flags = flagsfn(rev) if flagsfn else 0
+ protocol_flags = 0
+ if serialized_sidedata:
+ # Advertise that sidedata exists to the other side
+ protocol_flags |= CG_FLAG_SIDEDATA
+ # Computers and removers can return flags to add and/or remove
+                flags = flags | (sidedata_flags[0] & ~sidedata_flags[1])
yield resultcls(
node=node,
p1node=fnode(p1rev),
p2node=fnode(p2rev),
basenode=fnode(baserev),
- flags=flagsfn(rev) if flagsfn else 0,
+ flags=flags,
baserevisionsize=baserevisionsize,
revision=revision,
delta=delta,
- sidedata=sidedata,
+ sidedata=serialized_sidedata,
+ protocol_flags=protocol_flags,
)
prevrev = rev
-def run_sidedata_helpers(store, sidedata_helpers, sidedata, rev):
- """Returns the sidedata for the given revision after running through
- the given helpers.
- - `store`: the revlog this applies to (changelog, manifest, or filelog
- instance)
- - `sidedata_helpers`: see `storageutil.emitrevisions`
- - `sidedata`: previous sidedata at the given rev, if any
- - `rev`: affected rev of `store`
- """
- repo, sd_computers, sd_removers = sidedata_helpers
- kind = store.revlog_kind
- for _keys, sd_computer in sd_computers.get(kind, []):
- sidedata = sd_computer(repo, store, rev, sidedata)
- for keys, _computer in sd_removers.get(kind, []):
- for key in keys:
- sidedata.pop(key, None)
- return sidedata
-
-
def deltaiscensored(delta, baserev, baselenfn):
"""Determine if a delta represents censored revision data.
--- a/mercurial/utils/urlutil.py Sun May 02 16:56:20 2021 -0400
+++ b/mercurial/utils/urlutil.py Fri May 07 22:06:25 2021 -0400
@@ -445,6 +445,30 @@
return bytes(u)
+def list_paths(ui, target_path=None):
+ """list all the (name, paths) in the passed ui"""
+ if target_path is None:
+ return sorted(pycompat.iteritems(ui.paths))
+ else:
+ path = ui.paths.get(target_path)
+ if path is None:
+ return []
+ else:
+ return [(target_path, path)]
+
+
+def try_path(ui, url):
+ """try to build a path from a url
+
+    Return None if no Path could be built.
+ """
+ try:
+        # we pass the ui instance as warnings might need to be issued
+ return path(ui, None, rawloc=url)
+ except ValueError:
+ return None
+
+
def get_push_paths(repo, ui, dests):
"""yields all the `path` selected as push destination by `dests`"""
if not dests:
@@ -459,7 +483,15 @@
)
else:
for dest in dests:
- yield ui.getpath(dest)
+ if dest in ui.paths:
+ yield ui.paths[dest]
+ else:
+ path = try_path(ui, dest)
+ if path is None:
+ msg = _(b'repository %s does not exist')
+ msg %= dest
+ raise error.RepoError(msg)
+ yield path
def get_pull_paths(repo, ui, sources, default_branches=()):
@@ -471,10 +503,10 @@
url = ui.paths[source].rawloc
else:
# Try to resolve as a local path or URI.
- try:
- # we pass the ui instance are warning might need to be issued
- url = path(ui, None, rawloc=source).rawloc
- except ValueError:
+ path = try_path(ui, source)
+ if path is not None:
+ url = path.rawloc
+ else:
url = source
yield parseurl(url, default_branches)
@@ -520,10 +552,10 @@
url = ui.paths[source].rawloc
else:
# Try to resolve as a local path or URI.
- try:
- # we pass the ui instance are warning might need to be issued
- url = path(ui, None, rawloc=source).rawloc
- except ValueError:
+ path = try_path(ui, source)
+ if path is not None:
+ url = path.rawloc
+ else:
url = source
return parseurl(url, default_branches)
@@ -542,10 +574,10 @@
url = ui.paths[source].rawloc
else:
# Try to resolve as a local path or URI.
- try:
- # we pass the ui instance are warning might need to be issued
- url = path(ui, None, rawloc=source).rawloc
- except ValueError:
+ path = try_path(ui, source)
+ if path is not None:
+ url = path.rawloc
+ else:
url = source
clone_path, branch = parseurl(url, default_branches)
return url, clone_path, branch
@@ -590,6 +622,8 @@
Returns None if ``name`` is not a registered path, a URI, or a local
path to a repo.
"""
+ msg = b'getpath is deprecated, use `get_*` functions from urlutil'
+ self.deprecwarn(msg, '6.0')
# Only fall back to default if no path was requested.
if name is None:
if not default:
@@ -607,16 +641,14 @@
# This may need to raise in the future.
if not name:
return None
-
- try:
+ if name in self:
return self[name]
- except KeyError:
+ else:
# Try to resolve as a local path or URI.
- try:
- # we pass the ui instance are warning might need to be issued
- return path(ui, None, rawloc=name)
- except ValueError:
+ path = try_path(ui, name)
+ if path is None:
raise error.RepoError(_(b'repository %s does not exist') % name)
+ return path.rawloc
_pathsuboptions = {}
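
The three near-identical pull/clone hunks above all reduce to the same resolve-or-fall-back pattern that `try_path` enables. A condensed, standalone sketch; the `Path` class and its validation rule are assumptions standing in for urlutil's real types:

    class Path:
        def __init__(self, rawloc):
            if b'://' not in rawloc and not rawloc.startswith(b'/'):
                raise ValueError(rawloc)   # assumed validation rule
            self.rawloc = rawloc

    def try_path(rawloc):
        # swallow ValueError and return None, as in the hunk above
        try:
            return Path(rawloc)
        except ValueError:
            return None

    def resolve(named_paths, source):
        if source in named_paths:
            return named_paths[source].rawloc
        p = try_path(source)
        return p.rawloc if p is not None else source

    paths = {b'default': Path(b'https://example.org/repo')}
    assert resolve(paths, b'default') == b'https://example.org/repo'
    assert resolve(paths, b'/tmp/repo') == b'/tmp/repo'
    assert resolve(paths, b'not-a-url') == b'not-a-url'
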
--- a/mercurial/verify.py Sun May 02 16:56:20 2021 -0400
+++ b/mercurial/verify.py Fri May 07 22:06:25 2021 -0400
@@ -10,13 +10,8 @@
import os
from .i18n import _
-from .node import (
- nullid,
- short,
-)
-from .utils import (
- stringutil,
-)
+from .node import short
+from .utils import stringutil
from . import (
error,
@@ -159,13 +154,13 @@
try:
p1, p2 = obj.parents(node)
- if p1 not in seen and p1 != nullid:
+ if p1 not in seen and p1 != self.repo.nullid:
self._err(
lr,
_(b"unknown parent 1 %s of %s") % (short(p1), short(node)),
f,
)
- if p2 not in seen and p2 != nullid:
+ if p2 not in seen and p2 != self.repo.nullid:
self._err(
lr,
_(b"unknown parent 2 %s of %s") % (short(p2), short(node)),
@@ -267,7 +262,7 @@
try:
changes = cl.read(n)
- if changes[0] != nullid:
+ if changes[0] != self.repo.nullid:
mflinkrevs.setdefault(changes[0], []).append(i)
self.refersmf = True
for f in changes[3]:
@@ -598,7 +593,7 @@
% (rp[0], short(rp[1])),
f,
)
- elif rp[1] == nullid:
+ elif rp[1] == self.repo.nullid:
ui.note(
_(
b"warning: %s@%s: copy source"
--- a/mercurial/wireprotov1server.py Sun May 02 16:56:20 2021 -0400
+++ b/mercurial/wireprotov1server.py Fri May 07 22:06:25 2021 -0400
@@ -11,10 +11,7 @@
import os
from .i18n import _
-from .node import (
- hex,
- nullid,
-)
+from .node import hex
from .pycompat import getattr
from . import (
@@ -470,7 +467,7 @@
clheads = set(repo.changelog.heads())
heads = set(opts.get(b'heads', set()))
common = set(opts.get(b'common', set()))
- common.discard(nullid)
+ common.discard(repo.nullid)
if (
repo.ui.configbool(b'server', b'pullbundle')
and b'partial-pull' in proto.getprotocaps()
--- a/mercurial/wireprotov2server.py Sun May 02 16:56:20 2021 -0400
+++ b/mercurial/wireprotov2server.py Fri May 07 22:06:25 2021 -0400
@@ -10,10 +10,7 @@
import contextlib
from .i18n import _
-from .node import (
- hex,
- nullid,
-)
+from .node import hex
from . import (
discovery,
encoding,
@@ -950,7 +947,7 @@
if spec[b'roots']:
common = [n for n in spec[b'roots'] if clhasnode(n)]
else:
- common = [nullid]
+ common = [repo.nullid]
for n in discovery.outgoing(repo, common, spec[b'heads']).missing:
if n not in seen:
--- a/relnotes/next Sun May 02 16:56:20 2021 -0400
+++ b/relnotes/next Fri May 07 22:06:25 2021 -0400
@@ -1,5 +1,8 @@
== New Features ==
-
+
+ * `hg config` now has a `--source` option to show where each
+ configuration value comes from.
+
== Default Format Change ==
--- a/rust/Cargo.lock Sun May 02 16:56:20 2021 -0400
+++ b/rust/Cargo.lock Fri May 07 22:06:25 2021 -0400
@@ -64,9 +64,9 @@
[[package]]
name = "bytes-cast"
-version = "0.1.0"
+version = "0.2.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "3196ba300c7bc9282a4331e878496cb3e9603a898a8f1446601317163e16ca52"
+checksum = "0d434f9a4ecbe987e7ccfda7274b6f82ea52c9b63742565a65cb5e8ba0f2c452"
dependencies = [
"bytes-cast-derive",
]
@@ -358,6 +358,7 @@
"format-bytes",
"home",
"im-rc",
+ "itertools",
"lazy_static",
"log",
"memmap",
--- a/rust/hg-core/Cargo.toml Sun May 02 16:56:20 2021 -0400
+++ b/rust/hg-core/Cargo.toml Fri May 07 22:06:25 2021 -0400
@@ -9,11 +9,12 @@
name = "hg"
[dependencies]
-bytes-cast = "0.1"
+bytes-cast = "0.2"
byteorder = "1.3.4"
derive_more = "0.99"
home = "0.5"
im-rc = "15.0.*"
+itertools = "0.9"
lazy_static = "1.4.0"
rand = "0.7.3"
rand_pcg = "0.2.1"
--- a/rust/hg-core/src/dirstate.rs Sun May 02 16:56:20 2021 -0400
+++ b/rust/hg-core/src/dirstate.rs Fri May 07 22:06:25 2021 -0400
@@ -7,9 +7,9 @@
use crate::errors::HgError;
use crate::revlog::Node;
-use crate::{utils::hg_path::HgPathBuf, FastHashMap};
+use crate::utils::hg_path::{HgPath, HgPathBuf};
+use crate::FastHashMap;
use bytes_cast::{unaligned, BytesCast};
-use std::collections::hash_map;
use std::convert::TryFrom;
pub mod dirs_multiset;
@@ -35,6 +35,29 @@
pub size: i32,
}
+impl DirstateEntry {
+ pub fn is_non_normal(&self) -> bool {
+ self.state != EntryState::Normal || self.mtime == MTIME_UNSET
+ }
+
+ pub fn is_from_other_parent(&self) -> bool {
+ self.state == EntryState::Normal && self.size == SIZE_FROM_OTHER_PARENT
+ }
+
+ // TODO: other platforms
+ #[cfg(unix)]
+ pub fn mode_changed(
+ &self,
+ filesystem_metadata: &std::fs::Metadata,
+ ) -> bool {
+ use std::os::unix::fs::MetadataExt;
+ const EXEC_BIT_MASK: u32 = 0o100;
+ let dirstate_exec_bit = (self.mode as u32) & EXEC_BIT_MASK;
+ let fs_exec_bit = filesystem_metadata.mode() & EXEC_BIT_MASK;
+ dirstate_exec_bit != fs_exec_bit
+ }
+}
+
#[derive(BytesCast)]
#[repr(C)]
struct RawEntry {
@@ -45,16 +68,20 @@
length: unaligned::I32Be,
}
+const MTIME_UNSET: i32 = -1;
+
/// A `DirstateEntry` with a size of `-2` means that it was merged from the
/// other parent. This allows revert to pick the right status back during a
/// merge.
pub const SIZE_FROM_OTHER_PARENT: i32 = -2;
pub type StateMap = FastHashMap<HgPathBuf, DirstateEntry>;
-pub type StateMapIter<'a> = hash_map::Iter<'a, HgPathBuf, DirstateEntry>;
+pub type StateMapIter<'a> =
+ Box<dyn Iterator<Item = (&'a HgPath, &'a DirstateEntry)> + Send + 'a>;
pub type CopyMap = FastHashMap<HgPathBuf, HgPathBuf>;
-pub type CopyMapIter<'a> = hash_map::Iter<'a, HgPathBuf, HgPathBuf>;
+pub type CopyMapIter<'a> =
+ Box<dyn Iterator<Item = (&'a HgPath, &'a HgPath)> + Send + 'a>;
#[derive(Copy, Clone, Debug, Eq, PartialEq)]
pub enum EntryState {
@@ -65,6 +92,16 @@
Unknown,
}
+impl EntryState {
+ pub fn is_tracked(self) -> bool {
+ use EntryState::*;
+ match self {
+ Normal | Added | Merged => true,
+ Removed | Unknown => false,
+ }
+ }
+}
+
impl TryFrom<u8> for EntryState {
type Error = HgError;
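
For the new `mode_changed` helper in the dirstate hunks above: the comparison cares only about the owner-execute bit, which is the permission bit being tracked here. The same masking in Python, with assumed typical mode values:

    EXEC_BIT_MASK = 0o100   # owner-execute, the one bit being compared

    def mode_changed(dirstate_mode, fs_mode):
        return (dirstate_mode & EXEC_BIT_MASK) != (fs_mode & EXEC_BIT_MASK)

    assert mode_changed(0o644, 0o755)       # exec bit appeared on disk
    assert not mode_changed(0o755, 0o775)   # group bits differ, exec does not
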
--- a/rust/hg-core/src/dirstate/dirs_multiset.rs Sun May 02 16:56:20 2021 -0400
+++ b/rust/hg-core/src/dirstate/dirs_multiset.rs Fri May 07 22:06:25 2021 -0400
@@ -14,7 +14,7 @@
files,
hg_path::{HgPath, HgPathBuf, HgPathError},
},
- DirstateEntry, DirstateMapError, FastHashMap, StateMap,
+ DirstateEntry, DirstateMapError, FastHashMap,
};
use std::collections::{hash_map, hash_map::Entry, HashMap, HashSet};
@@ -30,17 +30,22 @@
/// Initializes the multiset from a dirstate.
///
/// If `skip_state` is provided, skips dirstate entries with equal state.
- pub fn from_dirstate(
- dirstate: &StateMap,
+ pub fn from_dirstate<'a, I, P>(
+ dirstate: I,
skip_state: Option<EntryState>,
- ) -> Result<Self, DirstateMapError> {
+ ) -> Result<Self, DirstateMapError>
+ where
+ I: IntoIterator<Item = (P, &'a DirstateEntry)>,
+ P: AsRef<HgPath>,
+ {
let mut multiset = DirsMultiset {
inner: FastHashMap::default(),
};
- for (filename, DirstateEntry { state, .. }) in dirstate.iter() {
+ for (filename, entry) in dirstate {
+ let filename = filename.as_ref();
// This `if` is optimized out of the loop
if let Some(skip) = skip_state {
- if skip != *state {
+ if skip != entry.state {
multiset.add_path(filename)?;
}
} else {
@@ -207,6 +212,7 @@
#[cfg(test)]
mod tests {
use super::*;
+ use crate::StateMap;
#[test]
fn test_delete_path_path_not_found() {
@@ -356,7 +362,7 @@
};
assert_eq!(expected, new);
- let input_map = ["b/x", "a/c", "a/d/x"]
+ let input_map: HashMap<_, _> = ["b/x", "a/c", "a/d/x"]
.iter()
.map(|f| {
(
@@ -384,7 +390,7 @@
#[test]
fn test_dirsmultiset_new_skip() {
- let input_map = [
+ let input_map: HashMap<_, _> = [
("a/", EntryState::Normal),
("a/b", EntryState::Normal),
("a/c", EntryState::Removed),
--- a/rust/hg-core/src/dirstate/dirstate_map.rs Sun May 02 16:56:20 2021 -0400
+++ b/rust/hg-core/src/dirstate/dirstate_map.rs Fri May 07 22:06:25 2021 -0400
@@ -5,40 +5,28 @@
// This software may be used and distributed according to the terms of the
// GNU General Public License version 2 or any later version.
-use crate::errors::HgError;
-use crate::revlog::node::NULL_NODE;
+use crate::dirstate::parsers::clear_ambiguous_mtime;
+use crate::dirstate::parsers::Timestamp;
use crate::{
- dirstate::{parsers::PARENT_SIZE, EntryState, SIZE_FROM_OTHER_PARENT},
+ dirstate::EntryState,
pack_dirstate, parse_dirstate,
- utils::{
- files::normalize_case,
- hg_path::{HgPath, HgPathBuf},
- },
+ utils::hg_path::{HgPath, HgPathBuf},
CopyMap, DirsMultiset, DirstateEntry, DirstateError, DirstateMapError,
- DirstateParents, FastHashMap, StateMap,
+ DirstateParents, StateMap,
};
use micro_timer::timed;
use std::collections::HashSet;
-use std::convert::TryInto;
use std::iter::FromIterator;
use std::ops::Deref;
-use std::time::Duration;
-
-pub type FileFoldMap = FastHashMap<HgPathBuf, HgPathBuf>;
-
-const MTIME_UNSET: i32 = -1;
#[derive(Default)]
pub struct DirstateMap {
state_map: StateMap,
pub copy_map: CopyMap,
- file_fold_map: Option<FileFoldMap>,
pub dirs: Option<DirsMultiset>,
pub all_dirs: Option<DirsMultiset>,
non_normal_set: Option<HashSet<HgPathBuf>>,
other_parent_set: Option<HashSet<HgPathBuf>>,
- parents: Option<DirstateParents>,
- dirty_parents: bool,
}
/// Should only really be used in python interface code, for clarity
@@ -69,13 +57,8 @@
pub fn clear(&mut self) {
self.state_map = StateMap::default();
self.copy_map.clear();
- self.file_fold_map = None;
self.non_normal_set = None;
self.other_parent_set = None;
- self.set_parents(&DirstateParents {
- p1: NULL_NODE,
- p2: NULL_NODE,
- })
}
/// Add a tracked file to the dirstate
@@ -98,13 +81,13 @@
}
self.state_map.insert(filename.to_owned(), entry.to_owned());
- if entry.state != EntryState::Normal || entry.mtime == MTIME_UNSET {
+ if entry.is_non_normal() {
self.get_non_normal_other_parent_entries()
.0
.insert(filename.to_owned());
}
- if entry.size == SIZE_FROM_OTHER_PARENT {
+ if entry.is_from_other_parent() {
self.get_non_normal_other_parent_entries()
.1
.insert(filename.to_owned());
@@ -135,9 +118,6 @@
}
}
- if let Some(ref mut file_fold_map) = self.file_fold_map {
- file_fold_map.remove(&normalize_case(filename));
- }
self.state_map.insert(
filename.to_owned(),
DirstateEntry {
@@ -172,9 +152,6 @@
all_dirs.delete_path(filename)?;
}
}
- if let Some(ref mut file_fold_map) = self.file_fold_map {
- file_fold_map.remove(&normalize_case(filename));
- }
self.get_non_normal_other_parent_entries()
.0
.remove(filename);
@@ -188,32 +165,22 @@
now: i32,
) {
for filename in filenames {
- let mut changed = false;
if let Some(entry) = self.state_map.get_mut(&filename) {
- if entry.state == EntryState::Normal && entry.mtime == now {
- changed = true;
- *entry = DirstateEntry {
- mtime: MTIME_UNSET,
- ..*entry
- };
+ if clear_ambiguous_mtime(entry, now) {
+ self.get_non_normal_other_parent_entries()
+ .0
+ .insert(filename.to_owned());
}
}
- if changed {
- self.get_non_normal_other_parent_entries()
- .0
- .insert(filename.to_owned());
- }
}
}
- pub fn non_normal_entries_remove(
- &mut self,
- key: impl AsRef<HgPath>,
- ) -> bool {
+ pub fn non_normal_entries_remove(&mut self, key: impl AsRef<HgPath>) {
self.get_non_normal_other_parent_entries()
.0
- .remove(key.as_ref())
+ .remove(key.as_ref());
}
+
pub fn non_normal_entries_union(
&mut self,
other: HashSet<HgPathBuf>,
@@ -264,18 +231,11 @@
let mut non_normal = HashSet::new();
let mut other_parent = HashSet::new();
- for (
- filename,
- DirstateEntry {
- state, size, mtime, ..
- },
- ) in self.state_map.iter()
- {
- if *state != EntryState::Normal || *mtime == MTIME_UNSET {
+ for (filename, entry) in self.state_map.iter() {
+ if entry.is_non_normal() {
non_normal.insert(filename.to_owned());
}
- if *state == EntryState::Normal && *size == SIZE_FROM_OTHER_PARENT
- {
+ if entry.is_from_other_parent() {
other_parent.insert(filename.to_owned());
}
}
@@ -289,8 +249,10 @@
/// good idea.
pub fn set_all_dirs(&mut self) -> Result<(), DirstateMapError> {
if self.all_dirs.is_none() {
- self.all_dirs =
- Some(DirsMultiset::from_dirstate(&self.state_map, None)?);
+ self.all_dirs = Some(DirsMultiset::from_dirstate(
+ self.state_map.iter(),
+ None,
+ )?);
}
Ok(())
}
@@ -298,7 +260,7 @@
pub fn set_dirs(&mut self) -> Result<(), DirstateMapError> {
if self.dirs.is_none() {
self.dirs = Some(DirsMultiset::from_dirstate(
- &self.state_map,
+ self.state_map.iter(),
Some(EntryState::Removed),
)?);
}
@@ -321,46 +283,11 @@
Ok(self.all_dirs.as_ref().unwrap().contains(directory))
}
- pub fn parents(
+ #[timed]
+ pub fn read(
&mut self,
file_contents: &[u8],
- ) -> Result<&DirstateParents, DirstateError> {
- if let Some(ref parents) = self.parents {
- return Ok(parents);
- }
- let parents;
- if file_contents.len() == PARENT_SIZE * 2 {
- parents = DirstateParents {
- p1: file_contents[..PARENT_SIZE].try_into().unwrap(),
- p2: file_contents[PARENT_SIZE..PARENT_SIZE * 2]
- .try_into()
- .unwrap(),
- };
- } else if file_contents.is_empty() {
- parents = DirstateParents {
- p1: NULL_NODE,
- p2: NULL_NODE,
- };
- } else {
- return Err(
- HgError::corrupted("Dirstate appears to be damaged").into()
- );
- }
-
- self.parents = Some(parents);
- Ok(self.parents.as_ref().unwrap())
- }
-
- pub fn set_parents(&mut self, parents: &DirstateParents) {
- self.parents = Some(parents.clone());
- self.dirty_parents = true;
- }
-
- #[timed]
- pub fn read<'a>(
- &mut self,
- file_contents: &'a [u8],
- ) -> Result<Option<&'a DirstateParents>, DirstateError> {
+ ) -> Result<Option<DirstateParents>, DirstateError> {
if file_contents.is_empty() {
return Ok(None);
}
@@ -376,42 +303,20 @@
.into_iter()
.map(|(path, copy)| (path.to_owned(), copy.to_owned())),
);
-
- if !self.dirty_parents {
- self.set_parents(&parents);
- }
-
- Ok(Some(parents))
+ Ok(Some(parents.clone()))
}
pub fn pack(
&mut self,
parents: DirstateParents,
- now: Duration,
+ now: Timestamp,
) -> Result<Vec<u8>, DirstateError> {
let packed =
pack_dirstate(&mut self.state_map, &self.copy_map, parents, now)?;
- self.dirty_parents = false;
-
self.set_non_normal_other_parent_entries(true);
Ok(packed)
}
- pub fn build_file_fold_map(&mut self) -> &FileFoldMap {
- if let Some(ref file_fold_map) = self.file_fold_map {
- return file_fold_map;
- }
- let mut new_file_fold_map = FileFoldMap::default();
-
- for (filename, DirstateEntry { state, .. }) in self.state_map.iter() {
- if *state != EntryState::Removed {
- new_file_fold_map
- .insert(normalize_case(&filename), filename.to_owned());
- }
- }
- self.file_fold_map = Some(new_file_fold_map);
- self.file_fold_map.as_ref().unwrap()
- }
}
#[cfg(test)]
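
With the `parents`/`set_parents` bookkeeping gone, `read` now returns an owned `Option<DirstateParents>` rather than a reference tied to `file_contents`, so a caller may drop the buffer as soon as parsing is done. A sketch (variable names hypothetical, inside a function returning `Result<_, DirstateError>`):

    let mut map = DirstateMap::default();
    let _parents = map.read(&file_contents)?; // owned Option<DirstateParents>
    drop(file_contents); // fine: no borrow of the buffer escapes `read`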
--- a/rust/hg-core/src/dirstate/parsers.rs Sun May 02 16:56:20 2021 -0400
+++ b/rust/hg-core/src/dirstate/parsers.rs Fri May 07 22:06:25 2021 -0400
@@ -13,7 +13,6 @@
use bytes_cast::BytesCast;
use micro_timer::timed;
use std::convert::{TryFrom, TryInto};
-use std::time::Duration;
/// Parents are stored in the dirstate as byte hashes.
pub const PARENT_SIZE: usize = 20;
@@ -35,10 +34,23 @@
}
#[timed]
-pub fn parse_dirstate(mut contents: &[u8]) -> Result<ParseResult, HgError> {
+pub fn parse_dirstate(contents: &[u8]) -> Result<ParseResult, HgError> {
let mut copies = Vec::new();
let mut entries = Vec::new();
+ let parents =
+ parse_dirstate_entries(contents, |path, entry, copy_source| {
+ if let Some(source) = copy_source {
+ copies.push((path, source));
+ }
+ entries.push((path, *entry));
+ })?;
+ Ok((parents, entries, copies))
+}
+pub fn parse_dirstate_entries<'a>(
+ mut contents: &'a [u8],
+ mut each_entry: impl FnMut(&'a HgPath, &DirstateEntry, Option<&'a HgPath>),
+) -> Result<&'a DirstateParents, HgError> {
let (parents, rest) = DirstateParents::from_bytes(contents)
.map_err(|_| HgError::corrupted("Too little data for dirstate."))?;
contents = rest;
@@ -62,34 +74,92 @@
let path = HgPath::new(
iter.next().expect("splitn always yields at least one item"),
);
- if let Some(copy_source) = iter.next() {
- copies.push((path, HgPath::new(copy_source)));
- }
+ let copy_source = iter.next().map(HgPath::new);
+ each_entry(path, &entry, copy_source);
- entries.push((path, entry));
contents = rest;
}
- Ok((parents, entries, copies))
+ Ok(parents)
+}
+
+fn packed_filename_and_copy_source_size(
+ filename: &HgPath,
+ copy_source: Option<&HgPath>,
+) -> usize {
+ filename.len()
+ + if let Some(source) = copy_source {
+ b"\0".len() + source.len()
+ } else {
+ 0
+ }
+}
+
+pub fn packed_entry_size(
+ filename: &HgPath,
+ copy_source: Option<&HgPath>,
+) -> usize {
+ MIN_ENTRY_SIZE
+ + packed_filename_and_copy_source_size(filename, copy_source)
}
-/// `now` is the duration in seconds since the Unix epoch
+pub fn pack_entry(
+ filename: &HgPath,
+ entry: &DirstateEntry,
+ copy_source: Option<&HgPath>,
+ packed: &mut Vec<u8>,
+) {
+ let length = packed_filename_and_copy_source_size(filename, copy_source);
+
+ // Unwrapping because `impl std::io::Write for Vec<u8>` never errors
+ packed.write_u8(entry.state.into()).unwrap();
+ packed.write_i32::<BigEndian>(entry.mode).unwrap();
+ packed.write_i32::<BigEndian>(entry.size).unwrap();
+ packed.write_i32::<BigEndian>(entry.mtime).unwrap();
+ packed.write_i32::<BigEndian>(length as i32).unwrap();
+ packed.extend(filename.as_bytes());
+ if let Some(source) = copy_source {
+ packed.push(b'\0');
+ packed.extend(source.as_bytes());
+ }
+}
+
+/// Seconds since the Unix epoch
+pub struct Timestamp(pub u64);
+
+pub fn clear_ambiguous_mtime(
+ entry: &mut DirstateEntry,
+ mtime_now: i32,
+) -> bool {
+ let ambiguous =
+ entry.state == EntryState::Normal && entry.mtime == mtime_now;
+ if ambiguous {
+ // The file was last modified "simultaneously" with the current
+ // write to dirstate (i.e. within the same second for file-
+ // systems with a granularity of 1 sec). This commonly happens
+ // for at least a couple of files on 'update'.
+ // The user could change the file without changing its size
+ // within the same second. Invalidate the file's mtime in
+ // dirstate, forcing future 'status' calls to compare the
+ // contents of the file if the size is the same. This prevents
+ // mistakenly treating such files as clean.
+ entry.mtime = -1;
+ }
+ ambiguous
+}
+
pub fn pack_dirstate(
state_map: &mut StateMap,
copy_map: &CopyMap,
parents: DirstateParents,
- now: Duration,
+ now: Timestamp,
) -> Result<Vec<u8>, HgError> {
// TODO move away from i32 before 2038.
- let now: i32 = now.as_secs().try_into().expect("time overflow");
+ let now: i32 = now.0.try_into().expect("time overflow");
let expected_size: usize = state_map
.iter()
.map(|(filename, _)| {
- let mut length = MIN_ENTRY_SIZE + filename.len();
- if let Some(copy) = copy_map.get(filename) {
- length += copy.len() + 1;
- }
- length
+ packed_entry_size(filename, copy_map.get(filename).map(|p| &**p))
})
.sum();
let expected_size = expected_size + PARENT_SIZE * 2;
@@ -100,39 +170,13 @@
packed.extend(parents.p2.as_bytes());
for (filename, entry) in state_map.iter_mut() {
- let new_filename = filename.to_owned();
- let mut new_mtime: i32 = entry.mtime;
- if entry.state == EntryState::Normal && entry.mtime == now {
- // The file was last modified "simultaneously" with the current
- // write to dirstate (i.e. within the same second for file-
- // systems with a granularity of 1 sec). This commonly happens
- // for at least a couple of files on 'update'.
- // The user could change the file without changing its size
- // within the same second. Invalidate the file's mtime in
- // dirstate, forcing future 'status' calls to compare the
- // contents of the file if the size is the same. This prevents
- // mistakenly treating such files as clean.
- new_mtime = -1;
- *entry = DirstateEntry {
- mtime: new_mtime,
- ..*entry
- };
- }
- let mut new_filename = new_filename.into_vec();
- if let Some(copy) = copy_map.get(filename) {
- new_filename.push(b'\0');
- new_filename.extend(copy.bytes());
- }
-
- // Unwrapping because `impl std::io::Write for Vec<u8>` never errors
- packed.write_u8(entry.state.into()).unwrap();
- packed.write_i32::<BigEndian>(entry.mode).unwrap();
- packed.write_i32::<BigEndian>(entry.size).unwrap();
- packed.write_i32::<BigEndian>(new_mtime).unwrap();
- packed
- .write_i32::<BigEndian>(new_filename.len() as i32)
- .unwrap();
- packed.extend(new_filename)
+ clear_ambiguous_mtime(entry, now);
+ pack_entry(
+ filename,
+ entry,
+ copy_map.get(filename).map(|p| &**p),
+ &mut packed,
+ )
}
if packed.len() != expected_size {
@@ -160,7 +204,7 @@
p1: b"12345678910111213141".into(),
p2: b"00000000000000000000".into(),
};
- let now = Duration::new(15000000, 0);
+ let now = Timestamp(15000000);
let expected = b"1234567891011121314100000000000000000000".to_vec();
assert_eq!(
@@ -191,7 +235,7 @@
p1: b"12345678910111213141".into(),
p2: b"00000000000000000000".into(),
};
- let now = Duration::new(15000000, 0);
+ let now = Timestamp(15000000);
let expected = [
49, 50, 51, 52, 53, 54, 55, 56, 57, 49, 48, 49, 49, 49, 50, 49,
51, 49, 52, 49, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48,
@@ -231,7 +275,7 @@
p1: b"12345678910111213141".into(),
p2: b"00000000000000000000".into(),
};
- let now = Duration::new(15000000, 0);
+ let now = Timestamp(15000000);
let expected = [
49, 50, 51, 52, 53, 54, 55, 56, 57, 49, 48, 49, 49, 49, 50, 49,
51, 49, 52, 49, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48,
@@ -271,7 +315,7 @@
p1: b"12345678910111213141".into(),
p2: b"00000000000000000000".into(),
};
- let now = Duration::new(15000000, 0);
+ let now = Timestamp(15000000);
let result =
pack_dirstate(&mut state_map, &copymap, parents.clone(), now)
.unwrap();
@@ -349,7 +393,7 @@
p1: b"12345678910111213141".into(),
p2: b"00000000000000000000".into(),
};
- let now = Duration::new(15000000, 0);
+ let now = Timestamp(15000000);
let result =
pack_dirstate(&mut state_map, &copymap, parents.clone(), now)
.unwrap();
@@ -395,7 +439,7 @@
p1: b"12345678910111213141".into(),
p2: b"00000000000000000000".into(),
};
- let now = Duration::new(15000000, 0);
+ let now = Timestamp(15000000);
let result =
pack_dirstate(&mut state_map, &copymap, parents.clone(), now)
.unwrap();
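
`packed_entry_size` and `pack_entry` are written to stay in lockstep, which is what lets `pack_dirstate` pre-allocate exactly and then verify `packed.len() != expected_size` as a sanity check. A minimal sketch of that invariant, for a single entry with no copy source:

    let filename = HgPath::new(b"dir/file");
    let entry = DirstateEntry {
        state: EntryState::Normal,
        mode: 0o644,
        size: 42,
        mtime: 1_600_000_000,
    };
    let mut packed = Vec::new();
    pack_entry(filename, &entry, None, &mut packed);
    // MIN_ENTRY_SIZE bytes of fixed-width fields, then the filename:
    assert_eq!(packed.len(), packed_entry_size(filename, None));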
--- a/rust/hg-core/src/dirstate/status.rs Sun May 02 16:56:20 2021 -0400
+++ b/rust/hg-core/src/dirstate/status.rs Fri May 07 22:06:25 2021 -0400
@@ -95,9 +95,10 @@
type IoResult<T> = std::io::Result<T>;
-/// `Box<dyn Trait>` is syntactic sugar for `Box<dyn Trait, 'static>`, so add
+/// `Box<dyn Trait>` is syntactic sugar for `Box<dyn Trait + 'static>`, so add
/// an explicit lifetime here to not fight `'static` bounds "out of nowhere".
-type IgnoreFnType<'a> = Box<dyn for<'r> Fn(&'r HgPath) -> bool + Sync + 'a>;
+pub type IgnoreFnType<'a> =
+ Box<dyn for<'r> Fn(&'r HgPath) -> bool + Sync + 'a>;
/// We have a good mix of owned (from directory traversal) and borrowed (from
/// the dirstate/explicit) paths, this comes up a lot.
@@ -254,16 +255,41 @@
pub collect_traversed_dirs: bool,
}
-#[derive(Debug)]
+#[derive(Debug, Default)]
pub struct DirstateStatus<'a> {
+ /// Tracked files whose contents have changed since the parent revision
pub modified: Vec<HgPathCow<'a>>,
+
+ /// Newly-tracked files that were not present in the parent
pub added: Vec<HgPathCow<'a>>,
+
+ /// Previously-tracked files that have been (re)moved with an hg command
pub removed: Vec<HgPathCow<'a>>,
+
+ /// (Still) tracked files that are missing, (re)moved with a non-hg
+ /// command
pub deleted: Vec<HgPathCow<'a>>,
+
+ /// Tracked files that are up to date with the parent.
+ /// Only populated if `StatusOptions::list_clean` is true.
pub clean: Vec<HgPathCow<'a>>,
+
+ /// Files in the working directory that are ignored with `.hgignore`.
+ /// Only populated if `StatusOptions::list_ignored` is true.
pub ignored: Vec<HgPathCow<'a>>,
+
+ /// Files in the working directory that are neither tracked nor ignored.
+ /// Only populated if `StatusOptions::list_unknown` is true.
pub unknown: Vec<HgPathCow<'a>>,
+
+ /// Was explicitly matched but cannot be found/accessed
pub bad: Vec<(HgPathCow<'a>, BadMatch)>,
+
+ /// Either clean or modified, but we can’t tell from filesystem metadata
+ /// alone. The file contents need to be read and compared with the
+ /// version in the parent.
+ pub unsure: Vec<HgPathCow<'a>>,
+
/// Only filled if `collect_traversed_dirs` is `true`
pub traversed: Vec<HgPathBuf>,
}
@@ -292,7 +318,7 @@
/// Gives information about which files are changed in the working directory
/// and how, compared to the revision we're based on
-pub struct Status<'a, M: Matcher + Sync> {
+pub struct Status<'a, M: ?Sized + Matcher + Sync> {
dmap: &'a DirstateMap,
pub(crate) matcher: &'a M,
root_dir: PathBuf,
@@ -302,7 +328,7 @@
impl<'a, M> Status<'a, M>
where
- M: Matcher + Sync,
+ M: ?Sized + Matcher + Sync,
{
pub fn new(
dmap: &'a DirstateMap,
@@ -847,8 +873,8 @@
pub fn build_response<'a>(
results: impl IntoIterator<Item = DispatchedPath<'a>>,
traversed: Vec<HgPathBuf>,
-) -> (Vec<HgPathCow<'a>>, DirstateStatus<'a>) {
- let mut lookup = vec![];
+) -> DirstateStatus<'a> {
+ let mut unsure = vec![];
let mut modified = vec![];
let mut added = vec![];
let mut removed = vec![];
@@ -861,7 +887,7 @@
for (filename, dispatch) in results.into_iter() {
match dispatch {
Dispatch::Unknown => unknown.push(filename),
- Dispatch::Unsure => lookup.push(filename),
+ Dispatch::Unsure => unsure.push(filename),
Dispatch::Modified => modified.push(filename),
Dispatch::Added => added.push(filename),
Dispatch::Removed => removed.push(filename),
@@ -874,20 +900,18 @@
}
}
- (
- lookup,
- DirstateStatus {
- modified,
- added,
- removed,
- deleted,
- clean,
- ignored,
- unknown,
- bad,
- traversed,
- },
- )
+ DirstateStatus {
+ modified,
+ added,
+ removed,
+ deleted,
+ clean,
+ ignored,
+ unknown,
+ bad,
+ unsure,
+ traversed,
+ }
}
/// Get the status of files in the working directory.
@@ -898,14 +922,11 @@
#[timed]
pub fn status<'a>(
dmap: &'a DirstateMap,
- matcher: &'a (impl Matcher + Sync),
+ matcher: &'a (dyn Matcher + Sync),
root_dir: PathBuf,
ignore_files: Vec<PathBuf>,
options: StatusOptions,
-) -> StatusResult<(
- (Vec<HgPathCow<'a>>, DirstateStatus<'a>),
- Vec<PatternFileWarning>,
-)> {
+) -> StatusResult<(DirstateStatus<'a>, Vec<PatternFileWarning>)> {
let (status, warnings) =
Status::new(dmap, matcher, root_dir, ignore_files, options)?;
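
Folding the former `lookup` vector into `DirstateStatus::unsure` means `status` now returns one struct plus pattern-file warnings instead of a nested tuple. A sketch of the new call shape (grounded in the signature above; the loop bodies are illustrative):

    let (ds_status, warnings) =
        status(&dmap, matcher, root_dir, ignore_files, options)?;
    for _warning in &warnings {
        // report pattern-file warnings to the user
    }
    for _path in &ds_status.unsure {
        // read the file and compare its contents with the parent revision
        // to decide between clean and modified
    }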
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/rust/hg-core/src/dirstate_tree.rs Fri May 07 22:06:25 2021 -0400
@@ -0,0 +1,4 @@
+pub mod dirstate_map;
+pub mod dispatch;
+pub mod path_with_basename;
+mod status;
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/rust/hg-core/src/dirstate_tree/dirstate_map.rs Fri May 07 22:06:25 2021 -0400
@@ -0,0 +1,623 @@
+use bytes_cast::BytesCast;
+use micro_timer::timed;
+use std::borrow::Cow;
+use std::convert::TryInto;
+use std::path::PathBuf;
+
+use super::path_with_basename::WithBasename;
+use crate::dirstate::parsers::clear_ambiguous_mtime;
+use crate::dirstate::parsers::pack_entry;
+use crate::dirstate::parsers::packed_entry_size;
+use crate::dirstate::parsers::parse_dirstate_entries;
+use crate::dirstate::parsers::Timestamp;
+use crate::matchers::Matcher;
+use crate::utils::hg_path::{HgPath, HgPathBuf};
+use crate::CopyMapIter;
+use crate::DirstateEntry;
+use crate::DirstateError;
+use crate::DirstateMapError;
+use crate::DirstateParents;
+use crate::DirstateStatus;
+use crate::EntryState;
+use crate::FastHashMap;
+use crate::PatternFileWarning;
+use crate::StateMapIter;
+use crate::StatusError;
+use crate::StatusOptions;
+
+pub struct DirstateMap<'on_disk> {
+ /// Contents of the `.hg/dirstate` file
+ on_disk: &'on_disk [u8],
+
+ pub(super) root: ChildNodes<'on_disk>,
+
+ /// Number of nodes anywhere in the tree that have `.entry.is_some()`.
+ nodes_with_entry_count: usize,
+
+ /// Number of nodes anywhere in the tree that have
+ /// `.copy_source.is_some()`.
+ nodes_with_copy_source_count: usize,
+}
+
+/// Using a plain `HgPathBuf` of the full path from the repository root as a
+/// map key would also work: all paths in a given map have the same parent
+/// path, so comparing full paths gives the same result as comparing base
+/// names. However a `HashMap` would waste time always re-hashing the same
+/// string prefix.
+pub(super) type ChildNodes<'on_disk> =
+ FastHashMap<WithBasename<Cow<'on_disk, HgPath>>, Node<'on_disk>>;
+
+/// Represents a file or a directory
+#[derive(Default)]
+pub(super) struct Node<'on_disk> {
+ /// `None` for directories
+ pub(super) entry: Option<DirstateEntry>,
+
+ pub(super) copy_source: Option<Cow<'on_disk, HgPath>>,
+
+ pub(super) children: ChildNodes<'on_disk>,
+
+ /// How many (non-inclusive) descendants of this node are tracked files
+ tracked_descendants_count: usize,
+}
+
+impl Node<'_> {
+ pub(super) fn state(&self) -> Option<EntryState> {
+ self.entry.as_ref().map(|entry| entry.state)
+ }
+}
+
+/// `(full_path, entry, copy_source)`
+type NodeDataMut<'tree, 'on_disk> = (
+ &'tree HgPath,
+ &'tree mut Option<DirstateEntry>,
+ &'tree mut Option<Cow<'on_disk, HgPath>>,
+);
+
+impl<'on_disk> DirstateMap<'on_disk> {
+ pub fn new(
+ on_disk: &'on_disk [u8],
+ ) -> Result<(Self, Option<DirstateParents>), DirstateError> {
+ let mut map = Self {
+ on_disk,
+ root: ChildNodes::default(),
+ nodes_with_entry_count: 0,
+ nodes_with_copy_source_count: 0,
+ };
+ let parents = map.read()?;
+ Ok((map, parents))
+ }
+
+ /// Should only be called in `new`
+ #[timed]
+ fn read(&mut self) -> Result<Option<DirstateParents>, DirstateError> {
+ if self.on_disk.is_empty() {
+ return Ok(None);
+ }
+
+ let parents = parse_dirstate_entries(
+ self.on_disk,
+ |path, entry, copy_source| {
+ let tracked = entry.state.is_tracked();
+ let node = Self::get_or_insert_node(
+ &mut self.root,
+ path,
+ WithBasename::to_cow_borrowed,
+ |ancestor| {
+ if tracked {
+ ancestor.tracked_descendants_count += 1
+ }
+ },
+ );
+ assert!(
+ node.entry.is_none(),
+ "duplicate dirstate entry in read"
+ );
+ assert!(
+ node.copy_source.is_none(),
+ "duplicate dirstate entry in read"
+ );
+ node.entry = Some(*entry);
+ node.copy_source = copy_source.map(Cow::Borrowed);
+ self.nodes_with_entry_count += 1;
+ if copy_source.is_some() {
+ self.nodes_with_copy_source_count += 1
+ }
+ },
+ )?;
+
+ Ok(Some(parents.clone()))
+ }
+
+ fn get_node(&self, path: &HgPath) -> Option<&Node> {
+ let mut children = &self.root;
+ let mut components = path.components();
+ let mut component =
+ components.next().expect("expected at least one component");
+ loop {
+ let child = children.get(component)?;
+ if let Some(next_component) = components.next() {
+ component = next_component;
+ children = &child.children;
+ } else {
+ return Some(child);
+ }
+ }
+ }
+
+ /// Returns a mutable reference to the node at `path` if it exists
+ ///
+ /// This takes `root` instead of `&mut self` so that callers can mutate
+ /// other fields while the returned borrow is still valid
+ fn get_node_mut<'tree>(
+ root: &'tree mut ChildNodes<'on_disk>,
+ path: &HgPath,
+ ) -> Option<&'tree mut Node<'on_disk>> {
+ Self::get_node_mut_tracing_ancestors(root, path, |_| {})
+ }
+
+ /// Same as `get_node_mut`, and calls `each_ancestor` for each ancestor of
+ /// the node.
+ ///
+ /// Note that `each_ancestor` may be called (with what would be ancestors)
+ /// even if it turns out there is no node at `path`.
+ fn get_node_mut_tracing_ancestors<'tree>(
+ root: &'tree mut ChildNodes<'on_disk>,
+ path: &HgPath,
+ mut each_ancestor: impl FnMut(&mut Node),
+ ) -> Option<&'tree mut Node<'on_disk>> {
+ let mut children = root;
+ let mut components = path.components();
+ let mut component =
+ components.next().expect("expected at least one component");
+ loop {
+ let child = children.get_mut(component)?;
+ if let Some(next_component) = components.next() {
+ each_ancestor(child);
+ component = next_component;
+ children = &mut child.children;
+ } else {
+ return Some(child);
+ }
+ }
+ }
+
+ fn get_or_insert_node<'tree, 'path>(
+ root: &'tree mut ChildNodes<'on_disk>,
+ path: &'path HgPath,
+ to_cow: impl Fn(
+ WithBasename<&'path HgPath>,
+ ) -> WithBasename<Cow<'on_disk, HgPath>>,
+ mut each_ancestor: impl FnMut(&mut Node),
+ ) -> &'tree mut Node<'on_disk> {
+ let mut child_nodes = root;
+ let mut inclusive_ancestor_paths =
+ WithBasename::inclusive_ancestors_of(path);
+ let mut ancestor_path = inclusive_ancestor_paths
+ .next()
+ .expect("expected at least one inclusive ancestor");
+ loop {
+ // TODO: can we avoid allocating an owned key in cases where the
+ // map already contains that key, without introducing double
+ // lookup?
+ let child_node =
+ child_nodes.entry(to_cow(ancestor_path)).or_default();
+ if let Some(next) = inclusive_ancestor_paths.next() {
+ each_ancestor(child_node);
+ ancestor_path = next;
+ child_nodes = &mut child_node.children;
+ } else {
+ return child_node;
+ }
+ }
+ }
+
+ fn add_or_remove_file(
+ &mut self,
+ path: &HgPath,
+ old_state: EntryState,
+ new_entry: DirstateEntry,
+ ) {
+ let tracked_count_increment =
+ match (old_state.is_tracked(), new_entry.state.is_tracked()) {
+ (false, true) => 1,
+ (true, false) => -1,
+ _ => 0,
+ };
+
+ let node = Self::get_or_insert_node(
+ &mut self.root,
+ path,
+ WithBasename::to_cow_owned,
+ |ancestor| {
+ // We can’t use `+= increment` because the counter is unsigned,
+ // and we want debug builds to detect accidental underflow
+ // through zero
+ match tracked_count_increment {
+ 1 => ancestor.tracked_descendants_count += 1,
+ -1 => ancestor.tracked_descendants_count -= 1,
+ _ => {}
+ }
+ },
+ );
+ if node.entry.is_none() {
+ self.nodes_with_entry_count += 1
+ }
+ node.entry = Some(new_entry)
+ }
+
+ fn iter_nodes<'a>(
+ &'a self,
+ ) -> impl Iterator<Item = (&'a HgPath, &'a Node)> + 'a {
+ // Depth first tree traversal.
+ //
+ // If we could afford internal iteration and recursion,
+ // this would look like:
+ //
+ // ```
+ // fn traverse_children(
+ // children: &ChildNodes,
+ // each: &mut impl FnMut(&Node),
+ // ) {
+ // for child in children.values() {
+ // traverse_children(&child.children, each);
+ // each(child);
+ // }
+ // }
+ // ```
+ //
+ // However we want an external iterator and therefore can’t use the
+ // call stack. Use an explicit stack instead:
+ let mut stack = Vec::new();
+ let mut iter = self.root.iter();
+ std::iter::from_fn(move || {
+ while let Some((key, child_node)) = iter.next() {
+ // Pseudo-recursion
+ let new_iter = child_node.children.iter();
+ let old_iter = std::mem::replace(&mut iter, new_iter);
+ let key = &**key.full_path();
+ stack.push((key, child_node, old_iter));
+ }
+ // Found the end of a `children.iter()` iterator.
+ if let Some((key, child_node, next_iter)) = stack.pop() {
+ // "Return" from pseudo-recursion by restoring state from the
+ // explicit stack
+ iter = next_iter;
+
+ Some((key, child_node))
+ } else {
+ // Reached the bottom of the stack, we’re done
+ None
+ }
+ })
+ }
+
+ /// Mutable iterator for the `(entry, copy source)` of each node.
+ ///
+ /// It would not be safe to yield mutable references to nodes themselves
+ /// with `-> impl Iterator<Item = &mut Node>` since child nodes are
+ /// reachable from their ancestor nodes, potentially creating multiple
+ /// `&mut` references to a given node.
+ fn iter_node_data_mut<'tree>(
+ &'tree mut self,
+ ) -> impl Iterator<Item = NodeDataMut<'tree, 'on_disk>> + 'tree {
+ // Explicit stack for pseudo-recursion, see `iter_nodes` above.
+ let mut stack = Vec::new();
+ let mut iter = self.root.iter_mut();
+ std::iter::from_fn(move || {
+ while let Some((key, child_node)) = iter.next() {
+ // Pseudo-recursion
+ let data = (
+ &**key.full_path(),
+ &mut child_node.entry,
+ &mut child_node.copy_source,
+ );
+ let new_iter = child_node.children.iter_mut();
+ let old_iter = std::mem::replace(&mut iter, new_iter);
+ stack.push((data, old_iter));
+ }
+ // Found the end of a `children.iter_mut()` iterator.
+ if let Some((data, next_iter)) = stack.pop() {
+ // "Return" from pseudo-recursion by restoring state from the
+ // explicit stack
+ iter = next_iter;
+
+ Some(data)
+ } else {
+ // Reached the bottom of the stack, we’re done
+ None
+ }
+ })
+ }
+}
+
+impl<'on_disk> super::dispatch::DirstateMapMethods for DirstateMap<'on_disk> {
+ fn clear(&mut self) {
+ self.root.clear();
+ self.nodes_with_entry_count = 0;
+ self.nodes_with_copy_source_count = 0;
+ }
+
+ fn add_file(
+ &mut self,
+ filename: &HgPath,
+ old_state: EntryState,
+ entry: DirstateEntry,
+ ) -> Result<(), DirstateMapError> {
+ self.add_or_remove_file(filename, old_state, entry);
+ Ok(())
+ }
+
+ fn remove_file(
+ &mut self,
+ filename: &HgPath,
+ old_state: EntryState,
+ size: i32,
+ ) -> Result<(), DirstateMapError> {
+ let entry = DirstateEntry {
+ state: EntryState::Removed,
+ mode: 0,
+ size,
+ mtime: 0,
+ };
+ self.add_or_remove_file(filename, old_state, entry);
+ Ok(())
+ }
+
+ fn drop_file(
+ &mut self,
+ filename: &HgPath,
+ old_state: EntryState,
+ ) -> Result<bool, DirstateMapError> {
+ let was_tracked = old_state.is_tracked();
+ if let Some(node) = Self::get_node_mut_tracing_ancestors(
+ &mut self.root,
+ filename,
+ |ancestor| {
+ if was_tracked {
+ ancestor.tracked_descendants_count -= 1
+ }
+ },
+ ) {
+ let had_entry = node.entry.is_some();
+ let had_copy_source = node.copy_source.is_some();
+
+ // TODO: this leaves in the tree a "non-file" node. Should we
+ // remove the node instead, together with ancestor nodes for
+ // directories that become empty?
+ node.entry = None;
+ node.copy_source = None;
+
+ if had_entry {
+ self.nodes_with_entry_count -= 1
+ }
+ if had_copy_source {
+ self.nodes_with_copy_source_count -= 1
+ }
+ Ok(had_entry)
+ } else {
+ assert!(!was_tracked);
+ Ok(false)
+ }
+ }
+
+ fn clear_ambiguous_times(&mut self, filenames: Vec<HgPathBuf>, now: i32) {
+ for filename in filenames {
+ if let Some(node) = Self::get_node_mut(&mut self.root, &filename) {
+ if let Some(entry) = node.entry.as_mut() {
+ clear_ambiguous_mtime(entry, now);
+ }
+ }
+ }
+ }
+
+ fn non_normal_entries_contains(&mut self, key: &HgPath) -> bool {
+ self.get_node(key)
+ .and_then(|node| node.entry.as_ref())
+ .map_or(false, DirstateEntry::is_non_normal)
+ }
+
+ fn non_normal_entries_remove(&mut self, _key: &HgPath) {
+ // Do nothing, this `DirstateMap` does not have a separate "non normal
+ // entries" set that needs to be kept up to date
+ }
+
+ fn non_normal_or_other_parent_paths(
+ &mut self,
+ ) -> Box<dyn Iterator<Item = &HgPath> + '_> {
+ Box::new(self.iter_nodes().filter_map(|(path, node)| {
+ node.entry
+ .as_ref()
+ .filter(|entry| {
+ entry.is_non_normal() || entry.is_from_other_parent()
+ })
+ .map(|_| path)
+ }))
+ }
+
+ fn set_non_normal_other_parent_entries(&mut self, _force: bool) {
+ // Do nothing, this `DirstateMap` does not have a separate "non normal
+ // entries" and "from other parent" sets that need to be recomputed
+ }
+
+ fn iter_non_normal_paths(
+ &mut self,
+ ) -> Box<dyn Iterator<Item = &HgPath> + Send + '_> {
+ self.iter_non_normal_paths_panic()
+ }
+
+ fn iter_non_normal_paths_panic(
+ &self,
+ ) -> Box<dyn Iterator<Item = &HgPath> + Send + '_> {
+ Box::new(self.iter_nodes().filter_map(|(path, node)| {
+ node.entry
+ .as_ref()
+ .filter(|entry| entry.is_non_normal())
+ .map(|_| path)
+ }))
+ }
+
+ fn iter_other_parent_paths(
+ &mut self,
+ ) -> Box<dyn Iterator<Item = &HgPath> + Send + '_> {
+ Box::new(self.iter_nodes().filter_map(|(path, node)| {
+ node.entry
+ .as_ref()
+ .filter(|entry| entry.is_from_other_parent())
+ .map(|_| path)
+ }))
+ }
+
+ fn has_tracked_dir(
+ &mut self,
+ directory: &HgPath,
+ ) -> Result<bool, DirstateMapError> {
+ if let Some(node) = self.get_node(directory) {
+ // A node without a `DirstateEntry` was created to hold child
+ // nodes, and is therefore a directory.
+ Ok(node.entry.is_none() && node.tracked_descendants_count > 0)
+ } else {
+ Ok(false)
+ }
+ }
+
+ fn has_dir(
+ &mut self,
+ directory: &HgPath,
+ ) -> Result<bool, DirstateMapError> {
+ if let Some(node) = self.get_node(directory) {
+ // A node without a `DirstateEntry` was created to hold child
+ // nodes, and is therefore a directory.
+ Ok(node.entry.is_none())
+ } else {
+ Ok(false)
+ }
+ }
+
+ fn pack(
+ &mut self,
+ parents: DirstateParents,
+ now: Timestamp,
+ ) -> Result<Vec<u8>, DirstateError> {
+ // Optimization (to be measured?): pre-compute size to avoid `Vec`
+ // reallocations
+ let mut size = parents.as_bytes().len();
+ for (path, node) in self.iter_nodes() {
+ if node.entry.is_some() {
+ size += packed_entry_size(
+ path,
+ node.copy_source.as_ref().map(|p| &**p),
+ )
+ }
+ }
+
+ let mut packed = Vec::with_capacity(size);
+ packed.extend(parents.as_bytes());
+
+ let now: i32 = now.0.try_into().expect("time overflow");
+ for (path, opt_entry, copy_source) in self.iter_node_data_mut() {
+ if let Some(entry) = opt_entry {
+ clear_ambiguous_mtime(entry, now);
+ pack_entry(
+ path,
+ entry,
+ copy_source.as_ref().map(|p| &**p),
+ &mut packed,
+ );
+ }
+ }
+ Ok(packed)
+ }
+
+ fn set_all_dirs(&mut self) -> Result<(), DirstateMapError> {
+ // Do nothing, this `DirstateMap` does not have a separate `all_dirs` that
+ // needs to be recomputed
+ Ok(())
+ }
+
+ fn set_dirs(&mut self) -> Result<(), DirstateMapError> {
+ // Do nothing, this `DirstateMap` does not have a separate `dirs` that needs
+ // to be recomputed
+ Ok(())
+ }
+
+ fn status<'a>(
+ &'a mut self,
+ matcher: &'a (dyn Matcher + Sync),
+ root_dir: PathBuf,
+ ignore_files: Vec<PathBuf>,
+ options: StatusOptions,
+ ) -> Result<(DirstateStatus<'a>, Vec<PatternFileWarning>), StatusError>
+ {
+ super::status::status(self, matcher, root_dir, ignore_files, options)
+ }
+
+ fn copy_map_len(&self) -> usize {
+ self.nodes_with_copy_source_count
+ }
+
+ fn copy_map_iter(&self) -> CopyMapIter<'_> {
+ Box::new(self.iter_nodes().filter_map(|(path, node)| {
+ node.copy_source
+ .as_ref()
+ .map(|copy_source| (path, &**copy_source))
+ }))
+ }
+
+ fn copy_map_contains_key(&self, key: &HgPath) -> bool {
+ if let Some(node) = self.get_node(key) {
+ node.copy_source.is_some()
+ } else {
+ false
+ }
+ }
+
+ fn copy_map_get(&self, key: &HgPath) -> Option<&HgPath> {
+ self.get_node(key)?.copy_source.as_ref().map(|p| &**p)
+ }
+
+ fn copy_map_remove(&mut self, key: &HgPath) -> Option<HgPathBuf> {
+ let count = &mut self.nodes_with_copy_source_count;
+ Self::get_node_mut(&mut self.root, key).and_then(|node| {
+ if node.copy_source.is_some() {
+ *count -= 1
+ }
+ node.copy_source.take().map(Cow::into_owned)
+ })
+ }
+
+ fn copy_map_insert(
+ &mut self,
+ key: HgPathBuf,
+ value: HgPathBuf,
+ ) -> Option<HgPathBuf> {
+ let node = Self::get_or_insert_node(
+ &mut self.root,
+ &key,
+ WithBasename::to_cow_owned,
+ |_ancestor| {},
+ );
+ if node.copy_source.is_none() {
+ self.nodes_with_copy_source_count += 1
+ }
+ node.copy_source.replace(value.into()).map(Cow::into_owned)
+ }
+
+ fn len(&self) -> usize {
+ self.nodes_with_entry_count
+ }
+
+ fn contains_key(&self, key: &HgPath) -> bool {
+ self.get(key).is_some()
+ }
+
+ fn get(&self, key: &HgPath) -> Option<&DirstateEntry> {
+ self.get_node(key)?.entry.as_ref()
+ }
+
+ fn iter(&self) -> StateMapIter<'_> {
+ Box::new(self.iter_nodes().filter_map(|(path, node)| {
+ node.entry.as_ref().map(|entry| (path, entry))
+ }))
+ }
+}
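
A sketch of how the tree-based map is built and queried, assuming the `DirstateMapMethods` trait (defined in `dispatch.rs` below) is in scope and `on_disk` holds the raw `.hg/dirstate` bytes:

    let (map, _parents) = DirstateMap::new(&on_disk)?;
    // `len()` counts only nodes carrying a `DirstateEntry`; nodes created
    // purely to hold children (directories) are excluded.
    assert_eq!(map.len(), map.iter().count());
    // Copy sources borrow from `on_disk` (`Cow::Borrowed`) until modified.
    let _copy = map.copy_map_get(HgPath::new(b"some/renamed/file"));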
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/rust/hg-core/src/dirstate_tree/dispatch.rs Fri May 07 22:06:25 2021 -0400
@@ -0,0 +1,284 @@
+use std::path::PathBuf;
+
+use crate::dirstate::parsers::Timestamp;
+use crate::matchers::Matcher;
+use crate::utils::hg_path::{HgPath, HgPathBuf};
+use crate::CopyMapIter;
+use crate::DirstateEntry;
+use crate::DirstateError;
+use crate::DirstateMap;
+use crate::DirstateMapError;
+use crate::DirstateParents;
+use crate::DirstateStatus;
+use crate::EntryState;
+use crate::PatternFileWarning;
+use crate::StateMapIter;
+use crate::StatusError;
+use crate::StatusOptions;
+
+pub trait DirstateMapMethods {
+ fn clear(&mut self);
+
+ fn add_file(
+ &mut self,
+ filename: &HgPath,
+ old_state: EntryState,
+ entry: DirstateEntry,
+ ) -> Result<(), DirstateMapError>;
+
+ fn remove_file(
+ &mut self,
+ filename: &HgPath,
+ old_state: EntryState,
+ size: i32,
+ ) -> Result<(), DirstateMapError>;
+
+ fn drop_file(
+ &mut self,
+ filename: &HgPath,
+ old_state: EntryState,
+ ) -> Result<bool, DirstateMapError>;
+
+ fn clear_ambiguous_times(&mut self, filenames: Vec<HgPathBuf>, now: i32);
+
+ fn non_normal_entries_contains(&mut self, key: &HgPath) -> bool;
+
+ fn non_normal_entries_remove(&mut self, key: &HgPath);
+
+ fn non_normal_or_other_parent_paths(
+ &mut self,
+ ) -> Box<dyn Iterator<Item = &HgPath> + '_>;
+
+ fn set_non_normal_other_parent_entries(&mut self, force: bool);
+
+ fn iter_non_normal_paths(
+ &mut self,
+ ) -> Box<dyn Iterator<Item = &HgPath> + Send + '_>;
+
+ fn iter_non_normal_paths_panic(
+ &self,
+ ) -> Box<dyn Iterator<Item = &HgPath> + Send + '_>;
+
+ fn iter_other_parent_paths(
+ &mut self,
+ ) -> Box<dyn Iterator<Item = &HgPath> + Send + '_>;
+
+ fn has_tracked_dir(
+ &mut self,
+ directory: &HgPath,
+ ) -> Result<bool, DirstateMapError>;
+
+ fn has_dir(
+ &mut self,
+ directory: &HgPath,
+ ) -> Result<bool, DirstateMapError>;
+
+ fn pack(
+ &mut self,
+ parents: DirstateParents,
+ now: Timestamp,
+ ) -> Result<Vec<u8>, DirstateError>;
+
+ fn set_all_dirs(&mut self) -> Result<(), DirstateMapError>;
+
+ fn set_dirs(&mut self) -> Result<(), DirstateMapError>;
+
+ fn status<'a>(
+ &'a mut self,
+ matcher: &'a (dyn Matcher + Sync),
+ root_dir: PathBuf,
+ ignore_files: Vec<PathBuf>,
+ options: StatusOptions,
+ ) -> Result<(DirstateStatus<'a>, Vec<PatternFileWarning>), StatusError>;
+
+ fn copy_map_len(&self) -> usize;
+
+ fn copy_map_iter(&self) -> CopyMapIter<'_>;
+
+ fn copy_map_contains_key(&self, key: &HgPath) -> bool;
+
+ fn copy_map_get(&self, key: &HgPath) -> Option<&HgPath>;
+
+ fn copy_map_remove(&mut self, key: &HgPath) -> Option<HgPathBuf>;
+
+ fn copy_map_insert(
+ &mut self,
+ key: HgPathBuf,
+ value: HgPathBuf,
+ ) -> Option<HgPathBuf>;
+
+ fn len(&self) -> usize;
+
+ fn contains_key(&self, key: &HgPath) -> bool;
+
+ fn get(&self, key: &HgPath) -> Option<&DirstateEntry>;
+
+ fn iter(&self) -> StateMapIter<'_>;
+}
+
+impl DirstateMapMethods for DirstateMap {
+ fn clear(&mut self) {
+ self.clear()
+ }
+
+ fn add_file(
+ &mut self,
+ filename: &HgPath,
+ old_state: EntryState,
+ entry: DirstateEntry,
+ ) -> Result<(), DirstateMapError> {
+ self.add_file(filename, old_state, entry)
+ }
+
+ fn remove_file(
+ &mut self,
+ filename: &HgPath,
+ old_state: EntryState,
+ size: i32,
+ ) -> Result<(), DirstateMapError> {
+ self.remove_file(filename, old_state, size)
+ }
+
+ fn drop_file(
+ &mut self,
+ filename: &HgPath,
+ old_state: EntryState,
+ ) -> Result<bool, DirstateMapError> {
+ self.drop_file(filename, old_state)
+ }
+
+ fn clear_ambiguous_times(&mut self, filenames: Vec<HgPathBuf>, now: i32) {
+ self.clear_ambiguous_times(filenames, now)
+ }
+
+ fn non_normal_entries_contains(&mut self, key: &HgPath) -> bool {
+ let (non_normal, _other_parent) =
+ self.get_non_normal_other_parent_entries();
+ non_normal.contains(key)
+ }
+
+ fn non_normal_entries_remove(&mut self, key: &HgPath) {
+ self.non_normal_entries_remove(key)
+ }
+
+ fn non_normal_or_other_parent_paths(
+ &mut self,
+ ) -> Box<dyn Iterator<Item = &HgPath> + '_> {
+ let (non_normal, other_parent) =
+ self.get_non_normal_other_parent_entries();
+ Box::new(non_normal.union(other_parent).map(|p| &**p))
+ }
+
+ fn set_non_normal_other_parent_entries(&mut self, force: bool) {
+ self.set_non_normal_other_parent_entries(force)
+ }
+
+ fn iter_non_normal_paths(
+ &mut self,
+ ) -> Box<dyn Iterator<Item = &HgPath> + Send + '_> {
+ let (non_normal, _other_parent) =
+ self.get_non_normal_other_parent_entries();
+ Box::new(non_normal.iter().map(|p| &**p))
+ }
+
+ fn iter_non_normal_paths_panic(
+ &self,
+ ) -> Box<dyn Iterator<Item = &HgPath> + Send + '_> {
+ let (non_normal, _other_parent) =
+ self.get_non_normal_other_parent_entries_panic();
+ Box::new(non_normal.iter().map(|p| &**p))
+ }
+
+ fn iter_other_parent_paths(
+ &mut self,
+ ) -> Box<dyn Iterator<Item = &HgPath> + Send + '_> {
+ let (_non_normal, other_parent) =
+ self.get_non_normal_other_parent_entries();
+ Box::new(other_parent.iter().map(|p| &**p))
+ }
+
+ fn has_tracked_dir(
+ &mut self,
+ directory: &HgPath,
+ ) -> Result<bool, DirstateMapError> {
+ self.has_tracked_dir(directory)
+ }
+
+ fn has_dir(
+ &mut self,
+ directory: &HgPath,
+ ) -> Result<bool, DirstateMapError> {
+ self.has_dir(directory)
+ }
+
+ fn pack(
+ &mut self,
+ parents: DirstateParents,
+ now: Timestamp,
+ ) -> Result<Vec<u8>, DirstateError> {
+ self.pack(parents, now)
+ }
+
+ fn set_all_dirs(&mut self) -> Result<(), DirstateMapError> {
+ self.set_all_dirs()
+ }
+
+ fn set_dirs(&mut self) -> Result<(), DirstateMapError> {
+ self.set_dirs()
+ }
+
+ fn status<'a>(
+ &'a mut self,
+ matcher: &'a (dyn Matcher + Sync),
+ root_dir: PathBuf,
+ ignore_files: Vec<PathBuf>,
+ options: StatusOptions,
+ ) -> Result<(DirstateStatus<'a>, Vec<PatternFileWarning>), StatusError>
+ {
+ crate::status(self, matcher, root_dir, ignore_files, options)
+ }
+
+ fn copy_map_len(&self) -> usize {
+ self.copy_map.len()
+ }
+
+ fn copy_map_iter(&self) -> CopyMapIter<'_> {
+ Box::new(self.copy_map.iter().map(|(key, value)| (&**key, &**value)))
+ }
+
+ fn copy_map_contains_key(&self, key: &HgPath) -> bool {
+ self.copy_map.contains_key(key)
+ }
+
+ fn copy_map_get(&self, key: &HgPath) -> Option<&HgPath> {
+ self.copy_map.get(key).map(|p| &**p)
+ }
+
+ fn copy_map_remove(&mut self, key: &HgPath) -> Option<HgPathBuf> {
+ self.copy_map.remove(key)
+ }
+
+ fn copy_map_insert(
+ &mut self,
+ key: HgPathBuf,
+ value: HgPathBuf,
+ ) -> Option<HgPathBuf> {
+ self.copy_map.insert(key, value)
+ }
+
+ fn len(&self) -> usize {
+ (&**self).len()
+ }
+
+ fn contains_key(&self, key: &HgPath) -> bool {
+ (&**self).contains_key(key)
+ }
+
+ fn get(&self, key: &HgPath) -> Option<&DirstateEntry> {
+ (&**self).get(key)
+ }
+
+ fn iter(&self) -> StateMapIter<'_> {
+ Box::new((&**self).iter().map(|(key, value)| (&**key, value)))
+ }
+}
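
The point of the trait is to let callers, ultimately the Python bindings, hold either implementation behind a single pointer type. A hedged sketch; the `use_tree` selector and the function itself are hypothetical, not part of this patch:

    fn open_map(
        on_disk: &[u8],
        use_tree: bool, // hypothetical selector
    ) -> Result<Box<dyn DirstateMapMethods + '_>, DirstateError> {
        if use_tree {
            let (map, _parents) =
                crate::dirstate_tree::dirstate_map::DirstateMap::new(on_disk)?;
            Ok(Box::new(map))
        } else {
            // The flat map starts empty; the caller feeds it via `read`.
            Ok(Box::new(crate::DirstateMap::default()))
        }
    }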
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/rust/hg-core/src/dirstate_tree/path_with_basename.rs Fri May 07 22:06:25 2021 -0400
@@ -0,0 +1,172 @@
+use crate::utils::hg_path::HgPath;
+use std::borrow::{Borrow, Cow};
+
+/// Wraps `HgPath` or `HgPathBuf` to make it behave "as" its last path
+/// component, a.k.a. its base name (as in Python’s `os.path.basename`), but
+/// also allow recovering the full path.
+///
+/// "Behaving as" means that equality and comparison consider only the base
+/// name, and `std::borrow::Borrow` is implemented to return only the base
+/// name. This allows using the base name as a map key while still being able
+/// to recover the full path, in a single memory allocation.
+#[derive(Debug)]
+pub struct WithBasename<T> {
+ full_path: T,
+
+ /// The position after the last slash separator in `full_path`, or `0`
+ /// if there is no slash.
+ base_name_start: usize,
+}
+
+impl<T> WithBasename<T> {
+ pub fn full_path(&self) -> &T {
+ &self.full_path
+ }
+}
+
+impl<T: AsRef<HgPath>> WithBasename<T> {
+ pub fn new(full_path: T) -> Self {
+ let base_name_start = if let Some(last_slash_position) = full_path
+ .as_ref()
+ .as_bytes()
+ .iter()
+ .rposition(|&byte| byte == b'/')
+ {
+ last_slash_position + 1
+ } else {
+ 0
+ };
+ Self {
+ base_name_start,
+ full_path,
+ }
+ }
+
+ pub fn base_name(&self) -> &HgPath {
+ HgPath::new(
+ &self.full_path.as_ref().as_bytes()[self.base_name_start..],
+ )
+ }
+}
+
+impl<T: AsRef<HgPath>> Borrow<HgPath> for WithBasename<T> {
+ fn borrow(&self) -> &HgPath {
+ self.base_name()
+ }
+}
+
+impl<T: AsRef<HgPath>> std::hash::Hash for WithBasename<T> {
+ fn hash<H: std::hash::Hasher>(&self, hasher: &mut H) {
+ self.base_name().hash(hasher)
+ }
+}
+
+impl<T: AsRef<HgPath> + PartialEq> PartialEq for WithBasename<T> {
+ fn eq(&self, other: &Self) -> bool {
+ self.base_name() == other.base_name()
+ }
+}
+
+impl<T: AsRef<HgPath> + Eq> Eq for WithBasename<T> {}
+
+impl<T: AsRef<HgPath> + PartialOrd> PartialOrd for WithBasename<T> {
+ fn partial_cmp(&self, other: &Self) -> Option<std::cmp::Ordering> {
+ self.base_name().partial_cmp(other.base_name())
+ }
+}
+
+impl<T: AsRef<HgPath> + Ord> Ord for WithBasename<T> {
+ fn cmp(&self, other: &Self) -> std::cmp::Ordering {
+ self.base_name().cmp(other.base_name())
+ }
+}
+
+impl<'a> WithBasename<&'a HgPath> {
+ pub fn to_cow_borrowed(self) -> WithBasename<Cow<'a, HgPath>> {
+ WithBasename {
+ full_path: Cow::Borrowed(self.full_path),
+ base_name_start: self.base_name_start,
+ }
+ }
+
+ pub fn to_cow_owned<'b>(self) -> WithBasename<Cow<'b, HgPath>> {
+ WithBasename {
+ full_path: Cow::Owned(self.full_path.to_owned()),
+ base_name_start: self.base_name_start,
+ }
+ }
+}
+
+impl<'a> WithBasename<&'a HgPath> {
+ /// Returns an iterator of `WithBasename<&HgPath>` for the ancestor
+ /// directory paths of the given `path`, as well as `path` itself.
+ ///
+ /// For example, the full paths of inclusive ancestors of "a/b/c" are "a",
+ /// "a/b", and "a/b/c" in that order.
+ pub fn inclusive_ancestors_of(
+ path: &'a HgPath,
+ ) -> impl Iterator<Item = WithBasename<&'a HgPath>> {
+ let mut slash_positions =
+ path.as_bytes().iter().enumerate().filter_map(|(i, &byte)| {
+ if byte == b'/' {
+ Some(i)
+ } else {
+ None
+ }
+ });
+ let mut opt_next_component_start = Some(0);
+ std::iter::from_fn(move || {
+ opt_next_component_start.take().map(|next_component_start| {
+ if let Some(slash_pos) = slash_positions.next() {
+ opt_next_component_start = Some(slash_pos + 1);
+ Self {
+ full_path: HgPath::new(&path.as_bytes()[..slash_pos]),
+ base_name_start: next_component_start,
+ }
+ } else {
+ // Not setting `opt_next_component_start` here: there will
+ // be no iteration after this one because `.take()` set it
+ // to `None`.
+ Self {
+ full_path: path,
+ base_name_start: next_component_start,
+ }
+ }
+ })
+ })
+ }
+}
+
+#[test]
+fn test() {
+ let a = WithBasename::new(HgPath::new("a").to_owned());
+ assert_eq!(&**a.full_path(), HgPath::new(b"a"));
+ assert_eq!(a.base_name(), HgPath::new(b"a"));
+
+ let cba = WithBasename::new(HgPath::new("c/b/a").to_owned());
+ assert_eq!(&**cba.full_path(), HgPath::new(b"c/b/a"));
+ assert_eq!(cba.base_name(), HgPath::new(b"a"));
+
+ assert_eq!(a, cba);
+ let borrowed: &HgPath = cba.borrow();
+ assert_eq!(borrowed, HgPath::new("a"));
+}
+
+#[test]
+fn test_inclusive_ancestors() {
+ let mut iter = WithBasename::inclusive_ancestors_of(HgPath::new("a/bb/c"));
+
+ let next = iter.next().unwrap();
+ assert_eq!(*next.full_path(), HgPath::new("a"));
+ assert_eq!(next.base_name(), HgPath::new("a"));
+
+ let next = iter.next().unwrap();
+ assert_eq!(*next.full_path(), HgPath::new("a/bb"));
+ assert_eq!(next.base_name(), HgPath::new("bb"));
+
+ let next = iter.next().unwrap();
+ assert_eq!(*next.full_path(), HgPath::new("a/bb/c"));
+ assert_eq!(next.base_name(), HgPath::new("c"));
+
+ assert!(iter.next().is_none());
+}
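
The `Borrow<HgPath>` impl is what makes this type usable as designed: a map keyed by `WithBasename` can be probed with nothing but a base name, while each key still remembers its full path. A minimal sketch, assuming `HgPathBuf` is imported alongside `HgPath`:

    use std::collections::HashMap;

    let mut children: HashMap<WithBasename<HgPathBuf>, u32> = HashMap::new();
    children.insert(WithBasename::new(HgPath::new("a/b/c").to_owned()), 1);
    // `Hash` and `Eq` both go through the base name, so a lookup only
    // needs the final component:
    assert_eq!(children.get(HgPath::new("c")), Some(&1));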
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/rust/hg-core/src/dirstate_tree/status.rs Fri May 07 22:06:25 2021 -0400
@@ -0,0 +1,428 @@
+use crate::dirstate::status::IgnoreFnType;
+use crate::dirstate_tree::dirstate_map::ChildNodes;
+use crate::dirstate_tree::dirstate_map::DirstateMap;
+use crate::dirstate_tree::dirstate_map::Node;
+use crate::matchers::get_ignore_function;
+use crate::matchers::Matcher;
+use crate::utils::files::get_bytes_from_os_string;
+use crate::utils::hg_path::HgPath;
+use crate::BadMatch;
+use crate::DirstateStatus;
+use crate::EntryState;
+use crate::HgPathBuf;
+use crate::PatternFileWarning;
+use crate::StatusError;
+use crate::StatusOptions;
+use micro_timer::timed;
+use rayon::prelude::*;
+use std::borrow::Cow;
+use std::io;
+use std::path::Path;
+use std::path::PathBuf;
+use std::sync::Mutex;
+
+/// Returns the status of the working directory compared to its parent
+/// changeset.
+///
+/// This algorithm is based on traversing the filesystem tree (`fs` in function
+/// and variable names) and dirstate tree at the same time. The core of this
+/// traversal is the recursive `traverse_fs_directory_and_dirstate` function
+/// and its use of `itertools::merge_join_by`. When reaching a path that only
+/// exists in one of the two trees, depending on information requested by
+/// `options` we may need to traverse the remaining subtree.
+#[timed]
+pub fn status<'tree>(
+ dmap: &'tree mut DirstateMap,
+ matcher: &(dyn Matcher + Sync),
+ root_dir: PathBuf,
+ ignore_files: Vec<PathBuf>,
+ options: StatusOptions,
+) -> Result<(DirstateStatus<'tree>, Vec<PatternFileWarning>), StatusError> {
+ let (ignore_fn, warnings): (IgnoreFnType, _) =
+ if options.list_ignored || options.list_unknown {
+ get_ignore_function(ignore_files, &root_dir)?
+ } else {
+ (Box::new(|&_| true), vec![])
+ };
+
+ let common = StatusCommon {
+ options,
+ matcher,
+ ignore_fn,
+ outcome: Mutex::new(DirstateStatus::default()),
+ };
+ let is_at_repo_root = true;
+ let hg_path = HgPath::new("");
+ let has_ignored_ancestor = false;
+ common.traverse_fs_directory_and_dirstate(
+ has_ignored_ancestor,
+ &mut dmap.root,
+ hg_path,
+ &root_dir,
+ is_at_repo_root,
+ );
+ Ok((common.outcome.into_inner().unwrap(), warnings))
+}
+
+/// Bag of random things needed by various parts of the algorithm. Reduces the
+/// number of parameters passed to functions.
+struct StatusCommon<'tree, 'a> {
+ options: StatusOptions,
+ matcher: &'a (dyn Matcher + Sync),
+ ignore_fn: IgnoreFnType<'a>,
+ outcome: Mutex<DirstateStatus<'tree>>,
+}
+
+impl<'tree, 'a> StatusCommon<'tree, 'a> {
+ fn read_dir(
+ &self,
+ hg_path: &HgPath,
+ fs_path: &Path,
+ is_at_repo_root: bool,
+ ) -> Result<Vec<DirEntry>, ()> {
+ DirEntry::read_dir(fs_path, is_at_repo_root).map_err(|error| {
+ let errno = error.raw_os_error().expect("expected real OS error");
+ self.outcome
+ .lock()
+ .unwrap()
+ .bad
+ .push((hg_path.to_owned().into(), BadMatch::OsError(errno)))
+ })
+ }
+
+ fn traverse_fs_directory_and_dirstate(
+ &self,
+ has_ignored_ancestor: bool,
+ dirstate_nodes: &'tree mut ChildNodes,
+ directory_hg_path: &'tree HgPath,
+ directory_fs_path: &Path,
+ is_at_repo_root: bool,
+ ) {
+ let mut fs_entries = if let Ok(entries) = self.read_dir(
+ directory_hg_path,
+ directory_fs_path,
+ is_at_repo_root,
+ ) {
+ entries
+ } else {
+ return;
+ };
+
+ // `merge_join_by` requires both its input iterators to be sorted:
+
+ let mut dirstate_nodes: Vec<_> = dirstate_nodes.iter_mut().collect();
+ // `sort_unstable_by_key` doesn’t allow keys borrowing from the value:
+ // https://github.com/rust-lang/rust/issues/34162
+ dirstate_nodes
+ .sort_unstable_by(|(path1, _), (path2, _)| path1.cmp(path2));
+ fs_entries.sort_unstable_by(|e1, e2| e1.base_name.cmp(&e2.base_name));
+
+ itertools::merge_join_by(
+ dirstate_nodes,
+ &fs_entries,
+ |(full_path, _node), fs_entry| {
+ full_path.base_name().cmp(&fs_entry.base_name)
+ },
+ )
+ .par_bridge()
+ .for_each(|pair| {
+ use itertools::EitherOrBoth::*;
+ match pair {
+ Both((hg_path, dirstate_node), fs_entry) => {
+ self.traverse_fs_and_dirstate(
+ fs_entry,
+ hg_path.full_path(),
+ dirstate_node,
+ has_ignored_ancestor,
+ );
+ }
+ Left((hg_path, dirstate_node)) => self.traverse_dirstate_only(
+ hg_path.full_path(),
+ dirstate_node,
+ ),
+ Right(fs_entry) => self.traverse_fs_only(
+ has_ignored_ancestor,
+ directory_hg_path,
+ fs_entry,
+ ),
+ }
+ })
+ }
+
+ fn traverse_fs_and_dirstate(
+ &self,
+ fs_entry: &DirEntry,
+ hg_path: &'tree HgPath,
+ dirstate_node: &'tree mut Node,
+ has_ignored_ancestor: bool,
+ ) {
+ let file_type = fs_entry.metadata.file_type();
+ let file_or_symlink = file_type.is_file() || file_type.is_symlink();
+ if !file_or_symlink {
+ // If we previously had a file here, it was removed (with
+ // `hg rm` or similar) or deleted before it could be
+ // replaced by a directory or something else.
+ self.mark_removed_or_deleted_if_file(
+ hg_path,
+ dirstate_node.state(),
+ );
+ }
+ if file_type.is_dir() {
+ if self.options.collect_traversed_dirs {
+ self.outcome.lock().unwrap().traversed.push(hg_path.into())
+ }
+ let is_ignored = has_ignored_ancestor || (self.ignore_fn)(hg_path);
+ let is_at_repo_root = false;
+ self.traverse_fs_directory_and_dirstate(
+ is_ignored,
+ &mut dirstate_node.children,
+ hg_path,
+ &fs_entry.full_path,
+ is_at_repo_root,
+ );
+ } else {
+ if file_or_symlink && self.matcher.matches(hg_path) {
+ let full_path = Cow::from(hg_path);
+ if let Some(entry) = &dirstate_node.entry {
+ match entry.state {
+ EntryState::Added => {
+ self.outcome.lock().unwrap().added.push(full_path)
+ }
+ EntryState::Removed => self
+ .outcome
+ .lock()
+ .unwrap()
+ .removed
+ .push(full_path),
+ EntryState::Merged => self
+ .outcome
+ .lock()
+ .unwrap()
+ .modified
+ .push(full_path),
+ EntryState::Normal => {
+ self.handle_normal_file(
+ full_path,
+ dirstate_node,
+ entry,
+ fs_entry,
+ );
+ }
+ // This variant is not used in DirstateMap
+ // nodes
+ EntryState::Unknown => unreachable!(),
+ }
+ } else {
+ // `node.entry.is_none()` indicates a "directory"
+ // node, but the filesystem has a file
+ self.mark_unknown_or_ignored(
+ has_ignored_ancestor,
+ full_path,
+ )
+ }
+ }
+
+ for (child_hg_path, child_node) in &mut dirstate_node.children {
+ self.traverse_dirstate_only(
+ child_hg_path.full_path(),
+ child_node,
+ )
+ }
+ }
+ }
+
+ /// A file with `EntryState::Normal` in the dirstate was found in the
+ /// filesystem
+ fn handle_normal_file(
+ &self,
+ full_path: Cow<'tree, HgPath>,
+ dirstate_node: &Node,
+ entry: &crate::DirstateEntry,
+ fs_entry: &DirEntry,
+ ) {
+ // Keep the low 31 bits
+ fn truncate_u64(value: u64) -> i32 {
+ (value & 0x7FFF_FFFF) as i32
+ }
+ fn truncate_i64(value: i64) -> i32 {
+ (value & 0x7FFF_FFFF) as i32
+ }
+
+ let mode_changed = || {
+ self.options.check_exec && entry.mode_changed(&fs_entry.metadata)
+ };
+ let size_changed = entry.size != truncate_u64(fs_entry.metadata.len());
+ if entry.size >= 0
+ && size_changed
+ && fs_entry.metadata.file_type().is_symlink()
+ {
+ // issue6456: Size returned may be longer due to encryption
+ // on EXT-4 fscrypt. TODO maybe only do it on EXT4?
+ self.outcome.lock().unwrap().unsure.push(full_path)
+ } else if dirstate_node.copy_source.is_some()
+ || entry.is_from_other_parent()
+ || (entry.size >= 0 && (size_changed || mode_changed()))
+ {
+ self.outcome.lock().unwrap().modified.push(full_path)
+ } else {
+ let mtime = mtime_seconds(&fs_entry.metadata);
+ if truncate_i64(mtime) != entry.mtime
+ || mtime == self.options.last_normal_time
+ {
+ self.outcome.lock().unwrap().unsure.push(full_path)
+ } else if self.options.list_clean {
+ self.outcome.lock().unwrap().clean.push(full_path)
+ }
+ }
+ }
+
+ /// A node in the dirstate tree has no corresponding filesystem entry
+ fn traverse_dirstate_only(
+ &self,
+ hg_path: &'tree HgPath,
+ dirstate_node: &'tree mut Node,
+ ) {
+ self.mark_removed_or_deleted_if_file(hg_path, dirstate_node.state());
+ dirstate_node.children.par_iter_mut().for_each(
+ |(child_hg_path, child_node)| {
+ self.traverse_dirstate_only(
+ child_hg_path.full_path(),
+ child_node,
+ )
+ },
+ )
+ }
+
+ /// A node in the dirstate tree has no corresponding *file* on the
+ /// filesystem
+ ///
+ /// Does nothing on a "directory" node
+ fn mark_removed_or_deleted_if_file(
+ &self,
+ hg_path: &'tree HgPath,
+ dirstate_node_state: Option<EntryState>,
+ ) {
+ if let Some(state) = dirstate_node_state {
+ if self.matcher.matches(hg_path) {
+ if let EntryState::Removed = state {
+ self.outcome.lock().unwrap().removed.push(hg_path.into())
+ } else {
+ self.outcome.lock().unwrap().deleted.push(hg_path.into())
+ }
+ }
+ }
+ }
+
+ /// Something in the filesystem has no corresponding dirstate node
+ fn traverse_fs_only(
+ &self,
+ has_ignored_ancestor: bool,
+ directory_hg_path: &HgPath,
+ fs_entry: &DirEntry,
+ ) {
+ let hg_path = directory_hg_path.join(&fs_entry.base_name);
+ let file_type = fs_entry.metadata.file_type();
+ let file_or_symlink = file_type.is_file() || file_type.is_symlink();
+ if file_type.is_dir() {
+ let is_ignored =
+ has_ignored_ancestor || (self.ignore_fn)(&hg_path);
+ let traverse_children = if is_ignored {
+ // Descendants of an ignored directory are all ignored
+ self.options.list_ignored
+ } else {
+ // Descendants of an unknown directory may be either unknown or
+ // ignored
+ self.options.list_unknown || self.options.list_ignored
+ };
+ if traverse_children {
+ let is_at_repo_root = false;
+ if let Ok(children_fs_entries) = self.read_dir(
+ &hg_path,
+ &fs_entry.full_path,
+ is_at_repo_root,
+ ) {
+ children_fs_entries.par_iter().for_each(|child_fs_entry| {
+ self.traverse_fs_only(
+ is_ignored,
+ &hg_path,
+ child_fs_entry,
+ )
+ })
+ }
+ }
+ if self.options.collect_traversed_dirs {
+ self.outcome.lock().unwrap().traversed.push(hg_path.into())
+ }
+ } else if file_or_symlink && self.matcher.matches(&hg_path) {
+ self.mark_unknown_or_ignored(has_ignored_ancestor, hg_path.into())
+ }
+ }
+
+ fn mark_unknown_or_ignored(
+ &self,
+ has_ignored_ancestor: bool,
+ hg_path: Cow<'tree, HgPath>,
+ ) {
+ let is_ignored = has_ignored_ancestor || (self.ignore_fn)(&hg_path);
+ if is_ignored {
+ if self.options.list_ignored {
+ self.outcome.lock().unwrap().ignored.push(hg_path)
+ }
+ } else {
+ if self.options.list_unknown {
+ self.outcome.lock().unwrap().unknown.push(hg_path)
+ }
+ }
+ }
+}
+
+#[cfg(unix)] // TODO
+fn mtime_seconds(metadata: &std::fs::Metadata) -> i64 {
+    // Going through `Metadata::modified()` would be portable, but would go to
+    // the trouble of constructing a `SystemTime` value with sub-second
+    // precision just for us to throw that precision away here.
+ use std::os::unix::fs::MetadataExt;
+ metadata.mtime()
+}
+
+struct DirEntry {
+ base_name: HgPathBuf,
+ full_path: PathBuf,
+ metadata: std::fs::Metadata,
+}
+
+impl DirEntry {
+ /// Returns **unsorted** entries in the given directory, with name and
+ /// metadata.
+ ///
+ /// If a `.hg` sub-directory is encountered:
+ ///
+ /// * At the repository root, ignore that sub-directory
+ /// * Elsewhere, we’re listing the content of a sub-repo. Return an empty
+ /// list instead.
+ fn read_dir(path: &Path, is_at_repo_root: bool) -> io::Result<Vec<Self>> {
+ let mut results = Vec::new();
+ for entry in path.read_dir()? {
+ let entry = entry?;
+ let metadata = entry.metadata()?;
+ let name = get_bytes_from_os_string(entry.file_name());
+ // FIXME don't do this when cached
+ if name == b".hg" {
+ if is_at_repo_root {
+ // Skip the repo’s own .hg (might be a symlink)
+ continue;
+ } else if metadata.is_dir() {
+ // A .hg sub-directory at another location means a subrepo,
+ // skip it entirely.
+ return Ok(Vec::new());
+ }
+ }
+ results.push(DirEntry {
+ base_name: name.into(),
+ full_path: entry.path(),
+ metadata,
+ })
+ }
+ Ok(results)
+ }
+}
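
A standalone sketch of the 31-bit truncation the status code above applies before comparing filesystem metadata with the `i32` size and mtime fields of a dirstate entry (simplified, not part of the patch): masking with `0x7FFF_FFFF` keeps only the low 31 bits, so the result always fits in a non-negative `i32`.

    fn truncate_u64(value: u64) -> i32 {
        // Keep the low 31 bits, as in the helper above.
        (value & 0x7FFF_FFFF) as i32
    }

    fn main() {
        assert_eq!(truncate_u64(5), 5);
        // 2^31 masks down to 0; dirstate entries store sizes and mtimes
        // truncated the same way, which is why the filesystem value is
        // truncated before the comparison.
        assert_eq!(truncate_u64(1 << 31), 0);
        assert_eq!(truncate_u64(u64::MAX), 0x7FFF_FFFF);
    }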
--- a/rust/hg-core/src/lib.rs Sun May 02 16:56:20 2021 -0400
+++ b/rust/hg-core/src/lib.rs Fri May 07 22:06:25 2021 -0400
@@ -8,7 +8,8 @@
pub mod dagops;
pub mod errors;
pub use ancestors::{AncestorsIterator, LazyAncestors, MissingAncestors};
-mod dirstate;
+pub mod dirstate;
+pub mod dirstate_tree;
pub mod discovery;
pub mod requirements;
pub mod testing; // unconditionally built, for use from integration tests
--- a/rust/hg-core/src/operations/dirstate_status.rs Sun May 02 16:56:20 2021 -0400
+++ b/rust/hg-core/src/operations/dirstate_status.rs Fri May 07 22:06:25 2021 -0400
@@ -5,17 +5,12 @@
// This software may be used and distributed according to the terms of the
// GNU General Public License version 2 or any later version.
-use crate::dirstate::status::{build_response, Dispatch, HgPathCow, Status};
+use crate::dirstate::status::{build_response, Dispatch, Status};
use crate::matchers::Matcher;
use crate::{DirstateStatus, StatusError};
-/// A tuple of the paths that need to be checked in the filelog because it's
-/// ambiguous whether they've changed, and the rest of the already dispatched
-/// files.
-pub type LookupAndStatus<'a> = (Vec<HgPathCow<'a>>, DirstateStatus<'a>);
-
-impl<'a, M: Matcher + Sync> Status<'a, M> {
- pub(crate) fn run(&self) -> Result<LookupAndStatus<'a>, StatusError> {
+impl<'a, M: ?Sized + Matcher + Sync> Status<'a, M> {
+ pub(crate) fn run(&self) -> Result<DirstateStatus<'a>, StatusError> {
let (traversed_sender, traversed_receiver) =
crossbeam_channel::unbounded();
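
The `M: ?Sized + Matcher + Sync` bound above lets `Status` be instantiated with a trait object rather than only a concrete matcher type. A minimal sketch with simplified stand-in types (not the hg-core ones):

    trait Matcher {
        fn matches(&self, path: &str) -> bool;
    }

    struct Always;

    impl Matcher for Always {
        fn matches(&self, _path: &str) -> bool {
            true
        }
    }

    // `M: ?Sized` allows `M` to be `dyn Matcher` behind a reference.
    struct Status<'a, M: ?Sized> {
        matcher: &'a M,
    }

    fn main() {
        let matcher: &dyn Matcher = &Always;
        let status = Status { matcher };
        assert!(status.matcher.matches("some/file"));
    }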
--- a/rust/hg-core/src/utils/files.rs Sun May 02 16:56:20 2021 -0400
+++ b/rust/hg-core/src/utils/files.rs Fri May 07 22:06:25 2021 -0400
@@ -17,7 +17,7 @@
use lazy_static::lazy_static;
use same_file::is_same_file;
use std::borrow::{Cow, ToOwned};
-use std::ffi::OsStr;
+use std::ffi::{OsStr, OsString};
use std::fs::Metadata;
use std::iter::FusedIterator;
use std::ops::Deref;
@@ -53,6 +53,12 @@
str.as_ref().as_bytes().to_vec()
}
+#[cfg(unix)]
+pub fn get_bytes_from_os_string(str: OsString) -> Vec<u8> {
+ use std::os::unix::ffi::OsStringExt;
+ str.into_vec()
+}
+
/// An iterator over repository path yielding itself and its ancestors.
#[derive(Copy, Clone, Debug)]
pub struct Ancestors<'a> {
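
The new `get_bytes_from_os_string` consumes an owned `OsString`, so the bytes are moved out rather than copied as in the borrowing helper whose body appears in the context above. A Unix-only sketch of the same conversion (standalone, not part of the patch):

    #[cfg(unix)]
    fn main() {
        use std::ffi::OsString;
        use std::os::unix::ffi::OsStringExt;

        // `into_vec` moves the underlying bytes out of the `OsString`
        // without copying them.
        let name = OsString::from(".hg");
        let bytes: Vec<u8> = name.into_vec();
        assert_eq!(bytes, b".hg".to_vec());
    }

    #[cfg(not(unix))]
    fn main() {}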
--- a/rust/hg-core/src/utils/hg_path.rs Sun May 02 16:56:20 2021 -0400
+++ b/rust/hg-core/src/utils/hg_path.rs Fri May 07 22:06:25 2021 -0400
@@ -6,6 +6,7 @@
// GNU General Public License version 2 or any later version.
use std::borrow::Borrow;
+use std::borrow::Cow;
use std::convert::TryFrom;
use std::ffi::{OsStr, OsString};
use std::fmt;
@@ -226,6 +227,11 @@
inner.extend(other.as_ref().bytes());
HgPathBuf::from_bytes(&inner)
}
+
+ pub fn components(&self) -> impl Iterator<Item = &HgPath> {
+ self.inner.split(|&byte| byte == b'/').map(HgPath::new)
+ }
+
pub fn parent(&self) -> &Self {
let inner = self.as_bytes();
HgPath::new(match inner.iter().rposition(|b| *b == b'/') {
@@ -530,6 +536,24 @@
}
}
+impl From<HgPathBuf> for Cow<'_, HgPath> {
+ fn from(path: HgPathBuf) -> Self {
+ Cow::Owned(path)
+ }
+}
+
+impl<'a> From<&'a HgPath> for Cow<'a, HgPath> {
+ fn from(path: &'a HgPath) -> Self {
+ Cow::Borrowed(path)
+ }
+}
+
+impl<'a> From<&'a HgPathBuf> for Cow<'a, HgPath> {
+ fn from(path: &'a HgPathBuf) -> Self {
+ Cow::Borrowed(&**path)
+ }
+}
+
#[cfg(test)]
mod tests {
use super::*;
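
The three `From` impls added above let callers build a `Cow<'_, HgPath>` from a borrowed `&HgPath`, a borrowed `&HgPathBuf`, or an owned `HgPathBuf` with a plain `.into()`, which is what the status code uses when pushing paths into its outcome lists. A sketch of the same shape with `str`/`String` standing in for the path types, so it compiles on its own:

    use std::borrow::Cow;

    // Stand-in for e.g. `outcome.lock().unwrap().unknown.push(path.into())`.
    fn remember<'a>(out: &mut Vec<Cow<'a, str>>, path: impl Into<Cow<'a, str>>) {
        out.push(path.into());
    }

    fn main() {
        let mut outcome = Vec::new();
        remember(&mut outcome, "dir/borrowed"); // &str -> Cow::Borrowed
        remember(&mut outcome, String::from("dir/owned")); // String -> Cow::Owned
        assert_eq!(outcome.len(), 2);
    }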
--- a/rust/hg-cpython/src/dirstate.rs Sun May 02 16:56:20 2021 -0400
+++ b/rust/hg-cpython/src/dirstate.rs Fri May 07 22:06:25 2021 -0400
@@ -12,7 +12,9 @@
mod copymap;
mod dirs_multiset;
mod dirstate_map;
+mod dispatch;
mod non_normal_entries;
+mod owning;
mod status;
use crate::{
dirstate::{
--- a/rust/hg-cpython/src/dirstate/copymap.rs Sun May 02 16:56:20 2021 -0400
+++ b/rust/hg-cpython/src/dirstate/copymap.rs Fri May 07 22:06:25 2021 -0400
@@ -14,7 +14,8 @@
use std::cell::RefCell;
use crate::dirstate::dirstate_map::DirstateMap;
-use hg::{utils::hg_path::HgPathBuf, CopyMapIter};
+use hg::utils::hg_path::HgPath;
+use hg::CopyMapIter;
py_class!(pub class CopyMap |py| {
data dirstate_map: DirstateMap;
@@ -87,13 +88,13 @@
}
fn translate_key(
py: Python,
- res: (&HgPathBuf, &HgPathBuf),
+ res: (&HgPath, &HgPath),
) -> PyResult<Option<PyBytes>> {
Ok(Some(PyBytes::new(py, res.0.as_bytes())))
}
fn translate_key_value(
py: Python,
- res: (&HgPathBuf, &HgPathBuf),
+ res: (&HgPath, &HgPath),
) -> PyResult<Option<(PyBytes, PyBytes)>> {
let (k, v) = res;
Ok(Some((
--- a/rust/hg-cpython/src/dirstate/dirstate_map.rs Sun May 02 16:56:20 2021 -0400
+++ b/rust/hg-cpython/src/dirstate/dirstate_map.rs Fri May 07 22:06:25 2021 -0400
@@ -8,14 +8,13 @@
//! Bindings for the `hg::dirstate::dirstate_map` file provided by the
//! `hg-core` package.
-use std::cell::{Ref, RefCell};
+use std::cell::{RefCell, RefMut};
use std::convert::TryInto;
-use std::time::Duration;
use cpython::{
exc, ObjectProtocol, PyBool, PyBytes, PyClone, PyDict, PyErr, PyList,
- PyObject, PyResult, PySet, PyString, PyTuple, Python, PythonObject,
- ToPyObject, UnsafePyLeaked,
+ PyObject, PyResult, PySet, PyString, Python, PythonObject, ToPyObject,
+ UnsafePyLeaked,
};
use crate::{
@@ -23,15 +22,20 @@
dirstate::non_normal_entries::{
NonNormalEntries, NonNormalEntriesIterator,
},
+ dirstate::owning::OwningDirstateMap,
dirstate::{dirs_multiset::Dirs, make_dirstate_tuple},
parsers::dirstate_parents_to_pytuple,
};
use hg::{
+ dirstate::parsers::Timestamp,
+ dirstate_tree::dispatch::DirstateMapMethods,
errors::HgError,
revlog::Node,
+ utils::files::normalize_case,
utils::hg_path::{HgPath, HgPathBuf},
- DirsMultiset, DirstateEntry, DirstateMap as RustDirstateMap,
- DirstateMapError, DirstateParents, EntryState, StateMapIter,
+ DirsMultiset, DirstateEntry, DirstateError,
+ DirstateMap as RustDirstateMap, DirstateMapError, DirstateParents,
+ EntryState, StateMapIter,
};
// TODO
@@ -47,11 +51,28 @@
// All attributes also have to have a separate refcount data attribute for
// leaks, with all methods that go along for reference sharing.
py_class!(pub class DirstateMap |py| {
- @shared data inner: RustDirstateMap;
+ @shared data inner: Box<dyn DirstateMapMethods + Send>;
- def __new__(_cls, _root: PyObject) -> PyResult<Self> {
- let inner = RustDirstateMap::default();
- Self::create_instance(py, inner)
+ /// Returns a `(dirstate_map, parents)` tuple
+ @staticmethod
+ def new(use_dirstate_tree: bool, on_disk: PyBytes) -> PyResult<PyObject> {
+ let dirstate_error = |_: DirstateError| {
+ PyErr::new::<exc::OSError, _>(py, "Dirstate error".to_string())
+ };
+ let (inner, parents) = if use_dirstate_tree {
+ let (map, parents) =
+ OwningDirstateMap::new(py, on_disk)
+ .map_err(dirstate_error)?;
+ (Box::new(map) as _, parents)
+ } else {
+ let bytes = on_disk.data(py);
+ let mut map = RustDirstateMap::default();
+ let parents = map.read(bytes).map_err(dirstate_error)?;
+ (Box::new(map) as _, parents)
+ };
+ let map = Self::create_instance(py, inner)?;
+ let parents = parents.map(|p| dirstate_parents_to_pytuple(py, &p));
+ Ok((map, parents).to_py_object(py).into_object())
}
def clear(&self) -> PyResult<PyObject> {
@@ -172,11 +193,8 @@
def other_parent_entries(&self) -> PyResult<PyObject> {
let mut inner_shared = self.inner(py).borrow_mut();
- let (_, other_parent) =
- inner_shared.get_non_normal_other_parent_entries();
-
let set = PySet::empty(py)?;
- for path in other_parent.iter() {
+ for path in inner_shared.iter_other_parent_paths() {
set.add(py, PyBytes::new(py, path.as_bytes()))?;
}
Ok(set.into_object())
@@ -191,8 +209,7 @@
Ok(self
.inner(py)
.borrow_mut()
- .get_non_normal_other_parent_entries().0
- .contains(HgPath::new(key.data(py))))
+ .non_normal_entries_contains(HgPath::new(key.data(py))))
}
def non_normal_entries_display(&self) -> PyResult<PyString> {
@@ -200,14 +217,17 @@
PyString::new(
py,
&format!(
- "NonNormalEntries: {:?}",
- self
- .inner(py)
- .borrow_mut()
- .get_non_normal_other_parent_entries().0
- .iter().map(|o| o))
+ "NonNormalEntries: {}",
+ hg::utils::join_display(
+ self
+ .inner(py)
+ .borrow_mut()
+ .iter_non_normal_paths(),
+ ", "
+ )
)
)
+ )
}
def non_normal_entries_remove(&self, key: PyObject) -> PyResult<PyObject> {
@@ -219,22 +239,11 @@
Ok(py.None())
}
- def non_normal_entries_union(&self, other: PyObject) -> PyResult<PyList> {
- let other: PyResult<_> = other.iter(py)?
- .map(|f| {
- Ok(HgPathBuf::from_bytes(
- f?.extract::<PyBytes>(py)?.data(py),
- ))
- })
- .collect();
-
- let res = self
- .inner(py)
- .borrow_mut()
- .non_normal_entries_union(other?);
+ def non_normal_or_other_parent_paths(&self) -> PyResult<PyList> {
+ let mut inner = self.inner(py).borrow_mut();
let ret = PyList::new(py, &[]);
- for filename in res.iter() {
+ for filename in inner.non_normal_or_other_parent_paths() {
let as_pystring = PyBytes::new(py, filename.as_bytes());
ret.append(py, as_pystring.into_object());
}
@@ -252,7 +261,7 @@
NonNormalEntriesIterator::from_inner(py, unsafe {
leaked_ref.map(py, |o| {
- o.get_non_normal_other_parent_entries_panic().0.iter()
+ o.iter_non_normal_paths_panic()
})
})
}
@@ -277,49 +286,13 @@
.to_py_object(py))
}
- def parents(&self, st: PyObject) -> PyResult<PyTuple> {
- self.inner(py).borrow_mut()
- .parents(st.extract::<PyBytes>(py)?.data(py))
- .map(|parents| dirstate_parents_to_pytuple(py, parents))
- .or_else(|_| {
- Err(PyErr::new::<exc::OSError, _>(
- py,
- "Dirstate error".to_string(),
- ))
- })
- }
-
- def setparents(&self, p1: PyObject, p2: PyObject) -> PyResult<PyObject> {
- let p1 = extract_node_id(py, &p1)?;
- let p2 = extract_node_id(py, &p2)?;
-
- self.inner(py).borrow_mut()
- .set_parents(&DirstateParents { p1, p2 });
- Ok(py.None())
- }
-
- def read(&self, st: PyObject) -> PyResult<Option<PyObject>> {
- match self.inner(py).borrow_mut()
- .read(st.extract::<PyBytes>(py)?.data(py))
- {
- Ok(Some(parents)) => Ok(Some(
- dirstate_parents_to_pytuple(py, parents)
- .into_object()
- )),
- Ok(None) => Ok(Some(py.None())),
- Err(_) => Err(PyErr::new::<exc::OSError, _>(
- py,
- "Dirstate error".to_string(),
- )),
- }
- }
def write(
&self,
p1: PyObject,
p2: PyObject,
now: PyObject
) -> PyResult<PyBytes> {
- let now = Duration::new(now.extract(py)?, 0);
+ let now = Timestamp(now.extract(py)?);
let parents = DirstateParents {
p1: extract_node_id(py, &p1)?,
p2: extract_node_id(py, &p2)?,
@@ -336,14 +309,16 @@
def filefoldmapasdict(&self) -> PyResult<PyDict> {
let dict = PyDict::new(py);
- for (key, value) in
- self.inner(py).borrow_mut().build_file_fold_map().iter()
- {
- dict.set_item(
- py,
- PyBytes::new(py, key.as_bytes()).into_object(),
- PyBytes::new(py, value.as_bytes()).into_object(),
- )?;
+ for (path, entry) in self.inner(py).borrow_mut().iter() {
+ if entry.state != EntryState::Removed {
+ let key = normalize_case(path);
+ let value = path;
+ dict.set_item(
+ py,
+ PyBytes::new(py, key.as_bytes()).into_object(),
+ PyBytes::new(py, value.as_bytes()).into_object(),
+ )?;
+ }
}
Ok(dict)
}
@@ -404,7 +379,7 @@
Dirs::from_inner(
py,
DirsMultiset::from_dirstate(
- &self.inner(py).borrow(),
+ self.inner(py).borrow().iter(),
Some(EntryState::Removed),
)
.map_err(|e| {
@@ -421,7 +396,7 @@
Dirs::from_inner(
py,
DirsMultiset::from_dirstate(
- &self.inner(py).borrow(),
+ self.inner(py).borrow().iter(),
None,
).map_err(|e| {
PyErr::new::<exc::ValueError, _>(py, e.to_string())
@@ -432,7 +407,7 @@
// TODO all copymap* methods, see docstring above
def copymapcopy(&self) -> PyResult<PyDict> {
let dict = PyDict::new(py);
- for (key, value) in self.inner(py).borrow().copy_map.iter() {
+ for (key, value) in self.inner(py).borrow().copy_map_iter() {
dict.set_item(
py,
PyBytes::new(py, key.as_bytes()),
@@ -444,7 +419,7 @@
def copymapgetitem(&self, key: PyObject) -> PyResult<PyBytes> {
let key = key.extract::<PyBytes>(py)?;
- match self.inner(py).borrow().copy_map.get(HgPath::new(key.data(py))) {
+ match self.inner(py).borrow().copy_map_get(HgPath::new(key.data(py))) {
Some(copy) => Ok(PyBytes::new(py, copy.as_bytes())),
None => Err(PyErr::new::<exc::KeyError, _>(
py,
@@ -457,15 +432,14 @@
}
def copymaplen(&self) -> PyResult<usize> {
- Ok(self.inner(py).borrow().copy_map.len())
+ Ok(self.inner(py).borrow().copy_map_len())
}
def copymapcontains(&self, key: PyObject) -> PyResult<bool> {
let key = key.extract::<PyBytes>(py)?;
Ok(self
.inner(py)
.borrow()
- .copy_map
- .contains_key(HgPath::new(key.data(py))))
+ .copy_map_contains_key(HgPath::new(key.data(py))))
}
def copymapget(
&self,
@@ -476,8 +450,7 @@
match self
.inner(py)
.borrow()
- .copy_map
- .get(HgPath::new(key.data(py)))
+ .copy_map_get(HgPath::new(key.data(py)))
{
Some(copy) => Ok(Some(
PyBytes::new(py, copy.as_bytes()).into_object(),
@@ -492,7 +465,7 @@
) -> PyResult<PyObject> {
let key = key.extract::<PyBytes>(py)?;
let value = value.extract::<PyBytes>(py)?;
- self.inner(py).borrow_mut().copy_map.insert(
+ self.inner(py).borrow_mut().copy_map_insert(
HgPathBuf::from_bytes(key.data(py)),
HgPathBuf::from_bytes(value.data(py)),
);
@@ -507,8 +480,7 @@
match self
.inner(py)
.borrow_mut()
- .copy_map
- .remove(HgPath::new(key.data(py)))
+ .copy_map_remove(HgPath::new(key.data(py)))
{
Some(_) => Ok(None),
None => Ok(default),
@@ -519,7 +491,7 @@
let leaked_ref = self.inner(py).leak_immutable();
CopyMapKeysIterator::from_inner(
py,
- unsafe { leaked_ref.map(py, |o| o.copy_map.iter()) },
+ unsafe { leaked_ref.map(py, |o| o.copy_map_iter()) },
)
}
@@ -527,28 +499,28 @@
let leaked_ref = self.inner(py).leak_immutable();
CopyMapItemsIterator::from_inner(
py,
- unsafe { leaked_ref.map(py, |o| o.copy_map.iter()) },
+ unsafe { leaked_ref.map(py, |o| o.copy_map_iter()) },
)
}
});
impl DirstateMap {
- pub fn get_inner<'a>(
+ pub fn get_inner_mut<'a>(
&'a self,
py: Python<'a>,
- ) -> Ref<'a, RustDirstateMap> {
- self.inner(py).borrow()
+ ) -> RefMut<'a, Box<dyn DirstateMapMethods + Send>> {
+ self.inner(py).borrow_mut()
}
fn translate_key(
py: Python,
- res: (&HgPathBuf, &DirstateEntry),
+ res: (&HgPath, &DirstateEntry),
) -> PyResult<Option<PyBytes>> {
Ok(Some(PyBytes::new(py, res.0.as_bytes())))
}
fn translate_key_value(
py: Python,
- res: (&HgPathBuf, &DirstateEntry),
+ res: (&HgPath, &DirstateEntry),
) -> PyResult<Option<(PyBytes, PyObject)>> {
let (f, entry) = res;
Ok(Some((
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/rust/hg-cpython/src/dirstate/dispatch.rs Fri May 07 22:06:25 2021 -0400
@@ -0,0 +1,175 @@
+use crate::dirstate::owning::OwningDirstateMap;
+use hg::dirstate::parsers::Timestamp;
+use hg::dirstate_tree::dispatch::DirstateMapMethods;
+use hg::matchers::Matcher;
+use hg::utils::hg_path::{HgPath, HgPathBuf};
+use hg::CopyMapIter;
+use hg::DirstateEntry;
+use hg::DirstateError;
+use hg::DirstateMapError;
+use hg::DirstateParents;
+use hg::DirstateStatus;
+use hg::EntryState;
+use hg::PatternFileWarning;
+use hg::StateMapIter;
+use hg::StatusError;
+use hg::StatusOptions;
+use std::path::PathBuf;
+
+impl DirstateMapMethods for OwningDirstateMap {
+ fn clear(&mut self) {
+ self.get_mut().clear()
+ }
+
+ fn add_file(
+ &mut self,
+ filename: &HgPath,
+ old_state: EntryState,
+ entry: DirstateEntry,
+ ) -> Result<(), DirstateMapError> {
+ self.get_mut().add_file(filename, old_state, entry)
+ }
+
+ fn remove_file(
+ &mut self,
+ filename: &HgPath,
+ old_state: EntryState,
+ size: i32,
+ ) -> Result<(), DirstateMapError> {
+ self.get_mut().remove_file(filename, old_state, size)
+ }
+
+ fn drop_file(
+ &mut self,
+ filename: &HgPath,
+ old_state: EntryState,
+ ) -> Result<bool, DirstateMapError> {
+ self.get_mut().drop_file(filename, old_state)
+ }
+
+ fn clear_ambiguous_times(&mut self, filenames: Vec<HgPathBuf>, now: i32) {
+ self.get_mut().clear_ambiguous_times(filenames, now)
+ }
+
+ fn non_normal_entries_contains(&mut self, key: &HgPath) -> bool {
+ self.get_mut().non_normal_entries_contains(key)
+ }
+
+ fn non_normal_entries_remove(&mut self, key: &HgPath) {
+ self.get_mut().non_normal_entries_remove(key)
+ }
+
+ fn non_normal_or_other_parent_paths(
+ &mut self,
+ ) -> Box<dyn Iterator<Item = &HgPath> + '_> {
+ self.get_mut().non_normal_or_other_parent_paths()
+ }
+
+ fn set_non_normal_other_parent_entries(&mut self, force: bool) {
+ self.get_mut().set_non_normal_other_parent_entries(force)
+ }
+
+ fn iter_non_normal_paths(
+ &mut self,
+ ) -> Box<dyn Iterator<Item = &HgPath> + Send + '_> {
+ self.get_mut().iter_non_normal_paths()
+ }
+
+ fn iter_non_normal_paths_panic(
+ &self,
+ ) -> Box<dyn Iterator<Item = &HgPath> + Send + '_> {
+ self.get().iter_non_normal_paths_panic()
+ }
+
+ fn iter_other_parent_paths(
+ &mut self,
+ ) -> Box<dyn Iterator<Item = &HgPath> + Send + '_> {
+ self.get_mut().iter_other_parent_paths()
+ }
+
+ fn has_tracked_dir(
+ &mut self,
+ directory: &HgPath,
+ ) -> Result<bool, DirstateMapError> {
+ self.get_mut().has_tracked_dir(directory)
+ }
+
+ fn has_dir(
+ &mut self,
+ directory: &HgPath,
+ ) -> Result<bool, DirstateMapError> {
+ self.get_mut().has_dir(directory)
+ }
+
+ fn pack(
+ &mut self,
+ parents: DirstateParents,
+ now: Timestamp,
+ ) -> Result<Vec<u8>, DirstateError> {
+ self.get_mut().pack(parents, now)
+ }
+
+ fn set_all_dirs(&mut self) -> Result<(), DirstateMapError> {
+ self.get_mut().set_all_dirs()
+ }
+
+ fn set_dirs(&mut self) -> Result<(), DirstateMapError> {
+ self.get_mut().set_dirs()
+ }
+
+ fn status<'a>(
+ &'a mut self,
+ matcher: &'a (dyn Matcher + Sync),
+ root_dir: PathBuf,
+ ignore_files: Vec<PathBuf>,
+ options: StatusOptions,
+ ) -> Result<(DirstateStatus<'a>, Vec<PatternFileWarning>), StatusError>
+ {
+ self.get_mut()
+ .status(matcher, root_dir, ignore_files, options)
+ }
+
+ fn copy_map_len(&self) -> usize {
+ self.get().copy_map_len()
+ }
+
+ fn copy_map_iter(&self) -> CopyMapIter<'_> {
+ self.get().copy_map_iter()
+ }
+
+ fn copy_map_contains_key(&self, key: &HgPath) -> bool {
+ self.get().copy_map_contains_key(key)
+ }
+
+ fn copy_map_get(&self, key: &HgPath) -> Option<&HgPath> {
+ self.get().copy_map_get(key)
+ }
+
+ fn copy_map_remove(&mut self, key: &HgPath) -> Option<HgPathBuf> {
+ self.get_mut().copy_map_remove(key)
+ }
+
+ fn copy_map_insert(
+ &mut self,
+ key: HgPathBuf,
+ value: HgPathBuf,
+ ) -> Option<HgPathBuf> {
+ self.get_mut().copy_map_insert(key, value)
+ }
+
+ fn len(&self) -> usize {
+ self.get().len()
+ }
+
+ fn contains_key(&self, key: &HgPath) -> bool {
+ self.get().contains_key(key)
+ }
+
+ fn get(&self, key: &HgPath) -> Option<&DirstateEntry> {
+ self.get().get(key)
+ }
+
+ fn iter(&self) -> StateMapIter<'_> {
+ self.get().iter()
+ }
+}
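
The new `dispatch.rs` is pure forwarding: `OwningDirstateMap` implements `DirstateMapMethods` by delegating every call to the map returned by `get()`/`get_mut()`, so the Python bindings can hold a single `Box<dyn DirstateMapMethods + Send>` whichever backend was chosen. A much-reduced sketch of that pattern with made-up types (not the real trait):

    trait MapMethods {
        fn len(&self) -> usize;
        fn contains_key(&self, key: &str) -> bool;
    }

    struct FlatMap(Vec<String>);

    impl MapMethods for FlatMap {
        fn len(&self) -> usize {
            self.0.len()
        }
        fn contains_key(&self, key: &str) -> bool {
            self.0.iter().any(|k| k == key)
        }
    }

    // Owning wrapper that forwards every trait method to the map it owns.
    struct OwningMap(FlatMap);

    impl OwningMap {
        fn get(&self) -> &FlatMap {
            &self.0
        }
    }

    impl MapMethods for OwningMap {
        fn len(&self) -> usize {
            self.get().len()
        }
        fn contains_key(&self, key: &str) -> bool {
            self.get().contains_key(key)
        }
    }

    fn main() {
        let inner: Box<dyn MapMethods + Send> =
            Box::new(OwningMap(FlatMap(vec!["a".to_owned(), "b".to_owned()])));
        assert_eq!(inner.len(), 2);
        assert!(inner.contains_key("a"));
    }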
--- a/rust/hg-cpython/src/dirstate/non_normal_entries.rs Sun May 02 16:56:20 2021 -0400
+++ b/rust/hg-cpython/src/dirstate/non_normal_entries.rs Fri May 07 22:06:25 2021 -0400
@@ -7,14 +7,13 @@
use cpython::{
exc::NotImplementedError, CompareOp, ObjectProtocol, PyBytes, PyClone,
- PyErr, PyList, PyObject, PyResult, PyString, Python, PythonObject,
- ToPyObject, UnsafePyLeaked,
+ PyErr, PyObject, PyResult, PyString, Python, PythonObject, ToPyObject,
+ UnsafePyLeaked,
};
use crate::dirstate::DirstateMap;
-use hg::utils::hg_path::HgPathBuf;
+use hg::utils::hg_path::HgPath;
use std::cell::RefCell;
-use std::collections::hash_set;
py_class!(pub class NonNormalEntries |py| {
data dmap: DirstateMap;
@@ -25,9 +24,6 @@
def remove(&self, key: PyObject) -> PyResult<PyObject> {
self.dmap(py).non_normal_entries_remove(py, key)
}
- def union(&self, other: PyObject) -> PyResult<PyList> {
- self.dmap(py).non_normal_entries_union(py, other)
- }
def __richcmp__(&self, other: PyObject, op: CompareOp) -> PyResult<bool> {
match op {
CompareOp::Eq => self.is_equal_to(py, other),
@@ -58,15 +54,13 @@
Ok(true)
}
- fn translate_key(
- py: Python,
- key: &HgPathBuf,
- ) -> PyResult<Option<PyBytes>> {
+ fn translate_key(py: Python, key: &HgPath) -> PyResult<Option<PyBytes>> {
Ok(Some(PyBytes::new(py, key.as_bytes())))
}
}
-type NonNormalEntriesIter<'a> = hash_set::Iter<'a, HgPathBuf>;
+type NonNormalEntriesIter<'a> =
+ Box<dyn Iterator<Item = &'a HgPath> + Send + 'a>;
py_shared_iterator!(
NonNormalEntriesIterator,
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/rust/hg-cpython/src/dirstate/owning.rs Fri May 07 22:06:25 2021 -0400
@@ -0,0 +1,97 @@
+use cpython::PyBytes;
+use cpython::Python;
+use hg::dirstate_tree::dirstate_map::DirstateMap;
+use hg::DirstateError;
+use hg::DirstateParents;
+
+/// Keep a `DirstateMap<'on_disk>` next to the `on_disk` buffer that it
+/// borrows, similarly to the owning-ref crate.
+///
+/// [`OwningRef`] is more limited, however: it represents exactly one `&T`
+/// reference next to the value it borrows, as opposed to a struct that may
+/// contain an arbitrary number of references in arbitrarily-nested data
+/// structures.
+///
+/// [`OwningRef`]: https://docs.rs/owning_ref/0.4.1/owning_ref/struct.OwningRef.html
+pub(super) struct OwningDirstateMap {
+ /// Owned handle to a bytes buffer with a stable address.
+ ///
+ /// See <https://docs.rs/owning_ref/0.4.1/owning_ref/trait.StableAddress.html>.
+ on_disk: PyBytes,
+
+    /// Pointer for `Box<DirstateMap<'on_disk>>`, type-erased because the
+    /// language cannot represent a lifetime referencing a sibling field.
+    /// This is not quite a self-referential struct (moving this struct is
+    /// not a problem as it doesn’t change the address of the bytes buffer
+    /// owned by `PyBytes`) but touches similar borrow-checker limitations.
+ ptr: *mut (),
+}
+
+impl OwningDirstateMap {
+ pub fn new(
+ py: Python,
+ on_disk: PyBytes,
+ ) -> Result<(Self, Option<DirstateParents>), DirstateError> {
+ let bytes: &'_ [u8] = on_disk.data(py);
+ let (map, parents) = DirstateMap::new(bytes)?;
+
+ // Like in `bytes` above, this `'_` lifetime parameter borrows from
+ // the bytes buffer owned by `on_disk`.
+ let ptr: *mut DirstateMap<'_> = Box::into_raw(Box::new(map));
+
+ // Erase the pointed type entirely in order to erase the lifetime.
+ let ptr: *mut () = ptr.cast();
+
+ Ok((Self { on_disk, ptr }, parents))
+ }
+
+ pub fn get_mut<'a>(&'a mut self) -> &'a mut DirstateMap<'a> {
+ // SAFETY: We cast the type-erased pointer back to the same type it had
+ // in `new`, except with a different lifetime parameter. This time we
+ // connect the lifetime to that of `self`. This cast is valid because
+ // `self` owns the same `PyBytes` whose buffer `DirstateMap`
+ // references. That buffer has a stable memory address because the byte
+ // string value of a `PyBytes` is immutable.
+ let ptr: *mut DirstateMap<'a> = self.ptr.cast();
+ // SAFETY: we dereference that pointer, connecting the lifetime of the
+ // new `&mut` to that of `self`. This is valid because the
+ // raw pointer is to a boxed value, and `self` owns that box.
+ unsafe { &mut *ptr }
+ }
+
+ pub fn get<'a>(&'a self) -> &'a DirstateMap<'a> {
+ // SAFETY: same reasoning as in `get_mut` above.
+ let ptr: *mut DirstateMap<'a> = self.ptr.cast();
+ unsafe { &*ptr }
+ }
+}
+
+impl Drop for OwningDirstateMap {
+ fn drop(&mut self) {
+ // Silence a "field is never read" warning, and demonstrate that this
+ // value is still alive.
+ let _ = &self.on_disk;
+ // SAFETY: this cast is the same as in `get_mut`, and is valid for the
+ // same reason. `self.on_disk` still exists at this point, drop glue
+ // will drop it implicitly after this `drop` method returns.
+ let ptr: *mut DirstateMap<'_> = self.ptr.cast();
+        // SAFETY: `Box::from_raw` takes ownership of the box away from `self`.
+        // This is fine because drop glue does nothing for `*mut ()` and we’re
+        // in `drop`, so `get` and `get_mut` cannot be called again.
+ unsafe { drop(Box::from_raw(ptr)) }
+ }
+}
+
+fn _static_assert_is_send<T: Send>() {}
+
+fn _static_assert_fields_are_send() {
+ _static_assert_is_send::<PyBytes>();
+ _static_assert_is_send::<Box<DirstateMap<'_>>>();
+}
+
+// SAFETY: we don’t get this impl implicitly because `*mut ()` is `!Send`:
+// thread-safety of raw pointers is unknown in the general case. However this
+// particular raw pointer represents a `Box<DirstateMap<'on_disk>>` that we
+// own. Since that `Box` and `PyBytes` are both `Send`, as shown above, it
+// is sound to mark this struct as `Send` too.
+unsafe impl Send for OwningDirstateMap {}
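
The essential mechanism in `owning.rs` is the `Box::into_raw`/`Box::from_raw` round trip: the map is leaked to a raw pointer at construction time, re-borrowed with a lifetime tied to `&self` in the accessors, and reclaimed exactly once in `Drop`. A generic, self-contained sketch of that dance (plain `String` instead of the borrowed `DirstateMap`, so none of the lifetime-erasure subtleties appear):

    struct Holder {
        ptr: *mut String,
    }

    impl Holder {
        fn new(value: String) -> Self {
            Holder {
                ptr: Box::into_raw(Box::new(value)),
            }
        }

        fn get(&self) -> &String {
            // SAFETY: `ptr` comes from `Box::into_raw` in `new` and is only
            // freed in `drop`, so it is valid for as long as `self` is.
            unsafe { &*self.ptr }
        }
    }

    impl Drop for Holder {
        fn drop(&mut self) {
            // SAFETY: take the box back exactly once; nothing can call
            // `get` after `drop` has run.
            unsafe { drop(Box::from_raw(self.ptr)) }
        }
    }

    fn main() {
        let holder = Holder::new("on-disk bytes".to_owned());
        assert_eq!(holder.get().as_str(), "on-disk bytes");
    }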
--- a/rust/hg-cpython/src/dirstate/status.rs Sun May 02 16:56:20 2021 -0400
+++ b/rust/hg-cpython/src/dirstate/status.rs Fri May 07 22:06:25 2021 -0400
@@ -17,7 +17,7 @@
};
use hg::{
matchers::{AlwaysMatcher, FileMatcher, IncludeMatcher},
- parse_pattern_syntax, status,
+ parse_pattern_syntax,
utils::{
files::{get_bytes_from_path, get_path_from_bytes},
hg_path::{HgPath, HgPathBuf},
@@ -25,7 +25,7 @@
BadMatch, DirstateStatus, IgnorePattern, PatternFileWarning, StatusError,
StatusOptions,
};
-use std::borrow::{Borrow, Cow};
+use std::borrow::Borrow;
/// This will be useless once trait impls for collection are added to `PyBytes`
/// upstream.
@@ -112,7 +112,7 @@
let root_dir = get_path_from_bytes(bytes.data(py));
let dmap: DirstateMap = dmap.to_py_object(py);
- let dmap = dmap.get_inner(py);
+ let mut dmap = dmap.get_inner_mut(py);
let ignore_files: PyResult<Vec<_>> = ignore_files
.iter(py)
@@ -126,22 +126,22 @@
match matcher.get_type(py).name(py).borrow() {
"alwaysmatcher" => {
let matcher = AlwaysMatcher;
- let ((lookup, status_res), warnings) = status(
- &dmap,
- &matcher,
- root_dir.to_path_buf(),
- ignore_files,
- StatusOptions {
- check_exec,
- last_normal_time,
- list_clean,
- list_ignored,
- list_unknown,
- collect_traversed_dirs,
- },
- )
- .map_err(|e| handle_fallback(py, e))?;
- build_response(py, lookup, status_res, warnings)
+ let (status_res, warnings) = dmap
+ .status(
+ &matcher,
+ root_dir.to_path_buf(),
+ ignore_files,
+ StatusOptions {
+ check_exec,
+ last_normal_time,
+ list_clean,
+ list_ignored,
+ list_unknown,
+ collect_traversed_dirs,
+ },
+ )
+ .map_err(|e| handle_fallback(py, e))?;
+ build_response(py, status_res, warnings)
}
"exactmatcher" => {
let files = matcher.call_method(
@@ -163,22 +163,22 @@
let files = files?;
let matcher = FileMatcher::new(files.as_ref())
.map_err(|e| PyErr::new::<ValueError, _>(py, e.to_string()))?;
- let ((lookup, status_res), warnings) = status(
- &dmap,
- &matcher,
- root_dir.to_path_buf(),
- ignore_files,
- StatusOptions {
- check_exec,
- last_normal_time,
- list_clean,
- list_ignored,
- list_unknown,
- collect_traversed_dirs,
- },
- )
- .map_err(|e| handle_fallback(py, e))?;
- build_response(py, lookup, status_res, warnings)
+ let (status_res, warnings) = dmap
+ .status(
+ &matcher,
+ root_dir.to_path_buf(),
+ ignore_files,
+ StatusOptions {
+ check_exec,
+ last_normal_time,
+ list_clean,
+ list_ignored,
+ list_unknown,
+ collect_traversed_dirs,
+ },
+ )
+ .map_err(|e| handle_fallback(py, e))?;
+ build_response(py, status_res, warnings)
}
"includematcher" => {
// Get the patterns from Python even though most of them are
@@ -218,25 +218,25 @@
.map_err(|e| handle_fallback(py, e.into()))?;
all_warnings.extend(warnings);
- let ((lookup, status_res), warnings) = status(
- &dmap,
- &matcher,
- root_dir.to_path_buf(),
- ignore_files,
- StatusOptions {
- check_exec,
- last_normal_time,
- list_clean,
- list_ignored,
- list_unknown,
- collect_traversed_dirs,
- },
- )
- .map_err(|e| handle_fallback(py, e))?;
+ let (status_res, warnings) = dmap
+ .status(
+ &matcher,
+ root_dir.to_path_buf(),
+ ignore_files,
+ StatusOptions {
+ check_exec,
+ last_normal_time,
+ list_clean,
+ list_ignored,
+ list_unknown,
+ collect_traversed_dirs,
+ },
+ )
+ .map_err(|e| handle_fallback(py, e))?;
all_warnings.extend(warnings);
- build_response(py, lookup, status_res, all_warnings)
+ build_response(py, status_res, all_warnings)
}
e => Err(PyErr::new::<ValueError, _>(
py,
@@ -247,7 +247,6 @@
fn build_response(
py: Python,
- lookup: Vec<Cow<HgPath>>,
status_res: DirstateStatus,
warnings: Vec<PatternFileWarning>,
) -> PyResult<PyTuple> {
@@ -258,7 +257,7 @@
let clean = collect_pybytes_list(py, status_res.clean.as_ref());
let ignored = collect_pybytes_list(py, status_res.ignored.as_ref());
let unknown = collect_pybytes_list(py, status_res.unknown.as_ref());
- let lookup = collect_pybytes_list(py, lookup.as_ref());
+ let unsure = collect_pybytes_list(py, status_res.unsure.as_ref());
let bad = collect_bad_matches(py, status_res.bad.as_ref())?;
let traversed = collect_pybytes_list(py, status_res.traversed.as_ref());
let py_warnings = PyList::new(py, &[]);
@@ -287,7 +286,7 @@
Ok(PyTuple::new(
py,
&[
- lookup.into_object(),
+ unsure.into_object(),
modified.into_object(),
added.into_object(),
removed.into_object(),
--- a/rust/hg-cpython/src/parsers.rs Sun May 02 16:56:20 2021 -0400
+++ b/rust/hg-cpython/src/parsers.rs Fri May 07 22:06:25 2021 -0400
@@ -14,13 +14,13 @@
PythonObject, ToPyObject,
};
use hg::{
- pack_dirstate, parse_dirstate, utils::hg_path::HgPathBuf, DirstateEntry,
- DirstateParents, FastHashMap, PARENT_SIZE,
+ dirstate::parsers::Timestamp, pack_dirstate, parse_dirstate,
+ utils::hg_path::HgPathBuf, DirstateEntry, DirstateParents, FastHashMap,
+ PARENT_SIZE,
};
use std::convert::TryInto;
use crate::dirstate::{extract_dirstate, make_dirstate_tuple};
-use std::time::Duration;
fn parse_dirstate_wrapper(
py: Python,
@@ -98,7 +98,7 @@
p1: p1.try_into().unwrap(),
p2: p2.try_into().unwrap(),
},
- Duration::from_secs(now.as_object().extract::<u64>(py)?),
+ Timestamp(now.as_object().extract::<u64>(py)?),
) {
Ok(packed) => {
for (filename, entry) in dirstate_map.iter() {
--- a/rust/hg-cpython/src/revlog.rs Sun May 02 16:56:20 2021 -0400
+++ b/rust/hg-cpython/src/revlog.rs Fri May 07 22:06:25 2021 -0400
@@ -172,6 +172,16 @@
self.call_cindex(py, "clearcaches", args, kw)
}
+ /// return the raw binary string representing a revision
+ def entry_binary(&self, *args, **kw) -> PyResult<PyObject> {
+ self.call_cindex(py, "entry_binary", args, kw)
+ }
+
+ /// return a binary packed version of the header
+ def pack_header(&self, *args, **kw) -> PyResult<PyObject> {
+ self.call_cindex(py, "pack_header", args, kw)
+ }
+
/// get an index entry
def get(&self, *args, **kw) -> PyResult<PyObject> {
self.call_cindex(py, "get", args, kw)
--- a/rust/rhg/src/commands/status.rs Sun May 02 16:56:20 2021 -0400
+++ b/rust/rhg/src/commands/status.rs Fri May 07 22:06:25 2021 -0400
@@ -181,7 +181,7 @@
collect_traversed_dirs: false,
};
let ignore_file = repo.working_directory_vfs().join(".hgignore"); // TODO hardcoded
- let ((lookup, ds_status), pattern_warnings) = hg::status(
+ let (ds_status, pattern_warnings) = hg::status(
&dmap,
&AlwaysMatcher,
repo.working_directory_path().to_owned(),
@@ -195,10 +195,10 @@
if !ds_status.bad.is_empty() {
warn!("Bad matches {:?}", &(ds_status.bad))
}
- if !lookup.is_empty() {
+ if !ds_status.unsure.is_empty() {
info!(
"Files to be rechecked by retrieval from filelog: {:?}",
- &lookup
+ &ds_status.unsure
);
}
// TODO check ordering to match `hg status` output.
@@ -206,7 +206,7 @@
if display_states.modified {
display_status_paths(ui, &(ds_status.modified), b"M")?;
}
- if !lookup.is_empty() {
+ if !ds_status.unsure.is_empty() {
let p1: Node = parents
.expect(
"Dirstate with no parents should not list any file to
@@ -217,7 +217,7 @@
let p1_hex = format!("{:x}", p1);
let mut rechecked_modified: Vec<HgPathCow> = Vec::new();
let mut rechecked_clean: Vec<HgPathCow> = Vec::new();
- for to_check in lookup {
+ for to_check in ds_status.unsure {
if cat_file_is_modified(repo, &to_check, &p1_hex)? {
rechecked_modified.push(to_check);
} else {
--- a/tests/drawdag.py Sun May 02 16:56:20 2021 -0400
+++ b/tests/drawdag.py Fri May 07 22:06:25 2021 -0400
@@ -86,7 +86,6 @@
import itertools
import re
-from mercurial.node import nullid
from mercurial.i18n import _
from mercurial import (
context,
@@ -299,7 +298,7 @@
self._added = added
self._parents = parentctxs
while len(self._parents) < 2:
- self._parents.append(repo[nullid])
+ self._parents.append(repo[repo.nullid])
def filectx(self, key):
return simplefilectx(key, self._added[key])
@@ -388,7 +387,7 @@
content = content.replace(br'\n', b'\n').replace(br'\1', b'\1')
files[name][path] = content
- committed = {None: nullid} # {name: node}
+ committed = {None: repo.nullid} # {name: node}
# for leaf nodes, try to find existing nodes in repo
for name, parents in edges.items():
--- a/tests/simplestorerepo.py Sun May 02 16:56:20 2021 -0400
+++ b/tests/simplestorerepo.py Fri May 07 22:06:25 2021 -0400
@@ -18,7 +18,6 @@
from mercurial.node import (
bin,
hex,
- nullid,
nullrev,
)
from mercurial.thirdparty import attr
@@ -136,18 +135,18 @@
self._indexbynode[entry[b'node']] = entry
self._indexbyrev[i] = entry
- self._indexbynode[nullid] = {
- b'node': nullid,
- b'p1': nullid,
- b'p2': nullid,
+ self._indexbynode[self._repo.nullid] = {
+ b'node': self._repo.nullid,
+ b'p1': self._repo.nullid,
+ b'p2': self._repo.nullid,
b'linkrev': nullrev,
b'flags': 0,
}
self._indexbyrev[nullrev] = {
- b'node': nullid,
- b'p1': nullid,
- b'p2': nullid,
+ b'node': self._repo.nullid,
+ b'p1': self._repo.nullid,
+ b'p2': self._repo.nullid,
b'linkrev': nullrev,
b'flags': 0,
}
@@ -160,7 +159,7 @@
(0, 0, 0, -1, entry[b'linkrev'], p1rev, p2rev, entry[b'node'])
)
- self._index.append((0, 0, 0, -1, -1, -1, -1, nullid))
+ self._index.append((0, 0, 0, -1, -1, -1, -1, self._repo.nullid))
def __len__(self):
return len(self._indexdata)
@@ -288,7 +287,7 @@
node = nodeorrev
validatenode(node)
- if node == nullid:
+ if node == self._repo.nullid:
return b''
rev = self.rev(node)
@@ -325,7 +324,7 @@
def renamed(self, node):
validatenode(node)
- if self.parents(node)[0] != nullid:
+ if self.parents(node)[0] != self._repo.nullid:
return False
fulltext = self.revision(node)
@@ -451,7 +450,7 @@
sidedata_helpers=None,
):
# TODO this will probably break on some ordering options.
- nodes = [n for n in nodes if n != nullid]
+ nodes = [n for n in nodes if n != self._repo.nullid]
if not nodes:
return
for delta in storageutil.emitrevisions(
@@ -559,7 +558,7 @@
continue
# Need to resolve the fulltext from the delta base.
- if deltabase == nullid:
+ if deltabase == self._repo.nullid:
text = mdiff.patch(b'', delta)
else:
text = mdiff.patch(self.revision(deltabase), delta)
@@ -588,11 +587,11 @@
# This is copied from revlog.py.
if start is None and stop is None:
if not len(self):
- return [nullid]
+ return [self._repo.nullid]
return [self.node(r) for r in self._headrevs()]
if start is None:
- start = nullid
+ start = self._repo.nullid
if stop is None:
stop = []
stoprevs = {self.rev(n) for n in stop}
--- a/tests/test-amend.t Sun May 02 16:56:20 2021 -0400
+++ b/tests/test-amend.t Fri May 07 22:06:25 2021 -0400
@@ -196,7 +196,8 @@
$ hg update -q B
$ echo 2 >> B
$ hg amend
- abort: cannot amend changeset with children
+ abort: cannot amend changeset, as that will orphan 1 descendants
+ (see 'hg help evolution.instability')
[10]
#if obsstore-on
@@ -231,6 +232,17 @@
$ hg debugobsolete -r .
112478962961147124edd43549aedd1a335e44bf be169c7e8dbe21cd10b3d79691cbe7f241e3c21c 0 (Thu Jan 01 00:00:00 1970 +0000) {'ef1': '8', 'operation': 'amend', 'user': 'test'}
be169c7e8dbe21cd10b3d79691cbe7f241e3c21c 16084da537dd8f84cfdb3055c633772269d62e1b 0 (Thu Jan 01 00:00:00 1970 +0000) {'ef1': '8', 'note': 'adding bar', 'operation': 'amend', 'user': 'test'}
+
+Cannot cause divergence by default
+
+ $ hg co --hidden 1
+ 1 files updated, 0 files merged, 1 files removed, 0 files unresolved
+ $ hg amend -m divergent
+ abort: cannot amend 112478962961, as that creates content-divergence with 16084da537dd
+ (add --verbose for details)
+ [10]
+ $ hg amend -m divergent --config experimental.evolution.allowdivergence=true
+ 2 new content-divergent changesets
#endif
Cannot amend public changeset
@@ -238,7 +250,7 @@
$ hg phase -r A --public
$ hg update -C -q A
$ hg amend -m AMEND
- abort: cannot amend public changesets
+ abort: cannot amend public changesets: 426bada5c675
(see 'hg help phases' for details)
[10]
--- a/tests/test-annotate.t Sun May 02 16:56:20 2021 -0400
+++ b/tests/test-annotate.t Fri May 07 22:06:25 2021 -0400
@@ -479,19 +479,19 @@
$ cat > ../legacyrepo.py <<EOF
> from __future__ import absolute_import
- > from mercurial import commit, error, extensions, node
+ > from mercurial import commit, error, extensions
> def _filecommit(orig, repo, fctx, manifest1, manifest2,
> linkrev, tr, includecopymeta, ms):
> fname = fctx.path()
> text = fctx.data()
> flog = repo.file(fname)
- > fparent1 = manifest1.get(fname, node.nullid)
- > fparent2 = manifest2.get(fname, node.nullid)
+ > fparent1 = manifest1.get(fname, repo.nullid)
+ > fparent2 = manifest2.get(fname, repo.nullid)
> meta = {}
> copy = fctx.copysource()
> if copy and copy != fname:
> raise error.Abort('copying is not supported')
- > if fparent2 != node.nullid:
+ > if fparent2 != repo.nullid:
> return flog.add(text, meta, tr, linkrev,
> fparent1, fparent2), 'modified'
> raise error.Abort('only merging is supported')
--- a/tests/test-blackbox.t Sun May 02 16:56:20 2021 -0400
+++ b/tests/test-blackbox.t Fri May 07 22:06:25 2021 -0400
@@ -221,7 +221,7 @@
1970/01/01 00:00:00 bob @6563da9dcf87b1949716e38ff3e3dfaa3198eb06 (5000)> pythonhook-preupdate: hgext.eol.preupdate finished in * seconds (glob)
1970/01/01 00:00:00 bob @d02f48003e62c24e2659d97d30f2a83abe5d5d51 (5000)> exthook-update: echo hooked finished in * seconds (glob)
1970/01/01 00:00:00 bob @d02f48003e62c24e2659d97d30f2a83abe5d5d51 (5000)> update exited 0 after * seconds (glob)
- 1970/01/01 00:00:00 bob @d02f48003e62c24e2659d97d30f2a83abe5d5d51 (5000)> serve --cmdserver chgunix --address $TESTTMP.chgsock/server.* --daemon-postexec 'chdir:/' (glob) (chg !)
+ 1970/01/01 00:00:00 bob @d02f48003e62c24e2659d97d30f2a83abe5d5d51 (5000)> serve --no-profile --cmdserver chgunix --address $TESTTMP.chgsock/server.* --daemon-postexec 'chdir:/' (glob) (chg !)
1970/01/01 00:00:00 bob @d02f48003e62c24e2659d97d30f2a83abe5d5d51 (5000)> blackbox -l 5
log rotation
--- a/tests/test-branch-change.t Sun May 02 16:56:20 2021 -0400
+++ b/tests/test-branch-change.t Fri May 07 22:06:25 2021 -0400
@@ -57,7 +57,8 @@
Change in middle of the stack (linear commits)
$ hg branch -r 1::3 foo
- abort: cannot change branch of changeset with children
+ abort: cannot change branch of changeset, as that will orphan 1 descendants
+ (see 'hg help evolution.instability')
[10]
Change with dirty working directory
@@ -128,7 +129,8 @@
Changing on a branch head which is not topological head
$ hg branch -r 2 stable
- abort: cannot change branch of changeset with children
+ abort: cannot change branch of changeset, as that will orphan 2 descendants
+ (see 'hg help evolution.instability')
[10]
Enabling the allowunstable config and trying to change branch on a branch head
@@ -148,7 +150,8 @@
[255]
$ hg branch -r 4 --hidden foobar
- abort: cannot change branch of a obsolete changeset
+ abort: cannot change branch of 3938acfb5c0f, as that creates content-divergence with 7c1991464886
+ (add --verbose for details)
[10]
Make sure bookmark movement is correct
@@ -366,7 +369,7 @@
$ hg phase -r . -p
$ hg branch -r . def
- abort: cannot change branch of public changesets
+ abort: cannot change branch of public changesets: d1c2addda4a2
(see 'hg help phases' for details)
[10]
--- a/tests/test-check-interfaces.py Sun May 02 16:56:20 2021 -0400
+++ b/tests/test-check-interfaces.py Fri May 07 22:06:25 2021 -0400
@@ -282,6 +282,7 @@
revision=b'',
sidedata=b'',
delta=None,
+ protocol_flags=b'',
)
checkzobject(rd)
--- a/tests/test-chg.t Sun May 02 16:56:20 2021 -0400
+++ b/tests/test-chg.t Fri May 07 22:06:25 2021 -0400
@@ -458,6 +458,7 @@
LC_CTYPE=
$ (unset LC_ALL; unset LANG; LC_CTYPE=unsupported_value chg \
> --config extensions.debugenv=$TESTTMP/debugenv.py debugenv)
+ *cannot change locale* (glob) (?)
LC_CTYPE=unsupported_value
$ (unset LC_ALL; unset LANG; LC_CTYPE= chg \
> --config extensions.debugenv=$TESTTMP/debugenv.py debugenv)
@@ -467,3 +468,72 @@
LC_ALL=
LC_CTYPE=
LANG=
+
+Profiling isn't permanently enabled or carried over between chg invocations that
+share the same server
+ $ cp $HGRCPATH.orig $HGRCPATH
+ $ hg init $TESTTMP/profiling
+ $ cd $TESTTMP/profiling
+ $ filteredchg() {
+ > CHGDEBUG=1 chg "$@" 2>&1 | egrep 'Sample count|start cmdserver' || true
+ > }
+ $ newchg() {
+ > chg --kill-chg-daemon
+ > filteredchg "$@" | egrep -v 'start cmdserver' || true
+ > }
+(--profile isn't permanently on just because it was specified when chg was
+started)
+ $ newchg log -r . --profile
+ Sample count: * (glob)
+ $ filteredchg log -r .
+(enabling profiling via config works, even on the first chg command that starts
+a cmdserver)
+ $ cat >> $HGRCPATH <<EOF
+ > [profiling]
+ > type=stat
+ > enabled=1
+ > EOF
+ $ newchg log -r .
+ Sample count: * (glob)
+ $ filteredchg log -r .
+ Sample count: * (glob)
+(test that we aren't accumulating more and more samples each run)
+ $ cat > $TESTTMP/debugsleep.py <<EOF
+ > import time
+ > from mercurial import registrar
+ > cmdtable = {}
+ > command = registrar.command(cmdtable)
+ > @command(b'debugsleep', [], b'', norepo=True)
+ > def debugsleep(ui):
+ > start = time.time()
+ > x = 0
+ > while time.time() < start + 0.5:
+ > time.sleep(.1)
+ > x += 1
+ > ui.status(b'%d debugsleep iterations in %.03fs\n' % (x, time.time() - start))
+ > EOF
+ $ cat >> $HGRCPATH <<EOF
+ > [extensions]
+ > debugsleep = $TESTTMP/debugsleep.py
+ > EOF
+ $ newchg debugsleep > run_1
+ $ filteredchg debugsleep > run_2
+ $ filteredchg debugsleep > run_3
+ $ filteredchg debugsleep > run_4
+FIXME: Run 4 should not be >3x Run 1's number of samples.
+ $ "$PYTHON" <<EOF
+ > r1 = int(open("run_1", "r").read().split()[-1])
+ > r4 = int(open("run_4", "r").read().split()[-1])
+ > print("Run 1: %d samples\nRun 4: %d samples\nRun 4 > 3 * Run 1: %s" %
+ > (r1, r4, r4 > (r1 * 3)))
+ > EOF
+ Run 1: * samples (glob)
+ Run 4: * samples (glob)
+ Run 4 > 3 * Run 1: False
+(Disabling with --no-profile on the commandline still works, but isn't permanent)
+ $ newchg log -r . --no-profile
+ $ filteredchg log -r .
+ Sample count: * (glob)
+ $ filteredchg log -r . --no-profile
+ $ filteredchg log -r .
+ Sample count: * (glob)
--- a/tests/test-commit-amend.t Sun May 02 16:56:20 2021 -0400
+++ b/tests/test-commit-amend.t Fri May 07 22:06:25 2021 -0400
@@ -10,7 +10,7 @@
$ hg phase -r . -p
$ hg ci --amend
- abort: cannot amend public changesets
+ abort: cannot amend public changesets: ad120869acf0
(see 'hg help phases' for details)
[10]
$ hg phase -r . -f -d
@@ -406,7 +406,7 @@
1 files updated, 0 files merged, 0 files removed, 0 files unresolved
(branch merge, don't forget to commit)
$ hg ci --amend
- abort: cannot amend while merging
+ abort: cannot amend changesets while merging
[20]
$ hg ci -m 'merge'
--- a/tests/test-commit.t Sun May 02 16:56:20 2021 -0400
+++ b/tests/test-commit.t Fri May 07 22:06:25 2021 -0400
@@ -646,14 +646,14 @@
verify pathauditor blocks evil filepaths
$ cat > evil-commit.py <<EOF
> from __future__ import absolute_import
- > from mercurial import context, hg, node, ui as uimod
+ > from mercurial import context, hg, ui as uimod
> notrc = u".h\u200cg".encode('utf-8') + b'/hgrc'
> u = uimod.ui.load()
> r = hg.repository(u, b'.')
> def filectxfn(repo, memctx, path):
> return context.memfilectx(repo, memctx, path,
> b'[hooks]\nupdate = echo owned')
- > c = context.memctx(r, [r.changelog.tip(), node.nullid],
+ > c = context.memctx(r, [r.changelog.tip(), r.nullid],
> b'evil', [notrc], filectxfn, 0)
> r.commitctx(c)
> EOF
@@ -672,14 +672,14 @@
repository tip rolled back to revision 2 (undo commit)
$ cat > evil-commit.py <<EOF
> from __future__ import absolute_import
- > from mercurial import context, hg, node, ui as uimod
+ > from mercurial import context, hg, ui as uimod
> notrc = b"HG~1/hgrc"
> u = uimod.ui.load()
> r = hg.repository(u, b'.')
> def filectxfn(repo, memctx, path):
> return context.memfilectx(repo, memctx, path,
> b'[hooks]\nupdate = echo owned')
- > c = context.memctx(r, [r[b'tip'].node(), node.nullid],
+ > c = context.memctx(r, [r[b'tip'].node(), r.nullid],
> b'evil', [notrc], filectxfn, 0)
> r.commitctx(c)
> EOF
@@ -692,14 +692,14 @@
repository tip rolled back to revision 2 (undo commit)
$ cat > evil-commit.py <<EOF
> from __future__ import absolute_import
- > from mercurial import context, hg, node, ui as uimod
+ > from mercurial import context, hg, ui as uimod
> notrc = b"HG8B6C~2/hgrc"
> u = uimod.ui.load()
> r = hg.repository(u, b'.')
> def filectxfn(repo, memctx, path):
> return context.memfilectx(repo, memctx, path,
> b'[hooks]\nupdate = echo owned')
- > c = context.memctx(r, [r[b'tip'].node(), node.nullid],
+ > c = context.memctx(r, [r[b'tip'].node(), r.nullid],
> b'evil', [notrc], filectxfn, 0)
> r.commitctx(c)
> EOF
--- a/tests/test-completion.t Sun May 02 16:56:20 2021 -0400
+++ b/tests/test-completion.t Fri May 07 22:06:25 2021 -0400
@@ -262,7 +262,7 @@
cat: output, rev, decode, include, exclude, template
clone: noupdate, updaterev, rev, branch, pull, uncompressed, stream, ssh, remotecmd, insecure
commit: addremove, close-branch, amend, secret, edit, force-close-branch, interactive, include, exclude, message, logfile, date, user, subrepos
- config: untrusted, edit, local, shared, non-shared, global, template
+ config: untrusted, edit, local, source, shared, non-shared, global, template
continue: dry-run
copy: forget, after, at-rev, force, include, exclude, dry-run
debugancestor:
--- a/tests/test-config.t Sun May 02 16:56:20 2021 -0400
+++ b/tests/test-config.t Fri May 07 22:06:25 2021 -0400
@@ -277,8 +277,7 @@
> emptysource = `pwd`/emptysource.py
> EOF
- $ hg config --debug empty.source
- read config from: * (glob)
+ $ hg config --source empty.source
none: value
$ hg config empty.source -Tjson
[
@@ -349,16 +348,16 @@
config affected by environment variables
- $ EDITOR=e1 VISUAL=e2 hg config --debug | grep 'ui\.editor'
+ $ EDITOR=e1 VISUAL=e2 hg config --source | grep 'ui\.editor'
$VISUAL: ui.editor=e2
- $ VISUAL=e2 hg config --debug --config ui.editor=e3 | grep 'ui\.editor'
+ $ VISUAL=e2 hg config --source --config ui.editor=e3 | grep 'ui\.editor'
--config: ui.editor=e3
- $ PAGER=p1 hg config --debug | grep 'pager\.pager'
+ $ PAGER=p1 hg config --source | grep 'pager\.pager'
$PAGER: pager.pager=p1
- $ PAGER=p1 hg config --debug --config pager.pager=p2 | grep 'pager\.pager'
+ $ PAGER=p1 hg config --source --config pager.pager=p2 | grep 'pager\.pager'
--config: pager.pager=p2
verify that aliases are evaluated as well
--- a/tests/test-copies-chain-merge.t Sun May 02 16:56:20 2021 -0400
+++ b/tests/test-copies-chain-merge.t Fri May 07 22:06:25 2021 -0400
@@ -1,4 +1,4 @@
-#testcases filelog compatibility changeset sidedata upgraded upgraded-parallel
+#testcases filelog compatibility changeset sidedata upgraded upgraded-parallel pull push pull-upgrade push-upgrade
=====================================================
Test Copy tracing for chain of copies involving merge
@@ -56,6 +56,41 @@
> EOF
#endif
+#if pull
+ $ cat >> $HGRCPATH << EOF
+ > [format]
+ > exp-use-side-data = yes
+ > exp-use-copies-side-data-changeset = yes
+ > EOF
+#endif
+
+#if push
+ $ cat >> $HGRCPATH << EOF
+ > [format]
+ > exp-use-side-data = yes
+ > exp-use-copies-side-data-changeset = yes
+ > EOF
+#endif
+
+#if pull-upgrade
+ $ cat >> $HGRCPATH << EOF
+ > [format]
+ > exp-use-side-data = no
+ > exp-use-copies-side-data-changeset = no
+ > [experimental]
+ > changegroup4 = yes
+ > EOF
+#endif
+
+#if push-upgrade
+ $ cat >> $HGRCPATH << EOF
+ > [format]
+ > exp-use-side-data = no
+ > exp-use-copies-side-data-changeset = no
+ > [experimental]
+ > changegroup4 = yes
+ > EOF
+#endif
$ cat > same-content.txt << EOF
> Here is some content that will be the same accros multiple file.
@@ -1689,6 +1724,81 @@
#endif
+#if pull
+ $ cd ..
+ $ mv repo-chain repo-source
+ $ hg init repo-chain
+ $ cd repo-chain
+ $ hg pull ../repo-source
+ pulling from ../repo-source
+ requesting all changes
+ adding changesets
+ adding manifests
+ adding file changes
+ added 80 changesets with 44 changes to 25 files (+39 heads)
+ new changesets a3a31bbefea6:908ce9259ffa
+ (run 'hg heads' to see heads, 'hg merge' to merge)
+#endif
+
+#if pull-upgrade
+ $ cat >> $HGRCPATH << EOF
+ > [format]
+ > exp-use-side-data = yes
+ > exp-use-copies-side-data-changeset = yes
+ > [experimental]
+ > changegroup4 = yes
+ > EOF
+ $ cd ..
+ $ mv repo-chain repo-source
+ $ hg init repo-chain
+ $ cd repo-chain
+ $ hg pull ../repo-source
+ pulling from ../repo-source
+ requesting all changes
+ adding changesets
+ adding manifests
+ adding file changes
+ added 80 changesets with 44 changes to 25 files (+39 heads)
+ new changesets a3a31bbefea6:908ce9259ffa
+ (run 'hg heads' to see heads, 'hg merge' to merge)
+#endif
+
+#if push
+ $ cd ..
+ $ mv repo-chain repo-source
+ $ hg init repo-chain
+ $ cd repo-source
+ $ hg push ../repo-chain
+ pushing to ../repo-chain
+ searching for changes
+ adding changesets
+ adding manifests
+ adding file changes
+ added 80 changesets with 44 changes to 25 files (+39 heads)
+ $ cd ../repo-chain
+#endif
+
+#if push-upgrade
+ $ cat >> $HGRCPATH << EOF
+ > [format]
+ > exp-use-side-data = yes
+ > exp-use-copies-side-data-changeset = yes
+ > [experimental]
+ > changegroup4 = yes
+ > EOF
+ $ cd ..
+ $ mv repo-chain repo-source
+ $ hg init repo-chain
+ $ cd repo-source
+ $ hg push ../repo-chain
+ pushing to ../repo-chain
+ searching for changes
+ adding changesets
+ adding manifests
+ adding file changes
+ added 80 changesets with 44 changes to 25 files (+39 heads)
+ $ cd ../repo-chain
+#endif
#if no-compatibility no-filelog no-changeset
@@ -3405,12 +3515,7 @@
$ hg status --copies --rev 'desc("i-0")' --rev 'desc("mBF-change-m-0")'
M b
A d
- h (filelog !)
- h (sidedata !)
- h (upgraded !)
- h (upgraded-parallel !)
- h (changeset !)
- h (compatibility !)
+ h
A t
p
R a
@@ -3564,24 +3669,15 @@
$ hg status --copies --rev 'desc("i-0")' --rev 'desc("mAEm")' f
A f
- a (filelog !)
- a (sidedata !)
- a (upgraded !)
- a (upgraded-parallel !)
+ a (no-changeset no-compatibility !)
$ hg status --copies --rev 'desc("i-0")' --rev 'desc("mAE,Km")' f
A f
- a (filelog !)
- a (sidedata !)
- a (upgraded !)
- a (upgraded-parallel !)
+ a (no-changeset no-compatibility !)
$ hg status --copies --rev 'desc("i-0")' --rev 'desc("mK,AEm")' f
A f
- a (filelog !)
- a (sidedata !)
- a (upgraded !)
- a (upgraded-parallel !)
+ a (no-changeset no-compatibility !)
The result from mEAm is the same for the subsequent merge:
@@ -3589,23 +3685,17 @@
$ hg status --copies --rev 'desc("i-0")' --rev 'desc("mEAm")' f
A f
a (filelog !)
- b (sidedata !)
- b (upgraded !)
- b (upgraded-parallel !)
+ b (no-changeset no-compatibility no-filelog !)
$ hg status --copies --rev 'desc("i-0")' --rev 'desc("mEA,Jm")' f
A f
a (filelog !)
- b (sidedata !)
- b (upgraded !)
- b (upgraded-parallel !)
+ b (no-changeset no-compatibility no-filelog !)
$ hg status --copies --rev 'desc("i-0")' --rev 'desc("mJ,EAm")' f
A f
a (filelog !)
- b (sidedata !)
- b (upgraded !)
- b (upgraded-parallel !)
+ b (no-changeset no-compatibility no-filelog !)
Subcase: chaining conflicting rename resolution
```````````````````````````````````````````````
@@ -3620,24 +3710,17 @@
$ hg status --copies --rev 'desc("i-0")' --rev 'desc("mPQm")' v
A v
r (filelog !)
- p (sidedata !)
- p (upgraded !)
- p (upgraded-parallel !)
+ p (no-changeset no-compatibility no-filelog !)
$ hg status --copies --rev 'desc("i-0")' --rev 'desc("mPQ,Tm")' v
A v
r (filelog !)
- p (sidedata !)
- p (upgraded !)
- p (upgraded-parallel !)
+ p (no-changeset no-compatibility no-filelog !)
$ hg status --copies --rev 'desc("i-0")' --rev 'desc("mT,PQm")' v
A v
r (filelog !)
- p (sidedata !)
- p (upgraded !)
- p (upgraded-parallel !)
-
+ p (no-changeset no-compatibility no-filelog !)
The result from mQPm is the same for the subsequent merge:
@@ -3652,9 +3735,7 @@
$ hg status --copies --rev 'desc("i-0")' --rev 'desc("mS,QPm")' v
A v
r (filelog !)
- r (sidedata !)
- r (upgraded !)
- r (upgraded-parallel !)
+ r (no-changeset no-compatibility no-filelog !)
Subcase: chaining salvage information during a merge
@@ -3733,30 +3814,22 @@
$ hg status --copies --rev 'desc("i-0")' --rev 'desc("mFGm")' d
A d
a (filelog !)
- h (sidedata !)
- h (upgraded !)
- h (upgraded-parallel !)
+ h (no-changeset no-compatibility no-filelog !)
$ hg status --copies --rev 'desc("i-0")' --rev 'desc("mGFm")' d
A d
a (filelog !)
- a (sidedata !)
- a (upgraded !)
- a (upgraded-parallel !)
+ a (no-changeset no-compatibility no-filelog !)
Chained output
$ hg status --copies --rev 'desc("i-0")' --rev 'desc("mO,FGm")' d
A d
a (filelog !)
- h (sidedata !)
- h (upgraded !)
- h (upgraded-parallel !)
+ h (no-changeset no-compatibility no-filelog !)
$ hg status --copies --rev 'desc("i-0")' --rev 'desc("mFG,Om")' d
A d
a (filelog !)
- h (sidedata !)
- h (upgraded !)
- h (upgraded-parallel !)
+ h (no-changeset no-compatibility no-filelog !)
$ hg status --copies --rev 'desc("i-0")' --rev 'desc("mGF,Nm")' d
@@ -3779,17 +3852,11 @@
$ hg status --copies --rev 'desc("i-0")' --rev 'desc("mAE-change-m")' f
A f
- a (filelog !)
- a (sidedata !)
- a (upgraded !)
- a (upgraded-parallel !)
+ a (no-changeset no-compatibility !)
$ hg status --copies --rev 'desc("i-0")' --rev 'desc("mAE-change,Km")' f
A f
- a (filelog !)
- a (sidedata !)
- a (upgraded !)
- a (upgraded-parallel !)
+ a (no-changeset no-compatibility !)
$ hg status --copies --rev 'desc("i-0")' --rev 'desc("mK,AE-change-m")' f
A f
@@ -3801,20 +3868,14 @@
$ hg status --copies --rev 'desc("i-0")' --rev 'desc("mEA-change-m")' f
A f
a (filelog !)
- b (sidedata !)
- b (upgraded !)
- b (upgraded-parallel !)
+ b (no-changeset no-compatibility no-filelog !)
$ hg status --copies --rev 'desc("i-0")' --rev 'desc("mEA-change,Jm")' f
A f
a (filelog !)
- b (sidedata !)
- b (upgraded !)
- b (upgraded-parallel !)
+ b (no-changeset no-compatibility no-filelog !)
$ hg status --copies --rev 'desc("i-0")' --rev 'desc("mJ,EA-change-m")' f
A f
a (filelog !)
- b (sidedata !)
- b (upgraded !)
- b (upgraded-parallel !)
+ b (no-changeset no-compatibility no-filelog !)
--- a/tests/test-copies-in-changeset.t Sun May 02 16:56:20 2021 -0400
+++ b/tests/test-copies-in-changeset.t Fri May 07 22:06:25 2021 -0400
@@ -419,7 +419,7 @@
Test upgrading/downgrading to sidedata storage
==============================================
-downgrading (keeping some sidedata)
+downgrading
$ hg debugformat -v
format-variant repo config default
@@ -465,11 +465,7 @@
compression: zstd zstd zstd (zstd !)
compression-level: default default default
$ hg debugsidedata -c -- 0
- 1 sidedata entries
- entry-0014 size 14
$ hg debugsidedata -c -- 1
- 1 sidedata entries
- entry-0014 size 14
$ hg debugsidedata -m -- 0
upgrading
--- a/tests/test-copy.t Sun May 02 16:56:20 2021 -0400
+++ b/tests/test-copy.t Fri May 07 22:06:25 2021 -0400
@@ -115,6 +115,7 @@
$ hg mv foo bar
foo: not copying - file is not managed
abort: no files to copy
+ (maybe you meant to use --after --at-rev=.)
[10]
$ hg st -A
? foo
@@ -124,14 +125,17 @@
$ hg mv ../foo ../bar
../foo: not copying - file is not managed
abort: no files to copy
+ (maybe you meant to use --after --at-rev=.)
[10]
$ hg mv ../foo ../bar --config ui.relative-paths=yes
../foo: not copying - file is not managed
abort: no files to copy
+ (maybe you meant to use --after --at-rev=.)
[10]
$ hg mv ../foo ../bar --config ui.relative-paths=no
foo: not copying - file is not managed
abort: no files to copy
+ (maybe you meant to use --after --at-rev=.)
[10]
$ cd ..
$ rmdir dir
--- a/tests/test-fastannotate-hg.t Sun May 02 16:56:20 2021 -0400
+++ b/tests/test-fastannotate-hg.t Fri May 07 22:06:25 2021 -0400
@@ -482,19 +482,19 @@
$ cat > ../legacyrepo.py <<EOF
> from __future__ import absolute_import
- > from mercurial import commit, error, extensions, node
+ > from mercurial import commit, error, extensions
> def _filecommit(orig, repo, fctx, manifest1, manifest2,
> linkrev, tr, includecopymeta, ms):
> fname = fctx.path()
> text = fctx.data()
> flog = repo.file(fname)
- > fparent1 = manifest1.get(fname, node.nullid)
- > fparent2 = manifest2.get(fname, node.nullid)
+ > fparent1 = manifest1.get(fname, repo.nullid)
+ > fparent2 = manifest2.get(fname, repo.nullid)
> meta = {}
> copy = fctx.copysource()
> if copy and copy != fname:
> raise error.Abort('copying is not supported')
- > if fparent2 != node.nullid:
+ > if fparent2 != repo.nullid:
> return flog.add(text, meta, tr, linkrev,
> fparent1, fparent2), 'modified'
> raise error.Abort('only merging is supported')
--- a/tests/test-filelog.py Sun May 02 16:56:20 2021 -0400
+++ b/tests/test-filelog.py Fri May 07 22:06:25 2021 -0400
@@ -4,10 +4,7 @@
"""
from __future__ import absolute_import, print_function
-from mercurial.node import (
- hex,
- nullid,
-)
+from mercurial.node import hex
from mercurial import (
hg,
ui as uimod,
@@ -22,7 +19,7 @@
def addrev(text, renamed=False):
if renamed:
# data doesn't matter. Just make sure filelog.renamed() returns True
- meta = {b'copyrev': hex(nullid), b'copy': b'bar'}
+ meta = {b'copyrev': hex(repo.nullid), b'copy': b'bar'}
else:
meta = {}
@@ -30,7 +27,7 @@
try:
lock = repo.lock()
t = repo.transaction(b'commit')
- node = fl.add(text, meta, t, 0, nullid, nullid)
+ node = fl.add(text, meta, t, 0, repo.nullid, repo.nullid)
return node
finally:
if t:
--- a/tests/test-fix.t Sun May 02 16:56:20 2021 -0400
+++ b/tests/test-fix.t Fri May 07 22:06:25 2021 -0400
@@ -266,11 +266,11 @@
$ hg commit -Aqm "hello"
$ hg phase -r 0 --public
$ hg fix -r 0
- abort: cannot fix public changesets
+ abort: cannot fix public changesets: 6470986d2e7b
(see 'hg help phases' for details)
[10]
$ hg fix -r 0 --working-dir
- abort: cannot fix public changesets
+ abort: cannot fix public changesets: 6470986d2e7b
(see 'hg help phases' for details)
[10]
$ hg cat -r tip hello.whole
@@ -1174,7 +1174,8 @@
$ printf "two\n" > foo.whole
$ hg commit -m "second"
$ hg --config experimental.evolution.allowunstable=False fix -r '.^'
- abort: cannot fix changeset with children
+ abort: cannot fix changeset, as that will orphan 1 descendants
+ (see 'hg help evolution.instability')
[10]
$ hg fix -r '.^'
1 new orphan changesets
--- a/tests/test-globalopts.t Sun May 02 16:56:20 2021 -0400
+++ b/tests/test-globalopts.t Fri May 07 22:06:25 2021 -0400
@@ -419,6 +419,7 @@
Concepts:
bundlespec Bundle File Formats
+ evolution Safely rewriting history (EXPERIMENTAL)
glossary Glossary
phases Working with Phases
subrepos Subrepositories
@@ -552,6 +553,7 @@
Concepts:
bundlespec Bundle File Formats
+ evolution Safely rewriting history (EXPERIMENTAL)
glossary Glossary
phases Working with Phases
subrepos Subrepositories
--- a/tests/test-help-hide.t Sun May 02 16:56:20 2021 -0400
+++ b/tests/test-help-hide.t Fri May 07 22:06:25 2021 -0400
@@ -117,6 +117,7 @@
Concepts:
bundlespec Bundle File Formats
+ evolution Safely rewriting history (EXPERIMENTAL)
glossary Glossary
phases Working with Phases
subrepos Subrepositories
@@ -254,6 +255,7 @@
Concepts:
bundlespec Bundle File Formats
+ evolution Safely rewriting history (EXPERIMENTAL)
glossary Glossary
phases Working with Phases
subrepos Subrepositories
--- a/tests/test-help.t Sun May 02 16:56:20 2021 -0400
+++ b/tests/test-help.t Fri May 07 22:06:25 2021 -0400
@@ -169,6 +169,7 @@
Concepts:
bundlespec Bundle File Formats
+ evolution Safely rewriting history (EXPERIMENTAL)
glossary Glossary
phases Working with Phases
subrepos Subrepositories
@@ -298,6 +299,7 @@
Concepts:
bundlespec Bundle File Formats
+ evolution Safely rewriting history (EXPERIMENTAL)
glossary Glossary
phases Working with Phases
subrepos Subrepositories
@@ -1134,12 +1136,13 @@
the changelog data, root/flat manifest data, treemanifest data, and
filelogs.
- There are 3 versions of changegroups: "1", "2", and "3". From a high-
+ There are 4 versions of changegroups: "1", "2", "3" and "4". From a high-
level, versions "1" and "2" are almost exactly the same, with the only
difference being an additional item in the *delta header*. Version "3"
adds support for storage flags in the *delta header* and optionally
exchanging treemanifests (enabled by setting an option on the
- "changegroup" part in the bundle2).
+ "changegroup" part in the bundle2). Version "4" adds support for
+ exchanging sidedata (additional revision metadata not part of the digest).
Changegroups when not exchanging treemanifests consist of 3 logical
segments:
@@ -1206,8 +1209,8 @@
existing entry (either that the recipient already has, or previously
specified in the bundle/changegroup).
- The *delta header* is different between versions "1", "2", and "3" of the
- changegroup format.
+ The *delta header* is different between versions "1", "2", "3" and "4" of
+ the changegroup format.
Version 1 (headerlen=80):
@@ -1236,6 +1239,15 @@
| | | | | | |
+------------------------------------------------------------------------------+
+ Version 4 (headerlen=103):
+
+ +------------------------------------------------------------------------------+----------+
+ | | | | | | | |
+ | node | p1 node | p2 node | base node | link node | flags | pflags |
+ | (20 bytes) | (20 bytes) | (20 bytes) | (20 bytes) | (20 bytes) | (2 bytes) | (1 byte) |
+ | | | | | | | |
+ +------------------------------------------------------------------------------+----------+
+
The *delta data* consists of "chunklen - 4 - headerlen" bytes, which
contain a series of *delta*s, densely packed (no separators). These deltas
describe a diff from an existing entry (either that the recipient already
@@ -1276,11 +1288,24 @@
delimited metadata defining an object stored elsewhere. Used by the LFS
extension.
+ 4096
+ Contains copy information. This revision changes files in a way that
+ could affect copy tracing. This does *not* affect changegroup handling,
+ but is relevant for other parts of Mercurial.
+
For historical reasons, the integer values are identical to revlog version
1 per-revision storage flags and correspond to bits being set in this
2-byte field. Bits were allocated starting from the most-significant bit,
hence the reverse ordering and allocation of these flags.
+ The *pflags* (protocol flags) field holds bitwise flags affecting the
+ protocol itself. They are first in the header since they may affect the
+ handling of the rest of the fields in a future version. They are defined
+ as such:
+
+ 1 indicates whether to read a chunk of sidedata (of variable length) right
+ after the revision flags.
+
Changeset Segment
=================
@@ -1301,14 +1326,14 @@
Treemanifests Segment
---------------------
- The *treemanifests segment* only exists in changegroup version "3", and
- only if the 'treemanifest' param is part of the bundle2 changegroup part
- (it is not possible to use changegroup version 3 outside of bundle2).
- Aside from the filenames in the *treemanifests segment* containing a
- trailing "/" character, it behaves identically to the *filelogs segment*
- (see below). The final sub-segment is followed by an *empty chunk*
- (logically, a sub-segment with filename size 0). This denotes the boundary
- to the *filelogs segment*.
+ The *treemanifests segment* only exists in changegroup version "3" and
+ "4", and only if the 'treemanifest' param is part of the bundle2
+ changegroup part (it is not possible to use changegroup version 3 or 4
+ outside of bundle2). Aside from the filenames in the *treemanifests
+ segment* containing a trailing "/" character, it behaves identically to
+ the *filelogs segment* (see below). The final sub-segment is followed by
+ an *empty chunk* (logically, a sub-segment with filename size 0). This
+ denotes the boundary to the *filelogs segment*.
Filelogs Segment
================
@@ -2274,6 +2299,13 @@
Environment Variables
</td></tr>
<tr><td>
+ <a href="/help/evolution">
+ evolution
+ </a>
+ </td><td>
+ Safely rewriting history (EXPERIMENTAL)
+ </td></tr>
+ <tr><td>
<a href="/help/extensions">
extensions
</a>
@@ -3639,12 +3671,13 @@
filelogs.
</p>
<p>
- There are 3 versions of changegroups: "1", "2", and "3". From a
+ There are 4 versions of changegroups: "1", "2", "3" and "4". From a
high-level, versions "1" and "2" are almost exactly the same, with the
only difference being an additional item in the *delta header*. Version
"3" adds support for storage flags in the *delta header* and optionally
exchanging treemanifests (enabled by setting an option on the
- "changegroup" part in the bundle2).
+ "changegroup" part in the bundle2). Version "4" adds support for exchanging
+ sidedata (additional revision metadata not part of the digest).
</p>
<p>
Changegroups when not exchanging treemanifests consist of 3 logical
@@ -3724,8 +3757,8 @@
bundle/changegroup).
</p>
<p>
- The *delta header* is different between versions "1", "2", and
- "3" of the changegroup format.
+ The *delta header* is different between versions "1", "2", "3" and "4"
+ of the changegroup format.
</p>
<p>
Version 1 (headerlen=80):
@@ -3761,6 +3794,17 @@
+------------------------------------------------------------------------------+
</pre>
<p>
+ Version 4 (headerlen=103):
+ </p>
+ <pre>
+ +------------------------------------------------------------------------------+----------+
+ | | | | | | | |
+ | node | p1 node | p2 node | base node | link node | flags | pflags |
+ | (20 bytes) | (20 bytes) | (20 bytes) | (20 bytes) | (20 bytes) | (2 bytes) | (1 byte) |
+ | | | | | | | |
+ +------------------------------------------------------------------------------+----------+
+ </pre>
+ <p>
The *delta data* consists of "chunklen - 4 - headerlen" bytes, which contain a
series of *delta*s, densely packed (no separators). These deltas describe a diff
from an existing entry (either that the recipient already has, or previously
@@ -3799,6 +3843,8 @@
<dd>Ellipsis revision. Revision hash does not match data (likely due to rewritten parents).
<dt>8192
<dd>Externally stored. The revision fulltext contains "key:value" "\n" delimited metadata defining an object stored elsewhere. Used by the LFS extension.
+ <dt>4096
+ <dd>Contains copy information. This revision changes files in a way that could affect copy tracing. This does *not* affect changegroup handling, but is relevant for other parts of Mercurial.
</dl>
<p>
For historical reasons, the integer values are identical to revlog version 1
@@ -3806,6 +3852,15 @@
field. Bits were allocated starting from the most-significant bit, hence the
reverse ordering and allocation of these flags.
</p>
+ <p>
+ The *pflags* (protocol flags) field holds bitwise flags affecting the protocol
+ itself. They are first in the header since they may affect the handling of the
+ rest of the fields in a future version. They are defined as such:
+ </p>
+ <dl>
+ <dt>1 indicates whether to read a chunk of sidedata (of variable length) right
+ <dd>after the revision flags.
+ </dl>
<h2>Changeset Segment</h2>
<p>
The *changeset segment* consists of a single *delta group* holding
@@ -3823,9 +3878,9 @@
</p>
<h3>Treemanifests Segment</h3>
<p>
- The *treemanifests segment* only exists in changegroup version "3", and
- only if the 'treemanifest' param is part of the bundle2 changegroup part
- (it is not possible to use changegroup version 3 outside of bundle2).
+ The *treemanifests segment* only exists in changegroup version "3" and "4",
+ and only if the 'treemanifest' param is part of the bundle2 changegroup part
+ (it is not possible to use changegroup version 3 or 4 outside of bundle2).
Aside from the filenames in the *treemanifests segment* containing a
trailing "/" character, it behaves identically to the *filelogs segment*
(see below). The final sub-segment is followed by an *empty chunk* (logically,
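
(Editorial aside, not part of the patch: the version 4 delta header and its *pflags* byte documented above can be unpacked with a plain struct. The sketch below is illustrative only; it follows the field order drawn in the table, five 20-byte nodes, a 2-byte flags field, then a 1-byte pflags field, 103 bytes in total, and mercurial/changegroup.py remains the authoritative decoder for the actual wire layout.)

  import struct

  # Field order as drawn in the version 4 header table above (103 bytes).
  DELTA_HEADER_V4 = struct.Struct(">20s20s20s20s20sHB")

  def parse_delta_header_v4(header):
      """Illustrative split of a version 4 delta header into its fields."""
      node, p1, p2, base, link, flags, pflags = DELTA_HEADER_V4.unpack(header)
      # Per the text above, pflags bit 1 signals that a variable-length
      # sidedata chunk follows the revision flags.
      has_sidedata = bool(pflags & 1)
      return node, p1, p2, base, link, flags, has_sidedata
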
--- a/tests/test-hgrc.t Sun May 02 16:56:20 2021 -0400
+++ b/tests/test-hgrc.t Fri May 07 22:06:25 2021 -0400
@@ -253,9 +253,8 @@
> [paths]
> foo = bar
> EOF
- $ hg showconfig --debug paths
+ $ hg showconfig --source paths
plain: True
- read config from: $TESTTMP/hgrc
$TESTTMP/hgrc:17: paths.foo=$TESTTMP/bar
Test we can skip the user configuration
--- a/tests/test-hgweb-json.t Sun May 02 16:56:20 2021 -0400
+++ b/tests/test-hgweb-json.t Fri May 07 22:06:25 2021 -0400
@@ -2272,6 +2272,10 @@
"topic": "environment"
},
{
+ "summary": "Safely rewriting history (EXPERIMENTAL)",
+ "topic": "evolution"
+ },
+ {
"summary": "Using Additional Features",
"topic": "extensions"
},
--- a/tests/test-histedit-obsolete.t Sun May 02 16:56:20 2021 -0400
+++ b/tests/test-histedit-obsolete.t Fri May 07 22:06:25 2021 -0400
@@ -307,7 +307,7 @@
o 0:cb9a9f314b8b (public) a
$ hg histedit -r '.~2'
- abort: cannot edit public changesets
+ abort: cannot edit public changesets: cb9a9f314b8b, 40db8afa467b
(see 'hg help phases' for details)
[10]
--- a/tests/test-lfs-bundle.t Sun May 02 16:56:20 2021 -0400
+++ b/tests/test-lfs-bundle.t Fri May 07 22:06:25 2021 -0400
@@ -101,7 +101,7 @@
#if windows
$ unset LOCALAPPDATA
$ unset APPDATA
- $ HGRCPATH= hg config lfs --debug
+ $ HGRCPATH= hg config lfs --source
abort: unknown lfs usercache location
(define LOCALAPPDATA or APPDATA in the environment, or set lfs.usercache)
[255]
@@ -109,7 +109,7 @@
#if osx
$ unset HOME
- $ HGRCPATH= hg config lfs --debug
+ $ HGRCPATH= hg config lfs --source
abort: unknown lfs usercache location
(define HOME in the environment, or set lfs.usercache)
[255]
@@ -118,7 +118,7 @@
#if no-windows no-osx
$ unset XDG_CACHE_HOME
$ unset HOME
- $ HGRCPATH= hg config lfs --debug
+ $ HGRCPATH= hg config lfs --source
abort: unknown lfs usercache location
(define XDG_CACHE_HOME or HOME in the environment, or set lfs.usercache)
[255]
--- a/tests/test-lfs-serve.t Sun May 02 16:56:20 2021 -0400
+++ b/tests/test-lfs-serve.t Fri May 07 22:06:25 2021 -0400
@@ -355,11 +355,11 @@
# LFS required- both lfs and non-lfs revlogs have 0x2000 flag
*** runcommand debugprocessors lfs.bin -R ../server
registered processor '0x8000'
- registered processor '0x800'
+ registered processor '0x1000'
registered processor '0x2000'
*** runcommand debugprocessors nonlfs2.txt -R ../server
registered processor '0x8000'
- registered processor '0x800'
+ registered processor '0x1000'
registered processor '0x2000'
*** runcommand config extensions --cwd ../server
extensions.debugprocessors=$TESTTMP/debugprocessors.py
@@ -368,7 +368,7 @@
# LFS not enabled- revlogs don't have 0x2000 flag
*** runcommand debugprocessors nonlfs3.txt
registered processor '0x8000'
- registered processor '0x800'
+ registered processor '0x1000'
*** runcommand config extensions
extensions.debugprocessors=$TESTTMP/debugprocessors.py
@@ -411,11 +411,11 @@
# LFS enabled- both lfs and non-lfs revlogs have 0x2000 flag
*** runcommand debugprocessors lfs.bin -R ../server
registered processor '0x8000'
- registered processor '0x800'
+ registered processor '0x1000'
registered processor '0x2000'
*** runcommand debugprocessors nonlfs2.txt -R ../server
registered processor '0x8000'
- registered processor '0x800'
+ registered processor '0x1000'
registered processor '0x2000'
*** runcommand config extensions --cwd ../server
extensions.debugprocessors=$TESTTMP/debugprocessors.py
@@ -424,7 +424,7 @@
# LFS enabled without requirement- revlogs have 0x2000 flag
*** runcommand debugprocessors nonlfs3.txt
registered processor '0x8000'
- registered processor '0x800'
+ registered processor '0x1000'
registered processor '0x2000'
*** runcommand config extensions
extensions.debugprocessors=$TESTTMP/debugprocessors.py
@@ -433,7 +433,7 @@
# LFS disabled locally- revlogs don't have 0x2000 flag
*** runcommand debugprocessors nonlfs.txt -R ../nonlfs
registered processor '0x8000'
- registered processor '0x800'
+ registered processor '0x1000'
*** runcommand config extensions --cwd ../nonlfs
extensions.debugprocessors=$TESTTMP/debugprocessors.py
extensions.lfs=!
--- a/tests/test-manifest.py Sun May 02 16:56:20 2021 -0400
+++ b/tests/test-manifest.py Fri May 07 22:06:25 2021 -0400
@@ -81,12 +81,12 @@
raise NotImplementedError('parsemanifest not implemented by test case')
def testEmptyManifest(self):
- m = self.parsemanifest(EMTPY_MANIFEST)
+ m = self.parsemanifest(20, EMTPY_MANIFEST)
self.assertEqual(0, len(m))
self.assertEqual([], list(m))
def testManifest(self):
- m = self.parsemanifest(A_SHORT_MANIFEST)
+ m = self.parsemanifest(20, A_SHORT_MANIFEST)
self.assertEqual([b'bar/baz/qux.py', b'foo'], list(m))
self.assertEqual(BIN_HASH_2, m[b'bar/baz/qux.py'])
self.assertEqual(b'l', m.flags(b'bar/baz/qux.py'))
@@ -95,20 +95,16 @@
with self.assertRaises(KeyError):
m[b'wat']
- def testManifestLongHashes(self):
- m = self.parsemanifest(b'a\0' + b'f' * 64 + b'\n')
- self.assertEqual(binascii.unhexlify(b'f' * 64), m[b'a'])
-
def testSetItem(self):
want = BIN_HASH_1
- m = self.parsemanifest(EMTPY_MANIFEST)
+ m = self.parsemanifest(20, EMTPY_MANIFEST)
m[b'a'] = want
self.assertIn(b'a', m)
self.assertEqual(want, m[b'a'])
self.assertEqual(b'a\0' + HASH_1 + b'\n', m.text())
- m = self.parsemanifest(A_SHORT_MANIFEST)
+ m = self.parsemanifest(20, A_SHORT_MANIFEST)
m[b'a'] = want
self.assertEqual(want, m[b'a'])
self.assertEqual(b'a\0' + HASH_1 + b'\n' + A_SHORT_MANIFEST, m.text())
@@ -116,14 +112,14 @@
def testSetFlag(self):
want = b'x'
- m = self.parsemanifest(EMTPY_MANIFEST)
+ m = self.parsemanifest(20, EMTPY_MANIFEST)
# first add a file; a file-less flag makes no sense
m[b'a'] = BIN_HASH_1
m.setflag(b'a', want)
self.assertEqual(want, m.flags(b'a'))
self.assertEqual(b'a\0' + HASH_1 + want + b'\n', m.text())
- m = self.parsemanifest(A_SHORT_MANIFEST)
+ m = self.parsemanifest(20, A_SHORT_MANIFEST)
# first add a file; a file-less flag makes no sense
m[b'a'] = BIN_HASH_1
m.setflag(b'a', want)
@@ -133,7 +129,7 @@
)
def testCopy(self):
- m = self.parsemanifest(A_SHORT_MANIFEST)
+ m = self.parsemanifest(20, A_SHORT_MANIFEST)
m[b'a'] = BIN_HASH_1
m2 = m.copy()
del m
@@ -142,7 +138,7 @@
def testCompaction(self):
unhex = binascii.unhexlify
h1, h2 = unhex(HASH_1), unhex(HASH_2)
- m = self.parsemanifest(A_SHORT_MANIFEST)
+ m = self.parsemanifest(20, A_SHORT_MANIFEST)
m[b'alpha'] = h1
m[b'beta'] = h2
del m[b'foo']
@@ -164,7 +160,7 @@
m[b'foo']
def testMatchException(self):
- m = self.parsemanifest(A_SHORT_MANIFEST)
+ m = self.parsemanifest(20, A_SHORT_MANIFEST)
match = matchmod.match(util.localpath(b'/repo'), b'', [b're:.*'])
def filt(path):
@@ -177,7 +173,7 @@
m._matches(match)
def testRemoveItem(self):
- m = self.parsemanifest(A_SHORT_MANIFEST)
+ m = self.parsemanifest(20, A_SHORT_MANIFEST)
del m[b'foo']
with self.assertRaises(KeyError):
m[b'foo']
@@ -193,9 +189,9 @@
addl = b'z-only-in-left\0' + HASH_1 + b'\n'
addr = b'z-only-in-right\0' + HASH_2 + b'x\n'
left = self.parsemanifest(
- A_SHORT_MANIFEST.replace(HASH_1, HASH_3 + b'x') + addl
+ 20, A_SHORT_MANIFEST.replace(HASH_1, HASH_3 + b'x') + addl
)
- right = self.parsemanifest(A_SHORT_MANIFEST + addr)
+ right = self.parsemanifest(20, A_SHORT_MANIFEST + addr)
want = {
b'foo': ((BIN_HASH_3, b'x'), (BIN_HASH_1, b'')),
b'z-only-in-left': ((BIN_HASH_1, b''), MISSING),
@@ -208,14 +204,18 @@
b'foo': (MISSING, (BIN_HASH_3, b'x')),
b'z-only-in-left': (MISSING, (BIN_HASH_1, b'')),
}
- self.assertEqual(want, self.parsemanifest(EMTPY_MANIFEST).diff(left))
+ self.assertEqual(
+ want, self.parsemanifest(20, EMTPY_MANIFEST).diff(left)
+ )
want = {
b'bar/baz/qux.py': ((BIN_HASH_2, b'l'), MISSING),
b'foo': ((BIN_HASH_3, b'x'), MISSING),
b'z-only-in-left': ((BIN_HASH_1, b''), MISSING),
}
- self.assertEqual(want, left.diff(self.parsemanifest(EMTPY_MANIFEST)))
+ self.assertEqual(
+ want, left.diff(self.parsemanifest(20, EMTPY_MANIFEST))
+ )
copy = right.copy()
del copy[b'z-only-in-right']
del right[b'foo']
@@ -225,7 +225,7 @@
}
self.assertEqual(want, right.diff(copy))
- short = self.parsemanifest(A_SHORT_MANIFEST)
+ short = self.parsemanifest(20, A_SHORT_MANIFEST)
pruned = short.copy()
del pruned[b'foo']
want = {
@@ -247,27 +247,27 @@
l + b'\n' for l in reversed(A_SHORT_MANIFEST.split(b'\n')) if l
)
try:
- self.parsemanifest(backwards)
+ self.parsemanifest(20, backwards)
self.fail('Should have raised ValueError')
except ValueError as v:
self.assertIn('Manifest lines not in sorted order.', str(v))
def testNoTerminalNewline(self):
try:
- self.parsemanifest(A_SHORT_MANIFEST + b'wat')
+ self.parsemanifest(20, A_SHORT_MANIFEST + b'wat')
self.fail('Should have raised ValueError')
except ValueError as v:
self.assertIn('Manifest did not end in a newline.', str(v))
def testNoNewLineAtAll(self):
try:
- self.parsemanifest(b'wat')
+ self.parsemanifest(20, b'wat')
self.fail('Should have raised ValueError')
except ValueError as v:
self.assertIn('Manifest did not end in a newline.', str(v))
def testHugeManifest(self):
- m = self.parsemanifest(A_HUGE_MANIFEST)
+ m = self.parsemanifest(20, A_HUGE_MANIFEST)
self.assertEqual(HUGE_MANIFEST_ENTRIES, len(m))
self.assertEqual(len(m), len(list(m)))
@@ -275,7 +275,7 @@
"""Tests matches() for a few specific files to make sure that both
the set of files as well as their flags and nodeids are correct in
the resulting manifest."""
- m = self.parsemanifest(A_HUGE_MANIFEST)
+ m = self.parsemanifest(20, A_HUGE_MANIFEST)
match = matchmod.exact([b'file1', b'file200', b'file300'])
m2 = m._matches(match)
@@ -291,7 +291,7 @@
"""Tests matches() for a small set of specific files, including one
nonexistent file to make sure in only matches against existing files.
"""
- m = self.parsemanifest(A_DEEPER_MANIFEST)
+ m = self.parsemanifest(20, A_DEEPER_MANIFEST)
match = matchmod.exact(
[b'a/b/c/bar.txt', b'a/b/d/qux.py', b'readme.txt', b'nonexistent']
@@ -305,7 +305,7 @@
def testMatchesNonexistentDirectory(self):
"""Tests matches() for a relpath match on a directory that doesn't
actually exist."""
- m = self.parsemanifest(A_DEEPER_MANIFEST)
+ m = self.parsemanifest(20, A_DEEPER_MANIFEST)
match = matchmod.match(
util.localpath(b'/repo'), b'', [b'a/f'], default=b'relpath'
@@ -316,7 +316,7 @@
def testMatchesExactLarge(self):
"""Tests matches() for files matching a large list of exact files."""
- m = self.parsemanifest(A_HUGE_MANIFEST)
+ m = self.parsemanifest(20, A_HUGE_MANIFEST)
flist = m.keys()[80:300]
match = matchmod.exact(flist)
@@ -326,7 +326,7 @@
def testMatchesFull(self):
'''Tests matches() for what should be a full match.'''
- m = self.parsemanifest(A_DEEPER_MANIFEST)
+ m = self.parsemanifest(20, A_DEEPER_MANIFEST)
match = matchmod.match(util.localpath(b'/repo'), b'', [b''])
m2 = m._matches(match)
@@ -336,7 +336,7 @@
def testMatchesDirectory(self):
"""Tests matches() on a relpath match on a directory, which should
match against all files within said directory."""
- m = self.parsemanifest(A_DEEPER_MANIFEST)
+ m = self.parsemanifest(20, A_DEEPER_MANIFEST)
match = matchmod.match(
util.localpath(b'/repo'), b'', [b'a/b'], default=b'relpath'
@@ -362,7 +362,7 @@
"""Tests matches() on an exact match on a directory, which should
result in an empty manifest because you can't perform an exact match
against a directory."""
- m = self.parsemanifest(A_DEEPER_MANIFEST)
+ m = self.parsemanifest(20, A_DEEPER_MANIFEST)
match = matchmod.exact([b'a/b'])
m2 = m._matches(match)
@@ -372,7 +372,7 @@
def testMatchesCwd(self):
"""Tests matches() on a relpath match with the current directory ('.')
when not in the root directory."""
- m = self.parsemanifest(A_DEEPER_MANIFEST)
+ m = self.parsemanifest(20, A_DEEPER_MANIFEST)
match = matchmod.match(
util.localpath(b'/repo'), b'a/b', [b'.'], default=b'relpath'
@@ -397,7 +397,7 @@
def testMatchesWithPattern(self):
"""Tests matches() for files matching a pattern that reside
deeper than the specified directory."""
- m = self.parsemanifest(A_DEEPER_MANIFEST)
+ m = self.parsemanifest(20, A_DEEPER_MANIFEST)
match = matchmod.match(util.localpath(b'/repo'), b'', [b'a/b/*/*.txt'])
m2 = m._matches(match)
@@ -408,8 +408,12 @@
class testmanifestdict(unittest.TestCase, basemanifesttests):
- def parsemanifest(self, text):
- return manifestmod.manifestdict(text)
+ def parsemanifest(self, nodelen, text):
+ return manifestmod.manifestdict(nodelen, text)
+
+ def testManifestLongHashes(self):
+ m = self.parsemanifest(32, b'a\0' + b'f' * 64 + b'\n')
+ self.assertEqual(binascii.unhexlify(b'f' * 64), m[b'a'])
def testObviouslyBogusManifest(self):
# This is a 163k manifest that came from oss-fuzz. It was a
@@ -433,15 +437,15 @@
b'\xac\xbe'
)
with self.assertRaises(ValueError):
- self.parsemanifest(data)
+ self.parsemanifest(20, data)
class testtreemanifest(unittest.TestCase, basemanifesttests):
- def parsemanifest(self, text):
+ def parsemanifest(self, nodelen, text):
return manifestmod.treemanifest(sha1nodeconstants, b'', text)
def testWalkSubtrees(self):
- m = self.parsemanifest(A_DEEPER_MANIFEST)
+ m = self.parsemanifest(20, A_DEEPER_MANIFEST)
dirs = [s._dir for s in m.walksubtrees()]
self.assertEqual(
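
(Editorial aside, not part of the patch: the test changes above reflect that parsemanifest, and manifestdict underneath it, now take the node length as their first argument, so callers state whether entries carry 20-byte SHA-1 nodes or longer hashes. A minimal sketch, assuming the in-tree mercurial package is importable:)

  from mercurial import manifest as manifestmod

  # 40 hex digits per entry -> 20-byte (SHA-1) nodes.
  m20 = manifestmod.manifestdict(20, b"foo\0" + b"1" * 40 + b"\n")
  assert len(m20[b"foo"]) == 20

  # 64 hex digits per entry -> 32-byte nodes, mirroring
  # testManifestLongHashes above.
  m32 = manifestmod.manifestdict(32, b"a\0" + b"f" * 64 + b"\n")
  assert len(m32[b"a"]) == 32
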
--- a/tests/test-merge-subrepos.t Sun May 02 16:56:20 2021 -0400
+++ b/tests/test-merge-subrepos.t Fri May 07 22:06:25 2021 -0400
@@ -61,7 +61,7 @@
> --config blackbox.track='command commandfinish'
9bfe45a197d7+ tip
$ cat .hg/blackbox.log
- * @9bfe45a197d7b0ab09bf287729dd57e9619c9da5+ (*)> serve --cmdserver chgunix * (glob) (chg !)
+ * @9bfe45a197d7b0ab09bf287729dd57e9619c9da5+ (*)> serve --no-profile --cmdserver chgunix * (glob) (chg !)
* @9bfe45a197d7b0ab09bf287729dd57e9619c9da5+ (*)> id --config *extensions.blackbox=* --config *blackbox.dirty=True* (glob)
* @9bfe45a197d7b0ab09bf287729dd57e9619c9da5+ (*)> id --config *extensions.blackbox=* --config *blackbox.dirty=True* exited 0 * (glob)
--- a/tests/test-narrow-clone-non-narrow-server.t Sun May 02 16:56:20 2021 -0400
+++ b/tests/test-narrow-clone-non-narrow-server.t Fri May 07 22:06:25 2021 -0400
@@ -57,6 +57,7 @@
comparing with http://localhost:$HGPORT1/
searching for changes
looking for local changes to affected paths
+ deleting unwanted files from working copy
$ hg tracked --addinclude f1 http://localhost:$HGPORT1/
nothing to widen or narrow
--- a/tests/test-narrow-patterns.t Sun May 02 16:56:20 2021 -0400
+++ b/tests/test-narrow-patterns.t Fri May 07 22:06:25 2021 -0400
@@ -193,6 +193,7 @@
deleting data/dir1/dirA/bar.i (reporevlogstore !)
deleting data/dir1/dirA/bar/0eca1d0cbdaea4651d1d04d71976a6d2d9bfaae5 (reposimplestore !)
deleting data/dir1/dirA/bar/index (reposimplestore !)
+ deleting unwanted files from working copy
saved backup bundle to $TESTTMP/narrow/.hg/strip-backup/*-widen.hg (glob)
adding changesets
adding manifests
@@ -249,6 +250,7 @@
deleting data/dir1/dirA/foo.i (reporevlogstore !)
deleting data/dir1/dirA/foo/162caeb3d55dceb1fee793aa631ac8c73fcb8b5e (reposimplestore !)
deleting data/dir1/dirA/foo/index (reposimplestore !)
+ deleting unwanted files from working copy
saved backup bundle to $TESTTMP/narrow/.hg/strip-backup/*-widen.hg (glob)
adding changesets
adding manifests
--- a/tests/test-narrow-share.t Sun May 02 16:56:20 2021 -0400
+++ b/tests/test-narrow-share.t Fri May 07 22:06:25 2021 -0400
@@ -94,6 +94,7 @@
deleting meta/d1/00manifest.i (tree !)
deleting meta/d3/00manifest.i (tree !)
deleting meta/d5/00manifest.i (tree !)
+ deleting unwanted files from working copy
$ hg -R main tracked
I path:d7
$ hg -R main files
--- a/tests/test-narrow-trackedcmd.t Sun May 02 16:56:20 2021 -0400
+++ b/tests/test-narrow-trackedcmd.t Fri May 07 22:06:25 2021 -0400
@@ -150,6 +150,7 @@
looking for local changes to affected paths
deleting data/inside/f.i
deleting meta/inside/00manifest.i (tree !)
+ deleting unwanted files from working copy
saved backup bundle to $TESTTMP/narrow/.hg/strip-backup/*-widen.hg (glob)
adding changesets
adding manifests
@@ -191,6 +192,7 @@
looking for local changes to affected paths
deleting data/widest/f.i
deleting meta/widest/00manifest.i (tree !)
+ deleting unwanted files from working copy
$ hg tracked
I path:outisde
I path:wider
--- a/tests/test-narrow.t Sun May 02 16:56:20 2021 -0400
+++ b/tests/test-narrow.t Fri May 07 22:06:25 2021 -0400
@@ -132,12 +132,14 @@
looking for local changes to affected paths
The following changeset(s) or their ancestors have local changes not on the remote:
* (glob)
+ moving unwanted changesets to backup
saved backup bundle to $TESTTMP/narrow-local-changes/.hg/strip-backup/*-narrow.hg (glob)
deleting data/d0/f.i (reporevlogstore !)
deleting meta/d0/00manifest.i (tree !)
deleting data/d0/f/362fef284ce2ca02aecc8de6d5e8a1c3af0556fe (reposimplestore !)
deleting data/d0/f/4374b5650fc5ae54ac857c0f0381971fdde376f7 (reposimplestore !)
deleting data/d0/f/index (reposimplestore !)
+ deleting unwanted files from working copy
$ hg log -T "{rev}: {desc} {outsidenarrow}\n"
7: local change to d3
@@ -164,12 +166,14 @@
comparing with ssh://user@dummy/master
searching for changes
looking for local changes to affected paths
+ moving unwanted changesets to backup
saved backup bundle to $TESTTMP/narrow-local-changes/.hg/strip-backup/*-narrow.hg (glob)
deleting data/d0/f.i (reporevlogstore !)
deleting meta/d0/00manifest.i (tree !)
deleting data/d0/f/362fef284ce2ca02aecc8de6d5e8a1c3af0556fe (reposimplestore !)
deleting data/d0/f/4374b5650fc5ae54ac857c0f0381971fdde376f7 (reposimplestore !)
deleting data/d0/f/index (reposimplestore !)
+ deleting unwanted files from working copy
Updates off of stripped commit if necessary
$ hg co -r 'desc("local change to d3")' -q
@@ -183,12 +187,14 @@
* (glob)
* (glob)
2 files updated, 0 files merged, 0 files removed, 0 files unresolved
+ moving unwanted changesets to backup
saved backup bundle to $TESTTMP/narrow-local-changes/.hg/strip-backup/*-narrow.hg (glob)
deleting data/d3/f.i (reporevlogstore !)
deleting meta/d3/00manifest.i (tree !)
deleting data/d3/f/2661d26c649684b482d10f91960cc3db683c38b4 (reposimplestore !)
deleting data/d3/f/99fa7136105a15e2045ce3d9152e4837c5349e4d (reposimplestore !)
deleting data/d3/f/index (reposimplestore !)
+ deleting unwanted files from working copy
$ hg log -T '{desc}\n' -r .
add d10/f
Updates to nullid if necessary
@@ -206,12 +212,14 @@
The following changeset(s) or their ancestors have local changes not on the remote:
* (glob)
0 files updated, 0 files merged, 1 files removed, 0 files unresolved
+ moving unwanted changesets to backup
saved backup bundle to $TESTTMP/narrow-local-changes/.hg/strip-backup/*-narrow.hg (glob)
deleting data/d3/f.i (reporevlogstore !)
deleting meta/d3/00manifest.i (tree !)
deleting data/d3/f/2661d26c649684b482d10f91960cc3db683c38b4 (reposimplestore !)
deleting data/d3/f/5ce0767945cbdbca3b924bb9fbf5143f72ab40ac (reposimplestore !)
deleting data/d3/f/index (reposimplestore !)
+ deleting unwanted files from working copy
$ hg id
000000000000
$ cd ..
@@ -272,6 +280,7 @@
deleting meta/d0/00manifest.i (tree !)
deleting data/d0/f/362fef284ce2ca02aecc8de6d5e8a1c3af0556fe (reposimplestore !)
deleting data/d0/f/index (reposimplestore !)
+ deleting unwanted files from working copy
$ hg tracked
$ hg files
[1]
@@ -332,6 +341,7 @@
deleting meta/d6/00manifest.i (tree !)
deleting data/d6/f/7339d30678f451ac8c3f38753beeb4cf2e1655c7 (reposimplestore !)
deleting data/d6/f/index (reposimplestore !)
+ deleting unwanted files from working copy
$ hg tracked
I path:d0
I path:d3
@@ -355,6 +365,7 @@
deleting data/d3/f.i (reporevlogstore !)
deleting data/d3/f/2661d26c649684b482d10f91960cc3db683c38b4 (reposimplestore !)
deleting data/d3/f/index (reposimplestore !)
+ deleting unwanted files from working copy
$ hg tracked
I path:d0
I path:d3
@@ -378,6 +389,7 @@
deleting meta/d0/00manifest.i (tree !)
deleting data/d0/f/362fef284ce2ca02aecc8de6d5e8a1c3af0556fe (reposimplestore !)
deleting data/d0/f/index (reposimplestore !)
+ deleting unwanted files from working copy
$ hg tracked
I path:d3
I path:d9
@@ -478,11 +490,13 @@
path:d2
remove these unused includes (yn)? y
looking for local changes to affected paths
+ moving unwanted changesets to backup
saved backup bundle to $TESTTMP/narrow-auto-remove/.hg/strip-backup/*-narrow.hg (glob)
deleting data/d0/f.i
deleting data/d2/f.i
deleting meta/d0/00manifest.i (tree !)
deleting meta/d2/00manifest.i (tree !)
+ deleting unwanted files from working copy
$ hg tracked
I path:d1
$ hg files
@@ -504,10 +518,12 @@
path:d2
remove these unused includes (yn)? y
looking for local changes to affected paths
+ deleting unwanted changesets
deleting data/d0/f.i
deleting data/d2/f.i
deleting meta/d0/00manifest.i (tree !)
deleting meta/d2/00manifest.i (tree !)
+ deleting unwanted files from working copy
$ ls .hg/strip-backup/
@@ -521,4 +537,5 @@
looking for local changes to affected paths
deleting data/d0/f.i
deleting meta/d0/00manifest.i (tree !)
+ deleting unwanted files from working copy
not deleting possibly dirty file d0/f
--- a/tests/test-obshistory.t Sun May 02 16:56:20 2021 -0400
+++ b/tests/test-obshistory.t Fri May 07 22:06:25 2021 -0400
@@ -13,6 +13,7 @@
> [experimental]
> evolution.createmarkers = yes
> evolution.effect-flags = yes
+ > evolution.allowdivergence=true
> EOF
Test output on amended commit
--- a/tests/test-obsmarker-template.t Sun May 02 16:56:20 2021 -0400
+++ b/tests/test-obsmarker-template.t Fri May 07 22:06:25 2021 -0400
@@ -11,6 +11,7 @@
> publish=False
> [experimental]
> evolution=true
+ > evolution.allowdivergence=true
> [templates]
> obsfatesuccessors = "{if(successors, " as ")}{join(successors, ", ")}"
> obsfateverb = "{obsfateverb(successors, markers)}"
--- a/tests/test-parseindex2.py Sun May 02 16:56:20 2021 -0400
+++ b/tests/test-parseindex2.py Fri May 07 22:06:25 2021 -0400
@@ -14,8 +14,8 @@
from mercurial.node import (
bin,
hex,
- nullid,
nullrev,
+ sha1nodeconstants,
)
from mercurial import (
policy,
@@ -40,7 +40,7 @@
s = 64
cache = None
index = []
- nodemap = {nullid: nullrev}
+ nodemap = {sha1nodeconstants.nullid: nullrev}
n = off = 0
l = len(data) - s
@@ -227,7 +227,7 @@
ix = parsers.parse_index2(data_inlined, True)[0]
for i, r in enumerate(ix):
- if r[7] == nullid:
+ if r[7] == sha1nodeconstants.nullid:
i = -1
try:
self.assertEqual(
@@ -240,7 +240,7 @@
break
def testminusone(self):
- want = (0, 0, 0, -1, -1, -1, -1, nullid)
+ want = (0, 0, 0, -1, -1, -1, -1, sha1nodeconstants.nullid)
index, junk = parsers.parse_index2(data_inlined, True)
got = index[-1]
self.assertEqual(want, got) # inline data
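
(Editorial aside, not part of the patch: the nullid changes in this and the surrounding files all follow one pattern, replacing the module-level mercurial.node.nullid with per-repository constants such as repo.nullid or, where no repository is available, sha1nodeconstants. A minimal sketch of the new spelling:)

  from mercurial.node import sha1nodeconstants

  # For SHA-1 repositories the null node is still twenty zero bytes.
  assert sha1nodeconstants.nullid == b"\0" * 20

  # Repository code prefers the instance attribute instead, e.g.
  #   fparent1 = manifest1.get(fname, repo.nullid)
  # as in the legacyrepo.py helper rewritten in test-fastannotate-hg.t above.
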
--- a/tests/test-phabricator.t Sun May 02 16:56:20 2021 -0400
+++ b/tests/test-phabricator.t Fri May 07 22:06:25 2021 -0400
@@ -509,9 +509,8 @@
A bad .arcconfig doesn't error out
$ echo 'garbage' > .arcconfig
- $ hg config phabricator --debug
+ $ hg config phabricator --source
invalid JSON in $TESTTMP/repo/.arcconfig
- read config from: */.hgrc (glob)
*/.hgrc:*: phabricator.debug=True (glob)
$TESTTMP/repo/.hg/hgrc:*: phabricator.url=https://phab.mercurial-scm.org/ (glob)
$TESTTMP/repo/.hg/hgrc:*: phabricator.callsign=HG (glob)
@@ -524,8 +523,7 @@
> EOF
$ cp $TESTDIR/../.arcconfig .
$ mv .hg/hgrc .hg/hgrc.bak
- $ hg config phabricator --debug
- read config from: */.hgrc (glob)
+ $ hg config phabricator --source
*/.hgrc:*: phabricator.debug=True (glob)
$TESTTMP/repo/.arcconfig: phabricator.callsign=HG
$TESTTMP/repo/.arcconfig: phabricator.url=https://phab.mercurial-scm.org/
@@ -536,8 +534,7 @@
> url = local
> callsign = local
> EOF
- $ hg config phabricator --debug
- read config from: */.hgrc (glob)
+ $ hg config phabricator --source
*/.hgrc:*: phabricator.debug=True (glob)
$TESTTMP/repo/.hg/hgrc:*: phabricator.url=local (glob)
$TESTTMP/repo/.hg/hgrc:*: phabricator.callsign=local (glob)
--- a/tests/test-rebase-collapse.t Sun May 02 16:56:20 2021 -0400
+++ b/tests/test-rebase-collapse.t Fri May 07 22:06:25 2021 -0400
@@ -549,8 +549,8 @@
o 0: f447d5abf5ea 'add'
$ hg rebase --collapse -r 1 -d 0
- abort: cannot rebase changeset with children
- (use --keep to keep original changesets)
+ abort: cannot rebase changeset, as that will orphan 1 descendants
+ (see 'hg help evolution.instability')
[10]
Test collapsing in place
--- a/tests/test-rebase-scenario-global.t Sun May 02 16:56:20 2021 -0400
+++ b/tests/test-rebase-scenario-global.t Fri May 07 22:06:25 2021 -0400
@@ -328,11 +328,11 @@
nothing to rebase
[1]
$ hg rebase -d 5 -b 6
- abort: cannot rebase public changesets
+ abort: cannot rebase public changesets: e1c4361dd923
(see 'hg help phases' for details)
[10]
$ hg rebase -d 5 -r '1 + (6::)'
- abort: cannot rebase public changesets
+ abort: cannot rebase public changesets: e1c4361dd923
(see 'hg help phases' for details)
[10]
@@ -452,8 +452,8 @@
$ hg clone -q -u . ah ah1
$ cd ah1
$ hg rebase -r '2::8' -d 1
- abort: cannot rebase changeset with children
- (use --keep to keep original changesets)
+ abort: cannot rebase changeset, as that will orphan 2 descendants
+ (see 'hg help evolution.instability')
[10]
$ hg rebase -r '2::8' -d 1 -k
rebasing 2:c9e50f6cdc55 "C"
@@ -498,8 +498,8 @@
$ hg clone -q -u . ah ah2
$ cd ah2
$ hg rebase -r '3::8' -d 1
- abort: cannot rebase changeset with children
- (use --keep to keep original changesets)
+ abort: cannot rebase changeset, as that will orphan 2 descendants
+ (see 'hg help evolution.instability')
[10]
$ hg rebase -r '3::8' -d 1 --keep
rebasing 3:ffd453c31098 "D"
@@ -541,8 +541,8 @@
$ hg clone -q -u . ah ah3
$ cd ah3
$ hg rebase -r '3::7' -d 1
- abort: cannot rebase changeset with children
- (use --keep to keep original changesets)
+ abort: cannot rebase changeset, as that will orphan 3 descendants
+ (see 'hg help evolution.instability')
[10]
$ hg rebase -r '3::7' -d 1 --keep
rebasing 3:ffd453c31098 "D"
@@ -581,8 +581,8 @@
$ hg clone -q -u . ah ah4
$ cd ah4
$ hg rebase -r '3::(7+5)' -d 1
- abort: cannot rebase changeset with children
- (use --keep to keep original changesets)
+ abort: cannot rebase changeset, as that will orphan 1 descendants
+ (see 'hg help evolution.instability')
[10]
$ hg rebase -r '3::(7+5)' -d 1 --keep
rebasing 3:ffd453c31098 "D"
--- a/tests/test-remotefilelog-datapack.py Sun May 02 16:56:20 2021 -0400
+++ b/tests/test-remotefilelog-datapack.py Fri May 07 22:06:25 2021 -0400
@@ -16,7 +16,7 @@
# Load the local remotefilelog, not the system one
sys.path[0:0] = [os.path.join(os.path.dirname(__file__), '..')]
-from mercurial.node import nullid
+from mercurial.node import sha1nodeconstants
from mercurial import policy
if not policy._packageprefs.get(policy.policy, (False, False))[1]:
@@ -63,7 +63,14 @@
def createPack(self, revisions=None, packdir=None):
if revisions is None:
- revisions = [(b"filename", self.getFakeHash(), nullid, b"content")]
+ revisions = [
+ (
+ b"filename",
+ self.getFakeHash(),
+ sha1nodeconstants.nullid,
+ b"content",
+ )
+ ]
if packdir is None:
packdir = self.makeTempDir()
@@ -86,7 +93,7 @@
filename = b"foo"
node = self.getHash(content)
- revisions = [(filename, node, nullid, content)]
+ revisions = [(filename, node, sha1nodeconstants.nullid, content)]
pack = self.createPack(revisions)
if self.paramsavailable:
self.assertEqual(
@@ -126,7 +133,7 @@
"""Test putting multiple delta blobs into a pack and read the chain."""
revisions = []
filename = b"foo"
- lastnode = nullid
+ lastnode = sha1nodeconstants.nullid
for i in range(10):
content = b"abcdef%d" % i
node = self.getHash(content)
@@ -157,7 +164,7 @@
for j in range(random.randint(1, 100)):
content = b"content-%d" % j
node = self.getHash(content)
- lastnode = nullid
+ lastnode = sha1nodeconstants.nullid
if len(filerevs) > 0:
lastnode = filerevs[random.randint(0, len(filerevs) - 1)]
filerevs.append(node)
@@ -185,7 +192,9 @@
b'Z': b'random_string',
b'_': b'\0' * i,
}
- revisions.append((filename, node, nullid, content, meta))
+ revisions.append(
+ (filename, node, sha1nodeconstants.nullid, content, meta)
+ )
pack = self.createPack(revisions)
for name, node, x, content, origmeta in revisions:
parsedmeta = pack.getmeta(name, node)
@@ -198,7 +207,7 @@
"""Test the getmissing() api."""
revisions = []
filename = b"foo"
- lastnode = nullid
+ lastnode = sha1nodeconstants.nullid
for i in range(10):
content = b"abcdef%d" % i
node = self.getHash(content)
@@ -225,7 +234,7 @@
pack = self.createPack()
try:
- pack.add(b'filename', nullid, b'contents')
+ pack.add(b'filename', sha1nodeconstants.nullid, b'contents')
self.assertTrue(False, "datapack.add should throw")
except RuntimeError:
pass
@@ -264,7 +273,9 @@
content = filename
node = self.getHash(content)
blobs[(filename, node)] = content
- revisions.append((filename, node, nullid, content))
+ revisions.append(
+ (filename, node, sha1nodeconstants.nullid, content)
+ )
pack = self.createPack(revisions)
if self.paramsavailable:
@@ -288,7 +299,12 @@
for i in range(numpacks):
chain = []
- revision = (b'%d' % i, self.getFakeHash(), nullid, b"content")
+ revision = (
+ b'%d' % i,
+ self.getFakeHash(),
+ sha1nodeconstants.nullid,
+ b"content",
+ )
for _ in range(revisionsperpack):
chain.append(revision)
@@ -346,7 +362,9 @@
filename = b"filename-%d" % i
content = b"content-%d" % i
node = self.getHash(content)
- revisions.append((filename, node, nullid, content))
+ revisions.append(
+ (filename, node, sha1nodeconstants.nullid, content)
+ )
path = self.createPack(revisions).path
--- a/tests/test-remotefilelog-histpack.py Sun May 02 16:56:20 2021 -0400
+++ b/tests/test-remotefilelog-histpack.py Fri May 07 22:06:25 2021 -0400
@@ -13,7 +13,7 @@
import silenttestrunner
-from mercurial.node import nullid
+from mercurial.node import sha1nodeconstants
from mercurial import (
pycompat,
ui as uimod,
@@ -59,8 +59,8 @@
(
b"filename",
self.getFakeHash(),
- nullid,
- nullid,
+ sha1nodeconstants.nullid,
+ sha1nodeconstants.nullid,
self.getFakeHash(),
None,
)
@@ -119,10 +119,19 @@
"""
revisions = []
filename = b"foo"
- lastnode = nullid
+ lastnode = sha1nodeconstants.nullid
for i in range(10):
node = self.getFakeHash()
- revisions.append((filename, node, lastnode, nullid, nullid, None))
+ revisions.append(
+ (
+ filename,
+ node,
+ lastnode,
+ sha1nodeconstants.nullid,
+ sha1nodeconstants.nullid,
+ None,
+ )
+ )
lastnode = node
# revisions must be added in topological order, newest first
@@ -148,17 +157,17 @@
for i in range(100):
filename = b"filename-%d" % i
entries = []
- p2 = nullid
- linknode = nullid
+ p2 = sha1nodeconstants.nullid
+ linknode = sha1nodeconstants.nullid
for j in range(random.randint(1, 100)):
node = self.getFakeHash()
- p1 = nullid
+ p1 = sha1nodeconstants.nullid
if len(entries) > 0:
p1 = entries[random.randint(0, len(entries) - 1)]
entries.append(node)
revisions.append((filename, node, p1, p2, linknode, None))
allentries[(filename, node)] = (p1, p2, linknode)
- if p1 == nullid:
+ if p1 == sha1nodeconstants.nullid:
ancestorcounts[(filename, node)] = 1
else:
newcount = ancestorcounts[(filename, p1)] + 1
@@ -182,10 +191,19 @@
def testGetNodeInfo(self):
revisions = []
filename = b"foo"
- lastnode = nullid
+ lastnode = sha1nodeconstants.nullid
for i in range(10):
node = self.getFakeHash()
- revisions.append((filename, node, lastnode, nullid, nullid, None))
+ revisions.append(
+ (
+ filename,
+ node,
+ lastnode,
+ sha1nodeconstants.nullid,
+ sha1nodeconstants.nullid,
+ None,
+ )
+ )
lastnode = node
pack = self.createPack(revisions)
@@ -233,7 +251,14 @@
pack = self.createPack()
try:
- pack.add(b'filename', nullid, nullid, nullid, nullid, None)
+ pack.add(
+ b'filename',
+ sha1nodeconstants.nullid,
+ sha1nodeconstants.nullid,
+ sha1nodeconstants.nullid,
+ sha1nodeconstants.nullid,
+ None,
+ )
self.assertTrue(False, "historypack.add should throw")
except RuntimeError:
pass
--- a/tests/test-remotefilelog-prefetch.t Sun May 02 16:56:20 2021 -0400
+++ b/tests/test-remotefilelog-prefetch.t Fri May 07 22:06:25 2021 -0400
@@ -237,6 +237,7 @@
$ hg mv z2 z3
z2: not copying - file is not managed
abort: no files to copy
+ (maybe you meant to use --after --at-rev=.)
[10]
$ find $CACHEDIR -type f | sort
.. The following output line about files fetches is globed because it is
--- a/tests/test-revlog-raw.py Sun May 02 16:56:20 2021 -0400
+++ b/tests/test-revlog-raw.py Fri May 07 22:06:25 2021 -0400
@@ -6,7 +6,6 @@
import hashlib
import sys
-from mercurial.node import nullid
from mercurial import (
encoding,
revlog,
@@ -15,6 +14,7 @@
)
from mercurial.revlogutils import (
+ constants,
deltas,
flagutil,
)
@@ -82,7 +82,9 @@
def newrevlog(name=b'_testrevlog.i', recreate=False):
if recreate:
tvfs.tryunlink(name)
- rlog = revlog.revlog(tvfs, name)
+ rlog = revlog.revlog(
+ tvfs, target=(constants.KIND_OTHER, b'test'), indexfile=name
+ )
return rlog
@@ -93,7 +95,7 @@
"""
nextrev = len(rlog)
p1 = rlog.node(nextrev - 1)
- p2 = nullid
+ p2 = rlog.nullid
if isext:
flags = revlog.REVIDX_EXTSTORED
else:
@@ -127,7 +129,7 @@
class dummychangegroup(object):
@staticmethod
def deltachunk(pnode):
- pnode = pnode or nullid
+ pnode = pnode or rlog.nullid
parentrev = rlog.rev(pnode)
r = parentrev + 1
if r >= len(rlog):
@@ -142,7 +144,7 @@
return {
b'node': rlog.node(r),
b'p1': pnode,
- b'p2': nullid,
+ b'p2': rlog.nullid,
b'cs': rlog.node(rlog.linkrev(r)),
b'flags': rlog.flags(r),
b'deltabase': rlog.node(deltaparent),
@@ -183,7 +185,7 @@
dlog = newrevlog(destname, recreate=True)
for r in rlog:
p1 = rlog.node(r - 1)
- p2 = nullid
+ p2 = rlog.nullid
if r == 0 or (rlog.flags(r) & revlog.REVIDX_EXTSTORED):
text = rlog.rawdata(r)
cachedelta = None
--- a/tests/test-revlog.t Sun May 02 16:56:20 2021 -0400
+++ b/tests/test-revlog.t Fri May 07 22:06:25 2021 -0400
@@ -45,9 +45,10 @@
0 2 99e0332bd498 000000000000 000000000000
1 3 6674f57a23d8 99e0332bd498 000000000000
+ >>> from mercurial.revlogutils.constants import KIND_OTHER
>>> from mercurial import revlog, vfs
>>> tvfs = vfs.vfs(b'.')
>>> tvfs.options = {b'revlogv1': True}
- >>> rl = revlog.revlog(tvfs, b'a.i')
+ >>> rl = revlog.revlog(tvfs, target=(KIND_OTHER, b'test'), indexfile=b'a.i')
>>> rl.revision(1)
mpatchError(*'patch cannot be decoded'*) (glob)
--- a/tests/test-setdiscovery.t Sun May 02 16:56:20 2021 -0400
+++ b/tests/test-setdiscovery.t Fri May 07 22:06:25 2021 -0400
@@ -1536,7 +1536,7 @@
searching for changes
101 102 103 104 105 106 107 108 109 110 (no-eol)
$ hg -R r1 --config extensions.blackbox= blackbox --config blackbox.track=
- * @5d0b986a083e0d91f116de4691e2aaa54d5bbec0 (*)> serve --cmdserver chgunix * (glob) (chg !)
+ * @5d0b986a083e0d91f116de4691e2aaa54d5bbec0 (*)> serve --no-profile --cmdserver chgunix * (glob) (chg !)
* @5d0b986a083e0d91f116de4691e2aaa54d5bbec0 (*)> -R r1 outgoing r2 *-T{rev} * --config *extensions.blackbox=* (glob)
* @5d0b986a083e0d91f116de4691e2aaa54d5bbec0 (*)> found 101 common and 1 unknown server heads, 1 roundtrips in *.????s (glob)
* @5d0b986a083e0d91f116de4691e2aaa54d5bbec0 (*)> -R r1 outgoing r2 *-T{rev} * --config *extensions.blackbox=* exited 0 after *.?? seconds (glob)
--- a/tests/test-single-head.t Sun May 02 16:56:20 2021 -0400
+++ b/tests/test-single-head.t Fri May 07 22:06:25 2021 -0400
@@ -65,6 +65,9 @@
1 files updated, 0 files merged, 1 files removed, 0 files unresolved
$ mkcommit c_dD0
created new head
+ $ hg log -r 'heads(::branch("default"))' -T '{node|short}\n'
+ 286d02a6e2a2
+ 9bf953aa81f6
$ hg push -f
pushing to $TESTTMP/single-head-server
searching for changes
--- a/tests/test-split.t Sun May 02 16:56:20 2021 -0400
+++ b/tests/test-split.t Fri May 07 22:06:25 2021 -0400
@@ -77,7 +77,7 @@
$ hg phase --public -r 'all()'
$ hg split .
- abort: cannot split public changesets
+ abort: cannot split public changesets: 1df0d5c5a3ab
(see 'hg help phases' for details)
[10]
@@ -466,7 +466,8 @@
$ cd $TESTTMP/d
#if obsstore-off
$ runsplit -r 1 --no-rebase
- abort: cannot split changeset with children
+ abort: cannot split changeset, as that will orphan 3 descendants
+ (see 'hg help evolution.instability')
[10]
#else
$ runsplit -r 1 --no-rebase >/dev/null
@@ -517,7 +518,8 @@
$ eval `hg tags -T '{tag}={node}\n'`
$ rm .hg/localtags
$ hg split $B --config experimental.evolution=createmarkers
- abort: cannot split changeset with children
+ abort: cannot split changeset, as that will orphan 4 descendants
+ (see 'hg help evolution.instability')
[10]
$ cat > $TESTTMP/messages <<EOF
> Split B
--- a/tests/test-unamend.t Sun May 02 16:56:20 2021 -0400
+++ b/tests/test-unamend.t Fri May 07 22:06:25 2021 -0400
@@ -6,6 +6,7 @@
> glog = log -G -T '{rev}:{node|short} {desc}'
> [experimental]
> evolution = createmarkers, allowunstable
+ > evolution.allowdivergence = true
> [extensions]
> rebase =
> amend =
@@ -283,7 +284,8 @@
$ hg --config experimental.evolution=createmarkers unamend
- abort: cannot unamend changeset with children
+ abort: cannot unamend changeset, as that will orphan 3 descendants
+ (see 'hg help evolution.instability')
[10]
$ hg unamend
@@ -296,7 +298,7 @@
$ hg phase -r . -p
1 new phase-divergent changesets
$ hg unamend
- abort: cannot unamend public changesets
+ abort: cannot unamend public changesets: 03ddd6fc5af1
(see 'hg help phases' for details)
[10]
--- a/tests/test-uncommit.t Sun May 02 16:56:20 2021 -0400
+++ b/tests/test-uncommit.t Fri May 07 22:06:25 2021 -0400
@@ -51,7 +51,7 @@
Uncommit with no commits should fail
$ hg uncommit
- abort: cannot uncommit null changeset
+ abort: cannot uncommit the null revision
(no changeset checked out)
[10]
@@ -410,7 +410,7 @@
[20]
$ hg uncommit --config experimental.uncommitondirtywdir=True
- abort: cannot uncommit while merging
+ abort: cannot uncommit changesets while merging
[20]
$ hg status
--- a/tests/testlib/ext-sidedata-2.py Sun May 02 16:56:20 2021 -0400
+++ b/tests/testlib/ext-sidedata-2.py Fri May 07 22:06:25 2021 -0400
@@ -14,6 +14,9 @@
import struct
from mercurial.revlogutils import sidedata as sidedatamod
+from mercurial.revlogutils import constants
+
+NO_FLAGS = (0, 0)
def compute_sidedata_1(repo, revlog, rev, sidedata, text=None):
@@ -21,7 +24,7 @@
if text is None:
text = revlog.revision(rev)
sidedata[sidedatamod.SD_TEST1] = struct.pack('>I', len(text))
- return sidedata
+ return sidedata, NO_FLAGS
def compute_sidedata_2(repo, revlog, rev, sidedata, text=None):
@@ -30,21 +33,23 @@
text = revlog.revision(rev)
sha256 = hashlib.sha256(text).digest()
sidedata[sidedatamod.SD_TEST2] = struct.pack('>32s', sha256)
- return sidedata
+ return sidedata, NO_FLAGS
def reposetup(ui, repo):
# Sidedata keys happen to be the same as the categories, easier for testing.
- for kind in (b'changelog', b'manifest', b'filelog'):
+ for kind in constants.ALL_KINDS:
repo.register_sidedata_computer(
kind,
sidedatamod.SD_TEST1,
(sidedatamod.SD_TEST1,),
compute_sidedata_1,
+ 0,
)
repo.register_sidedata_computer(
kind,
sidedatamod.SD_TEST2,
(sidedatamod.SD_TEST2,),
compute_sidedata_2,
+ 0,
)
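
(Editorial aside, not part of the patch: the ext-sidedata-* hunks above and below share one contract change. A sidedata computer now returns the sidedata dict together with a two-element flags tuple, NO_FLAGS when nothing changes, and is registered for every kind in constants.ALL_KINDS with a trailing flags argument, 0 in these extensions. A condensed sketch of that contract; compute_payload_size is a hypothetical name, not something this patch adds:)

  import struct

  from mercurial.revlogutils import constants
  from mercurial.revlogutils import sidedata as sidedatamod

  NO_FLAGS = (0, 0)  # two flag fields; (0, 0) means no revlog flag changes

  def compute_payload_size(repo, revlog, rev, sidedata, text=None):
      # Same shape as compute_sidedata_1 above: fill in one sidedata key
      # and report that no revlog flags need to change.
      if text is None:
          text = revlog.revision(rev)
      sidedata[sidedatamod.SD_TEST1] = struct.pack('>I', len(text))
      return sidedata, NO_FLAGS

  def reposetup(ui, repo):
      for kind in constants.ALL_KINDS:
          repo.register_sidedata_computer(
              kind,
              sidedatamod.SD_TEST1,
              (sidedatamod.SD_TEST1,),
              compute_payload_size,
              0,
          )
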
--- a/tests/testlib/ext-sidedata-3.py Sun May 02 16:56:20 2021 -0400
+++ b/tests/testlib/ext-sidedata-3.py Fri May 07 22:06:25 2021 -0400
@@ -20,6 +20,9 @@
)
from mercurial.revlogutils import sidedata as sidedatamod
+from mercurial.revlogutils import constants
+
+NO_FLAGS = (0, 0)
def compute_sidedata_1(repo, revlog, rev, sidedata, text=None):
@@ -27,7 +30,7 @@
if text is None:
text = revlog.revision(rev)
sidedata[sidedatamod.SD_TEST1] = struct.pack('>I', len(text))
- return sidedata
+ return sidedata, NO_FLAGS
def compute_sidedata_2(repo, revlog, rev, sidedata, text=None):
@@ -36,7 +39,7 @@
text = revlog.revision(rev)
sha256 = hashlib.sha256(text).digest()
sidedata[sidedatamod.SD_TEST2] = struct.pack('>32s', sha256)
- return sidedata
+ return sidedata, NO_FLAGS
def compute_sidedata_3(repo, revlog, rev, sidedata, text=None):
@@ -45,7 +48,7 @@
text = revlog.revision(rev)
sha384 = hashlib.sha384(text).digest()
sidedata[sidedatamod.SD_TEST3] = struct.pack('>48s', sha384)
- return sidedata
+ return sidedata, NO_FLAGS
def wrapaddrevision(
@@ -54,8 +57,8 @@
if kwargs.get('sidedata') is None:
kwargs['sidedata'] = {}
sd = kwargs['sidedata']
- sd = compute_sidedata_1(None, self, None, sd, text=text)
- kwargs['sidedata'] = compute_sidedata_2(None, self, None, sd, text=text)
+ sd, flags = compute_sidedata_1(None, self, None, sd, text=text)
+ kwargs['sidedata'] = compute_sidedata_2(None, self, None, sd, text=text)[0]
return orig(self, text, transaction, link, p1, p2, *args, **kwargs)
@@ -65,24 +68,27 @@
def reposetup(ui, repo):
# Sidedata keys happen to be the same as the categories, easier for testing.
- for kind in (b'changelog', b'manifest', b'filelog'):
+ for kind in constants.ALL_KINDS:
repo.register_sidedata_computer(
kind,
sidedatamod.SD_TEST1,
(sidedatamod.SD_TEST1,),
compute_sidedata_1,
+ 0,
)
repo.register_sidedata_computer(
kind,
sidedatamod.SD_TEST2,
(sidedatamod.SD_TEST2,),
compute_sidedata_2,
+ 0,
)
repo.register_sidedata_computer(
kind,
sidedatamod.SD_TEST3,
(sidedatamod.SD_TEST3,),
compute_sidedata_3,
+ 0,
)
repo.register_wanted_sidedata(sidedatamod.SD_TEST1)
repo.register_wanted_sidedata(sidedatamod.SD_TEST2)
--- a/tests/testlib/ext-sidedata-5.py Sun May 02 16:56:20 2021 -0400
+++ b/tests/testlib/ext-sidedata-5.py Fri May 07 22:06:25 2021 -0400
@@ -21,6 +21,9 @@
from mercurial.revlogutils import sidedata as sidedatamod
+from mercurial.revlogutils import constants
+
+NO_FLAGS = (0, 0)
def compute_sidedata_1(repo, revlog, rev, sidedata, text=None):
@@ -28,7 +31,7 @@
if text is None:
text = revlog.revision(rev)
sidedata[sidedatamod.SD_TEST1] = struct.pack('>I', len(text))
- return sidedata
+ return sidedata, NO_FLAGS
def compute_sidedata_2(repo, revlog, rev, sidedata, text=None):
@@ -37,23 +40,25 @@
text = revlog.revision(rev)
sha256 = hashlib.sha256(text).digest()
sidedata[sidedatamod.SD_TEST2] = struct.pack('>32s', sha256)
- return sidedata
+ return sidedata, NO_FLAGS
def reposetup(ui, repo):
# Sidedata keys happen to be the same as the categories, easier for testing.
- for kind in (b'changelog', b'manifest', b'filelog'):
+ for kind in constants.ALL_KINDS:
repo.register_sidedata_computer(
kind,
sidedatamod.SD_TEST1,
(sidedatamod.SD_TEST1,),
compute_sidedata_1,
+ 0,
)
repo.register_sidedata_computer(
kind,
sidedatamod.SD_TEST2,
(sidedatamod.SD_TEST2,),
compute_sidedata_2,
+ 0,
)
# We don't register sidedata computers because we don't care within these
--- a/tests/testlib/ext-sidedata.py Sun May 02 16:56:20 2021 -0400
+++ b/tests/testlib/ext-sidedata.py Fri May 07 22:06:25 2021 -0400
@@ -10,10 +10,7 @@
import hashlib
import struct
-from mercurial.node import (
- nullid,
- nullrev,
-)
+from mercurial.node import nullrev
from mercurial import (
extensions,
requirements,
@@ -22,6 +19,7 @@
from mercurial.upgrade_utils import engine as upgrade_engine
+from mercurial.revlogutils import constants
from mercurial.revlogutils import sidedata
@@ -46,7 +44,7 @@
return text, sd
if self.version & 0xFFFF != 2:
return text, sd
- if nodeorrev != nullrev and nodeorrev != nullid:
+ if nodeorrev != nullrev and nodeorrev != self.nullid:
cat1 = sd.get(sidedata.SD_TEST1)
if cat1 is not None and len(text) != struct.unpack('>I', cat1)[0]:
raise RuntimeError('text size mismatch')
@@ -57,13 +55,15 @@
return text, sd
-def wrapgetsidedatacompanion(orig, srcrepo, dstrepo):
- sidedatacompanion = orig(srcrepo, dstrepo)
+def wrapget_sidedata_helpers(orig, srcrepo, dstrepo):
+ repo, computers, removers = orig(srcrepo, dstrepo)
+ assert not computers and not removers # deal with composition later
addedreqs = dstrepo.requirements - srcrepo.requirements
+
if requirements.SIDEDATA_REQUIREMENT in addedreqs:
- assert sidedatacompanion is None # deal with composition later
- def sidedatacompanion(revlog, rev):
+ def computer(repo, revlog, rev, old_sidedata):
+ assert not old_sidedata # not supported yet
update = {}
revlog.sidedatanocheck = True
try:
@@ -76,16 +76,25 @@
# and sha2 hashes
sha256 = hashlib.sha256(text).digest()
update[sidedata.SD_TEST2] = struct.pack('>32s', sha256)
- return False, (), update, 0, 0
+ return update, (0, 0)
- return sidedatacompanion
+ srcrepo.register_sidedata_computer(
+ constants.KIND_CHANGELOG,
+ b"whatever",
+ (sidedata.SD_TEST1, sidedata.SD_TEST2),
+ computer,
+ 0,
+ )
+ dstrepo.register_wanted_sidedata(b"whatever")
+
+ return sidedata.get_sidedata_helpers(srcrepo, dstrepo._wanted_sidedata)
def extsetup(ui):
extensions.wrapfunction(revlog.revlog, 'addrevision', wrapaddrevision)
extensions.wrapfunction(revlog.revlog, '_revisiondata', wrap_revisiondata)
extensions.wrapfunction(
- upgrade_engine, 'getsidedatacompanion', wrapgetsidedatacompanion
+ upgrade_engine, 'get_sidedata_helpers', wrapget_sidedata_helpers
)