--- a/contrib/chg/chg.c Tue Jun 23 16:07:18 2020 +0200
+++ b/contrib/chg/chg.c Thu Jun 25 10:32:51 2020 -0700
@@ -232,7 +232,7 @@
abortmsgerrno("failed to putenv CHG_CLEAR_LC_CTYPE");
} else {
if (setenv("CHGORIG_LC_CTYPE", lc_ctype_env, 1) != 0) {
- abortmsgerrno("failed to setenv CHGORIG_LC_CTYYPE");
+ abortmsgerrno("failed to setenv CHGORIG_LC_CTYPE");
}
}
--- a/contrib/fuzz/Makefile Tue Jun 23 16:07:18 2020 +0200
+++ b/contrib/fuzz/Makefile Thu Jun 25 10:32:51 2020 -0700
@@ -11,6 +11,7 @@
LIB_FUZZING_ENGINE ?= standalone_fuzz_target_runner.o
PYTHON_CONFIG ?= $$OUT/sanpy/bin/python-config
+PYTHON_CONFIG_FLAGS ?= --ldflags
CXXFLAGS += -Wno-deprecated-register
@@ -67,7 +68,7 @@
-Wno-register -Wno-macro-redefined \
-I../../mercurial dirs.cc \
pyutil.o $(PARSERS_OBJS) \
- $(LIB_FUZZING_ENGINE) `$(PYTHON_CONFIG) --ldflags` \
+ $(LIB_FUZZING_ENGINE) `$(PYTHON_CONFIG) $(PYTHON_CONFIG_FLAGS)` \
-o $$OUT/dirs_fuzzer
fncache_fuzzer: fncache.cc
@@ -75,7 +76,7 @@
-Wno-register -Wno-macro-redefined \
-I../../mercurial fncache.cc \
pyutil.o $(PARSERS_OBJS) \
- $(LIB_FUZZING_ENGINE) `$(PYTHON_CONFIG) --ldflags` \
+ $(LIB_FUZZING_ENGINE) `$(PYTHON_CONFIG) $(PYTHON_CONFIG_FLAGS)` \
-o $$OUT/fncache_fuzzer
jsonescapeu8fast_fuzzer: jsonescapeu8fast.cc pyutil.o $(PARSERS_OBJS)
@@ -83,7 +84,7 @@
-Wno-register -Wno-macro-redefined \
-I../../mercurial jsonescapeu8fast.cc \
pyutil.o $(PARSERS_OBJS) \
- $(LIB_FUZZING_ENGINE) `$(PYTHON_CONFIG) --ldflags` \
+ $(LIB_FUZZING_ENGINE) `$(PYTHON_CONFIG) $(PYTHON_CONFIG_FLAGS)` \
-o $$OUT/jsonescapeu8fast_fuzzer
manifest_fuzzer: manifest.cc pyutil.o $(PARSERS_OBJS) $$OUT/manifest_fuzzer_seed_corpus.zip
@@ -91,7 +92,7 @@
-Wno-register -Wno-macro-redefined \
-I../../mercurial manifest.cc \
pyutil.o $(PARSERS_OBJS) \
- $(LIB_FUZZING_ENGINE) `$(PYTHON_CONFIG) --ldflags` \
+ $(LIB_FUZZING_ENGINE) `$(PYTHON_CONFIG) $(PYTHON_CONFIG_FLAGS)` \
-o $$OUT/manifest_fuzzer
revlog_fuzzer: revlog.cc pyutil.o $(PARSERS_OBJS) $$OUT/revlog_fuzzer_seed_corpus.zip
@@ -99,7 +100,7 @@
-Wno-register -Wno-macro-redefined \
-I../../mercurial revlog.cc \
pyutil.o $(PARSERS_OBJS) \
- $(LIB_FUZZING_ENGINE) `$(PYTHON_CONFIG) --ldflags` \
+ $(LIB_FUZZING_ENGINE) `$(PYTHON_CONFIG) $(PYTHON_CONFIG_FLAGS)` \
-o $$OUT/revlog_fuzzer
dirstate_fuzzer: dirstate.cc pyutil.o $(PARSERS_OBJS) $$OUT/dirstate_fuzzer_seed_corpus.zip
@@ -107,7 +108,7 @@
-Wno-register -Wno-macro-redefined \
-I../../mercurial dirstate.cc \
pyutil.o $(PARSERS_OBJS) \
- $(LIB_FUZZING_ENGINE) `$(PYTHON_CONFIG) --ldflags` \
+ $(LIB_FUZZING_ENGINE) `$(PYTHON_CONFIG) $(PYTHON_CONFIG_FLAGS)` \
-o $$OUT/dirstate_fuzzer
fm1readmarkers_fuzzer: fm1readmarkers.cc pyutil.o $(PARSERS_OBJS) $$OUT/fm1readmarkers_fuzzer_seed_corpus.zip
@@ -115,7 +116,7 @@
-Wno-register -Wno-macro-redefined \
-I../../mercurial fm1readmarkers.cc \
pyutil.o $(PARSERS_OBJS) \
- $(LIB_FUZZING_ENGINE) `$(PYTHON_CONFIG) --ldflags` \
+ $(LIB_FUZZING_ENGINE) `$(PYTHON_CONFIG) $(PYTHON_CONFIG_FLAGS)` \
-o $$OUT/fm1readmarkers_fuzzer
clean:
--- a/contrib/fuzz/manifest.cc Tue Jun 23 16:07:18 2020 +0200
+++ b/contrib/fuzz/manifest.cc Thu Jun 25 10:32:51 2020 -0700
@@ -3,6 +3,7 @@
#include <stdlib.h>
#include <unistd.h>
+#include "FuzzedDataProvider.h"
#include "pyutil.h"
#include <string>
@@ -24,7 +25,7 @@
lm[e]
e in lm
(e + 'nope') in lm
- lm[b'xyzzy'] = (b'\0' * 20, 'x')
+ lm[b'xyzzy'] = (b'\0' * nlen, 'x')
# do an insert, text should change
assert lm.text() != mdata, "insert should change text and didn't: %r %r" % (lm.text(), mdata)
cloned = lm.filtercopy(lambda x: x != 'xyzzy')
@@ -51,10 +52,14 @@
if (Size > 100000) {
return 0;
}
+ FuzzedDataProvider provider(Data, Size);
+ Py_ssize_t nodelength = provider.ConsumeBool() ? 20 : 32;
+ PyObject *nlen = PyLong_FromSsize_t(nodelength);
PyObject *mtext =
PyBytes_FromStringAndSize((const char *)Data, (Py_ssize_t)Size);
PyObject *locals = PyDict_New();
PyDict_SetItemString(locals, "mdata", mtext);
+ PyDict_SetItemString(locals, "nlen", nlen);
PyObject *res = PyEval_EvalCode(code, contrib::pyglobals(), locals);
if (!res) {
PyErr_Print();
--- a/contrib/fuzz/manifest_corpus.py Tue Jun 23 16:07:18 2020 +0200
+++ b/contrib/fuzz/manifest_corpus.py Thu Jun 25 10:32:51 2020 -0700
@@ -10,7 +10,7 @@
with zipfile.ZipFile(args.out[0], "w", zipfile.ZIP_STORED) as zf:
zf.writestr(
"manifest_zero",
- '''PKG-INFO\09b3ed8f2b81095a13064402e930565f083346e9a
+ '''\0PKG-INFO\09b3ed8f2b81095a13064402e930565f083346e9a
README\080b6e76643dcb44d4bc729e932fc464b3e36dbe3
hg\0b6444347c629cc058d478023905cfb83b7f5bb9d
mercurial/__init__.py\0b80de5d138758541c5f05265ad144ab9fa86d1db
@@ -25,9 +25,14 @@
tkmerge\03c922edb43a9c143682f7bc7b00f98b3c756ebe7
''',
)
- zf.writestr("badmanifest_shorthashes", "narf\0aa\nnarf2\0aaa\n")
+ zf.writestr("badmanifest_shorthashes", "\0narf\0aa\nnarf2\0aaa\n")
zf.writestr(
"badmanifest_nonull",
- "narf\0cccccccccccccccccccccccccccccccccccccccc\n"
+ "\0narf\0cccccccccccccccccccccccccccccccccccccccc\n"
"narf2aaaaaaaaaaaaaaaaaaaa\n",
)
+
+ zf.writestr(
+ "manifest_long_nodes",
+ "\1a\0ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff\n",
+ )
--- a/contrib/fuzz/pyutil.cc Tue Jun 23 16:07:18 2020 +0200
+++ b/contrib/fuzz/pyutil.cc Thu Jun 25 10:32:51 2020 -0700
@@ -21,7 +21,7 @@
void initpy(const char *cselfpath)
{
#ifdef HG_FUZZER_PY3
- const std::string subdir = "/sanpy/lib/python3.7";
+ const std::string subdir = "/sanpy/lib/python3.8";
#else
const std::string subdir = "/sanpy/lib/python2.7";
#endif
--- a/contrib/heptapod-ci.yml Tue Jun 23 16:07:18 2020 +0200
+++ b/contrib/heptapod-ci.yml Thu Jun 25 10:32:51 2020 -0700
@@ -79,3 +79,9 @@
RUNTEST_ARGS: "--rust --blacklist /tmp/check-tests.txt"
PYTHON: python3
TEST_HGMODULEPOLICY: "rust+c"
+
+test-py2-chg:
+ <<: *runtests
+ variables:
+ RUNTEST_ARGS: "--blacklist /tmp/check-tests.txt --chg"
+ TEST_HGMODULEPOLICY: "c"
--- a/contrib/perf.py Tue Jun 23 16:07:18 2020 +0200
+++ b/contrib/perf.py Thu Jun 25 10:32:51 2020 -0700
@@ -3794,19 +3794,47 @@
fm.end()
-@command(b'perfwrite', formatteropts)
+@command(
+ b'perfwrite',
+ formatteropts
+ + [
+ (b'', b'write-method', b'write', b'ui write method'),
+ (b'', b'nlines', 100, b'number of lines'),
+ (b'', b'nitems', 100, b'number of items (per line)'),
+ (b'', b'item', b'x', b'item that is written'),
+ (b'', b'batch-line', None, b'pass whole line to write method at once'),
+ (b'', b'flush-line', None, b'flush after each line'),
+ ],
+)
def perfwrite(ui, repo, **opts):
- """microbenchmark ui.write
+ """microbenchmark ui.write (and others)
"""
opts = _byteskwargs(opts)
+ write = getattr(ui, _sysstr(opts[b'write_method']))
+ nlines = int(opts[b'nlines'])
+ nitems = int(opts[b'nitems'])
+ item = opts[b'item']
+ batch_line = opts.get(b'batch_line')
+ flush_line = opts.get(b'flush_line')
+
+ if batch_line:
+ line = item * nitems + b'\n'
+
+ def benchmark():
+ for i in pycompat.xrange(nlines):
+ if batch_line:
+ write(line)
+ else:
+ for i in pycompat.xrange(nitems):
+ write(item)
+ write(b'\n')
+ if flush_line:
+ ui.flush()
+ ui.flush()
+
timer, fm = gettimer(ui, opts)
-
- def write():
- for i in range(100000):
- ui.writenoi18n(b'Testing write performance\n')
-
- timer(write)
+ timer(benchmark)
fm.end()
--- a/hgext/absorb.py Tue Jun 23 16:07:18 2020 +0200
+++ b/hgext/absorb.py Thu Jun 25 10:32:51 2020 -0700
@@ -782,7 +782,9 @@
# nothing changed, nothing commited
nextp1 = ctx
continue
- if self._willbecomenoop(memworkingcopy, ctx, nextp1):
+ if ctx.files() and self._willbecomenoop(
+ memworkingcopy, ctx, nextp1
+ ):
# changeset is no longer necessary
self.replacemap[ctx.node()] = None
msg = _(b'became empty and was dropped')
@@ -887,6 +889,10 @@
if len(parents) != 1:
return False
pctx = parents[0]
+ if ctx.branch() != pctx.branch():
+ return False
+ if ctx.extra().get(b'close'):
+ return False
# ctx changes more files (not a subset of memworkingcopy)
if not set(ctx.files()).issubset(set(memworkingcopy)):
return False
@@ -1045,7 +1051,7 @@
not opts.get(b'apply_changes')
and state.ctxaffected
and ui.promptchoice(
- b"apply changes (yn)? $$ &Yes $$ &No", default=1
+ b"apply changes (y/N)? $$ &Yes $$ &No", default=1
)
):
raise error.Abort(_(b'absorb cancelled\n'))
--- a/hgext/convert/cvs.py Tue Jun 23 16:07:18 2020 +0200
+++ b/hgext/convert/cvs.py Thu Jun 25 10:32:51 2020 -0700
@@ -226,8 +226,7 @@
cmd = [rsh, host] + cmd
# popen2 does not support argument lists under Windows
- cmd = [procutil.shellquote(arg) for arg in cmd]
- cmd = procutil.quotecommand(b' '.join(cmd))
+ cmd = b' '.join(procutil.shellquote(arg) for arg in cmd)
self.writep, self.readp = procutil.popen2(cmd)
self.realroot = root
--- a/hgext/convert/gnuarch.py Tue Jun 23 16:07:18 2020 +0200
+++ b/hgext/convert/gnuarch.py Thu Jun 25 10:32:51 2020 -0700
@@ -217,7 +217,7 @@
cmdline = [procutil.shellquote(arg) for arg in cmdline]
bdevnull = pycompat.bytestr(os.devnull)
cmdline += [b'>', bdevnull, b'2>', bdevnull]
- cmdline = procutil.quotecommand(b' '.join(cmdline))
+ cmdline = b' '.join(cmdline)
self.ui.debug(cmdline, b'\n')
return os.system(pycompat.rapply(procutil.tonativestr, cmdline))
--- a/hgext/convert/subversion.py Tue Jun 23 16:07:18 2020 +0200
+++ b/hgext/convert/subversion.py Thu Jun 25 10:32:51 2020 -0700
@@ -1259,7 +1259,7 @@
arg = encodeargs(args)
hgexe = procutil.hgexecutable()
cmd = b'%s debugsvnlog' % procutil.shellquote(hgexe)
- stdin, stdout = procutil.popen2(procutil.quotecommand(cmd))
+ stdin, stdout = procutil.popen2(cmd)
stdin.write(arg)
try:
stdin.close()
--- a/hgext/extdiff.py Tue Jun 23 16:07:18 2020 +0200
+++ b/hgext/extdiff.py Thu Jun 25 10:32:51 2020 -0700
@@ -233,7 +233,6 @@
''' like 'procutil.system', but returns the Popen object directly
so we don't have to wait on it.
'''
- cmd = procutil.quotecommand(cmd)
env = procutil.shellenviron(environ)
proc = subprocess.Popen(
procutil.tonativestr(cmd),
@@ -360,14 +359,12 @@
- just invoke the diff for a single file in the working dir
'''
+ cmdutil.check_at_most_one_arg(opts, b'rev', b'change')
revs = opts.get(b'rev')
change = opts.get(b'change')
do3way = b'$parent2' in cmdline
- if revs and change:
- msg = _(b'cannot specify --rev and --change at the same time')
- raise error.Abort(msg)
- elif change:
+ if change:
ctx2 = scmutil.revsingle(repo, change, None)
ctx1a, ctx1b = ctx2.p1(), ctx2.p2()
else:
--- a/hgext/fix.py Tue Jun 23 16:07:18 2020 +0200
+++ b/hgext/fix.py Thu Jun 25 10:32:51 2020 -0700
@@ -144,6 +144,7 @@
match as matchmod,
mdiff,
merge,
+ mergestate as mergestatemod,
pycompat,
registrar,
rewriteutil,
@@ -426,7 +427,9 @@
if not (len(revs) == 1 and wdirrev in revs):
cmdutil.checkunfinished(repo)
rewriteutil.precheck(repo, revs, b'fix')
- if wdirrev in revs and list(merge.mergestate.read(repo).unresolved()):
+ if wdirrev in revs and list(
+ mergestatemod.mergestate.read(repo).unresolved()
+ ):
raise error.Abort(b'unresolved conflicts', hint=b"use 'hg resolve'")
if not revs:
raise error.Abort(
--- a/hgext/git/__init__.py Tue Jun 23 16:07:18 2020 +0200
+++ b/hgext/git/__init__.py Thu Jun 25 10:32:51 2020 -0700
@@ -16,6 +16,7 @@
extensions,
localrepo,
pycompat,
+ registrar,
scmutil,
store,
util,
@@ -28,6 +29,13 @@
index,
)
+configtable = {}
+configitem = registrar.configitem(configtable)
+# git.log-index-cache-miss: internal knob for testing
+configitem(
+ b"git", b"log-index-cache-miss", default=False,
+)
+
# TODO: extract an interface for this in core
class gitstore(object): # store.basicstore):
def __init__(self, path, vfstype):
@@ -41,13 +49,14 @@
os.path.normpath(os.path.join(path, b'..', b'.git'))
)
self._progress_factory = lambda *args, **kwargs: None
+ self._logfn = lambda x: None
@util.propertycache
def _db(self):
# We lazy-create the database because we want to thread a
# progress callback down to the indexing process if it's
# required, and we don't have a ui handle in makestore().
- return index.get_index(self.git, self._progress_factory)
+ return index.get_index(self.git, self._logfn, self._progress_factory)
def join(self, f):
"""Fake store.join method for git repositories.
@@ -276,6 +285,8 @@
if repo.local() and isinstance(repo.store, gitstore):
orig = repo.__class__
repo.store._progress_factory = repo.ui.makeprogress
+ if ui.configbool(b'git', b'log-index-cache-miss'):
+ repo.store._logfn = repo.ui.warn
class gitlocalrepo(orig):
def _makedirstate(self):
--- a/hgext/git/dirstate.py Tue Jun 23 16:07:18 2020 +0200
+++ b/hgext/git/dirstate.py Thu Jun 25 10:32:51 2020 -0700
@@ -288,6 +288,10 @@
# TODO: track copies?
return None
+ def prefetch_parents(self):
+ # TODO
+ pass
+
@contextlib.contextmanager
def parentchange(self):
# TODO: track this maybe?
--- a/hgext/git/gitlog.py Tue Jun 23 16:07:18 2020 +0200
+++ b/hgext/git/gitlog.py Thu Jun 25 10:32:51 2020 -0700
@@ -247,6 +247,60 @@
def descendants(self, revs):
return dagop.descendantrevs(revs, self.revs, self.parentrevs)
+ def incrementalmissingrevs(self, common=None):
+ """Return an object that can be used to incrementally compute the
+ revision numbers of the ancestors of arbitrary sets that are not
+ ancestors of common. This is an ancestor.incrementalmissingancestors
+ object.
+
+ 'common' is a list of revision numbers. If common is not supplied, uses
+ nullrev.
+ """
+ if common is None:
+ common = [nodemod.nullrev]
+
+ return ancestor.incrementalmissingancestors(self.parentrevs, common)
+
+ def findmissing(self, common=None, heads=None):
+ """Return the ancestors of heads that are not ancestors of common.
+
+ More specifically, return a list of nodes N such that every N
+ satisfies the following constraints:
+
+ 1. N is an ancestor of some node in 'heads'
+ 2. N is not an ancestor of any node in 'common'
+
+ The list is sorted by revision number, meaning it is
+ topologically sorted.
+
+ 'heads' and 'common' are both lists of node IDs. If heads is
+ not supplied, uses all of the revlog's heads. If common is not
+ supplied, uses nullid."""
+ if common is None:
+ common = [nodemod.nullid]
+ if heads is None:
+ heads = self.heads()
+
+ common = [self.rev(n) for n in common]
+ heads = [self.rev(n) for n in heads]
+
+ inc = self.incrementalmissingrevs(common=common)
+ return [self.node(r) for r in inc.missingancestors(heads)]
+
+ def children(self, node):
+ """find the children of a given node"""
+ c = []
+ p = self.rev(node)
+ for r in self.revs(start=p + 1):
+ prevs = [pr for pr in self.parentrevs(r) if pr != nodemod.nullrev]
+ if prevs:
+ for pr in prevs:
+ if pr == p:
+ c.append(self.node(r))
+ elif p == nodemod.nullrev:
+ c.append(self.node(r))
+ return c
+
def reachableroots(self, minroot, heads, roots, includepath=False):
return dagop._reachablerootspure(
self.parentrevs, minroot, roots, heads, includepath
@@ -270,7 +324,10 @@
def parentrevs(self, rev):
n = self.node(rev)
hn = gitutil.togitnode(n)
- c = self.gitrepo[hn]
+ if hn != gitutil.nullgit:
+ c = self.gitrepo[hn]
+ else:
+ return nodemod.nullrev, nodemod.nullrev
p1 = p2 = nodemod.nullrev
if c.parents:
p1 = self.rev(c.parents[0].id.raw)
@@ -342,7 +399,7 @@
'refs/hg/internal/latest-commit', oid, force=True
)
# Reindex now to pick up changes. We omit the progress
- # callback because this will be very quick.
+ # and log callbacks because this will be very quick.
index._index_repo(self.gitrepo, self._db)
return oid.raw
--- a/hgext/git/index.py Tue Jun 23 16:07:18 2020 +0200
+++ b/hgext/git/index.py Thu Jun 25 10:32:51 2020 -0700
@@ -216,7 +216,12 @@
db.commit()
-def _index_repo(gitrepo, db, progress_factory=lambda *args, **kwargs: None):
+def _index_repo(
+ gitrepo,
+ db,
+ logfn=lambda x: None,
+ progress_factory=lambda *args, **kwargs: None,
+):
# Identify all references so we can tell the walker to visit all of them.
all_refs = gitrepo.listall_references()
possible_heads = set()
@@ -245,11 +250,15 @@
# TODO: we should figure out how to incrementally index history
# (preferably by detecting rewinds!) so that we don't have to do a
# full changelog walk every time a new commit is created.
- cache_heads = {x[0] for x in db.execute('SELECT node FROM possible_heads')}
+ cache_heads = {
+ pycompat.sysstr(x[0])
+ for x in db.execute('SELECT node FROM possible_heads')
+ }
walker = None
cur_cache_heads = {h.hex for h in possible_heads}
if cur_cache_heads == cache_heads:
return
+ logfn(b'heads mismatch, rebuilding dagcache\n')
for start in possible_heads:
if walker is None:
walker = gitrepo.walk(start, _OUR_ORDER)
@@ -336,7 +345,9 @@
prog.complete()
-def get_index(gitrepo, progress_factory=lambda *args, **kwargs: None):
+def get_index(
+ gitrepo, logfn=lambda x: None, progress_factory=lambda *args, **kwargs: None
+):
cachepath = os.path.join(
pycompat.fsencode(gitrepo.path), b'..', b'.hg', b'cache'
)
@@ -346,5 +357,5 @@
db = _createdb(dbpath)
# TODO check against gitrepo heads before doing a full index
# TODO thread a ui.progress call into this layer
- _index_repo(gitrepo, db, progress_factory)
+ _index_repo(gitrepo, db, logfn, progress_factory)
return db
--- a/hgext/git/manifest.py Tue Jun 23 16:07:18 2020 +0200
+++ b/hgext/git/manifest.py Thu Jun 25 10:32:51 2020 -0700
@@ -56,8 +56,9 @@
return val
t = self._tree
comps = upath.split('/')
+ te = self._tree
for comp in comps[:-1]:
- te = self._tree[comp]
+ te = te[comp]
t = self._git_repo[te.id]
ent = t[comps[-1]]
if ent.filemode == pygit2.GIT_FILEMODE_BLOB:
@@ -125,9 +126,79 @@
def hasdir(self, dir):
return dir in self._dirs
- def diff(self, other, match=None, clean=False):
- # TODO
- assert False
+ def diff(self, other, match=lambda x: True, clean=False):
+ '''Finds changes between the current manifest and m2.
+
+ The result is returned as a dict with filename as key and
+ values of the form ((n1,fl1),(n2,fl2)), where n1/n2 is the
+ nodeid in the current/other manifest and fl1/fl2 is the flag
+ in the current/other manifest. Where the file does not exist,
+ the nodeid will be None and the flags will be the empty
+ string.
+ '''
+ result = {}
+
+ def _iterativediff(t1, t2, subdir):
+ """compares two trees and appends new tree nodes to examine to
+ the stack"""
+ if t1 is None:
+ t1 = {}
+ if t2 is None:
+ t2 = {}
+
+ for e1 in t1:
+ realname = subdir + pycompat.fsencode(e1.name)
+
+ if e1.type == pygit2.GIT_OBJ_TREE:
+ try:
+ e2 = t2[e1.name]
+ if e2.type != pygit2.GIT_OBJ_TREE:
+ e2 = None
+ except KeyError:
+ e2 = None
+
+ stack.append((realname + b'/', e1, e2))
+ else:
+ n1, fl1 = self.find(realname)
+
+ try:
+ e2 = t2[e1.name]
+ n2, fl2 = other.find(realname)
+ except KeyError:
+ e2 = None
+ n2, fl2 = (None, b'')
+
+ if e2 is not None and e2.type == pygit2.GIT_OBJ_TREE:
+ stack.append((realname + b'/', None, e2))
+
+ if not match(realname):
+ continue
+
+ if n1 != n2 or fl1 != fl2:
+ result[realname] = ((n1, fl1), (n2, fl2))
+ elif clean:
+ result[realname] = None
+
+ for e2 in t2:
+ if e2.name in t1:
+ continue
+
+ realname = subdir + pycompat.fsencode(e2.name)
+
+ if e2.type == pygit2.GIT_OBJ_TREE:
+ stack.append((realname + b'/', None, e2))
+ elif match(realname):
+ n2, fl2 = other.find(realname)
+ result[realname] = ((None, b''), (n2, fl2))
+
+ stack = []
+ _iterativediff(self._tree, other._tree, b'')
+ while stack:
+ subdir, t1, t2 = stack.pop()
+ # stack is populated in the function call
+ _iterativediff(t1, t2, subdir)
+
+ return result
def setflag(self, path, flag):
node, unused_flag = self._resolve_entry(path)
@@ -168,14 +239,13 @@
for te in tree:
# TODO: can we prune dir walks with the matcher?
realname = subdir + pycompat.fsencode(te.name)
- if te.type == r'tree':
+ if te.type == pygit2.GIT_OBJ_TREE:
for inner in self._walkonetree(
self._git_repo[te.id], match, realname + b'/'
):
yield inner
- if not match(realname):
- continue
- yield pycompat.fsencode(realname)
+ elif match(realname):
+ yield pycompat.fsencode(realname)
def walk(self, match):
# TODO: this is a very lazy way to merge in the pending
@@ -205,7 +275,7 @@
return memgittreemanifestctx(self._repo, self._tree)
def find(self, path):
- self.read()[path]
+ return self.read()[path]
@interfaceutil.implementer(repository.imanifestrevisionwritable)
--- a/hgext/githelp.py Tue Jun 23 16:07:18 2020 +0200
+++ b/hgext/githelp.py Thu Jun 25 10:32:51 2020 -0700
@@ -628,8 +628,17 @@
(b'', b'stat', None, b''),
(b'', b'graph', None, b''),
(b'p', b'patch', None, b''),
+ (b'G', b'grep-diff', b'', b''),
+ (b'S', b'pickaxe-regex', b'', b''),
]
args, opts = parseoptions(ui, cmdoptions, args)
+ grep_pat = opts.get(b'grep_diff') or opts.get(b'pickaxe_regex')
+ if grep_pat:
+ cmd = Command(b'grep')
+ cmd[b'--diff'] = grep_pat
+ ui.status(b'%s\n' % bytes(cmd))
+ return
+
ui.status(
_(
b'note: -v prints the entire commit message like Git does. To '
--- a/hgext/histedit.py Tue Jun 23 16:07:18 2020 +0200
+++ b/hgext/histedit.py Thu Jun 25 10:32:51 2020 -0700
@@ -224,6 +224,7 @@
hg,
logcmdutil,
merge as mergemod,
+ mergestate as mergestatemod,
mergeutil,
node,
obsolete,
@@ -2289,7 +2290,7 @@
def bootstrapcontinue(ui, state, opts):
repo = state.repo
- ms = mergemod.mergestate.read(repo)
+ ms = mergestatemod.mergestate.read(repo)
mergeutil.checkunresolved(ms)
if state.actions:
--- a/hgext/largefiles/overrides.py Tue Jun 23 16:07:18 2020 +0200
+++ b/hgext/largefiles/overrides.py Thu Jun 25 10:32:51 2020 -0700
@@ -31,6 +31,7 @@
logcmdutil,
match as matchmod,
merge,
+ mergestate as mergestatemod,
pathutil,
pycompat,
scmutil,
@@ -622,7 +623,7 @@
return actions, diverge, renamedelete
-@eh.wrapfunction(merge, b'recordupdates')
+@eh.wrapfunction(mergestatemod, b'recordupdates')
def mergerecordupdates(orig, repo, actions, branchmerge, getfiledata):
if b'lfmr' in actions:
lfdirstate = lfutil.openlfdirstate(repo.ui, repo)
--- a/hgext/mq.py Tue Jun 23 16:07:18 2020 +0200
+++ b/hgext/mq.py Thu Jun 25 10:32:51 2020 -0700
@@ -836,7 +836,15 @@
stat = opts.get(b'stat')
m = scmutil.match(repo[node1], files, opts)
logcmdutil.diffordiffstat(
- self.ui, repo, diffopts, node1, node2, m, changes, stat, fp
+ self.ui,
+ repo,
+ diffopts,
+ repo[node1],
+ repo[node2],
+ m,
+ changes,
+ stat,
+ fp,
)
def mergeone(self, repo, mergeq, head, patch, rev, diffopts):
--- a/hgext/phabricator.py Tue Jun 23 16:07:18 2020 +0200
+++ b/hgext/phabricator.py Thu Jun 25 10:32:51 2020 -0700
@@ -238,8 +238,8 @@
def decorate(fn):
def inner(*args, **kwargs):
- cassette = pycompat.fsdecode(kwargs.pop('test_vcr', None))
- if cassette:
+ if kwargs.get('test_vcr'):
+ cassette = pycompat.fsdecode(kwargs.pop('test_vcr'))
import hgdemandimport
with hgdemandimport.deactivated():
@@ -1650,7 +1650,7 @@
)
if ui.promptchoice(
- _(b'Send the above changes to %s (yn)?$$ &Yes $$ &No') % url
+ _(b'Send the above changes to %s (Y/n)?$$ &Yes $$ &No') % url
):
return False
--- a/hgext/rebase.py Tue Jun 23 16:07:18 2020 +0200
+++ b/hgext/rebase.py Thu Jun 25 10:32:51 2020 -0700
@@ -36,6 +36,7 @@
extensions,
hg,
merge as mergemod,
+ mergestate as mergestatemod,
mergeutil,
node as nodemod,
obsolete,
@@ -544,7 +545,7 @@
user=ctx.user(),
date=date,
)
- mergemod.mergestate.clean(repo)
+ mergestatemod.mergestate.clean(repo)
else:
newnode = commitnode(
repo,
@@ -1084,7 +1085,7 @@
)
# TODO: Make in-memory merge not use the on-disk merge state, so
# we don't have to clean it here
- mergemod.mergestate.clean(repo)
+ mergestatemod.mergestate.clean(repo)
clearstatus(repo)
clearcollapsemsg(repo)
return _dorebase(ui, repo, action, opts, inmemory=False)
@@ -1191,7 +1192,7 @@
if action == b'abort' and opts.get(b'tool', False):
ui.warn(_(b'tool option will be ignored\n'))
if action == b'continue':
- ms = mergemod.mergestate.read(repo)
+ ms = mergestatemod.mergestate.read(repo)
mergeutil.checkunresolved(ms)
retcode = rbsrt._prepareabortorcontinue(
@@ -2201,7 +2202,7 @@
def continuerebase(ui, repo):
with repo.wlock(), repo.lock():
rbsrt = rebaseruntime(repo, ui)
- ms = mergemod.mergestate.read(repo)
+ ms = mergestatemod.mergestate.read(repo)
mergeutil.checkunresolved(ms)
retcode = rbsrt._prepareabortorcontinue(isabort=False)
if retcode is not None:
--- a/hgext/strip.py Tue Jun 23 16:07:18 2020 +0200
+++ b/hgext/strip.py Thu Jun 25 10:32:51 2020 -0700
@@ -13,7 +13,7 @@
error,
hg,
lock as lockmod,
- merge,
+ mergestate as mergestatemod,
node as nodemod,
pycompat,
registrar,
@@ -269,7 +269,7 @@
repo.dirstate.write(repo.currenttransaction())
# clear resolve state
- merge.mergestate.clean(repo, repo[b'.'].node())
+ mergestatemod.mergestate.clean(repo, repo[b'.'].node())
update = False
--- a/mercurial/changelog.py Tue Jun 23 16:07:18 2020 +0200
+++ b/mercurial/changelog.py Thu Jun 25 10:32:51 2020 -0700
@@ -16,9 +16,9 @@
from .thirdparty import attr
from . import (
- copies,
encoding,
error,
+ metadata,
pycompat,
revlog,
)
@@ -318,7 +318,7 @@
rawindices = self.extra.get(b'filesadded')
if rawindices is None:
return None
- return copies.decodefileindices(self.files, rawindices)
+ return metadata.decodefileindices(self.files, rawindices)
@property
def filesremoved(self):
@@ -330,7 +330,7 @@
rawindices = self.extra.get(b'filesremoved')
if rawindices is None:
return None
- return copies.decodefileindices(self.files, rawindices)
+ return metadata.decodefileindices(self.files, rawindices)
@property
def p1copies(self):
@@ -342,7 +342,7 @@
rawcopies = self.extra.get(b'p1copies')
if rawcopies is None:
return None
- return copies.decodecopies(self.files, rawcopies)
+ return metadata.decodecopies(self.files, rawcopies)
@property
def p2copies(self):
@@ -354,7 +354,7 @@
rawcopies = self.extra.get(b'p2copies')
if rawcopies is None:
return None
- return copies.decodecopies(self.files, rawcopies)
+ return metadata.decodecopies(self.files, rawcopies)
@property
def description(self):
@@ -385,9 +385,7 @@
datafile=datafile,
checkambig=True,
mmaplargeindex=True,
- persistentnodemap=opener.options.get(
- b'exp-persistent-nodemap', False
- ),
+ persistentnodemap=opener.options.get(b'persistent-nodemap', False),
)
if self._initempty and (self.version & 0xFFFF == revlog.REVLOGV1):
@@ -572,13 +570,13 @@
):
extra.pop(name, None)
if p1copies is not None:
- p1copies = copies.encodecopies(sortedfiles, p1copies)
+ p1copies = metadata.encodecopies(sortedfiles, p1copies)
if p2copies is not None:
- p2copies = copies.encodecopies(sortedfiles, p2copies)
+ p2copies = metadata.encodecopies(sortedfiles, p2copies)
if filesadded is not None:
- filesadded = copies.encodefileindices(sortedfiles, filesadded)
+ filesadded = metadata.encodefileindices(sortedfiles, filesadded)
if filesremoved is not None:
- filesremoved = copies.encodefileindices(sortedfiles, filesremoved)
+ filesremoved = metadata.encodefileindices(sortedfiles, filesremoved)
if self._copiesstorage == b'extra':
extrasentries = p1copies, p2copies, filesadded, filesremoved
if extra is None and any(x is not None for x in extrasentries):
--- a/mercurial/chgserver.py Tue Jun 23 16:07:18 2020 +0200
+++ b/mercurial/chgserver.py Thu Jun 25 10:32:51 2020 -0700
@@ -320,7 +320,7 @@
self.channel = channel
def __call__(self, cmd, environ, cwd=None, type=b'system', cmdtable=None):
- args = [type, procutil.quotecommand(cmd), os.path.abspath(cwd or b'.')]
+ args = [type, cmd, os.path.abspath(cwd or b'.')]
args.extend(b'%s=%s' % (k, v) for k, v in pycompat.iteritems(environ))
data = b'\0'.join(args)
self.out.write(struct.pack(b'>cI', self.channel, len(data)))
--- a/mercurial/cmdutil.py Tue Jun 23 16:07:18 2020 +0200
+++ b/mercurial/cmdutil.py Thu Jun 25 10:32:51 2020 -0700
@@ -38,6 +38,7 @@
logcmdutil,
match as matchmod,
merge as mergemod,
+ mergestate as mergestatemod,
mergeutil,
obsolete,
patch,
@@ -890,7 +891,7 @@
def readmorestatus(repo):
"""Returns a morestatus object if the repo has unfinished state."""
statetuple = statemod.getrepostate(repo)
- mergestate = mergemod.mergestate.read(repo)
+ mergestate = mergestatemod.mergestate.read(repo)
activemerge = mergestate.active()
if not statetuple and not activemerge:
return None
@@ -2751,15 +2752,28 @@
ret = 1
needsfctx = ui.verbose or {b'size', b'flags'} & fm.datahint()
- for f in ctx.matches(m):
- fm.startitem()
- fm.context(ctx=ctx)
- if needsfctx:
- fc = ctx[f]
- fm.write(b'size flags', b'% 10d % 1s ', fc.size(), fc.flags())
- fm.data(path=f)
- fm.plain(fmt % uipathfn(f))
- ret = 0
+ if fm.isplain() and not needsfctx:
+ # Fast path. The speed-up comes from skipping the formatter, and batching
+ # calls to ui.write.
+ buf = []
+ for f in ctx.matches(m):
+ buf.append(fmt % uipathfn(f))
+ if len(buf) > 100:
+ ui.write(b''.join(buf))
+ del buf[:]
+ ret = 0
+ if buf:
+ ui.write(b''.join(buf))
+ else:
+ for f in ctx.matches(m):
+ fm.startitem()
+ fm.context(ctx=ctx)
+ if needsfctx:
+ fc = ctx[f]
+ fm.write(b'size flags', b'% 10d % 1s ', fc.size(), fc.flags())
+ fm.data(path=f)
+ fm.plain(fmt % uipathfn(f))
+ ret = 0
for subpath in sorted(ctx.substate):
submatch = matchmod.subdirmatcher(subpath, m)
@@ -3127,7 +3141,7 @@
if subs:
subrepoutil.writestate(repo, newsubstate)
- ms = mergemod.mergestate.read(repo)
+ ms = mergestatemod.mergestate.read(repo)
mergeutil.checkunresolved(ms)
filestoamend = {f for f in wctx.files() if matcher(f)}
--- a/mercurial/commands.py Tue Jun 23 16:07:18 2020 +0200
+++ b/mercurial/commands.py Thu Jun 25 10:32:51 2020 -0700
@@ -46,6 +46,7 @@
hg,
logcmdutil,
merge as mergemod,
+ mergestate as mergestatemod,
narrowspec,
obsolete,
obsutil,
@@ -2350,7 +2351,7 @@
Returns 0 on success, 1 if errors are encountered.
"""
opts = pycompat.byteskwargs(opts)
- with repo.wlock(False):
+ with repo.wlock():
return cmdutil.copy(ui, repo, pats, opts)
@@ -2475,26 +2476,27 @@
Returns 0 on success.
"""
+ cmdutil.check_at_most_one_arg(opts, 'rev', 'change')
opts = pycompat.byteskwargs(opts)
revs = opts.get(b'rev')
change = opts.get(b'change')
stat = opts.get(b'stat')
reverse = opts.get(b'reverse')
- if revs and change:
- msg = _(b'cannot specify --rev and --change at the same time')
- raise error.Abort(msg)
- elif change:
+ if change:
repo = scmutil.unhidehashlikerevs(repo, [change], b'nowarn')
ctx2 = scmutil.revsingle(repo, change, None)
ctx1 = ctx2.p1()
else:
repo = scmutil.unhidehashlikerevs(repo, revs, b'nowarn')
ctx1, ctx2 = scmutil.revpair(repo, revs)
- node1, node2 = ctx1.node(), ctx2.node()
if reverse:
- node1, node2 = node2, node1
+ ctxleft = ctx2
+ ctxright = ctx1
+ else:
+ ctxleft = ctx1
+ ctxright = ctx2
diffopts = patch.diffallopts(ui, opts)
m = scmutil.match(ctx2, pats, opts)
@@ -2504,8 +2506,8 @@
ui,
repo,
diffopts,
- node1,
- node2,
+ ctxleft,
+ ctxright,
m,
stat=stat,
listsubrepos=opts.get(b'subrepos'),
@@ -3431,8 +3433,11 @@
m = regexp.search(self.line, p)
if not m:
break
- yield m.span()
- p = m.end()
+ if m.end() == p:
+ p += 1
+ else:
+ yield m.span()
+ p = m.end()
matches = {}
copies = {}
@@ -3578,56 +3583,68 @@
getrenamed = scmutil.getrenamedfn(repo)
- def get_file_content(filename, filelog, filenode, context, revision):
- try:
- content = filelog.read(filenode)
- except error.WdirUnsupported:
- content = context[filename].data()
- except error.CensoredNodeError:
- content = None
- ui.warn(
- _(b'cannot search in censored file: %(filename)s:%(revnum)s\n')
- % {b'filename': filename, b'revnum': pycompat.bytestr(revision)}
- )
- return content
+ def readfile(ctx, fn):
+ rev = ctx.rev()
+ if rev is None:
+ fctx = ctx[fn]
+ try:
+ return fctx.data()
+ except IOError as e:
+ if e.errno != errno.ENOENT:
+ raise
+ else:
+ flog = getfile(fn)
+ fnode = ctx.filenode(fn)
+ try:
+ return flog.read(fnode)
+ except error.CensoredNodeError:
+ ui.warn(
+ _(
+ b'cannot search in censored file: %(filename)s:%(revnum)s\n'
+ )
+ % {b'filename': fn, b'revnum': pycompat.bytestr(rev),}
+ )
def prep(ctx, fns):
rev = ctx.rev()
pctx = ctx.p1()
- parent = pctx.rev()
matches.setdefault(rev, {})
- matches.setdefault(parent, {})
+ if diff:
+ parent = pctx.rev()
+ matches.setdefault(parent, {})
files = revfiles.setdefault(rev, [])
- for fn in fns:
- flog = getfile(fn)
- try:
- fnode = ctx.filenode(fn)
- except error.LookupError:
- continue
-
- copy = None
- if follow:
- copy = getrenamed(fn, rev)
- if copy:
- copies.setdefault(rev, {})[fn] = copy
- if fn in skip:
- skip.add(copy)
- if fn in skip:
- continue
- files.append(fn)
-
- if fn not in matches[rev]:
- content = get_file_content(fn, flog, fnode, ctx, rev)
- grepbody(fn, rev, content)
-
- pfn = copy or fn
- if pfn not in matches[parent]:
- try:
- pfnode = pctx.filenode(pfn)
- pcontent = get_file_content(pfn, flog, pfnode, pctx, parent)
- grepbody(pfn, parent, pcontent)
- except error.LookupError:
- pass
+ if rev is None:
+            # in `hg grep pattern`, 2/3 of the time is spent in
+ # pathauditor checks without this in mozilla-central
+ contextmanager = repo.wvfs.audit.cached
+ else:
+ contextmanager = util.nullcontextmanager
+ with contextmanager():
+ for fn in fns:
+ # fn might not exist in the revision (could be a file removed by
+ # the revision). We could check `fn not in ctx` even when rev is
+            # None, but it's less racy to protect against that in readfile.
+ if rev is not None and fn not in ctx:
+ continue
+
+ copy = None
+ if follow:
+ copy = getrenamed(fn, rev)
+ if copy:
+ copies.setdefault(rev, {})[fn] = copy
+ if fn in skip:
+ skip.add(copy)
+ if fn in skip:
+ continue
+ files.append(fn)
+
+ if fn not in matches[rev]:
+ grepbody(fn, rev, readfile(ctx, fn))
+
+ if diff:
+ pfn = copy or fn
+ if pfn not in matches[parent] and pfn in pctx:
+ grepbody(pfn, parent, readfile(pctx, pfn))
ui.pager(b'grep')
fm = ui.formatter(b'grep', opts)
@@ -5812,7 +5829,7 @@
Returns 0 on success, 1 if errors are encountered.
"""
opts = pycompat.byteskwargs(opts)
- with repo.wlock(False):
+ with repo.wlock():
return cmdutil.copy(ui, repo, pats, opts, rename=True)
@@ -5934,7 +5951,7 @@
if show:
ui.pager(b'resolve')
fm = ui.formatter(b'resolve', opts)
- ms = mergemod.mergestate.read(repo)
+ ms = mergestatemod.mergestate.read(repo)
wctx = repo[None]
m = scmutil.match(wctx, pats, opts)
@@ -5942,14 +5959,20 @@
# as 'P'. Resolved path conflicts show as 'R', the same as normal
# resolved conflicts.
mergestateinfo = {
- mergemod.MERGE_RECORD_UNRESOLVED: (b'resolve.unresolved', b'U'),
- mergemod.MERGE_RECORD_RESOLVED: (b'resolve.resolved', b'R'),
- mergemod.MERGE_RECORD_UNRESOLVED_PATH: (
+ mergestatemod.MERGE_RECORD_UNRESOLVED: (
+ b'resolve.unresolved',
+ b'U',
+ ),
+ mergestatemod.MERGE_RECORD_RESOLVED: (b'resolve.resolved', b'R'),
+ mergestatemod.MERGE_RECORD_UNRESOLVED_PATH: (
b'resolve.unresolved',
b'P',
),
- mergemod.MERGE_RECORD_RESOLVED_PATH: (b'resolve.resolved', b'R'),
- mergemod.MERGE_RECORD_DRIVER_RESOLVED: (
+ mergestatemod.MERGE_RECORD_RESOLVED_PATH: (
+ b'resolve.resolved',
+ b'R',
+ ),
+ mergestatemod.MERGE_RECORD_DRIVER_RESOLVED: (
b'resolve.driverresolved',
b'D',
),
@@ -5959,7 +5982,7 @@
if not m(f):
continue
- if ms[f] == mergemod.MERGE_RECORD_MERGED_OTHER:
+ if ms[f] == mergestatemod.MERGE_RECORD_MERGED_OTHER:
continue
label, key = mergestateinfo[ms[f]]
fm.startitem()
@@ -5971,7 +5994,7 @@
return 0
with repo.wlock():
- ms = mergemod.mergestate.read(repo)
+ ms = mergestatemod.mergestate.read(repo)
if not (ms.active() or repo.dirstate.p2() != nullid):
raise error.Abort(
@@ -5982,7 +6005,7 @@
if (
ms.mergedriver
- and ms.mdstate() == mergemod.MERGE_DRIVER_STATE_UNMARKED
+ and ms.mdstate() == mergestatemod.MERGE_DRIVER_STATE_UNMARKED
):
proceed = mergemod.driverpreprocess(repo, ms, wctx)
ms.commit()
@@ -6008,12 +6031,12 @@
didwork = True
- if ms[f] == mergemod.MERGE_RECORD_MERGED_OTHER:
+ if ms[f] == mergestatemod.MERGE_RECORD_MERGED_OTHER:
continue
# don't let driver-resolved files be marked, and run the conclude
# step if asked to resolve
- if ms[f] == mergemod.MERGE_RECORD_DRIVER_RESOLVED:
+ if ms[f] == mergestatemod.MERGE_RECORD_DRIVER_RESOLVED:
exact = m.exact(f)
if mark:
if exact:
@@ -6033,14 +6056,14 @@
# path conflicts must be resolved manually
if ms[f] in (
- mergemod.MERGE_RECORD_UNRESOLVED_PATH,
- mergemod.MERGE_RECORD_RESOLVED_PATH,
+ mergestatemod.MERGE_RECORD_UNRESOLVED_PATH,
+ mergestatemod.MERGE_RECORD_RESOLVED_PATH,
):
if mark:
- ms.mark(f, mergemod.MERGE_RECORD_RESOLVED_PATH)
+ ms.mark(f, mergestatemod.MERGE_RECORD_RESOLVED_PATH)
elif unmark:
- ms.mark(f, mergemod.MERGE_RECORD_UNRESOLVED_PATH)
- elif ms[f] == mergemod.MERGE_RECORD_UNRESOLVED_PATH:
+ ms.mark(f, mergestatemod.MERGE_RECORD_UNRESOLVED_PATH)
+ elif ms[f] == mergestatemod.MERGE_RECORD_UNRESOLVED_PATH:
ui.warn(
_(b'%s: path conflict must be resolved manually\n')
% uipathfn(f)
@@ -6052,12 +6075,12 @@
fdata = repo.wvfs.tryread(f)
if (
filemerge.hasconflictmarkers(fdata)
- and ms[f] != mergemod.MERGE_RECORD_RESOLVED
+ and ms[f] != mergestatemod.MERGE_RECORD_RESOLVED
):
hasconflictmarkers.append(f)
- ms.mark(f, mergemod.MERGE_RECORD_RESOLVED)
+ ms.mark(f, mergestatemod.MERGE_RECORD_RESOLVED)
elif unmark:
- ms.mark(f, mergemod.MERGE_RECORD_UNRESOLVED)
+ ms.mark(f, mergestatemod.MERGE_RECORD_UNRESOLVED)
else:
# backup pre-resolve (merge uses .orig for its own purposes)
a = repo.wjoin(f)
@@ -6791,6 +6814,7 @@
"""
+ cmdutil.check_at_most_one_arg(opts, 'rev', 'change')
opts = pycompat.byteskwargs(opts)
revs = opts.get(b'rev')
change = opts.get(b'change')
@@ -6801,10 +6825,7 @@
else:
terse = ui.config(b'commands', b'status.terse')
- if revs and change:
- msg = _(b'cannot specify --rev and --change at the same time')
- raise error.Abort(msg)
- elif revs and terse:
+ if revs and terse:
msg = _(b'cannot use --terse with --rev')
raise error.Abort(msg)
elif change:
@@ -6940,7 +6961,7 @@
marks = []
try:
- ms = mergemod.mergestate.read(repo)
+ ms = mergestatemod.mergestate.read(repo)
except error.UnsupportedMergeRecords as e:
s = b' '.join(e.recordtypes)
ui.warn(
--- a/mercurial/configitems.py Tue Jun 23 16:07:18 2020 +0200
+++ b/mercurial/configitems.py Thu Jun 25 10:32:51 2020 -0700
@@ -405,18 +405,6 @@
coreconfigitem(
b'devel', b'legacy.exchange', default=list,
)
-# TODO before getting `persistent-nodemap` out of experimental
-#
-# * decide for a "status" of the persistent nodemap and associated location
-# - part of the store next the revlog itself (new requirements)
-# - part of the cache directory
-# - part of an `index` directory
-# (https://www.mercurial-scm.org/wiki/ComputedIndexPlan)
-# * do we want to use this for more than just changelog? if so we need:
-# - simpler "pending" logic for them
-# - double check the memory story (we dont want to keep all revlog in memory)
-# - think about the naming scheme if we are in "cache"
-# * increment the version format to "1" and freeze it.
coreconfigitem(
b'devel', b'persistent-nodemap', default=False,
)
@@ -675,12 +663,6 @@
b'experimental', b'rust.index', default=False,
)
coreconfigitem(
- b'experimental', b'exp-persistent-nodemap', default=False,
-)
-coreconfigitem(
- b'experimental', b'exp-persistent-nodemap.mmap', default=True,
-)
-coreconfigitem(
b'experimental', b'server.filesdata.recommended-batch-size', default=50000,
)
coreconfigitem(
@@ -783,6 +765,12 @@
coreconfigitem(
b'format', b'usestore', default=True,
)
+# Right now, the only efficient implementation of the nodemap logic is in Rust,
+# the persistent nodemap feature needs to stay experimental as long as the Rust
+# extensions are an experimental feature.
+coreconfigitem(
+ b'format', b'use-persistent-nodemap', default=False, experimental=True
+)
coreconfigitem(
b'format',
b'exp-use-copies-side-data-changeset',
@@ -820,9 +808,6 @@
b'hostsecurity', b'ciphers', default=None,
)
coreconfigitem(
- b'hostsecurity', b'disabletls10warning', default=False,
-)
-coreconfigitem(
b'hostsecurity', b'minimumprotocol', default=dynamicdefault,
)
coreconfigitem(
@@ -1088,6 +1073,14 @@
default=True,
alias=[(b'format', b'aggressivemergedeltas')],
)
+# experimental as long as rust is experimental (or a C version is implemented)
+coreconfigitem(
+ b'storage', b'revlog.nodemap.mmap', default=True, experimental=True
+)
+# experimental as long as format.use-persistent-nodemap is.
+coreconfigitem(
+ b'storage', b'revlog.nodemap.mode', default=b'compat', experimental=True
+)
coreconfigitem(
b'storage', b'revlog.reuse-external-delta', default=True,
)
--- a/mercurial/context.py Tue Jun 23 16:07:18 2020 +0200
+++ b/mercurial/context.py Thu Jun 25 10:32:51 2020 -0700
@@ -28,12 +28,13 @@
open,
)
from . import (
- copies,
dagop,
encoding,
error,
fileset,
match as matchmod,
+ mergestate as mergestatemod,
+ metadata,
obsolete as obsmod,
patch,
pathutil,
@@ -299,7 +300,7 @@
@propertycache
def _copies(self):
- return copies.computechangesetcopies(self)
+ return metadata.computechangesetcopies(self)
def p1copies(self):
return self._copies[0]
@@ -474,6 +475,12 @@
return r
+ def mergestate(self, clean=False):
+ """Get a mergestate object for this context."""
+ raise NotImplementedError(
+ '%s does not implement mergestate()' % self.__class__
+ )
+
class changectx(basectx):
"""A changecontext object makes access to data related to a particular
@@ -582,7 +589,7 @@
filesadded = None
if filesadded is None:
if compute_on_none:
- filesadded = copies.computechangesetfilesadded(self)
+ filesadded = metadata.computechangesetfilesadded(self)
else:
filesadded = []
return filesadded
@@ -601,7 +608,7 @@
filesremoved = None
if filesremoved is None:
if compute_on_none:
- filesremoved = copies.computechangesetfilesremoved(self)
+ filesremoved = metadata.computechangesetfilesremoved(self)
else:
filesremoved = []
return filesremoved
@@ -2009,6 +2016,11 @@
sparse.aftercommit(self._repo, node)
+ def mergestate(self, clean=False):
+ if clean:
+ return mergestatemod.mergestate.clean(self._repo)
+ return mergestatemod.mergestate.read(self._repo)
+
class committablefilectx(basefilectx):
"""A committablefilectx provides common functionality for a file context
@@ -2310,7 +2322,7 @@
return self._cache[path][b'flags']
else:
raise error.ProgrammingError(
- b"No such file or directory: %s" % self._path
+ b"No such file or directory: %s" % path
)
else:
return self._wrappedctx[path].flags()
@@ -2427,7 +2439,7 @@
return len(self._cache[path][b'data'])
else:
raise error.ProgrammingError(
- b"No such file or directory: %s" % self._path
+ b"No such file or directory: %s" % path
)
return self._wrappedctx[path].size()
--- a/mercurial/copies.py Tue Jun 23 16:07:18 2020 +0200
+++ b/mercurial/copies.py Thu Jun 25 10:32:51 2020 -0700
@@ -8,7 +8,6 @@
from __future__ import absolute_import
import collections
-import multiprocessing
import os
from .i18n import _
@@ -17,7 +16,6 @@
from .revlogutils.flagutil import REVIDX_SIDEDATA
from . import (
- error,
match as matchmod,
node,
pathutil,
@@ -25,7 +23,6 @@
util,
)
-from .revlogutils import sidedata as sidedatamod
from .utils import stringutil
@@ -183,10 +180,27 @@
* p1copies: mapping of copies from p1
* p2copies: mapping of copies from p2
* removed: a list of removed files
+ * ismerged: a callback to know if file was merged in that revision
"""
cl = repo.changelog
parents = cl.parentrevs
+ def get_ismerged(rev):
+ ctx = repo[rev]
+
+ def ismerged(path):
+ if path not in ctx.files():
+ return False
+ fctx = ctx[path]
+ parents = fctx._filelog.parents(fctx._filenode)
+ nb_parents = 0
+ for n in parents:
+ if n != node.nullid:
+ nb_parents += 1
+ return nb_parents >= 2
+
+ return ismerged
+
if repo.filecopiesmode == b'changeset-sidedata':
changelogrevision = cl.changelogrevision
flags = cl.flags
@@ -218,6 +232,7 @@
def revinfo(rev):
p1, p2 = parents(rev)
+ value = None
if flags(rev) & REVIDX_SIDEDATA:
e = merge_caches.pop(rev, None)
if e is not None:
@@ -228,12 +243,22 @@
removed = c.filesremoved
if p1 != node.nullrev and p2 != node.nullrev:
# XXX some case we over cache, IGNORE
- merge_caches[rev] = (p1, p2, p1copies, p2copies, removed)
+ value = merge_caches[rev] = (
+ p1,
+ p2,
+ p1copies,
+ p2copies,
+ removed,
+ get_ismerged(rev),
+ )
else:
p1copies = {}
p2copies = {}
removed = []
- return p1, p2, p1copies, p2copies, removed
+
+ if value is None:
+ value = (p1, p2, p1copies, p2copies, removed, get_ismerged(rev))
+ return value
else:
@@ -242,7 +267,7 @@
ctx = repo[rev]
p1copies, p2copies = ctx._copies
removed = ctx.filesremoved()
- return p1, p2, p1copies, p2copies, removed
+ return p1, p2, p1copies, p2copies, removed, get_ismerged(rev)
return revinfo
@@ -256,6 +281,7 @@
revinfo = _revinfogetter(repo)
cl = repo.changelog
+    isancestor = cl.isancestorrev # XXX we should add caching to this.
missingrevs = cl.findmissingrevs(common=[a.rev()], heads=[b.rev()])
mrset = set(missingrevs)
roots = set()
@@ -283,10 +309,14 @@
iterrevs.update(roots)
iterrevs.remove(b.rev())
revs = sorted(iterrevs)
- return _combinechangesetcopies(revs, children, b.rev(), revinfo, match)
+ return _combinechangesetcopies(
+ revs, children, b.rev(), revinfo, match, isancestor
+ )
-def _combinechangesetcopies(revs, children, targetrev, revinfo, match):
+def _combinechangesetcopies(
+ revs, children, targetrev, revinfo, match, isancestor
+):
"""combine the copies information for each item of iterrevs
revs: sorted iterable of revision to visit
@@ -305,7 +335,7 @@
# this is a root
copies = {}
for i, c in enumerate(children[r]):
- p1, p2, p1copies, p2copies, removed = revinfo(c)
+ p1, p2, p1copies, p2copies, removed, ismerged = revinfo(c)
if r == p1:
parent = 1
childcopies = p1copies
@@ -319,9 +349,12 @@
}
newcopies = copies
if childcopies:
- newcopies = _chain(newcopies, childcopies)
- # _chain makes a copies, we can avoid doing so in some
- # simple/linear cases.
+ newcopies = copies.copy()
+ for dest, source in pycompat.iteritems(childcopies):
+ prev = copies.get(source)
+ if prev is not None and prev[1] is not None:
+ source = prev[1]
+ newcopies[dest] = (c, source)
assert newcopies is not copies
for f in removed:
if f in newcopies:
@@ -330,7 +363,7 @@
# branches. when there are no other branches, this
# could be avoided.
newcopies = copies.copy()
- del newcopies[f]
+ newcopies[f] = (c, None)
othercopies = all_copies.get(c)
if othercopies is None:
all_copies[c] = newcopies
@@ -338,21 +371,55 @@
# we are the second parent to work on c, we need to merge our
# work with the other.
#
- # Unlike when copies are stored in the filelog, we consider
- # it a copy even if the destination already existed on the
- # other branch. It's simply too expensive to check if the
- # file existed in the manifest.
- #
# In case of conflict, parent 1 take precedence over parent 2.
# This is an arbitrary choice made anew when implementing
# changeset based copies. It was made without regards with
# potential filelog related behavior.
if parent == 1:
- othercopies.update(newcopies)
+ _merge_copies_dict(
+ othercopies, newcopies, isancestor, ismerged
+ )
else:
- newcopies.update(othercopies)
+ _merge_copies_dict(
+ newcopies, othercopies, isancestor, ismerged
+ )
all_copies[c] = newcopies
- return all_copies[targetrev]
+
+ final_copies = {}
+ for dest, (tt, source) in all_copies[targetrev].items():
+ if source is not None:
+ final_copies[dest] = source
+ return final_copies
+
+
+def _merge_copies_dict(minor, major, isancestor, ismerged):
+ """merge two copies-mapping together, minor and major
+
+ In case of conflict, value from "major" will be picked.
+
+    - `isancestor(low_rev, high_rev)`: callable returning True if `low_rev` is
+      an ancestor of `high_rev`,
+
+    - `ismerged(path)`: callable returning True if `path` has been merged in
+      the current revision,
+ """
+ for dest, value in major.items():
+ other = minor.get(dest)
+ if other is None:
+ minor[dest] = value
+ else:
+ new_tt = value[0]
+ other_tt = other[0]
+ if value[1] == other[1]:
+ continue
+ # content from "major" wins, unless it is older
+ # than the branch point or there is a merge
+ if (
+ new_tt == other_tt
+ or not isancestor(new_tt, other_tt)
+ or ismerged(dest)
+ ):
+ minor[dest] = value
def _forwardcopies(a, b, base=None, match=None):
@@ -569,6 +636,12 @@
self.dirmove = {} if dirmove is None else dirmove
self.movewithdir = {} if movewithdir is None else movewithdir
+ def __repr__(self):
+ return (
+ '<branch_copies\n copy=%r\n renamedelete=%r\n dirmove=%r\n movewithdir=%r\n>'
+ % (self.copy, self.renamedelete, self.dirmove, self.movewithdir,)
+ )
+
def _fullcopytracing(repo, c1, c2, base):
""" The full copytracing algorithm which finds all the new files that were
@@ -922,250 +995,3 @@
_filter(wctx.p1(), wctx, new_copies)
for dst, src in pycompat.iteritems(new_copies):
wctx[dst].markcopied(src)
-
-
-def computechangesetfilesadded(ctx):
- """return the list of files added in a changeset
- """
- added = []
- for f in ctx.files():
- if not any(f in p for p in ctx.parents()):
- added.append(f)
- return added
-
-
-def computechangesetfilesremoved(ctx):
- """return the list of files removed in a changeset
- """
- removed = []
- for f in ctx.files():
- if f not in ctx:
- removed.append(f)
- return removed
-
-
-def computechangesetcopies(ctx):
- """return the copies data for a changeset
-
- The copies data are returned as a pair of dictionnary (p1copies, p2copies).
-
- Each dictionnary are in the form: `{newname: oldname}`
- """
- p1copies = {}
- p2copies = {}
- p1 = ctx.p1()
- p2 = ctx.p2()
- narrowmatch = ctx._repo.narrowmatch()
- for dst in ctx.files():
- if not narrowmatch(dst) or dst not in ctx:
- continue
- copied = ctx[dst].renamed()
- if not copied:
- continue
- src, srcnode = copied
- if src in p1 and p1[src].filenode() == srcnode:
- p1copies[dst] = src
- elif src in p2 and p2[src].filenode() == srcnode:
- p2copies[dst] = src
- return p1copies, p2copies
-
-
-def encodecopies(files, copies):
- items = []
- for i, dst in enumerate(files):
- if dst in copies:
- items.append(b'%d\0%s' % (i, copies[dst]))
- if len(items) != len(copies):
- raise error.ProgrammingError(
- b'some copy targets missing from file list'
- )
- return b"\n".join(items)
-
-
-def decodecopies(files, data):
- try:
- copies = {}
- if not data:
- return copies
- for l in data.split(b'\n'):
- strindex, src = l.split(b'\0')
- i = int(strindex)
- dst = files[i]
- copies[dst] = src
- return copies
- except (ValueError, IndexError):
- # Perhaps someone had chosen the same key name (e.g. "p1copies") and
- # used different syntax for the value.
- return None
-
-
-def encodefileindices(files, subset):
- subset = set(subset)
- indices = []
- for i, f in enumerate(files):
- if f in subset:
- indices.append(b'%d' % i)
- return b'\n'.join(indices)
-
-
-def decodefileindices(files, data):
- try:
- subset = []
- if not data:
- return subset
- for strindex in data.split(b'\n'):
- i = int(strindex)
- if i < 0 or i >= len(files):
- return None
- subset.append(files[i])
- return subset
- except (ValueError, IndexError):
- # Perhaps someone had chosen the same key name (e.g. "added") and
- # used different syntax for the value.
- return None
-
-
-def _getsidedata(srcrepo, rev):
- ctx = srcrepo[rev]
- filescopies = computechangesetcopies(ctx)
- filesadded = computechangesetfilesadded(ctx)
- filesremoved = computechangesetfilesremoved(ctx)
- sidedata = {}
- if any([filescopies, filesadded, filesremoved]):
- sortedfiles = sorted(ctx.files())
- p1copies, p2copies = filescopies
- p1copies = encodecopies(sortedfiles, p1copies)
- p2copies = encodecopies(sortedfiles, p2copies)
- filesadded = encodefileindices(sortedfiles, filesadded)
- filesremoved = encodefileindices(sortedfiles, filesremoved)
- if p1copies:
- sidedata[sidedatamod.SD_P1COPIES] = p1copies
- if p2copies:
- sidedata[sidedatamod.SD_P2COPIES] = p2copies
- if filesadded:
- sidedata[sidedatamod.SD_FILESADDED] = filesadded
- if filesremoved:
- sidedata[sidedatamod.SD_FILESREMOVED] = filesremoved
- return sidedata
-
-
-def getsidedataadder(srcrepo, destrepo):
- use_w = srcrepo.ui.configbool(b'experimental', b'worker.repository-upgrade')
- if pycompat.iswindows or not use_w:
- return _get_simple_sidedata_adder(srcrepo, destrepo)
- else:
- return _get_worker_sidedata_adder(srcrepo, destrepo)
-
-
-def _sidedata_worker(srcrepo, revs_queue, sidedata_queue, tokens):
- """The function used by worker precomputing sidedata
-
- It read an input queue containing revision numbers
- It write in an output queue containing (rev, <sidedata-map>)
-
- The `None` input value is used as a stop signal.
-
- The `tokens` semaphore is user to avoid having too many unprocessed
- entries. The workers needs to acquire one token before fetching a task.
- They will be released by the consumer of the produced data.
- """
- tokens.acquire()
- rev = revs_queue.get()
- while rev is not None:
- data = _getsidedata(srcrepo, rev)
- sidedata_queue.put((rev, data))
- tokens.acquire()
- rev = revs_queue.get()
- # processing of `None` is completed, release the token.
- tokens.release()
-
-
-BUFF_PER_WORKER = 50
-
-
-def _get_worker_sidedata_adder(srcrepo, destrepo):
- """The parallel version of the sidedata computation
-
- This code spawn a pool of worker that precompute a buffer of sidedata
- before we actually need them"""
- # avoid circular import copies -> scmutil -> worker -> copies
- from . import worker
-
- nbworkers = worker._numworkers(srcrepo.ui)
-
- tokens = multiprocessing.BoundedSemaphore(nbworkers * BUFF_PER_WORKER)
- revsq = multiprocessing.Queue()
- sidedataq = multiprocessing.Queue()
-
- assert srcrepo.filtername is None
- # queue all tasks beforehand, revision numbers are small and it make
- # synchronisation simpler
- #
- # Since the computation for each node can be quite expensive, the overhead
- # of using a single queue is not revelant. In practice, most computation
- # are fast but some are very expensive and dominate all the other smaller
- # cost.
- for r in srcrepo.changelog.revs():
- revsq.put(r)
- # queue the "no more tasks" markers
- for i in range(nbworkers):
- revsq.put(None)
-
- allworkers = []
- for i in range(nbworkers):
- args = (srcrepo, revsq, sidedataq, tokens)
- w = multiprocessing.Process(target=_sidedata_worker, args=args)
- allworkers.append(w)
- w.start()
-
- # dictionnary to store results for revision higher than we one we are
- # looking for. For example, if we need the sidedatamap for 42, and 43 is
- # received, when shelve 43 for later use.
- staging = {}
-
- def sidedata_companion(revlog, rev):
- sidedata = {}
- if util.safehasattr(revlog, b'filteredrevs'): # this is a changelog
- # Is the data previously shelved ?
- sidedata = staging.pop(rev, None)
- if sidedata is None:
- # look at the queued result until we find the one we are lookig
- # for (shelve the other ones)
- r, sidedata = sidedataq.get()
- while r != rev:
- staging[r] = sidedata
- r, sidedata = sidedataq.get()
- tokens.release()
- return False, (), sidedata
-
- return sidedata_companion
-
-
-def _get_simple_sidedata_adder(srcrepo, destrepo):
- """The simple version of the sidedata computation
-
- It just compute it in the same thread on request"""
-
- def sidedatacompanion(revlog, rev):
- sidedata = {}
- if util.safehasattr(revlog, 'filteredrevs'): # this is a changelog
- sidedata = _getsidedata(srcrepo, rev)
- return False, (), sidedata
-
- return sidedatacompanion
-
-
-def getsidedataremover(srcrepo, destrepo):
- def sidedatacompanion(revlog, rev):
- f = ()
- if util.safehasattr(revlog, 'filteredrevs'): # this is a changelog
- if revlog.flags(rev) & REVIDX_SIDEDATA:
- f = (
- sidedatamod.SD_P1COPIES,
- sidedatamod.SD_P2COPIES,
- sidedatamod.SD_FILESADDED,
- sidedatamod.SD_FILESREMOVED,
- )
- return False, f, {}
-
- return sidedatacompanion
--- a/mercurial/debugcommands.py Tue Jun 23 16:07:18 2020 +0200
+++ b/mercurial/debugcommands.py Thu Jun 25 10:32:51 2020 -0700
@@ -58,7 +58,7 @@
localrepo,
lock as lockmod,
logcmdutil,
- merge as mergemod,
+ mergestate as mergestatemod,
obsolete,
obsutil,
pathutil,
@@ -1650,13 +1650,6 @@
fm.plain(_(b'checking "re2" regexp engine (%s)\n') % re2)
fm.data(re2=bool(util._re2))
- rust_debug_mod = policy.importrust("debug")
- if rust_debug_mod is not None:
- re2_rust = b'installed' if rust_debug_mod.re2_installed else b'missing'
-
- msg = b'checking "re2" regexp engine Rust bindings (%s)\n'
- fm.plain(_(msg % re2_rust))
-
# templates
p = templater.templatepaths()
fm.write(b'templatedirs', b'checking templates (%s)...\n', b' '.join(p))
@@ -1974,7 +1967,7 @@
was chosen."""
if ui.verbose:
- ms = mergemod.mergestate(repo)
+ ms = mergestatemod.mergestate(repo)
# sort so that reasonable information is on top
v1records = ms._readrecordsv1()
@@ -2008,7 +2001,7 @@
b'"}'
)
- ms = mergemod.mergestate.read(repo)
+ ms = mergestatemod.mergestate.read(repo)
fm = ui.formatter(b'debugmergestate', opts)
fm.startitem()
@@ -2034,8 +2027,8 @@
state = ms._state[f]
fm_files.data(state=state[0])
if state[0] in (
- mergemod.MERGE_RECORD_UNRESOLVED,
- mergemod.MERGE_RECORD_RESOLVED,
+ mergestatemod.MERGE_RECORD_UNRESOLVED,
+ mergestatemod.MERGE_RECORD_RESOLVED,
):
fm_files.data(local_key=state[1])
fm_files.data(local_path=state[2])
@@ -2045,8 +2038,8 @@
fm_files.data(other_node=state[6])
fm_files.data(local_flags=state[7])
elif state[0] in (
- mergemod.MERGE_RECORD_UNRESOLVED_PATH,
- mergemod.MERGE_RECORD_RESOLVED_PATH,
+ mergestatemod.MERGE_RECORD_UNRESOLVED_PATH,
+ mergestatemod.MERGE_RECORD_RESOLVED_PATH,
):
fm_files.data(renamed_path=state[1])
fm_files.data(rename_side=state[2])
--- a/mercurial/dirstate.py Tue Jun 23 16:07:18 2020 +0200
+++ b/mercurial/dirstate.py Thu Jun 25 10:32:51 2020 -0700
@@ -187,7 +187,7 @@
@propertycache
def _checkexec(self):
- return util.checkexec(self._root)
+ return bool(util.checkexec(self._root))
@propertycache
def _checkcase(self):
@@ -1114,6 +1114,7 @@
unknown,
warnings,
bad,
+ traversed,
) = rustmod.status(
self._map._rustmap,
matcher,
@@ -1124,7 +1125,13 @@
bool(list_clean),
bool(list_ignored),
bool(list_unknown),
+ bool(matcher.traversedir),
)
+
+ if matcher.traversedir:
+ for dir in traversed:
+ matcher.traversedir(dir)
+
if self._ui.warn:
for item in warnings:
if isinstance(item, tuple):
@@ -1200,10 +1207,8 @@
use_rust = False
elif sparse.enabled:
use_rust = False
- elif match.traversedir is not None:
- use_rust = False
elif not isinstance(match, allowed_matchers):
- # Matchers have yet to be implemented
+ # Some matchers have yet to be implemented
use_rust = False
if use_rust:
--- a/mercurial/filemerge.py Tue Jun 23 16:07:18 2020 +0200
+++ b/mercurial/filemerge.py Thu Jun 25 10:32:51 2020 -0700
@@ -98,6 +98,9 @@
self._ctx = ctx
self._f = f
+ def __bytes__(self):
+ return b'absent file %s@%s' % (self._f, self._ctx)
+
def path(self):
return self._f
--- a/mercurial/fileset.py Tue Jun 23 16:07:18 2020 +0200
+++ b/mercurial/fileset.py Thu Jun 25 10:32:51 2020 -0700
@@ -16,7 +16,7 @@
error,
filesetlang,
match as matchmod,
- merge,
+ mergestate as mergestatemod,
pycompat,
registrar,
scmutil,
@@ -245,7 +245,7 @@
getargs(x, 0, 0, _(b"resolved takes no arguments"))
if mctx.ctx.rev() is not None:
return mctx.never()
- ms = merge.mergestate.read(mctx.ctx.repo())
+ ms = mergestatemod.mergestate.read(mctx.ctx.repo())
return mctx.predicate(
lambda f: f in ms and ms[f] == b'r', predrepr=b'resolved'
)
@@ -259,7 +259,7 @@
getargs(x, 0, 0, _(b"unresolved takes no arguments"))
if mctx.ctx.rev() is not None:
return mctx.never()
- ms = merge.mergestate.read(mctx.ctx.repo())
+ ms = mergestatemod.mergestate.read(mctx.ctx.repo())
return mctx.predicate(
lambda f: f in ms and ms[f] == b'u', predrepr=b'unresolved'
)
--- a/mercurial/helptext/flags.txt Tue Jun 23 16:07:18 2020 +0200
+++ b/mercurial/helptext/flags.txt Thu Jun 25 10:32:51 2020 -0700
@@ -10,7 +10,9 @@
Every flag has at least a long name, such as --repository. Some flags may also
have a short one-letter name, such as the equivalent -R. Using the short or long
-name is equivalent and has the same effect.
+name is equivalent and has the same effect. The long name may be abbreviated to
+any unambiguous prefix. For example, :hg:`commit --amend` can be abbreviated
+to :hg:`commit --am`.
Flags that have a short name can also be bundled together - for instance, to
specify both --edit (short -e) and --interactive (short -i), one could use::
--- a/mercurial/helptext/internals/requirements.txt Tue Jun 23 16:07:18 2020 +0200
+++ b/mercurial/helptext/internals/requirements.txt Thu Jun 25 10:32:51 2020 -0700
@@ -142,3 +142,16 @@
August 2019). The requirement will only be present on repositories
that have opted in to this format (by having
``format.bookmarks-in-store=true`` set when they were created).
+
+persistent-nodemap
+==================
+
+The `nodemap` index (mapping nodeid to local revision number) is persisted on
+disk. This provides a speed benefit (if the associated native code is used). The
+persistent nodemap is only used for two revlogs: the changelog and the
+manifestlog.
+
+Support for this requirement was added in Mercurial 5.5 (released August 2020).
+Note that as of 5.5, only installations compiled with the Rust extension will
+benefit from a speedup. The other installations will do the necessary work to
+keep the index up to date, but will suffer a slowdown.
--- a/mercurial/helptext/internals/revlogs.txt Tue Jun 23 16:07:18 2020 +0200
+++ b/mercurial/helptext/internals/revlogs.txt Thu Jun 25 10:32:51 2020 -0700
@@ -161,7 +161,7 @@
(In development. Format not finalized or stable.)
-Version 2 is identical to version 2 with the following differences.
+Version 2 is identical to version 1 with the following differences.
There is no dedicated *generaldelta* revlog format flag. Instead,
the feature is implied enabled by default.
--- a/mercurial/hg.py Tue Jun 23 16:07:18 2020 +0200
+++ b/mercurial/hg.py Thu Jun 25 10:32:51 2020 -0700
@@ -33,6 +33,7 @@
logcmdutil,
logexchange,
merge as mergemod,
+ mergestate as mergestatemod,
narrowspec,
node,
phases,
@@ -1164,7 +1165,7 @@
def abortmerge(ui, repo):
- ms = mergemod.mergestate.read(repo)
+ ms = mergestatemod.mergestate.read(repo)
if ms.active():
# there were conflicts
node = ms.localctx.hex()
--- a/mercurial/hgweb/server.py Tue Jun 23 16:07:18 2020 +0200
+++ b/mercurial/hgweb/server.py Thu Jun 25 10:32:51 2020 -0700
@@ -313,7 +313,7 @@
try:
from .. import sslutil
- sslutil.modernssl
+ sslutil.wrapserversocket
except ImportError:
raise error.Abort(_(b"SSL support is unavailable"))
--- a/mercurial/hook.py Tue Jun 23 16:07:18 2020 +0200
+++ b/mercurial/hook.py Thu Jun 25 10:32:51 2020 -0700
@@ -158,6 +158,10 @@
env[b'HG_HOOKNAME'] = name
for k, v in pycompat.iteritems(args):
+ # transaction changes can accumulate MBs of data, so skip it
+ # for external hooks
+ if k == b'changes':
+ continue
if callable(v):
v = v()
if isinstance(v, (dict, list)):
--- a/mercurial/interfaces/repository.py Tue Jun 23 16:07:18 2020 +0200
+++ b/mercurial/interfaces/repository.py Thu Jun 25 10:32:51 2020 -0700
@@ -1395,6 +1395,9 @@
Raises ``error.LookupError`` if the node is not known.
"""
+ def update_caches(transaction):
+ """update whatever cache are relevant for the used storage."""
+
class ilocalrepositoryfilestorage(interfaceutil.Interface):
"""Local repository sub-interface providing access to tracked file storage.
--- a/mercurial/localrepo.py Tue Jun 23 16:07:18 2020 +0200
+++ b/mercurial/localrepo.py Thu Jun 25 10:32:51 2020 -0700
@@ -44,8 +44,9 @@
hook,
lock as lockmod,
match as matchmod,
- merge as mergemod,
+ mergestate as mergestatemod,
mergeutil,
+ metadata,
namespaces,
narrowspec,
obsolete,
@@ -445,6 +446,9 @@
# copies related information in changeset's sidedata.
COPIESSDC_REQUIREMENT = b'exp-copies-sidedata-changeset'
+# The repository use persistent nodemap for the changelog and the manifest.
+NODEMAP_REQUIREMENT = b'persistent-nodemap'
+
# Functions receiving (ui, features) that extensions can register to impact
# the ability to load repositories with custom requirements. Only
# functions defined in loaded extensions are called.
@@ -505,6 +509,11 @@
except OSError as e:
if e.errno != errno.ENOENT:
raise
+ except ValueError as e:
+ # Can be raised on Python 3.8 when path is invalid.
+ raise error.Abort(
+ _(b'invalid path %s: %s') % (path, pycompat.bytestr(e))
+ )
raise error.RepoError(_(b'repository %s not found') % path)
@@ -933,10 +942,12 @@
if ui.configbool(b'experimental', b'rust.index'):
options[b'rust.index'] = True
- if ui.configbool(b'experimental', b'exp-persistent-nodemap'):
- options[b'exp-persistent-nodemap'] = True
- if ui.configbool(b'experimental', b'exp-persistent-nodemap.mmap'):
- options[b'exp-persistent-nodemap.mmap'] = True
+ if NODEMAP_REQUIREMENT in requirements:
+ options[b'persistent-nodemap'] = True
+ if ui.configbool(b'storage', b'revlog.nodemap.mmap'):
+ options[b'persistent-nodemap.mmap'] = True
+ epnm = ui.config(b'storage', b'revlog.nodemap.mode')
+ options[b'persistent-nodemap.mode'] = epnm
if ui.configbool(b'devel', b'persistent-nodemap'):
options[b'devel-force-nodemap'] = True
@@ -1021,6 +1032,7 @@
REVLOGV2_REQUIREMENT,
SIDEDATA_REQUIREMENT,
SPARSEREVLOG_REQUIREMENT,
+ NODEMAP_REQUIREMENT,
bookmarks.BOOKMARKS_IN_STORE_REQUIREMENT,
}
_basesupported = supportedformats | {
@@ -2239,6 +2251,7 @@
tr.hookargs[b'txnid'] = txnid
tr.hookargs[b'txnname'] = desc
+ tr.hookargs[b'changes'] = tr.changes
# note: writing the fncache only during finalize mean that the file is
# outdated when running hooks. As fncache is used for streaming clone,
# this is not expected to break anything that happen during the hooks.
@@ -2461,7 +2474,7 @@
ui.status(
_(b'working directory now based on revision %d\n') % parents
)
- mergemod.mergestate.clean(self, self[b'.'].node())
+ mergestatemod.mergestate.clean(self, self[b'.'].node())
# TODO: if we know which new heads may result from this rollback, pass
# them to destroy(), which will prevent the branchhead cache from being
@@ -2511,6 +2524,7 @@
unfi = self.unfiltered()
self.changelog.update_caches(transaction=tr)
+ self.manifestlog.update_caches(transaction=tr)
rbc = unfi.revbranchcache()
for r in unfi.changelog:
@@ -2859,10 +2873,10 @@
fparent2 = nullid
elif not fparentancestors:
# TODO: this whole if-else might be simplified much more
- ms = mergemod.mergestate.read(self)
+ ms = mergestatemod.mergestate.read(self)
if (
fname in ms
- and ms[fname] == mergemod.MERGE_RECORD_MERGED_OTHER
+ and ms[fname] == mergestatemod.MERGE_RECORD_MERGED_OTHER
):
fparent1, fparent2 = fparent2, nullid
@@ -2960,7 +2974,7 @@
self, status, text, user, date, extra
)
- ms = mergemod.mergestate.read(self)
+ ms = mergestatemod.mergestate.read(self)
mergeutil.checkunresolved(ms)
# internal config: ui.allowemptycommit
@@ -3018,6 +3032,12 @@
self.ui.write(
_(b'note: commit message saved in %s\n') % msgfn
)
+ self.ui.write(
+ _(
+ b"note: use 'hg commit --logfile "
+ b".hg/last-message.txt --edit' to reuse it\n"
+ )
+ )
raise
def commithook(unused_success):
@@ -3131,51 +3151,8 @@
for f in drop:
del m[f]
if p2.rev() != nullrev:
-
- @util.cachefunc
- def mas():
- p1n = p1.node()
- p2n = p2.node()
- cahs = self.changelog.commonancestorsheads(p1n, p2n)
- if not cahs:
- cahs = [nullrev]
- return [self[r].manifest() for r in cahs]
-
- def deletionfromparent(f):
- # When a file is removed relative to p1 in a merge, this
- # function determines whether the absence is due to a
- # deletion from a parent, or whether the merge commit
- # itself deletes the file. We decide this by doing a
- # simplified three way merge of the manifest entry for
- # the file. There are two ways we decide the merge
- # itself didn't delete a file:
- # - neither parent (nor the merge) contain the file
- # - exactly one parent contains the file, and that
- # parent has the same filelog entry as the merge
- # ancestor (or all of them if there two). In other
- # words, that parent left the file unchanged while the
- # other one deleted it.
- # One way to think about this is that deleting a file is
- # similar to emptying it, so the list of changed files
- # should be similar either way. The computation
- # described above is not done directly in _filecommit
- # when creating the list of changed files, however
- # it does something very similar by comparing filelog
- # nodes.
- if f in m1:
- return f not in m2 and all(
- f in ma and ma.find(f) == m1.find(f)
- for ma in mas()
- )
- elif f in m2:
- return all(
- f in ma and ma.find(f) == m2.find(f)
- for ma in mas()
- )
- else:
- return True
-
- removed = [f for f in removed if not deletionfromparent(f)]
+ rf = metadata.get_removal_filter(ctx, (p1, p2, m1, m2))
+ removed = [f for f in removed if not rf(f)]
files = changed + removed
md = None
@@ -3653,6 +3630,9 @@
if ui.configbool(b'format', b'bookmarks-in-store'):
requirements.add(bookmarks.BOOKMARKS_IN_STORE_REQUIREMENT)
+ if ui.configbool(b'format', b'use-persistent-nodemap'):
+ requirements.add(NODEMAP_REQUIREMENT)
+
return requirements
--- a/mercurial/logcmdutil.py Tue Jun 23 16:07:18 2020 +0200
+++ b/mercurial/logcmdutil.py Thu Jun 25 10:32:51 2020 -0700
@@ -72,8 +72,8 @@
ui,
repo,
diffopts,
- node1,
- node2,
+ ctx1,
+ ctx2,
match,
changes=None,
stat=False,
@@ -85,8 +85,6 @@
hunksfilterfn=None,
):
'''show diff or diffstat.'''
- ctx1 = repo[node1]
- ctx2 = repo[node2]
if root:
relroot = pathutil.canonpath(repo.root, repo.getcwd(), root)
else:
@@ -173,6 +171,7 @@
for chunk, label in chunks:
ui.write(chunk, label=label)
+ node2 = ctx2.node()
for subpath, sub in scmutil.itersubrepos(ctx1, ctx2):
tempnode2 = node2
try:
@@ -208,15 +207,12 @@
return None
def showdiff(self, ui, ctx, diffopts, graphwidth=0, stat=False):
- repo = ctx.repo()
- node = ctx.node()
- prev = ctx.p1().node()
diffordiffstat(
ui,
- repo,
+ ctx.repo(),
diffopts,
- prev,
- node,
+ ctx.p1(),
+ ctx,
match=self._makefilematcher(ctx),
stat=stat,
graphwidth=graphwidth,
--- a/mercurial/manifest.py Tue Jun 23 16:07:18 2020 +0200
+++ b/mercurial/manifest.py Thu Jun 25 10:32:51 2020 -0700
@@ -1599,6 +1599,7 @@
checkambig=not bool(tree),
mmaplargeindex=True,
upperboundcomp=MAXCOMPRESSION,
+ persistentnodemap=opener.options.get(b'persistent-nodemap', False),
)
self.index = self._revlog.index
@@ -1959,6 +1960,9 @@
def rev(self, node):
return self._rootstore.rev(node)
+ def update_caches(self, transaction):
+ return self._rootstore._revlog.update_caches(transaction=transaction)
+
@interfaceutil.implementer(repository.imanifestrevisionwritable)
class memmanifestctx(object):
--- a/mercurial/merge.py Tue Jun 23 16:07:18 2020 +0200
+++ b/mercurial/merge.py Thu Jun 25 10:32:51 2020 -0700
@@ -8,21 +8,16 @@
from __future__ import absolute_import
import errno
-import shutil
import stat
import struct
from .i18n import _
from .node import (
addednodeid,
- bin,
- hex,
modifiednodeid,
- nullhex,
nullid,
nullrev,
)
-from .pycompat import delattr
from .thirdparty import attr
from . import (
copies,
@@ -30,6 +25,7 @@
error,
filemerge,
match as matchmod,
+ mergestate as mergestatemod,
obsutil,
pathutil,
pycompat,
@@ -38,741 +34,11 @@
util,
worker,
)
-from .utils import hashutil
_pack = struct.pack
_unpack = struct.unpack
-def _droponode(data):
- # used for compatibility for v1
- bits = data.split(b'\0')
- bits = bits[:-2] + bits[-1:]
- return b'\0'.join(bits)
-
-
-# Merge state record types. See ``mergestate`` docs for more.
-RECORD_LOCAL = b'L'
-RECORD_OTHER = b'O'
-RECORD_MERGED = b'F'
-RECORD_CHANGEDELETE_CONFLICT = b'C'
-RECORD_MERGE_DRIVER_MERGE = b'D'
-RECORD_PATH_CONFLICT = b'P'
-RECORD_MERGE_DRIVER_STATE = b'm'
-RECORD_FILE_VALUES = b'f'
-RECORD_LABELS = b'l'
-RECORD_OVERRIDE = b't'
-RECORD_UNSUPPORTED_MANDATORY = b'X'
-RECORD_UNSUPPORTED_ADVISORY = b'x'
-RECORD_RESOLVED_OTHER = b'R'
-
-MERGE_DRIVER_STATE_UNMARKED = b'u'
-MERGE_DRIVER_STATE_MARKED = b'm'
-MERGE_DRIVER_STATE_SUCCESS = b's'
-
-MERGE_RECORD_UNRESOLVED = b'u'
-MERGE_RECORD_RESOLVED = b'r'
-MERGE_RECORD_UNRESOLVED_PATH = b'pu'
-MERGE_RECORD_RESOLVED_PATH = b'pr'
-MERGE_RECORD_DRIVER_RESOLVED = b'd'
-# represents that the file was automatically merged in favor
-# of other version. This info is used on commit.
-MERGE_RECORD_MERGED_OTHER = b'o'
-
-ACTION_FORGET = b'f'
-ACTION_REMOVE = b'r'
-ACTION_ADD = b'a'
-ACTION_GET = b'g'
-ACTION_PATH_CONFLICT = b'p'
-ACTION_PATH_CONFLICT_RESOLVE = b'pr'
-ACTION_ADD_MODIFIED = b'am'
-ACTION_CREATED = b'c'
-ACTION_DELETED_CHANGED = b'dc'
-ACTION_CHANGED_DELETED = b'cd'
-ACTION_MERGE = b'm'
-ACTION_LOCAL_DIR_RENAME_GET = b'dg'
-ACTION_DIR_RENAME_MOVE_LOCAL = b'dm'
-ACTION_KEEP = b'k'
-ACTION_EXEC = b'e'
-ACTION_CREATED_MERGE = b'cm'
-# GET the other/remote side and store this info in mergestate
-ACTION_GET_OTHER_AND_STORE = b'gs'
-
-
-class mergestate(object):
- '''track 3-way merge state of individual files
-
- The merge state is stored on disk when needed. Two files are used: one with
- an old format (version 1), and one with a new format (version 2). Version 2
- stores a superset of the data in version 1, including new kinds of records
- in the future. For more about the new format, see the documentation for
- `_readrecordsv2`.
-
- Each record can contain arbitrary content, and has an associated type. This
- `type` should be a letter. If `type` is uppercase, the record is mandatory:
- versions of Mercurial that don't support it should abort. If `type` is
- lowercase, the record can be safely ignored.
-
- Currently known records:
-
- L: the node of the "local" part of the merge (hexified version)
- O: the node of the "other" part of the merge (hexified version)
- F: a file to be merged entry
- C: a change/delete or delete/change conflict
- D: a file that the external merge driver will merge internally
- (experimental)
- P: a path conflict (file vs directory)
- m: the external merge driver defined for this merge plus its run state
- (experimental)
- f: a (filename, dictionary) tuple of optional values for a given file
- X: unsupported mandatory record type (used in tests)
- x: unsupported advisory record type (used in tests)
- l: the labels for the parts of the merge.
-
- Merge driver run states (experimental):
- u: driver-resolved files unmarked -- needs to be run next time we're about
- to resolve or commit
- m: driver-resolved files marked -- only needs to be run before commit
- s: success/skipped -- does not need to be run any more
-
- Merge record states (stored in self._state, indexed by filename):
- u: unresolved conflict
- r: resolved conflict
- pu: unresolved path conflict (file conflicts with directory)
- pr: resolved path conflict
- d: driver-resolved conflict
-
- The resolve command transitions between 'u' and 'r' for conflicts and
- 'pu' and 'pr' for path conflicts.
- '''
-
- statepathv1 = b'merge/state'
- statepathv2 = b'merge/state2'
-
- @staticmethod
- def clean(repo, node=None, other=None, labels=None):
- """Initialize a brand new merge state, removing any existing state on
- disk."""
- ms = mergestate(repo)
- ms.reset(node, other, labels)
- return ms
-
- @staticmethod
- def read(repo):
- """Initialize the merge state, reading it from disk."""
- ms = mergestate(repo)
- ms._read()
- return ms
-
- def __init__(self, repo):
- """Initialize the merge state.
-
- Do not use this directly! Instead call read() or clean()."""
- self._repo = repo
- self._dirty = False
- self._labels = None
-
- def reset(self, node=None, other=None, labels=None):
- self._state = {}
- self._stateextras = {}
- self._local = None
- self._other = None
- self._labels = labels
- for var in ('localctx', 'otherctx'):
- if var in vars(self):
- delattr(self, var)
- if node:
- self._local = node
- self._other = other
- self._readmergedriver = None
- if self.mergedriver:
- self._mdstate = MERGE_DRIVER_STATE_SUCCESS
- else:
- self._mdstate = MERGE_DRIVER_STATE_UNMARKED
- shutil.rmtree(self._repo.vfs.join(b'merge'), True)
- self._results = {}
- self._dirty = False
-
- def _read(self):
- """Analyse each record content to restore a serialized state from disk
-
- This function process "record" entry produced by the de-serialization
- of on disk file.
- """
- self._state = {}
- self._stateextras = {}
- self._local = None
- self._other = None
- for var in ('localctx', 'otherctx'):
- if var in vars(self):
- delattr(self, var)
- self._readmergedriver = None
- self._mdstate = MERGE_DRIVER_STATE_SUCCESS
- unsupported = set()
- records = self._readrecords()
- for rtype, record in records:
- if rtype == RECORD_LOCAL:
- self._local = bin(record)
- elif rtype == RECORD_OTHER:
- self._other = bin(record)
- elif rtype == RECORD_MERGE_DRIVER_STATE:
- bits = record.split(b'\0', 1)
- mdstate = bits[1]
- if len(mdstate) != 1 or mdstate not in (
- MERGE_DRIVER_STATE_UNMARKED,
- MERGE_DRIVER_STATE_MARKED,
- MERGE_DRIVER_STATE_SUCCESS,
- ):
- # the merge driver should be idempotent, so just rerun it
- mdstate = MERGE_DRIVER_STATE_UNMARKED
-
- self._readmergedriver = bits[0]
- self._mdstate = mdstate
- elif rtype in (
- RECORD_MERGED,
- RECORD_CHANGEDELETE_CONFLICT,
- RECORD_PATH_CONFLICT,
- RECORD_MERGE_DRIVER_MERGE,
- RECORD_RESOLVED_OTHER,
- ):
- bits = record.split(b'\0')
- self._state[bits[0]] = bits[1:]
- elif rtype == RECORD_FILE_VALUES:
- filename, rawextras = record.split(b'\0', 1)
- extraparts = rawextras.split(b'\0')
- extras = {}
- i = 0
- while i < len(extraparts):
- extras[extraparts[i]] = extraparts[i + 1]
- i += 2
-
- self._stateextras[filename] = extras
- elif rtype == RECORD_LABELS:
- labels = record.split(b'\0', 2)
- self._labels = [l for l in labels if len(l) > 0]
- elif not rtype.islower():
- unsupported.add(rtype)
- self._results = {}
- self._dirty = False
-
- if unsupported:
- raise error.UnsupportedMergeRecords(unsupported)
-
- def _readrecords(self):
- """Read merge state from disk and return a list of record (TYPE, data)
-
- We read data from both v1 and v2 files and decide which one to use.
-
- V1 has been used by version prior to 2.9.1 and contains less data than
- v2. We read both versions and check if no data in v2 contradicts
- v1. If there is not contradiction we can safely assume that both v1
- and v2 were written at the same time and use the extract data in v2. If
- there is contradiction we ignore v2 content as we assume an old version
- of Mercurial has overwritten the mergestate file and left an old v2
- file around.
-
- returns list of record [(TYPE, data), ...]"""
- v1records = self._readrecordsv1()
- v2records = self._readrecordsv2()
- if self._v1v2match(v1records, v2records):
- return v2records
- else:
- # v1 file is newer than v2 file, use it
- # we have to infer the "other" changeset of the merge
- # we cannot do better than that with v1 of the format
- mctx = self._repo[None].parents()[-1]
- v1records.append((RECORD_OTHER, mctx.hex()))
- # add place holder "other" file node information
- # nobody is using it yet so we do no need to fetch the data
- # if mctx was wrong `mctx[bits[-2]]` may fails.
- for idx, r in enumerate(v1records):
- if r[0] == RECORD_MERGED:
- bits = r[1].split(b'\0')
- bits.insert(-2, b'')
- v1records[idx] = (r[0], b'\0'.join(bits))
- return v1records
-
- def _v1v2match(self, v1records, v2records):
- oldv2 = set() # old format version of v2 record
- for rec in v2records:
- if rec[0] == RECORD_LOCAL:
- oldv2.add(rec)
- elif rec[0] == RECORD_MERGED:
- # drop the onode data (not contained in v1)
- oldv2.add((RECORD_MERGED, _droponode(rec[1])))
- for rec in v1records:
- if rec not in oldv2:
- return False
- else:
- return True
-
- def _readrecordsv1(self):
- """read on disk merge state for version 1 file
-
- returns list of record [(TYPE, data), ...]
-
- Note: the "F" data from this file are one entry short
- (no "other file node" entry)
- """
- records = []
- try:
- f = self._repo.vfs(self.statepathv1)
- for i, l in enumerate(f):
- if i == 0:
- records.append((RECORD_LOCAL, l[:-1]))
- else:
- records.append((RECORD_MERGED, l[:-1]))
- f.close()
- except IOError as err:
- if err.errno != errno.ENOENT:
- raise
- return records
-
- def _readrecordsv2(self):
- """read on disk merge state for version 2 file
-
- This format is a list of arbitrary records of the form:
-
- [type][length][content]
-
- `type` is a single character, `length` is a 4 byte integer, and
- `content` is an arbitrary byte sequence of length `length`.
-
- Mercurial versions prior to 3.7 have a bug where if there are
- unsupported mandatory merge records, attempting to clear out the merge
- state with hg update --clean or similar aborts. The 't' record type
- works around that by writing out what those versions treat as an
- advisory record, but later versions interpret as special: the first
- character is the 'real' record type and everything onwards is the data.
-
- Returns list of records [(TYPE, data), ...]."""
- records = []
- try:
- f = self._repo.vfs(self.statepathv2)
- data = f.read()
- off = 0
- end = len(data)
- while off < end:
- rtype = data[off : off + 1]
- off += 1
- length = _unpack(b'>I', data[off : (off + 4)])[0]
- off += 4
- record = data[off : (off + length)]
- off += length
- if rtype == RECORD_OVERRIDE:
- rtype, record = record[0:1], record[1:]
- records.append((rtype, record))
- f.close()
- except IOError as err:
- if err.errno != errno.ENOENT:
- raise
- return records
-
- @util.propertycache
- def mergedriver(self):
- # protect against the following:
- # - A configures a malicious merge driver in their hgrc, then
- # pauses the merge
- # - A edits their hgrc to remove references to the merge driver
- # - A gives a copy of their entire repo, including .hg, to B
- # - B inspects .hgrc and finds it to be clean
- # - B then continues the merge and the malicious merge driver
- # gets invoked
- configmergedriver = self._repo.ui.config(
- b'experimental', b'mergedriver'
- )
- if (
- self._readmergedriver is not None
- and self._readmergedriver != configmergedriver
- ):
- raise error.ConfigError(
- _(b"merge driver changed since merge started"),
- hint=_(b"revert merge driver change or abort merge"),
- )
-
- return configmergedriver
-
- @util.propertycache
- def local(self):
- if self._local is None:
- msg = b"local accessed but self._local isn't set"
- raise error.ProgrammingError(msg)
- return self._local
-
- @util.propertycache
- def localctx(self):
- return self._repo[self.local]
-
- @util.propertycache
- def other(self):
- if self._other is None:
- msg = b"other accessed but self._other isn't set"
- raise error.ProgrammingError(msg)
- return self._other
-
- @util.propertycache
- def otherctx(self):
- return self._repo[self.other]
-
- def active(self):
- """Whether mergestate is active.
-
- Returns True if there appears to be mergestate. This is a rough proxy
- for "is a merge in progress."
- """
- return bool(self._local) or bool(self._state)
-
- def commit(self):
- """Write current state on disk (if necessary)"""
- if self._dirty:
- records = self._makerecords()
- self._writerecords(records)
- self._dirty = False
-
- def _makerecords(self):
- records = []
- records.append((RECORD_LOCAL, hex(self._local)))
- records.append((RECORD_OTHER, hex(self._other)))
- if self.mergedriver:
- records.append(
- (
- RECORD_MERGE_DRIVER_STATE,
- b'\0'.join([self.mergedriver, self._mdstate]),
- )
- )
- # Write out state items. In all cases, the value of the state map entry
- # is written as the contents of the record. The record type depends on
- # the type of state that is stored, and capital-letter records are used
- # to prevent older versions of Mercurial that do not support the feature
- # from loading them.
- for filename, v in pycompat.iteritems(self._state):
- if v[0] == MERGE_RECORD_DRIVER_RESOLVED:
- # Driver-resolved merge. These are stored in 'D' records.
- records.append(
- (RECORD_MERGE_DRIVER_MERGE, b'\0'.join([filename] + v))
- )
- elif v[0] in (
- MERGE_RECORD_UNRESOLVED_PATH,
- MERGE_RECORD_RESOLVED_PATH,
- ):
- # Path conflicts. These are stored in 'P' records. The current
- # resolution state ('pu' or 'pr') is stored within the record.
- records.append(
- (RECORD_PATH_CONFLICT, b'\0'.join([filename] + v))
- )
- elif v[0] == MERGE_RECORD_MERGED_OTHER:
- records.append(
- (RECORD_RESOLVED_OTHER, b'\0'.join([filename] + v))
- )
- elif v[1] == nullhex or v[6] == nullhex:
- # Change/Delete or Delete/Change conflicts. These are stored in
- # 'C' records. v[1] is the local file, and is nullhex when the
- # file is deleted locally ('dc'). v[6] is the remote file, and
- # is nullhex when the file is deleted remotely ('cd').
- records.append(
- (RECORD_CHANGEDELETE_CONFLICT, b'\0'.join([filename] + v))
- )
- else:
- # Normal files. These are stored in 'F' records.
- records.append((RECORD_MERGED, b'\0'.join([filename] + v)))
- for filename, extras in sorted(pycompat.iteritems(self._stateextras)):
- rawextras = b'\0'.join(
- b'%s\0%s' % (k, v) for k, v in pycompat.iteritems(extras)
- )
- records.append(
- (RECORD_FILE_VALUES, b'%s\0%s' % (filename, rawextras))
- )
- if self._labels is not None:
- labels = b'\0'.join(self._labels)
- records.append((RECORD_LABELS, labels))
- return records
-
- def _writerecords(self, records):
- """Write current state on disk (both v1 and v2)"""
- self._writerecordsv1(records)
- self._writerecordsv2(records)
-
- def _writerecordsv1(self, records):
- """Write current state on disk in a version 1 file"""
- f = self._repo.vfs(self.statepathv1, b'wb')
- irecords = iter(records)
- lrecords = next(irecords)
- assert lrecords[0] == RECORD_LOCAL
- f.write(hex(self._local) + b'\n')
- for rtype, data in irecords:
- if rtype == RECORD_MERGED:
- f.write(b'%s\n' % _droponode(data))
- f.close()
-
- def _writerecordsv2(self, records):
- """Write current state on disk in a version 2 file
-
- See the docstring for _readrecordsv2 for why we use 't'."""
- # these are the records that all version 2 clients can read
- allowlist = (RECORD_LOCAL, RECORD_OTHER, RECORD_MERGED)
- f = self._repo.vfs(self.statepathv2, b'wb')
- for key, data in records:
- assert len(key) == 1
- if key not in allowlist:
- key, data = RECORD_OVERRIDE, b'%s%s' % (key, data)
- format = b'>sI%is' % len(data)
- f.write(_pack(format, key, len(data), data))
- f.close()
-
- @staticmethod
- def getlocalkey(path):
- """hash the path of a local file context for storage in the .hg/merge
- directory."""
-
- return hex(hashutil.sha1(path).digest())
-
- def add(self, fcl, fco, fca, fd):
- """add a new (potentially?) conflicting file the merge state
- fcl: file context for local,
- fco: file context for remote,
- fca: file context for ancestors,
- fd: file path of the resulting merge.
-
- note: also write the local version to the `.hg/merge` directory.
- """
- if fcl.isabsent():
- localkey = nullhex
- else:
- localkey = mergestate.getlocalkey(fcl.path())
- self._repo.vfs.write(b'merge/' + localkey, fcl.data())
- self._state[fd] = [
- MERGE_RECORD_UNRESOLVED,
- localkey,
- fcl.path(),
- fca.path(),
- hex(fca.filenode()),
- fco.path(),
- hex(fco.filenode()),
- fcl.flags(),
- ]
- self._stateextras[fd] = {b'ancestorlinknode': hex(fca.node())}
- self._dirty = True
-
- def addpath(self, path, frename, forigin):
- """add a new conflicting path to the merge state
- path: the path that conflicts
- frename: the filename the conflicting file was renamed to
- forigin: origin of the file ('l' or 'r' for local/remote)
- """
- self._state[path] = [MERGE_RECORD_UNRESOLVED_PATH, frename, forigin]
- self._dirty = True
-
- def addmergedother(self, path):
- self._state[path] = [MERGE_RECORD_MERGED_OTHER, nullhex, nullhex]
- self._dirty = True
-
- def __contains__(self, dfile):
- return dfile in self._state
-
- def __getitem__(self, dfile):
- return self._state[dfile][0]
-
- def __iter__(self):
- return iter(sorted(self._state))
-
- def files(self):
- return self._state.keys()
-
- def mark(self, dfile, state):
- self._state[dfile][0] = state
- self._dirty = True
-
- def mdstate(self):
- return self._mdstate
-
- def unresolved(self):
- """Obtain the paths of unresolved files."""
-
- for f, entry in pycompat.iteritems(self._state):
- if entry[0] in (
- MERGE_RECORD_UNRESOLVED,
- MERGE_RECORD_UNRESOLVED_PATH,
- ):
- yield f
-
- def driverresolved(self):
- """Obtain the paths of driver-resolved files."""
-
- for f, entry in self._state.items():
- if entry[0] == MERGE_RECORD_DRIVER_RESOLVED:
- yield f
-
- def extras(self, filename):
- return self._stateextras.setdefault(filename, {})
-
- def _resolve(self, preresolve, dfile, wctx):
- """rerun merge process for file path `dfile`"""
- if self[dfile] in (MERGE_RECORD_RESOLVED, MERGE_RECORD_DRIVER_RESOLVED):
- return True, 0
- if self._state[dfile][0] == MERGE_RECORD_MERGED_OTHER:
- return True, 0
- stateentry = self._state[dfile]
- state, localkey, lfile, afile, anode, ofile, onode, flags = stateentry
- octx = self._repo[self._other]
- extras = self.extras(dfile)
- anccommitnode = extras.get(b'ancestorlinknode')
- if anccommitnode:
- actx = self._repo[anccommitnode]
- else:
- actx = None
- fcd = self._filectxorabsent(localkey, wctx, dfile)
- fco = self._filectxorabsent(onode, octx, ofile)
- # TODO: move this to filectxorabsent
- fca = self._repo.filectx(afile, fileid=anode, changectx=actx)
- # "premerge" x flags
- flo = fco.flags()
- fla = fca.flags()
- if b'x' in flags + flo + fla and b'l' not in flags + flo + fla:
- if fca.node() == nullid and flags != flo:
- if preresolve:
- self._repo.ui.warn(
- _(
- b'warning: cannot merge flags for %s '
- b'without common ancestor - keeping local flags\n'
- )
- % afile
- )
- elif flags == fla:
- flags = flo
- if preresolve:
- # restore local
- if localkey != nullhex:
- f = self._repo.vfs(b'merge/' + localkey)
- wctx[dfile].write(f.read(), flags)
- f.close()
- else:
- wctx[dfile].remove(ignoremissing=True)
- complete, r, deleted = filemerge.premerge(
- self._repo,
- wctx,
- self._local,
- lfile,
- fcd,
- fco,
- fca,
- labels=self._labels,
- )
- else:
- complete, r, deleted = filemerge.filemerge(
- self._repo,
- wctx,
- self._local,
- lfile,
- fcd,
- fco,
- fca,
- labels=self._labels,
- )
- if r is None:
- # no real conflict
- del self._state[dfile]
- self._stateextras.pop(dfile, None)
- self._dirty = True
- elif not r:
- self.mark(dfile, MERGE_RECORD_RESOLVED)
-
- if complete:
- action = None
- if deleted:
- if fcd.isabsent():
- # dc: local picked. Need to drop if present, which may
- # happen on re-resolves.
- action = ACTION_FORGET
- else:
- # cd: remote picked (or otherwise deleted)
- action = ACTION_REMOVE
- else:
- if fcd.isabsent(): # dc: remote picked
- action = ACTION_GET
- elif fco.isabsent(): # cd: local picked
- if dfile in self.localctx:
- action = ACTION_ADD_MODIFIED
- else:
- action = ACTION_ADD
- # else: regular merges (no action necessary)
- self._results[dfile] = r, action
-
- return complete, r
-
- def _filectxorabsent(self, hexnode, ctx, f):
- if hexnode == nullhex:
- return filemerge.absentfilectx(ctx, f)
- else:
- return ctx[f]
-
- def preresolve(self, dfile, wctx):
- """run premerge process for dfile
-
- Returns whether the merge is complete, and the exit code."""
- return self._resolve(True, dfile, wctx)
-
- def resolve(self, dfile, wctx):
- """run merge process (assuming premerge was run) for dfile
-
- Returns the exit code of the merge."""
- return self._resolve(False, dfile, wctx)[1]
-
- def counts(self):
- """return counts for updated, merged and removed files in this
- session"""
- updated, merged, removed = 0, 0, 0
- for r, action in pycompat.itervalues(self._results):
- if r is None:
- updated += 1
- elif r == 0:
- if action == ACTION_REMOVE:
- removed += 1
- else:
- merged += 1
- return updated, merged, removed
-
- def unresolvedcount(self):
- """get unresolved count for this merge (persistent)"""
- return len(list(self.unresolved()))
-
- def actions(self):
- """return lists of actions to perform on the dirstate"""
- actions = {
- ACTION_REMOVE: [],
- ACTION_FORGET: [],
- ACTION_ADD: [],
- ACTION_ADD_MODIFIED: [],
- ACTION_GET: [],
- }
- for f, (r, action) in pycompat.iteritems(self._results):
- if action is not None:
- actions[action].append((f, None, b"merge result"))
- return actions
-
- def recordactions(self):
- """record remove/add/get actions in the dirstate"""
- branchmerge = self._repo.dirstate.p2() != nullid
- recordupdates(self._repo, self.actions(), branchmerge, None)
-
- def queueremove(self, f):
- """queues a file to be removed from the dirstate
-
- Meant for use by custom merge drivers."""
- self._results[f] = 0, ACTION_REMOVE
-
- def queueadd(self, f):
- """queues a file to be added to the dirstate
-
- Meant for use by custom merge drivers."""
- self._results[f] = 0, ACTION_ADD
-
- def queueget(self, f):
- """queues a file to be marked modified in the dirstate
-
- Meant for use by custom merge drivers."""
- self._results[f] = 0, ACTION_GET
-
-
def _getcheckunknownconfig(repo, section, name):
config = repo.ui.config(section, name)
valid = [b'abort', b'ignore', b'warn']
@@ -885,14 +151,17 @@
checkunknowndirs = _unknowndirschecker()
for f, (m, args, msg) in pycompat.iteritems(actions):
- if m in (ACTION_CREATED, ACTION_DELETED_CHANGED):
+ if m in (
+ mergestatemod.ACTION_CREATED,
+ mergestatemod.ACTION_DELETED_CHANGED,
+ ):
if _checkunknownfile(repo, wctx, mctx, f):
fileconflicts.add(f)
elif pathconfig and f not in wctx:
path = checkunknowndirs(repo, wctx, f)
if path is not None:
pathconflicts.add(path)
- elif m == ACTION_LOCAL_DIR_RENAME_GET:
+ elif m == mergestatemod.ACTION_LOCAL_DIR_RENAME_GET:
if _checkunknownfile(repo, wctx, mctx, f, args[0]):
fileconflicts.add(f)
@@ -903,7 +172,7 @@
collectconflicts(unknownconflicts, unknownconfig)
else:
for f, (m, args, msg) in pycompat.iteritems(actions):
- if m == ACTION_CREATED_MERGE:
+ if m == mergestatemod.ACTION_CREATED_MERGE:
fl2, anc = args
different = _checkunknownfile(repo, wctx, mctx, f)
if repo.dirstate._ignore(f):
@@ -924,10 +193,14 @@
# don't like an abort happening in the middle of
# merge.update.
if not different:
- actions[f] = (ACTION_GET, (fl2, False), b'remote created')
+ actions[f] = (
+ mergestatemod.ACTION_GET,
+ (fl2, False),
+ b'remote created',
+ )
elif mergeforce or config == b'abort':
actions[f] = (
- ACTION_MERGE,
+ mergestatemod.ACTION_MERGE,
(f, f, None, False, anc),
b'remote differs from untracked local',
)
@@ -936,7 +209,11 @@
else:
if config == b'warn':
warnconflicts.add(f)
- actions[f] = (ACTION_GET, (fl2, True), b'remote created')
+ actions[f] = (
+ mergestatemod.ACTION_GET,
+ (fl2, True),
+ b'remote created',
+ )
for f in sorted(abortconflicts):
warn = repo.ui.warn
@@ -962,14 +239,14 @@
repo.ui.warn(_(b"%s: replacing untracked files in directory\n") % f)
for f, (m, args, msg) in pycompat.iteritems(actions):
- if m == ACTION_CREATED:
+ if m == mergestatemod.ACTION_CREATED:
backup = (
f in fileconflicts
or f in pathconflicts
or any(p in pathconflicts for p in pathutil.finddirs(f))
)
(flags,) = args
- actions[f] = (ACTION_GET, (flags, backup), msg)
+ actions[f] = (mergestatemod.ACTION_GET, (flags, backup), msg)
def _forgetremoved(wctx, mctx, branchmerge):
@@ -988,9 +265,9 @@
"""
actions = {}
- m = ACTION_FORGET
+ m = mergestatemod.ACTION_FORGET
if branchmerge:
- m = ACTION_REMOVE
+ m = mergestatemod.ACTION_REMOVE
for f in wctx.deleted():
if f not in mctx:
actions[f] = m, None, b"forget deleted"
@@ -998,7 +275,11 @@
if not branchmerge:
for f in wctx.removed():
if f not in mctx:
- actions[f] = ACTION_FORGET, None, b"forget removed"
+ actions[f] = (
+ mergestatemod.ACTION_FORGET,
+ None,
+ b"forget removed",
+ )
return actions
@@ -1026,24 +307,24 @@
if actions:
# KEEP and EXEC are no-op
for m in (
- ACTION_ADD,
- ACTION_ADD_MODIFIED,
- ACTION_FORGET,
- ACTION_GET,
- ACTION_CHANGED_DELETED,
- ACTION_DELETED_CHANGED,
+ mergestatemod.ACTION_ADD,
+ mergestatemod.ACTION_ADD_MODIFIED,
+ mergestatemod.ACTION_FORGET,
+ mergestatemod.ACTION_GET,
+ mergestatemod.ACTION_CHANGED_DELETED,
+ mergestatemod.ACTION_DELETED_CHANGED,
):
for f, args, msg in actions[m]:
pmmf.add(f)
- for f, args, msg in actions[ACTION_REMOVE]:
+ for f, args, msg in actions[mergestatemod.ACTION_REMOVE]:
pmmf.discard(f)
- for f, args, msg in actions[ACTION_DIR_RENAME_MOVE_LOCAL]:
+ for f, args, msg in actions[mergestatemod.ACTION_DIR_RENAME_MOVE_LOCAL]:
f2, flags = args
pmmf.discard(f2)
pmmf.add(f)
- for f, args, msg in actions[ACTION_LOCAL_DIR_RENAME_GET]:
+ for f, args, msg in actions[mergestatemod.ACTION_LOCAL_DIR_RENAME_GET]:
pmmf.add(f)
- for f, args, msg in actions[ACTION_MERGE]:
+ for f, args, msg in actions[mergestatemod.ACTION_MERGE]:
f1, f2, fa, move, anc = args
if move:
pmmf.discard(f1)
@@ -1128,10 +409,10 @@
for f, (m, args, msg) in actions.items():
if m in (
- ACTION_CREATED,
- ACTION_DELETED_CHANGED,
- ACTION_MERGE,
- ACTION_CREATED_MERGE,
+ mergestatemod.ACTION_CREATED,
+ mergestatemod.ACTION_DELETED_CHANGED,
+ mergestatemod.ACTION_MERGE,
+ mergestatemod.ACTION_CREATED_MERGE,
):
# This action may create a new local file.
createdfiledirs.update(pathutil.finddirs(f))
@@ -1141,13 +422,13 @@
# will be checked once we know what all the deleted files are.
remoteconflicts.add(f)
# Track the names of all deleted files.
- if m == ACTION_REMOVE:
+ if m == mergestatemod.ACTION_REMOVE:
deletedfiles.add(f)
- if m == ACTION_MERGE:
+ if m == mergestatemod.ACTION_MERGE:
f1, f2, fa, move, anc = args
if move:
deletedfiles.add(f1)
- if m == ACTION_DIR_RENAME_MOVE_LOCAL:
+ if m == mergestatemod.ACTION_DIR_RENAME_MOVE_LOCAL:
f2, flags = args
deletedfiles.add(f2)
@@ -1164,10 +445,10 @@
# We will need to rename the local file.
localconflicts.add(p)
if p in actions and actions[p][0] in (
- ACTION_CREATED,
- ACTION_DELETED_CHANGED,
- ACTION_MERGE,
- ACTION_CREATED_MERGE,
+ mergestatemod.ACTION_CREATED,
+ mergestatemod.ACTION_DELETED_CHANGED,
+ mergestatemod.ACTION_MERGE,
+ mergestatemod.ACTION_CREATED_MERGE,
):
# The file is in a directory which aliases a remote file.
# This is an internal inconsistency within the remote
@@ -1179,12 +460,17 @@
if p not in deletedfiles:
ctxname = bytes(wctx).rstrip(b'+')
pnew = util.safename(p, ctxname, wctx, set(actions.keys()))
+ porig = wctx[p].copysource() or p
actions[pnew] = (
- ACTION_PATH_CONFLICT_RESOLVE,
- (p,),
+ mergestatemod.ACTION_PATH_CONFLICT_RESOLVE,
+ (p, porig),
b'local path conflict',
)
- actions[p] = (ACTION_PATH_CONFLICT, (pnew, b'l'), b'path conflict')
+ actions[p] = (
+ mergestatemod.ACTION_PATH_CONFLICT,
+ (pnew, b'l'),
+ b'path conflict',
+ )
if remoteconflicts:
# Check if all files in the conflicting directories have been removed.
@@ -1193,20 +479,23 @@
if f not in deletedfiles:
m, args, msg = actions[p]
pnew = util.safename(p, ctxname, wctx, set(actions.keys()))
- if m in (ACTION_DELETED_CHANGED, ACTION_MERGE):
+ if m in (
+ mergestatemod.ACTION_DELETED_CHANGED,
+ mergestatemod.ACTION_MERGE,
+ ):
# Action was merge, just update target.
actions[pnew] = (m, args, msg)
else:
# Action was create, change to renamed get action.
fl = args[0]
actions[pnew] = (
- ACTION_LOCAL_DIR_RENAME_GET,
+ mergestatemod.ACTION_LOCAL_DIR_RENAME_GET,
(p, fl),
b'remote path conflict',
)
actions[p] = (
- ACTION_PATH_CONFLICT,
- (pnew, ACTION_REMOVE),
+ mergestatemod.ACTION_PATH_CONFLICT,
+ (pnew, mergestatemod.ACTION_REMOVE),
b'path conflict',
)
remoteconflicts.remove(p)
@@ -1340,13 +629,13 @@
) or branch_copies2.copy.get(f, None)
if fa is not None:
actions[f] = (
- ACTION_MERGE,
+ mergestatemod.ACTION_MERGE,
(f, f, fa, False, pa.node()),
b'both renamed from %s' % fa,
)
else:
actions[f] = (
- ACTION_MERGE,
+ mergestatemod.ACTION_MERGE,
(f, f, None, False, pa.node()),
b'both created',
)
@@ -1355,35 +644,43 @@
fla = ma.flags(f)
nol = b'l' not in fl1 + fl2 + fla
if n2 == a and fl2 == fla:
- actions[f] = (ACTION_KEEP, (), b'remote unchanged')
+ actions[f] = (
+ mergestatemod.ACTION_KEEP,
+ (),
+ b'remote unchanged',
+ )
elif n1 == a and fl1 == fla: # local unchanged - use remote
if n1 == n2: # optimization: keep local content
actions[f] = (
- ACTION_EXEC,
+ mergestatemod.ACTION_EXEC,
(fl2,),
b'update permissions',
)
else:
actions[f] = (
- ACTION_GET_OTHER_AND_STORE
+ mergestatemod.ACTION_GET_OTHER_AND_STORE
if branchmerge
- else ACTION_GET,
+ else mergestatemod.ACTION_GET,
(fl2, False),
b'remote is newer',
)
elif nol and n2 == a: # remote only changed 'x'
- actions[f] = (ACTION_EXEC, (fl2,), b'update permissions')
+ actions[f] = (
+ mergestatemod.ACTION_EXEC,
+ (fl2,),
+ b'update permissions',
+ )
elif nol and n1 == a: # local only changed 'x'
actions[f] = (
- ACTION_GET_OTHER_AND_STORE
+ mergestatemod.ACTION_GET_OTHER_AND_STORE
if branchmerge
- else ACTION_GET,
+ else mergestatemod.ACTION_GET,
(fl1, False),
b'remote is newer',
)
else: # both changed something
actions[f] = (
- ACTION_MERGE,
+ mergestatemod.ACTION_MERGE,
(f, f, f, False, pa.node()),
b'versions differ',
)
@@ -1396,30 +693,34 @@
f2 = branch_copies1.movewithdir[f]
if f2 in m2:
actions[f2] = (
- ACTION_MERGE,
+ mergestatemod.ACTION_MERGE,
(f, f2, None, True, pa.node()),
b'remote directory rename, both created',
)
else:
actions[f2] = (
- ACTION_DIR_RENAME_MOVE_LOCAL,
+ mergestatemod.ACTION_DIR_RENAME_MOVE_LOCAL,
(f, fl1),
b'remote directory rename - move from %s' % f,
)
elif f in branch_copies1.copy:
f2 = branch_copies1.copy[f]
actions[f] = (
- ACTION_MERGE,
+ mergestatemod.ACTION_MERGE,
(f, f2, f2, False, pa.node()),
b'local copied/moved from %s' % f2,
)
elif f in ma: # clean, a different, no remote
if n1 != ma[f]:
if acceptremote:
- actions[f] = (ACTION_REMOVE, None, b'remote delete')
+ actions[f] = (
+ mergestatemod.ACTION_REMOVE,
+ None,
+ b'remote delete',
+ )
else:
actions[f] = (
- ACTION_CHANGED_DELETED,
+ mergestatemod.ACTION_CHANGED_DELETED,
(f, None, f, False, pa.node()),
b'prompt changed/deleted',
)
@@ -1427,9 +728,17 @@
# This extra 'a' is added by working copy manifest to mark
# the file as locally added. We should forget it instead of
# deleting it.
- actions[f] = (ACTION_FORGET, None, b'remote deleted')
+ actions[f] = (
+ mergestatemod.ACTION_FORGET,
+ None,
+ b'remote deleted',
+ )
else:
- actions[f] = (ACTION_REMOVE, None, b'other deleted')
+ actions[f] = (
+ mergestatemod.ACTION_REMOVE,
+ None,
+ b'other deleted',
+ )
elif n2: # file exists only on remote side
if f in copied1:
pass # we'll deal with it on m1 side
@@ -1437,13 +746,13 @@
f2 = branch_copies2.movewithdir[f]
if f2 in m1:
actions[f2] = (
- ACTION_MERGE,
+ mergestatemod.ACTION_MERGE,
(f2, f, None, False, pa.node()),
b'local directory rename, both created',
)
else:
actions[f2] = (
- ACTION_LOCAL_DIR_RENAME_GET,
+ mergestatemod.ACTION_LOCAL_DIR_RENAME_GET,
(f, fl2),
b'local directory rename - get from %s' % f,
)
@@ -1451,13 +760,13 @@
f2 = branch_copies2.copy[f]
if f2 in m2:
actions[f] = (
- ACTION_MERGE,
+ mergestatemod.ACTION_MERGE,
(f2, f, f2, False, pa.node()),
b'remote copied from %s' % f2,
)
else:
actions[f] = (
- ACTION_MERGE,
+ mergestatemod.ACTION_MERGE,
(f2, f, f2, True, pa.node()),
b'remote moved from %s' % f2,
)
@@ -1474,12 +783,20 @@
# Checking whether the files are different is expensive, so we
# don't do that when we can avoid it.
if not force:
- actions[f] = (ACTION_CREATED, (fl2,), b'remote created')
+ actions[f] = (
+ mergestatemod.ACTION_CREATED,
+ (fl2,),
+ b'remote created',
+ )
elif not branchmerge:
- actions[f] = (ACTION_CREATED, (fl2,), b'remote created')
+ actions[f] = (
+ mergestatemod.ACTION_CREATED,
+ (fl2,),
+ b'remote created',
+ )
else:
actions[f] = (
- ACTION_CREATED_MERGE,
+ mergestatemod.ACTION_CREATED_MERGE,
(fl2, pa.node()),
b'remote created, get or merge',
)
@@ -1492,16 +809,20 @@
break
if df is not None and df in m1:
actions[df] = (
- ACTION_MERGE,
+ mergestatemod.ACTION_MERGE,
(df, f, f, False, pa.node()),
b'local directory rename - respect move '
b'from %s' % f,
)
elif acceptremote:
- actions[f] = (ACTION_CREATED, (fl2,), b'remote recreating')
+ actions[f] = (
+ mergestatemod.ACTION_CREATED,
+ (fl2,),
+ b'remote recreating',
+ )
else:
actions[f] = (
- ACTION_DELETED_CHANGED,
+ mergestatemod.ACTION_DELETED_CHANGED,
(None, f, f, False, pa.node()),
b'prompt deleted/changed',
)
@@ -1528,14 +849,14 @@
# actions as we resolve trivial conflicts.
for f, (m, args, msg) in list(actions.items()):
if (
- m == ACTION_CHANGED_DELETED
+ m == mergestatemod.ACTION_CHANGED_DELETED
and f in ancestor
and not wctx[f].cmp(ancestor[f])
):
# local did change but ended up with same content
- actions[f] = ACTION_REMOVE, None, b'prompt same'
+ actions[f] = mergestatemod.ACTION_REMOVE, None, b'prompt same'
elif (
- m == ACTION_DELETED_CHANGED
+ m == mergestatemod.ACTION_DELETED_CHANGED
and f in ancestor
and not mctx[f].cmp(ancestor[f])
):
@@ -1613,8 +934,8 @@
for f, a in sorted(pycompat.iteritems(actions)):
m, args, msg = a
- if m == ACTION_GET_OTHER_AND_STORE:
- m = ACTION_GET
+ if m == mergestatemod.ACTION_GET_OTHER_AND_STORE:
+ m = mergestatemod.ACTION_GET
repo.ui.debug(b' %s: %s -> %s\n' % (f, msg, m))
if f in fbids:
d = fbids[f]
@@ -1638,14 +959,14 @@
actions[f] = l[0]
continue
# If keep is an option, just do it.
- if ACTION_KEEP in bids:
+ if mergestatemod.ACTION_KEEP in bids:
repo.ui.note(_(b" %s: picking 'keep' action\n") % f)
- actions[f] = bids[ACTION_KEEP][0]
+ actions[f] = bids[mergestatemod.ACTION_KEEP][0]
continue
# If there are gets and they all agree [how could they not?], do it.
- if ACTION_GET in bids:
- ga0 = bids[ACTION_GET][0]
- if all(a == ga0 for a in bids[ACTION_GET][1:]):
+ if mergestatemod.ACTION_GET in bids:
+ ga0 = bids[mergestatemod.ACTION_GET][0]
+ if all(a == ga0 for a in bids[mergestatemod.ACTION_GET][1:]):
repo.ui.note(_(b" %s: picking 'get' action\n") % f)
actions[f] = ga0
continue
@@ -1790,10 +1111,10 @@
oplist = [
actions[a]
for a in (
- ACTION_GET,
- ACTION_DELETED_CHANGED,
- ACTION_LOCAL_DIR_RENAME_GET,
- ACTION_MERGE,
+ mergestatemod.ACTION_GET,
+ mergestatemod.ACTION_DELETED_CHANGED,
+ mergestatemod.ACTION_LOCAL_DIR_RENAME_GET,
+ mergestatemod.ACTION_MERGE,
)
]
prefetch = scmutil.prefetchfiles
@@ -1826,21 +1147,21 @@
return {
m: []
for m in (
- ACTION_ADD,
- ACTION_ADD_MODIFIED,
- ACTION_FORGET,
- ACTION_GET,
- ACTION_CHANGED_DELETED,
- ACTION_DELETED_CHANGED,
- ACTION_REMOVE,
- ACTION_DIR_RENAME_MOVE_LOCAL,
- ACTION_LOCAL_DIR_RENAME_GET,
- ACTION_MERGE,
- ACTION_EXEC,
- ACTION_KEEP,
- ACTION_PATH_CONFLICT,
- ACTION_PATH_CONFLICT_RESOLVE,
- ACTION_GET_OTHER_AND_STORE,
+ mergestatemod.ACTION_ADD,
+ mergestatemod.ACTION_ADD_MODIFIED,
+ mergestatemod.ACTION_FORGET,
+ mergestatemod.ACTION_GET,
+ mergestatemod.ACTION_CHANGED_DELETED,
+ mergestatemod.ACTION_DELETED_CHANGED,
+ mergestatemod.ACTION_REMOVE,
+ mergestatemod.ACTION_DIR_RENAME_MOVE_LOCAL,
+ mergestatemod.ACTION_LOCAL_DIR_RENAME_GET,
+ mergestatemod.ACTION_MERGE,
+ mergestatemod.ACTION_EXEC,
+ mergestatemod.ACTION_KEEP,
+ mergestatemod.ACTION_PATH_CONFLICT,
+ mergestatemod.ACTION_PATH_CONFLICT_RESOLVE,
+ mergestatemod.ACTION_GET_OTHER_AND_STORE,
)
}
@@ -1862,10 +1183,12 @@
_prefetchfiles(repo, mctx, actions)
updated, merged, removed = 0, 0, 0
- ms = mergestate.clean(repo, wctx.p1().node(), mctx.node(), labels)
+ ms = mergestatemod.mergestate.clean(
+ repo, wctx.p1().node(), mctx.node(), labels
+ )
# add ACTION_GET_OTHER_AND_STORE to mergestate
- for e in actions[ACTION_GET_OTHER_AND_STORE]:
+ for e in actions[mergestatemod.ACTION_GET_OTHER_AND_STORE]:
ms.addmergedother(e[0])
moves = []
@@ -1873,9 +1196,9 @@
l.sort()
# 'cd' and 'dc' actions are treated like other merge conflicts
- mergeactions = sorted(actions[ACTION_CHANGED_DELETED])
- mergeactions.extend(sorted(actions[ACTION_DELETED_CHANGED]))
- mergeactions.extend(actions[ACTION_MERGE])
+ mergeactions = sorted(actions[mergestatemod.ACTION_CHANGED_DELETED])
+ mergeactions.extend(sorted(actions[mergestatemod.ACTION_DELETED_CHANGED]))
+ mergeactions.extend(actions[mergestatemod.ACTION_MERGE])
for f, args, msg in mergeactions:
f1, f2, fa, move, anc = args
if f == b'.hgsubstate': # merged internally
@@ -1906,16 +1229,22 @@
wctx[f].audit()
wctx[f].remove()
- numupdates = sum(len(l) for m, l in actions.items() if m != ACTION_KEEP)
+ numupdates = sum(
+ len(l) for m, l in actions.items() if m != mergestatemod.ACTION_KEEP
+ )
progress = repo.ui.makeprogress(
_(b'updating'), unit=_(b'files'), total=numupdates
)
- if [a for a in actions[ACTION_REMOVE] if a[0] == b'.hgsubstate']:
+ if [
+ a
+ for a in actions[mergestatemod.ACTION_REMOVE]
+ if a[0] == b'.hgsubstate'
+ ]:
subrepoutil.submerge(repo, wctx, mctx, wctx, overwrite, labels)
# record path conflicts
- for f, args, msg in actions[ACTION_PATH_CONFLICT]:
+ for f, args, msg in actions[mergestatemod.ACTION_PATH_CONFLICT]:
f1, fo = args
s = repo.ui.status
s(
@@ -1939,16 +1268,20 @@
# remove in parallel (must come before resolving path conflicts and getting)
prog = worker.worker(
- repo.ui, cost, batchremove, (repo, wctx), actions[ACTION_REMOVE]
+ repo.ui,
+ cost,
+ batchremove,
+ (repo, wctx),
+ actions[mergestatemod.ACTION_REMOVE],
)
for i, item in prog:
progress.increment(step=i, item=item)
- removed = len(actions[ACTION_REMOVE])
+ removed = len(actions[mergestatemod.ACTION_REMOVE])
# resolve path conflicts (must come before getting)
- for f, args, msg in actions[ACTION_PATH_CONFLICT_RESOLVE]:
+ for f, args, msg in actions[mergestatemod.ACTION_PATH_CONFLICT_RESOLVE]:
repo.ui.debug(b" %s: %s -> pr\n" % (f, msg))
- (f0,) = args
+ (f0, origf0) = args
if wctx[f0].lexists():
repo.ui.note(_(b"moving %s to %s\n") % (f0, f))
wctx[f].audit()
@@ -1965,7 +1298,7 @@
cost,
batchget,
(repo, mctx, wctx, wantfiledata),
- actions[ACTION_GET],
+ actions[mergestatemod.ACTION_GET],
threadsafe=threadsafe,
hasretval=True,
)
@@ -1976,33 +1309,33 @@
else:
i, item = res
progress.increment(step=i, item=item)
- updated = len(actions[ACTION_GET])
+ updated = len(actions[mergestatemod.ACTION_GET])
- if [a for a in actions[ACTION_GET] if a[0] == b'.hgsubstate']:
+ if [a for a in actions[mergestatemod.ACTION_GET] if a[0] == b'.hgsubstate']:
subrepoutil.submerge(repo, wctx, mctx, wctx, overwrite, labels)
# forget (manifest only, just log it) (must come first)
- for f, args, msg in actions[ACTION_FORGET]:
+ for f, args, msg in actions[mergestatemod.ACTION_FORGET]:
repo.ui.debug(b" %s: %s -> f\n" % (f, msg))
progress.increment(item=f)
# re-add (manifest only, just log it)
- for f, args, msg in actions[ACTION_ADD]:
+ for f, args, msg in actions[mergestatemod.ACTION_ADD]:
repo.ui.debug(b" %s: %s -> a\n" % (f, msg))
progress.increment(item=f)
# re-add/mark as modified (manifest only, just log it)
- for f, args, msg in actions[ACTION_ADD_MODIFIED]:
+ for f, args, msg in actions[mergestatemod.ACTION_ADD_MODIFIED]:
repo.ui.debug(b" %s: %s -> am\n" % (f, msg))
progress.increment(item=f)
# keep (noop, just log it)
- for f, args, msg in actions[ACTION_KEEP]:
+ for f, args, msg in actions[mergestatemod.ACTION_KEEP]:
repo.ui.debug(b" %s: %s -> k\n" % (f, msg))
# no progress
# directory rename, move local
- for f, args, msg in actions[ACTION_DIR_RENAME_MOVE_LOCAL]:
+ for f, args, msg in actions[mergestatemod.ACTION_DIR_RENAME_MOVE_LOCAL]:
repo.ui.debug(b" %s: %s -> dm\n" % (f, msg))
progress.increment(item=f)
f0, flags = args
@@ -2013,7 +1346,7 @@
updated += 1
# local directory rename, get
- for f, args, msg in actions[ACTION_LOCAL_DIR_RENAME_GET]:
+ for f, args, msg in actions[mergestatemod.ACTION_LOCAL_DIR_RENAME_GET]:
repo.ui.debug(b" %s: %s -> dg\n" % (f, msg))
progress.increment(item=f)
f0, flags = args
@@ -2022,7 +1355,7 @@
updated += 1
# exec
- for f, args, msg in actions[ACTION_EXEC]:
+ for f, args, msg in actions[mergestatemod.ACTION_EXEC]:
repo.ui.debug(b" %s: %s -> e\n" % (f, msg))
progress.increment(item=f)
(flags,) = args
@@ -2087,7 +1420,7 @@
if (
usemergedriver
and not unresolved
- and ms.mdstate() != MERGE_DRIVER_STATE_SUCCESS
+ and ms.mdstate() != mergestatemod.MERGE_DRIVER_STATE_SUCCESS
):
if not driverconclude(repo, ms, wctx, labels=labels):
# XXX setting unresolved to at least 1 is a hack to make sure we
@@ -2103,10 +1436,10 @@
extraactions = ms.actions()
if extraactions:
- mfiles = {a[0] for a in actions[ACTION_MERGE]}
+ mfiles = {a[0] for a in actions[mergestatemod.ACTION_MERGE]}
for k, acts in pycompat.iteritems(extraactions):
actions[k].extend(acts)
- if k == ACTION_GET and wantfiledata:
+ if k == mergestatemod.ACTION_GET and wantfiledata:
# no filedata until mergestate is updated to provide it
for a in acts:
getfiledata[a[0]] = None
@@ -2128,112 +1461,17 @@
# those lists aren't consulted again.
mfiles.difference_update(a[0] for a in acts)
- actions[ACTION_MERGE] = [
- a for a in actions[ACTION_MERGE] if a[0] in mfiles
+ actions[mergestatemod.ACTION_MERGE] = [
+ a for a in actions[mergestatemod.ACTION_MERGE] if a[0] in mfiles
]
progress.complete()
- assert len(getfiledata) == (len(actions[ACTION_GET]) if wantfiledata else 0)
+ assert len(getfiledata) == (
+ len(actions[mergestatemod.ACTION_GET]) if wantfiledata else 0
+ )
return updateresult(updated, merged, removed, unresolved), getfiledata
-def recordupdates(repo, actions, branchmerge, getfiledata):
- """record merge actions to the dirstate"""
- # remove (must come first)
- for f, args, msg in actions.get(ACTION_REMOVE, []):
- if branchmerge:
- repo.dirstate.remove(f)
- else:
- repo.dirstate.drop(f)
-
- # forget (must come first)
- for f, args, msg in actions.get(ACTION_FORGET, []):
- repo.dirstate.drop(f)
-
- # resolve path conflicts
- for f, args, msg in actions.get(ACTION_PATH_CONFLICT_RESOLVE, []):
- (f0,) = args
- origf0 = repo.dirstate.copied(f0) or f0
- repo.dirstate.add(f)
- repo.dirstate.copy(origf0, f)
- if f0 == origf0:
- repo.dirstate.remove(f0)
- else:
- repo.dirstate.drop(f0)
-
- # re-add
- for f, args, msg in actions.get(ACTION_ADD, []):
- repo.dirstate.add(f)
-
- # re-add/mark as modified
- for f, args, msg in actions.get(ACTION_ADD_MODIFIED, []):
- if branchmerge:
- repo.dirstate.normallookup(f)
- else:
- repo.dirstate.add(f)
-
- # exec change
- for f, args, msg in actions.get(ACTION_EXEC, []):
- repo.dirstate.normallookup(f)
-
- # keep
- for f, args, msg in actions.get(ACTION_KEEP, []):
- pass
-
- # get
- for f, args, msg in actions.get(ACTION_GET, []):
- if branchmerge:
- repo.dirstate.otherparent(f)
- else:
- parentfiledata = getfiledata[f] if getfiledata else None
- repo.dirstate.normal(f, parentfiledata=parentfiledata)
-
- # merge
- for f, args, msg in actions.get(ACTION_MERGE, []):
- f1, f2, fa, move, anc = args
- if branchmerge:
- # We've done a branch merge, mark this file as merged
- # so that we properly record the merger later
- repo.dirstate.merge(f)
- if f1 != f2: # copy/rename
- if move:
- repo.dirstate.remove(f1)
- if f1 != f:
- repo.dirstate.copy(f1, f)
- else:
- repo.dirstate.copy(f2, f)
- else:
- # We've update-merged a locally modified file, so
- # we set the dirstate to emulate a normal checkout
- # of that file some time in the past. Thus our
- # merge will appear as a normal local file
- # modification.
- if f2 == f: # file not locally copied/moved
- repo.dirstate.normallookup(f)
- if move:
- repo.dirstate.drop(f1)
-
- # directory rename, move local
- for f, args, msg in actions.get(ACTION_DIR_RENAME_MOVE_LOCAL, []):
- f0, flag = args
- if branchmerge:
- repo.dirstate.add(f)
- repo.dirstate.remove(f0)
- repo.dirstate.copy(f0, f)
- else:
- repo.dirstate.normal(f)
- repo.dirstate.drop(f0)
-
- # directory rename, get
- for f, args, msg in actions.get(ACTION_LOCAL_DIR_RENAME_GET, []):
- f0, flag = args
- if branchmerge:
- repo.dirstate.add(f)
- repo.dirstate.copy(f0, f)
- else:
- repo.dirstate.normal(f)
-
-
UPDATECHECK_ABORT = b'abort' # handled at higher layers
UPDATECHECK_NONE = b'none'
UPDATECHECK_LINEAR = b'linear'
@@ -2334,7 +1572,11 @@
),
)
)
- with repo.wlock():
+ if wc is not None and wc.isinmemory():
+ maybe_wlock = util.nullcontextmanager()
+ else:
+ maybe_wlock = repo.wlock()
+ with maybe_wlock:
if wc is None:
wc = repo[None]
pl = wc.parents()
@@ -2356,7 +1598,7 @@
if not overwrite:
if len(pl) > 1:
raise error.Abort(_(b"outstanding uncommitted merge"))
- ms = mergestate.read(repo)
+ ms = mergestatemod.mergestate.read(repo)
if list(ms.unresolved()):
raise error.Abort(
_(b"outstanding merge conflicts"),
@@ -2443,12 +1685,12 @@
if updatecheck == UPDATECHECK_NO_CONFLICT:
for f, (m, args, msg) in pycompat.iteritems(actionbyfile):
if m not in (
- ACTION_GET,
- ACTION_KEEP,
- ACTION_EXEC,
- ACTION_REMOVE,
- ACTION_PATH_CONFLICT_RESOLVE,
- ACTION_GET_OTHER_AND_STORE,
+ mergestatemod.ACTION_GET,
+ mergestatemod.ACTION_KEEP,
+ mergestatemod.ACTION_EXEC,
+ mergestatemod.ACTION_REMOVE,
+ mergestatemod.ACTION_PATH_CONFLICT_RESOLVE,
+ mergestatemod.ACTION_GET_OTHER_AND_STORE,
):
msg = _(b"conflicting changes")
hint = _(b"commit or update --clean to discard changes")
@@ -2462,7 +1704,7 @@
m, args, msg = actionbyfile[f]
prompts = filemerge.partextras(labels)
prompts[b'f'] = f
- if m == ACTION_CHANGED_DELETED:
+ if m == mergestatemod.ACTION_CHANGED_DELETED:
if repo.ui.promptchoice(
_(
b"local%(l)s changed %(f)s which other%(o)s deleted\n"
@@ -2472,16 +1714,24 @@
% prompts,
0,
):
- actionbyfile[f] = (ACTION_REMOVE, None, b'prompt delete')
+ actionbyfile[f] = (
+ mergestatemod.ACTION_REMOVE,
+ None,
+ b'prompt delete',
+ )
elif f in p1:
actionbyfile[f] = (
- ACTION_ADD_MODIFIED,
+ mergestatemod.ACTION_ADD_MODIFIED,
None,
b'prompt keep',
)
else:
- actionbyfile[f] = (ACTION_ADD, None, b'prompt keep')
- elif m == ACTION_DELETED_CHANGED:
+ actionbyfile[f] = (
+ mergestatemod.ACTION_ADD,
+ None,
+ b'prompt keep',
+ )
+ elif m == mergestatemod.ACTION_DELETED_CHANGED:
f1, f2, fa, move, anc = args
flags = p2[f2].flags()
if (
@@ -2497,7 +1747,7 @@
== 0
):
actionbyfile[f] = (
- ACTION_GET,
+ mergestatemod.ACTION_GET,
(flags, False),
b'prompt recreating',
)
@@ -2511,9 +1761,9 @@
actions[m] = []
actions[m].append((f, args, msg))
- # ACTION_GET_OTHER_AND_STORE is a ACTION_GET + store in mergestate
- for e in actions[ACTION_GET_OTHER_AND_STORE]:
- actions[ACTION_GET].append(e)
+ # ACTION_GET_OTHER_AND_STORE is an ACTION_GET + store in mergestate
+ for e in actions[mergestatemod.ACTION_GET_OTHER_AND_STORE]:
+ actions[mergestatemod.ACTION_GET].append(e)
if not util.fscasesensitive(repo.path):
# check collision between files only in p2 for clean update
@@ -2590,7 +1840,7 @@
fsmonitorwarning
and not fsmonitorenabled
and p1.node() == nullid
- and len(actions[ACTION_GET]) >= fsmonitorthreshold
+ and len(actions[mergestatemod.ACTION_GET]) >= fsmonitorthreshold
and pycompat.sysplatform.startswith((b'linux', b'darwin'))
):
repo.ui.warn(
@@ -2609,7 +1859,9 @@
if updatedirstate:
with repo.dirstate.parentchange():
repo.setparents(fp1, fp2)
- recordupdates(repo, actions, branchmerge, getfiledata)
+ mergestatemod.recordupdates(
+ repo, actions, branchmerge, getfiledata
+ )
# update completed, clear state
util.unlink(repo.vfs.join(b'updatestate'))
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/mercurial/mergestate.py Thu Jun 25 10:32:51 2020 -0700
@@ -0,0 +1,850 @@
+from __future__ import absolute_import
+
+import errno
+import shutil
+import struct
+
+from .i18n import _
+from .node import (
+ bin,
+ hex,
+ nullhex,
+ nullid,
+)
+from .pycompat import delattr
+from . import (
+ error,
+ filemerge,
+ pycompat,
+ util,
+)
+from .utils import hashutil
+
+_pack = struct.pack
+_unpack = struct.unpack
+
+
+def _droponode(data):
+ # used for compatibility for v1
+ bits = data.split(b'\0')
+ bits = bits[:-2] + bits[-1:]
+ return b'\0'.join(bits)
+
+
+def _filectxorabsent(hexnode, ctx, f):
+ if hexnode == nullhex:
+ return filemerge.absentfilectx(ctx, f)
+ else:
+ return ctx[f]
+
+
+# Merge state record types. See ``mergestate`` docs for more.
+RECORD_LOCAL = b'L'
+RECORD_OTHER = b'O'
+RECORD_MERGED = b'F'
+RECORD_CHANGEDELETE_CONFLICT = b'C'
+RECORD_MERGE_DRIVER_MERGE = b'D'
+RECORD_PATH_CONFLICT = b'P'
+RECORD_MERGE_DRIVER_STATE = b'm'
+RECORD_FILE_VALUES = b'f'
+RECORD_LABELS = b'l'
+RECORD_OVERRIDE = b't'
+RECORD_UNSUPPORTED_MANDATORY = b'X'
+RECORD_UNSUPPORTED_ADVISORY = b'x'
+RECORD_RESOLVED_OTHER = b'R'
+
+MERGE_DRIVER_STATE_UNMARKED = b'u'
+MERGE_DRIVER_STATE_MARKED = b'm'
+MERGE_DRIVER_STATE_SUCCESS = b's'
+
+MERGE_RECORD_UNRESOLVED = b'u'
+MERGE_RECORD_RESOLVED = b'r'
+MERGE_RECORD_UNRESOLVED_PATH = b'pu'
+MERGE_RECORD_RESOLVED_PATH = b'pr'
+MERGE_RECORD_DRIVER_RESOLVED = b'd'
+# represents that the file was automatically merged in favor
+# of the other version. This info is used on commit.
+MERGE_RECORD_MERGED_OTHER = b'o'
+
+ACTION_FORGET = b'f'
+ACTION_REMOVE = b'r'
+ACTION_ADD = b'a'
+ACTION_GET = b'g'
+ACTION_PATH_CONFLICT = b'p'
+ACTION_PATH_CONFLICT_RESOLVE = b'pr'
+ACTION_ADD_MODIFIED = b'am'
+ACTION_CREATED = b'c'
+ACTION_DELETED_CHANGED = b'dc'
+ACTION_CHANGED_DELETED = b'cd'
+ACTION_MERGE = b'm'
+ACTION_LOCAL_DIR_RENAME_GET = b'dg'
+ACTION_DIR_RENAME_MOVE_LOCAL = b'dm'
+ACTION_KEEP = b'k'
+ACTION_EXEC = b'e'
+ACTION_CREATED_MERGE = b'cm'
+# GET the other/remote side and store this info in mergestate
+ACTION_GET_OTHER_AND_STORE = b'gs'
+
+
+class mergestate(object):
+ '''track 3-way merge state of individual files
+
+ The merge state is stored on disk when needed. Two files are used: one with
+ an old format (version 1), and one with a new format (version 2). Version 2
+ stores a superset of the data in version 1, including new kinds of records
+ in the future. For more about the new format, see the documentation for
+ `_readrecordsv2`.
+
+ Each record can contain arbitrary content, and has an associated type. This
+ `type` should be a letter. If `type` is uppercase, the record is mandatory:
+ versions of Mercurial that don't support it should abort. If `type` is
+ lowercase, the record can be safely ignored.
+
+ Currently known records:
+
+ L: the node of the "local" part of the merge (hexified version)
+ O: the node of the "other" part of the merge (hexified version)
+ F: a file to be merged entry
+ C: a change/delete or delete/change conflict
+ D: a file that the external merge driver will merge internally
+ (experimental)
+ P: a path conflict (file vs directory)
+ m: the external merge driver defined for this merge plus its run state
+ (experimental)
+ f: a (filename, dictionary) tuple of optional values for a given file
+ X: unsupported mandatory record type (used in tests)
+ x: unsupported advisory record type (used in tests)
+ l: the labels for the parts of the merge.
+
+ Merge driver run states (experimental):
+ u: driver-resolved files unmarked -- needs to be run next time we're about
+ to resolve or commit
+ m: driver-resolved files marked -- only needs to be run before commit
+ s: success/skipped -- does not need to be run any more
+
+ Merge record states (stored in self._state, indexed by filename):
+ u: unresolved conflict
+ r: resolved conflict
+ pu: unresolved path conflict (file conflicts with directory)
+ pr: resolved path conflict
+ d: driver-resolved conflict
+
+ The resolve command transitions between 'u' and 'r' for conflicts and
+ 'pu' and 'pr' for path conflicts.
+ '''
+
+ statepathv1 = b'merge/state'
+ statepathv2 = b'merge/state2'
+
+ @staticmethod
+ def clean(repo, node=None, other=None, labels=None):
+ """Initialize a brand new merge state, removing any existing state on
+ disk."""
+ ms = mergestate(repo)
+ ms.reset(node, other, labels)
+ return ms
+
+ @staticmethod
+ def read(repo):
+ """Initialize the merge state, reading it from disk."""
+ ms = mergestate(repo)
+ ms._read()
+ return ms
+
+ def __init__(self, repo):
+ """Initialize the merge state.
+
+ Do not use this directly! Instead call read() or clean()."""
+ self._repo = repo
+ self._dirty = False
+ self._labels = None
+
+ def reset(self, node=None, other=None, labels=None):
+ self._state = {}
+ self._stateextras = {}
+ self._local = None
+ self._other = None
+ self._labels = labels
+ for var in ('localctx', 'otherctx'):
+ if var in vars(self):
+ delattr(self, var)
+ if node:
+ self._local = node
+ self._other = other
+ self._readmergedriver = None
+ if self.mergedriver:
+ self._mdstate = MERGE_DRIVER_STATE_SUCCESS
+ else:
+ self._mdstate = MERGE_DRIVER_STATE_UNMARKED
+ shutil.rmtree(self._repo.vfs.join(b'merge'), True)
+ self._results = {}
+ self._dirty = False
+
+ def _read(self):
+ """Analyse each record content to restore a serialized state from disk
+
+ This function processes "record" entries produced by the de-serialization
+ of the on-disk file.
+ """
+ self._state = {}
+ self._stateextras = {}
+ self._local = None
+ self._other = None
+ for var in ('localctx', 'otherctx'):
+ if var in vars(self):
+ delattr(self, var)
+ self._readmergedriver = None
+ self._mdstate = MERGE_DRIVER_STATE_SUCCESS
+ unsupported = set()
+ records = self._readrecords()
+ for rtype, record in records:
+ if rtype == RECORD_LOCAL:
+ self._local = bin(record)
+ elif rtype == RECORD_OTHER:
+ self._other = bin(record)
+ elif rtype == RECORD_MERGE_DRIVER_STATE:
+ bits = record.split(b'\0', 1)
+ mdstate = bits[1]
+ if len(mdstate) != 1 or mdstate not in (
+ MERGE_DRIVER_STATE_UNMARKED,
+ MERGE_DRIVER_STATE_MARKED,
+ MERGE_DRIVER_STATE_SUCCESS,
+ ):
+ # the merge driver should be idempotent, so just rerun it
+ mdstate = MERGE_DRIVER_STATE_UNMARKED
+
+ self._readmergedriver = bits[0]
+ self._mdstate = mdstate
+ elif rtype in (
+ RECORD_MERGED,
+ RECORD_CHANGEDELETE_CONFLICT,
+ RECORD_PATH_CONFLICT,
+ RECORD_MERGE_DRIVER_MERGE,
+ RECORD_RESOLVED_OTHER,
+ ):
+ bits = record.split(b'\0')
+ self._state[bits[0]] = bits[1:]
+ elif rtype == RECORD_FILE_VALUES:
+ filename, rawextras = record.split(b'\0', 1)
+ extraparts = rawextras.split(b'\0')
+ extras = {}
+ i = 0
+ while i < len(extraparts):
+ extras[extraparts[i]] = extraparts[i + 1]
+ i += 2
+
+ self._stateextras[filename] = extras
+ elif rtype == RECORD_LABELS:
+ labels = record.split(b'\0', 2)
+ self._labels = [l for l in labels if len(l) > 0]
+ elif not rtype.islower():
+ unsupported.add(rtype)
+ self._results = {}
+ self._dirty = False
+
+ if unsupported:
+ raise error.UnsupportedMergeRecords(unsupported)
+
+ def _readrecords(self):
+ """Read merge state from disk and return a list of record (TYPE, data)
+
+ We read data from both v1 and v2 files and decide which one to use.
+
+ V1 has been used by version prior to 2.9.1 and contains less data than
+ v2. We read both versions and check if no data in v2 contradicts
+ v1. If there is no contradiction we can safely assume that both v1
+ and v2 were written at the same time and use the extracted data in v2. If
+ there is a contradiction we ignore v2 content as we assume an old version
+ of Mercurial has overwritten the mergestate file and left an old v2
+ file around.
+
+ returns list of record [(TYPE, data), ...]"""
+ v1records = self._readrecordsv1()
+ v2records = self._readrecordsv2()
+ if self._v1v2match(v1records, v2records):
+ return v2records
+ else:
+ # v1 file is newer than v2 file, use it
+ # we have to infer the "other" changeset of the merge
+ # we cannot do better than that with v1 of the format
+ mctx = self._repo[None].parents()[-1]
+ v1records.append((RECORD_OTHER, mctx.hex()))
+ # add place holder "other" file node information
+ # nobody is using it yet so we do not need to fetch the data
+ # if mctx was wrong `mctx[bits[-2]]` may fail.
+ for idx, r in enumerate(v1records):
+ if r[0] == RECORD_MERGED:
+ bits = r[1].split(b'\0')
+ bits.insert(-2, b'')
+ v1records[idx] = (r[0], b'\0'.join(bits))
+ return v1records
+
+ def _v1v2match(self, v1records, v2records):
+ oldv2 = set() # old format version of v2 record
+ for rec in v2records:
+ if rec[0] == RECORD_LOCAL:
+ oldv2.add(rec)
+ elif rec[0] == RECORD_MERGED:
+ # drop the onode data (not contained in v1)
+ oldv2.add((RECORD_MERGED, _droponode(rec[1])))
+ for rec in v1records:
+ if rec not in oldv2:
+ return False
+ else:
+ return True
+
+ def _readrecordsv1(self):
+ """read on disk merge state for version 1 file
+
+ returns list of record [(TYPE, data), ...]
+
+ Note: the "F" data from this file are one entry short
+ (no "other file node" entry)
+ """
+ records = []
+ try:
+ f = self._repo.vfs(self.statepathv1)
+ for i, l in enumerate(f):
+ if i == 0:
+ records.append((RECORD_LOCAL, l[:-1]))
+ else:
+ records.append((RECORD_MERGED, l[:-1]))
+ f.close()
+ except IOError as err:
+ if err.errno != errno.ENOENT:
+ raise
+ return records
+
+ def _readrecordsv2(self):
+ """read on disk merge state for version 2 file
+
+ This format is a list of arbitrary records of the form:
+
+ [type][length][content]
+
+ `type` is a single character, `length` is a 4 byte integer, and
+ `content` is an arbitrary byte sequence of length `length`.
+
+ Mercurial versions prior to 3.7 have a bug where if there are
+ unsupported mandatory merge records, attempting to clear out the merge
+ state with hg update --clean or similar aborts. The 't' record type
+ works around that by writing out what those versions treat as an
+ advisory record, but later versions interpret as special: the first
+ character is the 'real' record type and everything onwards is the data.
+
+ Returns list of records [(TYPE, data), ...]."""
+ records = []
+ try:
+ f = self._repo.vfs(self.statepathv2)
+ data = f.read()
+ off = 0
+ end = len(data)
+ while off < end:
+ rtype = data[off : off + 1]
+ off += 1
+ length = _unpack(b'>I', data[off : (off + 4)])[0]
+ off += 4
+ record = data[off : (off + length)]
+ off += length
+ if rtype == RECORD_OVERRIDE:
+ rtype, record = record[0:1], record[1:]
+ records.append((rtype, record))
+ f.close()
+ except IOError as err:
+ if err.errno != errno.ENOENT:
+ raise
+ return records
+
+ @util.propertycache
+ def mergedriver(self):
+ # protect against the following:
+ # - A configures a malicious merge driver in their hgrc, then
+ # pauses the merge
+ # - A edits their hgrc to remove references to the merge driver
+ # - A gives a copy of their entire repo, including .hg, to B
+ # - B inspects .hgrc and finds it to be clean
+ # - B then continues the merge and the malicious merge driver
+ # gets invoked
+ configmergedriver = self._repo.ui.config(
+ b'experimental', b'mergedriver'
+ )
+ if (
+ self._readmergedriver is not None
+ and self._readmergedriver != configmergedriver
+ ):
+ raise error.ConfigError(
+ _(b"merge driver changed since merge started"),
+ hint=_(b"revert merge driver change or abort merge"),
+ )
+
+ return configmergedriver
+
+ @util.propertycache
+ def local(self):
+ if self._local is None:
+ msg = b"local accessed but self._local isn't set"
+ raise error.ProgrammingError(msg)
+ return self._local
+
+ @util.propertycache
+ def localctx(self):
+ return self._repo[self.local]
+
+ @util.propertycache
+ def other(self):
+ if self._other is None:
+ msg = b"other accessed but self._other isn't set"
+ raise error.ProgrammingError(msg)
+ return self._other
+
+ @util.propertycache
+ def otherctx(self):
+ return self._repo[self.other]
+
+ def active(self):
+ """Whether mergestate is active.
+
+ Returns True if there appears to be mergestate. This is a rough proxy
+ for "is a merge in progress."
+ """
+ return bool(self._local) or bool(self._state)
+
+ def commit(self):
+ """Write current state on disk (if necessary)"""
+ if self._dirty:
+ records = self._makerecords()
+ self._writerecords(records)
+ self._dirty = False
+
+ def _makerecords(self):
+ records = []
+ records.append((RECORD_LOCAL, hex(self._local)))
+ records.append((RECORD_OTHER, hex(self._other)))
+ if self.mergedriver:
+ records.append(
+ (
+ RECORD_MERGE_DRIVER_STATE,
+ b'\0'.join([self.mergedriver, self._mdstate]),
+ )
+ )
+ # Write out state items. In all cases, the value of the state map entry
+ # is written as the contents of the record. The record type depends on
+ # the type of state that is stored, and capital-letter records are used
+ # to prevent older versions of Mercurial that do not support the feature
+ # from loading them.
+ for filename, v in pycompat.iteritems(self._state):
+ if v[0] == MERGE_RECORD_DRIVER_RESOLVED:
+ # Driver-resolved merge. These are stored in 'D' records.
+ records.append(
+ (RECORD_MERGE_DRIVER_MERGE, b'\0'.join([filename] + v))
+ )
+ elif v[0] in (
+ MERGE_RECORD_UNRESOLVED_PATH,
+ MERGE_RECORD_RESOLVED_PATH,
+ ):
+ # Path conflicts. These are stored in 'P' records. The current
+ # resolution state ('pu' or 'pr') is stored within the record.
+ records.append(
+ (RECORD_PATH_CONFLICT, b'\0'.join([filename] + v))
+ )
+ elif v[0] == MERGE_RECORD_MERGED_OTHER:
+ records.append(
+ (RECORD_RESOLVED_OTHER, b'\0'.join([filename] + v))
+ )
+ elif v[1] == nullhex or v[6] == nullhex:
+ # Change/Delete or Delete/Change conflicts. These are stored in
+ # 'C' records. v[1] is the local file, and is nullhex when the
+ # file is deleted locally ('dc'). v[6] is the remote file, and
+ # is nullhex when the file is deleted remotely ('cd').
+ records.append(
+ (RECORD_CHANGEDELETE_CONFLICT, b'\0'.join([filename] + v))
+ )
+ else:
+ # Normal files. These are stored in 'F' records.
+ records.append((RECORD_MERGED, b'\0'.join([filename] + v)))
+ for filename, extras in sorted(pycompat.iteritems(self._stateextras)):
+ rawextras = b'\0'.join(
+ b'%s\0%s' % (k, v) for k, v in pycompat.iteritems(extras)
+ )
+ records.append(
+ (RECORD_FILE_VALUES, b'%s\0%s' % (filename, rawextras))
+ )
+ if self._labels is not None:
+ labels = b'\0'.join(self._labels)
+ records.append((RECORD_LABELS, labels))
+ return records
+
+ def _writerecords(self, records):
+ """Write current state on disk (both v1 and v2)"""
+ self._writerecordsv1(records)
+ self._writerecordsv2(records)
+
+ def _writerecordsv1(self, records):
+ """Write current state on disk in a version 1 file"""
+ f = self._repo.vfs(self.statepathv1, b'wb')
+ irecords = iter(records)
+ lrecords = next(irecords)
+ assert lrecords[0] == RECORD_LOCAL
+ f.write(hex(self._local) + b'\n')
+ for rtype, data in irecords:
+ if rtype == RECORD_MERGED:
+ f.write(b'%s\n' % _droponode(data))
+ f.close()
+
+ def _writerecordsv2(self, records):
+ """Write current state on disk in a version 2 file
+
+ See the docstring for _readrecordsv2 for why we use 't'."""
+ # these are the records that all version 2 clients can read
+ allowlist = (RECORD_LOCAL, RECORD_OTHER, RECORD_MERGED)
+ f = self._repo.vfs(self.statepathv2, b'wb')
+ for key, data in records:
+ assert len(key) == 1
+ if key not in allowlist:
+ key, data = RECORD_OVERRIDE, b'%s%s' % (key, data)
+ format = b'>sI%is' % len(data)
+ f.write(_pack(format, key, len(data), data))
+ f.close()
+
+ @staticmethod
+ def getlocalkey(path):
+ """hash the path of a local file context for storage in the .hg/merge
+ directory."""
+
+ return hex(hashutil.sha1(path).digest())
+
+ def add(self, fcl, fco, fca, fd):
+ """add a new (potentially?) conflicting file to the merge state
+ fcl: file context for local,
+ fco: file context for remote,
+ fca: file context for ancestors,
+ fd: file path of the resulting merge.
+
+ note: also write the local version to the `.hg/merge` directory.
+ """
+ if fcl.isabsent():
+ localkey = nullhex
+ else:
+ localkey = mergestate.getlocalkey(fcl.path())
+ self._repo.vfs.write(b'merge/' + localkey, fcl.data())
+ self._state[fd] = [
+ MERGE_RECORD_UNRESOLVED,
+ localkey,
+ fcl.path(),
+ fca.path(),
+ hex(fca.filenode()),
+ fco.path(),
+ hex(fco.filenode()),
+ fcl.flags(),
+ ]
+ self._stateextras[fd] = {b'ancestorlinknode': hex(fca.node())}
+ self._dirty = True
+
+ def addpath(self, path, frename, forigin):
+ """add a new conflicting path to the merge state
+ path: the path that conflicts
+ frename: the filename the conflicting file was renamed to
+ forigin: origin of the file ('l' or 'r' for local/remote)
+ """
+ self._state[path] = [MERGE_RECORD_UNRESOLVED_PATH, frename, forigin]
+ self._dirty = True
+
+ def addmergedother(self, path):
+ self._state[path] = [MERGE_RECORD_MERGED_OTHER, nullhex, nullhex]
+ self._dirty = True
+
+ def __contains__(self, dfile):
+ return dfile in self._state
+
+ def __getitem__(self, dfile):
+ return self._state[dfile][0]
+
+ def __iter__(self):
+ return iter(sorted(self._state))
+
+ def files(self):
+ return self._state.keys()
+
+ def mark(self, dfile, state):
+ self._state[dfile][0] = state
+ self._dirty = True
+
+ def mdstate(self):
+ return self._mdstate
+
+ def unresolved(self):
+ """Obtain the paths of unresolved files."""
+
+ for f, entry in pycompat.iteritems(self._state):
+ if entry[0] in (
+ MERGE_RECORD_UNRESOLVED,
+ MERGE_RECORD_UNRESOLVED_PATH,
+ ):
+ yield f
+
+ def driverresolved(self):
+ """Obtain the paths of driver-resolved files."""
+
+ for f, entry in self._state.items():
+ if entry[0] == MERGE_RECORD_DRIVER_RESOLVED:
+ yield f
+
+ def extras(self, filename):
+ return self._stateextras.setdefault(filename, {})
+
+ def _resolve(self, preresolve, dfile, wctx):
+ """rerun merge process for file path `dfile`"""
+ if self[dfile] in (MERGE_RECORD_RESOLVED, MERGE_RECORD_DRIVER_RESOLVED):
+ return True, 0
+ if self._state[dfile][0] == MERGE_RECORD_MERGED_OTHER:
+ return True, 0
+ stateentry = self._state[dfile]
+ state, localkey, lfile, afile, anode, ofile, onode, flags = stateentry
+ octx = self._repo[self._other]
+ extras = self.extras(dfile)
+ anccommitnode = extras.get(b'ancestorlinknode')
+ if anccommitnode:
+ actx = self._repo[anccommitnode]
+ else:
+ actx = None
+ fcd = _filectxorabsent(localkey, wctx, dfile)
+ fco = _filectxorabsent(onode, octx, ofile)
+ # TODO: move this to filectxorabsent
+ fca = self._repo.filectx(afile, fileid=anode, changectx=actx)
+ # "premerge" x flags
+ flo = fco.flags()
+ fla = fca.flags()
+ if b'x' in flags + flo + fla and b'l' not in flags + flo + fla:
+ if fca.node() == nullid and flags != flo:
+ if preresolve:
+ self._repo.ui.warn(
+ _(
+ b'warning: cannot merge flags for %s '
+ b'without common ancestor - keeping local flags\n'
+ )
+ % afile
+ )
+ elif flags == fla:
+ flags = flo
+ if preresolve:
+ # restore local
+ if localkey != nullhex:
+ f = self._repo.vfs(b'merge/' + localkey)
+ wctx[dfile].write(f.read(), flags)
+ f.close()
+ else:
+ wctx[dfile].remove(ignoremissing=True)
+ complete, r, deleted = filemerge.premerge(
+ self._repo,
+ wctx,
+ self._local,
+ lfile,
+ fcd,
+ fco,
+ fca,
+ labels=self._labels,
+ )
+ else:
+ complete, r, deleted = filemerge.filemerge(
+ self._repo,
+ wctx,
+ self._local,
+ lfile,
+ fcd,
+ fco,
+ fca,
+ labels=self._labels,
+ )
+ if r is None:
+ # no real conflict
+ del self._state[dfile]
+ self._stateextras.pop(dfile, None)
+ self._dirty = True
+ elif not r:
+ self.mark(dfile, MERGE_RECORD_RESOLVED)
+
+ if complete:
+ action = None
+ if deleted:
+ if fcd.isabsent():
+ # dc: local picked. Need to drop if present, which may
+ # happen on re-resolves.
+ action = ACTION_FORGET
+ else:
+ # cd: remote picked (or otherwise deleted)
+ action = ACTION_REMOVE
+ else:
+ if fcd.isabsent(): # dc: remote picked
+ action = ACTION_GET
+ elif fco.isabsent(): # cd: local picked
+ if dfile in self.localctx:
+ action = ACTION_ADD_MODIFIED
+ else:
+ action = ACTION_ADD
+ # else: regular merges (no action necessary)
+ self._results[dfile] = r, action
+
+ return complete, r
+
+ def preresolve(self, dfile, wctx):
+ """run premerge process for dfile
+
+ Returns whether the merge is complete, and the exit code."""
+ return self._resolve(True, dfile, wctx)
+
+ def resolve(self, dfile, wctx):
+ """run merge process (assuming premerge was run) for dfile
+
+ Returns the exit code of the merge."""
+ return self._resolve(False, dfile, wctx)[1]
+
+ def counts(self):
+ """return counts for updated, merged and removed files in this
+ session"""
+ updated, merged, removed = 0, 0, 0
+ for r, action in pycompat.itervalues(self._results):
+ if r is None:
+ updated += 1
+ elif r == 0:
+ if action == ACTION_REMOVE:
+ removed += 1
+ else:
+ merged += 1
+ return updated, merged, removed
+
+ def unresolvedcount(self):
+ """get unresolved count for this merge (persistent)"""
+ return len(list(self.unresolved()))
+
+ def actions(self):
+ """return lists of actions to perform on the dirstate"""
+ actions = {
+ ACTION_REMOVE: [],
+ ACTION_FORGET: [],
+ ACTION_ADD: [],
+ ACTION_ADD_MODIFIED: [],
+ ACTION_GET: [],
+ }
+ for f, (r, action) in pycompat.iteritems(self._results):
+ if action is not None:
+ actions[action].append((f, None, b"merge result"))
+ return actions
+
+ def recordactions(self):
+ """record remove/add/get actions in the dirstate"""
+ branchmerge = self._repo.dirstate.p2() != nullid
+ recordupdates(self._repo, self.actions(), branchmerge, None)
+
+ def queueremove(self, f):
+ """queues a file to be removed from the dirstate
+
+ Meant for use by custom merge drivers."""
+ self._results[f] = 0, ACTION_REMOVE
+
+ def queueadd(self, f):
+ """queues a file to be added to the dirstate
+
+ Meant for use by custom merge drivers."""
+ self._results[f] = 0, ACTION_ADD
+
+ def queueget(self, f):
+ """queues a file to be marked modified in the dirstate
+
+ Meant for use by custom merge drivers."""
+ self._results[f] = 0, ACTION_GET
+
+
+def recordupdates(repo, actions, branchmerge, getfiledata):
+ """record merge actions to the dirstate"""
+ # remove (must come first)
+ for f, args, msg in actions.get(ACTION_REMOVE, []):
+ if branchmerge:
+ repo.dirstate.remove(f)
+ else:
+ repo.dirstate.drop(f)
+
+ # forget (must come first)
+ for f, args, msg in actions.get(ACTION_FORGET, []):
+ repo.dirstate.drop(f)
+
+ # resolve path conflicts
+ for f, args, msg in actions.get(ACTION_PATH_CONFLICT_RESOLVE, []):
+ (f0, origf0) = args
+ repo.dirstate.add(f)
+ repo.dirstate.copy(origf0, f)
+ if f0 == origf0:
+ repo.dirstate.remove(f0)
+ else:
+ repo.dirstate.drop(f0)
+
+ # re-add
+ for f, args, msg in actions.get(ACTION_ADD, []):
+ repo.dirstate.add(f)
+
+ # re-add/mark as modified
+ for f, args, msg in actions.get(ACTION_ADD_MODIFIED, []):
+ if branchmerge:
+ repo.dirstate.normallookup(f)
+ else:
+ repo.dirstate.add(f)
+
+ # exec change
+ for f, args, msg in actions.get(ACTION_EXEC, []):
+ repo.dirstate.normallookup(f)
+
+ # keep
+ for f, args, msg in actions.get(ACTION_KEEP, []):
+ pass
+
+ # get
+ for f, args, msg in actions.get(ACTION_GET, []):
+ if branchmerge:
+ repo.dirstate.otherparent(f)
+ else:
+ parentfiledata = getfiledata[f] if getfiledata else None
+ repo.dirstate.normal(f, parentfiledata=parentfiledata)
+
+ # merge
+ for f, args, msg in actions.get(ACTION_MERGE, []):
+ f1, f2, fa, move, anc = args
+ if branchmerge:
+ # We've done a branch merge, mark this file as merged
+ # so that we properly record the merger later
+ repo.dirstate.merge(f)
+ if f1 != f2: # copy/rename
+ if move:
+ repo.dirstate.remove(f1)
+ if f1 != f:
+ repo.dirstate.copy(f1, f)
+ else:
+ repo.dirstate.copy(f2, f)
+ else:
+ # We've update-merged a locally modified file, so
+ # we set the dirstate to emulate a normal checkout
+ # of that file some time in the past. Thus our
+ # merge will appear as a normal local file
+ # modification.
+ if f2 == f: # file not locally copied/moved
+ repo.dirstate.normallookup(f)
+ if move:
+ repo.dirstate.drop(f1)
+
+ # directory rename, move local
+ for f, args, msg in actions.get(ACTION_DIR_RENAME_MOVE_LOCAL, []):
+ f0, flag = args
+ if branchmerge:
+ repo.dirstate.add(f)
+ repo.dirstate.remove(f0)
+ repo.dirstate.copy(f0, f)
+ else:
+ repo.dirstate.normal(f)
+ repo.dirstate.drop(f0)
+
+ # directory rename, get
+ for f, args, msg in actions.get(ACTION_LOCAL_DIR_RENAME_GET, []):
+ f0, flag = args
+ if branchmerge:
+ repo.dirstate.add(f)
+ repo.dirstate.copy(f0, f)
+ else:
+ repo.dirstate.normal(f)
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/mercurial/metadata.py Thu Jun 25 10:32:51 2020 -0700
@@ -0,0 +1,327 @@
+# metadata.py -- code related to various metadata computation and access.
+#
+# Copyright 2019 Google, Inc <martinvonz@google.com>
+# Copyright 2020 Pierre-Yves David <pierre-yves.david@octobus.net>
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2 or any later version.
+from __future__ import absolute_import, print_function
+
+import multiprocessing
+
+from . import (
+ error,
+ node,
+ pycompat,
+ util,
+)
+
+from .revlogutils import (
+ flagutil as sidedataflag,
+ sidedata as sidedatamod,
+)
+
+
+def computechangesetfilesadded(ctx):
+ """return the list of files added in a changeset
+ """
+ added = []
+ for f in ctx.files():
+ if not any(f in p for p in ctx.parents()):
+ added.append(f)
+ return added
+
+
+def get_removal_filter(ctx, x=None):
+ """return a function to detect files "wrongly" detected as `removed`
+
+ When a file is removed relative to p1 in a merge, this
+ function determines whether the absence is due to a
+ deletion from a parent, or whether the merge commit
+ itself deletes the file. We decide this by doing a
+ simplified three way merge of the manifest entry for
+ the file. There are two ways we decide the merge
+ itself didn't delete a file:
+ - neither parent (nor the merge) contain the file
+ - exactly one parent contains the file, and that
+ parent has the same filelog entry as the merge
+ ancestor (or all of them if there are two). In other
+ words, that parent left the file unchanged while the
+ other one deleted it.
+ One way to think about this is that deleting a file is
+ similar to emptying it, so the list of changed files
+ should be similar either way. The computation
+ described above is not done directly in _filecommit
+ when creating the list of changed files, however
+ it does something very similar by comparing filelog
+ nodes.
+ """
+
+ if x is not None:
+ p1, p2, m1, m2 = x
+ else:
+ p1 = ctx.p1()
+ p2 = ctx.p2()
+ m1 = p1.manifest()
+ m2 = p2.manifest()
+
+ @util.cachefunc
+ def mas():
+ p1n = p1.node()
+ p2n = p2.node()
+ cahs = ctx.repo().changelog.commonancestorsheads(p1n, p2n)
+ if not cahs:
+ cahs = [node.nullrev]
+ return [ctx.repo()[r].manifest() for r in cahs]
+
+ def deletionfromparent(f):
+ if f in m1:
+ return f not in m2 and all(
+ f in ma and ma.find(f) == m1.find(f) for ma in mas()
+ )
+ elif f in m2:
+ return all(f in ma and ma.find(f) == m2.find(f) for ma in mas())
+ else:
+ return True
+
+ return deletionfromparent
+
+
+def computechangesetfilesremoved(ctx):
+ """return the list of files removed in a changeset
+ """
+ removed = []
+ for f in ctx.files():
+ if f not in ctx:
+ removed.append(f)
+ if removed:
+ rf = get_removal_filter(ctx)
+ removed = [r for r in removed if not rf(r)]
+ return removed
+
+
+def computechangesetcopies(ctx):
+ """return the copies data for a changeset
+
+ The copies data are returned as a pair of dictionaries (p1copies, p2copies).
+
+ Each dictionary is in the form: `{newname: oldname}`
+ """
+ p1copies = {}
+ p2copies = {}
+ p1 = ctx.p1()
+ p2 = ctx.p2()
+ narrowmatch = ctx._repo.narrowmatch()
+ for dst in ctx.files():
+ if not narrowmatch(dst) or dst not in ctx:
+ continue
+ copied = ctx[dst].renamed()
+ if not copied:
+ continue
+ src, srcnode = copied
+ if src in p1 and p1[src].filenode() == srcnode:
+ p1copies[dst] = src
+ elif src in p2 and p2[src].filenode() == srcnode:
+ p2copies[dst] = src
+ return p1copies, p2copies
+
+
+def encodecopies(files, copies):
+ items = []
+ for i, dst in enumerate(files):
+ if dst in copies:
+ items.append(b'%d\0%s' % (i, copies[dst]))
+ if len(items) != len(copies):
+ raise error.ProgrammingError(
+ b'some copy targets missing from file list'
+ )
+ return b"\n".join(items)
+
+
+def decodecopies(files, data):
+ try:
+ copies = {}
+ if not data:
+ return copies
+ for l in data.split(b'\n'):
+ strindex, src = l.split(b'\0')
+ i = int(strindex)
+ dst = files[i]
+ copies[dst] = src
+ return copies
+ except (ValueError, IndexError):
+ # Perhaps someone had chosen the same key name (e.g. "p1copies") and
+ # used different syntax for the value.
+ return None
+
+
+def encodefileindices(files, subset):
+ subset = set(subset)
+ indices = []
+ for i, f in enumerate(files):
+ if f in subset:
+ indices.append(b'%d' % i)
+ return b'\n'.join(indices)
+
+
+def decodefileindices(files, data):
+ try:
+ subset = []
+ if not data:
+ return subset
+ for strindex in data.split(b'\n'):
+ i = int(strindex)
+ if i < 0 or i >= len(files):
+ return None
+ subset.append(files[i])
+ return subset
+ except (ValueError, IndexError):
+ # Perhaps someone had chosen the same key name (e.g. "added") and
+ # used different syntax for the value.
+ return None
+
+
+def _getsidedata(srcrepo, rev):
+ ctx = srcrepo[rev]
+ filescopies = computechangesetcopies(ctx)
+ filesadded = computechangesetfilesadded(ctx)
+ filesremoved = computechangesetfilesremoved(ctx)
+ sidedata = {}
+ if any([filescopies, filesadded, filesremoved]):
+ sortedfiles = sorted(ctx.files())
+ p1copies, p2copies = filescopies
+ p1copies = encodecopies(sortedfiles, p1copies)
+ p2copies = encodecopies(sortedfiles, p2copies)
+ filesadded = encodefileindices(sortedfiles, filesadded)
+ filesremoved = encodefileindices(sortedfiles, filesremoved)
+ if p1copies:
+ sidedata[sidedatamod.SD_P1COPIES] = p1copies
+ if p2copies:
+ sidedata[sidedatamod.SD_P2COPIES] = p2copies
+ if filesadded:
+ sidedata[sidedatamod.SD_FILESADDED] = filesadded
+ if filesremoved:
+ sidedata[sidedatamod.SD_FILESREMOVED] = filesremoved
+ return sidedata
+
+
+def getsidedataadder(srcrepo, destrepo):
+ use_w = srcrepo.ui.configbool(b'experimental', b'worker.repository-upgrade')
+ if pycompat.iswindows or not use_w:
+ return _get_simple_sidedata_adder(srcrepo, destrepo)
+ else:
+ return _get_worker_sidedata_adder(srcrepo, destrepo)
+
+
+def _sidedata_worker(srcrepo, revs_queue, sidedata_queue, tokens):
+ """The function used by workers precomputing sidedata
+
+ It reads an input queue containing revision numbers
+ It writes to an output queue containing (rev, <sidedata-map>)
+
+ The `None` input value is used as a stop signal.
+
+ The `tokens` semaphore is used to avoid having too many unprocessed
+ entries. The workers need to acquire one token before fetching a task.
+ They will be released by the consumer of the produced data.
+ """
+ tokens.acquire()
+ rev = revs_queue.get()
+ while rev is not None:
+ data = _getsidedata(srcrepo, rev)
+ sidedata_queue.put((rev, data))
+ tokens.acquire()
+ rev = revs_queue.get()
+ # processing of `None` is completed, release the token.
+ tokens.release()
+
+
+BUFF_PER_WORKER = 50
+
+
+def _get_worker_sidedata_adder(srcrepo, destrepo):
+ """The parallel version of the sidedata computation
+
+ This code spawns a pool of workers that precompute a buffer of sidedata
+ before we actually need them"""
+ # avoid circular import copies -> scmutil -> worker -> copies
+ from . import worker
+
+ nbworkers = worker._numworkers(srcrepo.ui)
+
+ tokens = multiprocessing.BoundedSemaphore(nbworkers * BUFF_PER_WORKER)
+ revsq = multiprocessing.Queue()
+ sidedataq = multiprocessing.Queue()
+
+ assert srcrepo.filtername is None
+ # queue all tasks beforehand, revision numbers are small and it makes
+ # synchronisation simpler
+ #
+ # Since the computation for each node can be quite expensive, the overhead
+ # of using a single queue is not relevant. In practice, most computations
+ # are fast but some are very expensive and dominate all the other smaller
+ # costs.
+ for r in srcrepo.changelog.revs():
+ revsq.put(r)
+ # queue the "no more tasks" markers
+ for i in range(nbworkers):
+ revsq.put(None)
+
+ allworkers = []
+ for i in range(nbworkers):
+ args = (srcrepo, revsq, sidedataq, tokens)
+ w = multiprocessing.Process(target=_sidedata_worker, args=args)
+ allworkers.append(w)
+ w.start()
+
+ # dictionary to store results for revisions higher than the one we are
+ # looking for. For example, if we need the sidedatamap for 42, and 43 is
+ # received, we shelve 43 for later use.
+ staging = {}
+
+ def sidedata_companion(revlog, rev):
+ sidedata = {}
+ if util.safehasattr(revlog, b'filteredrevs'): # this is a changelog
+ # Is the data previously shelved ?
+ sidedata = staging.pop(rev, None)
+ if sidedata is None:
+ # look at the queued result until we find the one we are looking
+ # for (shelve the other ones)
+ r, sidedata = sidedataq.get()
+ while r != rev:
+ staging[r] = sidedata
+ r, sidedata = sidedataq.get()
+ tokens.release()
+ return False, (), sidedata
+
+ return sidedata_companion
+
+
+def _get_simple_sidedata_adder(srcrepo, destrepo):
+ """The simple version of the sidedata computation
+
+ It just computes it in the same thread on request"""
+
+ def sidedatacompanion(revlog, rev):
+ sidedata = {}
+ if util.safehasattr(revlog, 'filteredrevs'): # this is a changelog
+ sidedata = _getsidedata(srcrepo, rev)
+ return False, (), sidedata
+
+ return sidedatacompanion
+
+
+def getsidedataremover(srcrepo, destrepo):
+ def sidedatacompanion(revlog, rev):
+ f = ()
+ if util.safehasattr(revlog, 'filteredrevs'): # this is a changelog
+ if revlog.flags(rev) & sidedataflag.REVIDX_SIDEDATA:
+ f = (
+ sidedatamod.SD_P1COPIES,
+ sidedatamod.SD_P2COPIES,
+ sidedatamod.SD_FILESADDED,
+ sidedatamod.SD_FILESREMOVED,
+ )
+ return False, f, {}
+
+ return sidedatacompanion
--- a/mercurial/narrowspec.py Tue Jun 23 16:07:18 2020 +0200
+++ b/mercurial/narrowspec.py Thu Jun 25 10:32:51 2020 -0700
@@ -14,6 +14,7 @@
error,
match as matchmod,
merge,
+ mergestate as mergestatemod,
scmutil,
sparse,
util,
@@ -272,7 +273,7 @@
def _writeaddedfiles(repo, pctx, files):
actions = merge.emptyactions()
- addgaction = actions[merge.ACTION_GET].append
+ addgaction = actions[mergestatemod.ACTION_GET].append
mf = repo[b'.'].manifest()
for f in files:
if not repo.wvfs.exists(f):
--- a/mercurial/pathutil.py Tue Jun 23 16:07:18 2020 +0200
+++ b/mercurial/pathutil.py Thu Jun 25 10:32:51 2020 -0700
@@ -1,5 +1,6 @@
from __future__ import absolute_import
+import contextlib
import errno
import os
import posixpath
@@ -148,6 +149,19 @@
except (OSError, error.Abort):
return False
+ @contextlib.contextmanager
+ def cached(self):
+ if self._cached:
+ yield
+ else:
+ try:
+ self._cached = True
+ yield
+ finally:
+ self.audited.clear()
+ self.auditeddir.clear()
+ self._cached = False
+
def canonpath(root, cwd, myname, auditor=None):
'''return the canonical path of myname, given cwd and root
--- a/mercurial/phases.py Tue Jun 23 16:07:18 2020 +0200
+++ b/mercurial/phases.py Thu Jun 25 10:32:51 2020 -0700
@@ -583,30 +583,32 @@
raise error.ProgrammingError(msg)
repo = repo.unfiltered()
- currentroots = self.phaseroots[targetphase]
+ torev = repo.changelog.rev
+ tonode = repo.changelog.node
+ currentroots = {torev(node) for node in self.phaseroots[targetphase]}
finalroots = oldroots = set(currentroots)
+ newroots = [torev(node) for node in nodes]
newroots = [
- n for n in nodes if self.phase(repo, repo[n].rev()) < targetphase
+ rev for rev in newroots if self.phase(repo, rev) < targetphase
]
+
if newroots:
-
- if nullid in newroots:
+ if nullrev in newroots:
raise error.Abort(_(b'cannot change null revision phase'))
- currentroots = currentroots.copy()
currentroots.update(newroots)
# Only compute new roots for revs above the roots that are being
# retracted.
- minnewroot = min(repo[n].rev() for n in newroots)
- aboveroots = [
- n for n in currentroots if repo[n].rev() >= minnewroot
- ]
- updatedroots = repo.set(b'roots(%ln::)', aboveroots)
+ minnewroot = min(newroots)
+ aboveroots = [rev for rev in currentroots if rev >= minnewroot]
+ updatedroots = repo.revs(b'roots(%ld::)', aboveroots)
- finalroots = {n for n in currentroots if repo[n].rev() < minnewroot}
- finalroots.update(ctx.node() for ctx in updatedroots)
+ finalroots = {rev for rev in currentroots if rev < minnewroot}
+ finalroots.update(updatedroots)
if finalroots != oldroots:
- self._updateroots(targetphase, finalroots, tr)
+ self._updateroots(
+ targetphase, {tonode(rev) for rev in finalroots}, tr
+ )
return True
return False
--- a/mercurial/posix.py Tue Jun 23 16:07:18 2020 +0200
+++ b/mercurial/posix.py Thu Jun 25 10:32:51 2020 -0700
@@ -538,10 +538,6 @@
return pycompat.shlexsplit(s, posix=True)
-def quotecommand(cmd):
- return cmd
-
-
def testpid(pid):
'''return False if pid dead, True if running or not sure'''
if pycompat.sysplatform == b'OpenVMS':
--- a/mercurial/pycompat.py Tue Jun 23 16:07:18 2020 +0200
+++ b/mercurial/pycompat.py Thu Jun 25 10:32:51 2020 -0700
@@ -98,7 +98,6 @@
import codecs
import functools
import io
- import locale
import struct
if os.name == r'nt' and sys.version_info >= (3, 6):
@@ -156,16 +155,10 @@
if getattr(sys, 'argv', None) is not None:
# On POSIX, the char** argv array is converted to Python str using
- # Py_DecodeLocale(). The inverse of this is Py_EncodeLocale(), which isn't
- # directly callable from Python code. So, we need to emulate it.
- # Py_DecodeLocale() calls mbstowcs() and falls back to mbrtowc() with
- # surrogateescape error handling on failure. These functions take the
- # current system locale into account. So, the inverse operation is to
- # .encode() using the system locale's encoding and using the
- # surrogateescape error handler. The only tricky part here is getting
- # the system encoding correct, since `locale.getlocale()` can return
- # None. We fall back to the filesystem encoding if lookups via `locale`
- # fail, as this seems like a reasonable thing to do.
+ # Py_DecodeLocale(). The inverse of this is Py_EncodeLocale(), which
+ # isn't directly callable from Python code. In practice, os.fsencode()
+ # can be used instead (this is recommended by Python's documentation
+ # for sys.argv).
#
# On Windows, the wchar_t **argv is passed into the interpreter as-is.
# Like POSIX, we need to emulate what Py_EncodeLocale() would do. But
@@ -178,19 +171,7 @@
if os.name == r'nt':
sysargv = [a.encode("mbcs", "ignore") for a in sys.argv]
else:
-
- def getdefaultlocale_if_known():
- try:
- return locale.getdefaultlocale()
- except ValueError:
- return None, None
-
- encoding = (
- locale.getlocale()[1]
- or getdefaultlocale_if_known()[1]
- or sys.getfilesystemencoding()
- )
- sysargv = [a.encode(encoding, "surrogateescape") for a in sys.argv]
+ sysargv = [fsencode(a) for a in sys.argv]
bytechr = struct.Struct('>B').pack
byterepr = b'%r'.__mod__
--- a/mercurial/revlogutils/nodemap.py Tue Jun 23 16:07:18 2020 +0200
+++ b/mercurial/revlogutils/nodemap.py Thu Jun 25 10:32:51 2020 -0700
@@ -13,6 +13,8 @@
import re
import struct
+from ..i18n import _
+
from .. import (
error,
node as nodemod,
@@ -48,7 +50,7 @@
docket.data_unused = data_unused
filename = _rawdata_filepath(revlog, docket)
- use_mmap = revlog.opener.options.get(b"exp-persistent-nodemap.mmap")
+ use_mmap = revlog.opener.options.get(b"persistent-nodemap.mmap")
try:
with revlog.opener(filename) as fd:
if use_mmap:
@@ -105,6 +107,9 @@
def addabort(self, *args, **kwargs):
pass
+ def _report(self, *args):
+ pass
+
def update_persistent_nodemap(revlog):
"""update the persistent nodemap right now
@@ -137,7 +142,14 @@
can_incremental = util.safehasattr(revlog.index, "nodemap_data_incremental")
ondisk_docket = revlog._nodemap_docket
feed_data = util.safehasattr(revlog.index, "update_nodemap_data")
- use_mmap = revlog.opener.options.get(b"exp-persistent-nodemap.mmap")
+ use_mmap = revlog.opener.options.get(b"persistent-nodemap.mmap")
+ mode = revlog.opener.options.get(b"persistent-nodemap.mode")
+ if not can_incremental:
+ msg = _(b"persistent nodemap in strict mode without efficient method")
+ if mode == b'warn':
+ tr._report(b"%s\n" % msg)
+ elif mode == b'strict':
+ raise error.Abort(msg)
data = None
# first attemp an incremental update of the data
@@ -255,8 +267,7 @@
# data. Its content is currently very light, but it will expand as the on disk
# nodemap gains the necessary features to be used in production.
-# version 0 is experimental, no BC garantee, do no use outside of tests.
-ONDISK_VERSION = 0
+ONDISK_VERSION = 1
S_VERSION = struct.Struct(">B")
S_HEADER = struct.Struct(">BQQQQ")
--- a/mercurial/revset.py Tue Jun 23 16:07:18 2020 +0200
+++ b/mercurial/revset.py Thu Jun 25 10:32:51 2020 -0700
@@ -789,9 +789,9 @@
"merge" here includes merge conflicts from e.g. 'hg rebase' or 'hg graft'.
"""
getargs(x, 0, 0, _(b"conflictlocal takes no arguments"))
- from . import merge
-
- mergestate = merge.mergestate.read(repo)
+ from . import mergestate as mergestatemod
+
+ mergestate = mergestatemod.mergestate.read(repo)
if mergestate.active() and repo.changelog.hasnode(mergestate.local):
return subset & {repo.changelog.rev(mergestate.local)}
@@ -805,9 +805,9 @@
"merge" here includes merge conflicts from e.g. 'hg rebase' or 'hg graft'.
"""
getargs(x, 0, 0, _(b"conflictother takes no arguments"))
- from . import merge
-
- mergestate = merge.mergestate.read(repo)
+ from . import mergestate as mergestatemod
+
+ mergestate = mergestatemod.mergestate.read(repo)
if mergestate.active() and repo.changelog.hasnode(mergestate.other):
return subset & {repo.changelog.rev(mergestate.other)}
--- a/mercurial/scmutil.py Tue Jun 23 16:07:18 2020 +0200
+++ b/mercurial/scmutil.py Thu Jun 25 10:32:51 2020 -0700
@@ -456,9 +456,7 @@
def resolvehexnodeidprefix(repo, prefix):
- if prefix.startswith(b'x') and repo.ui.configbool(
- b'experimental', b'revisions.prefixhexnode'
- ):
+ if prefix.startswith(b'x'):
prefix = prefix[1:]
try:
# Uses unfiltered repo because it's faster when prefix is ambiguous/
@@ -805,9 +803,12 @@
if relative:
cwd = repo.getcwd()
- pathto = repo.pathto
- return lambda f: pathto(f, cwd)
- elif repo.ui.configbool(b'ui', b'slash'):
+ if cwd != b'':
+ # this branch would work even if cwd == b'' (i.e. cwd is the repo
+ # root), but its generality makes the returned function slower
+ pathto = repo.pathto
+ return lambda f: pathto(f, cwd)
+ if repo.ui.configbool(b'ui', b'slash'):
return lambda f: f
else:
return util.localpath
--- a/mercurial/shelve.py Tue Jun 23 16:07:18 2020 +0200
+++ b/mercurial/shelve.py Thu Jun 25 10:32:51 2020 -0700
@@ -42,6 +42,7 @@
lock as lockmod,
mdiff,
merge,
+ mergestate as mergestatemod,
node as nodemod,
patch,
phases,
@@ -801,7 +802,7 @@
basename = state.name
with repo.lock():
checkparents(repo, state)
- ms = merge.mergestate.read(repo)
+ ms = mergestatemod.mergestate.read(repo)
if list(ms.unresolved()):
raise error.Abort(
_(b"unresolved conflicts, can't continue"),
--- a/mercurial/simplemerge.py Tue Jun 23 16:07:18 2020 +0200
+++ b/mercurial/simplemerge.py Thu Jun 25 10:32:51 2020 -0700
@@ -451,12 +451,7 @@
return result
-def _bytes_to_set(b):
- """turns a multiple bytes (usually flags) into a set of individual byte"""
- return set(b[x : x + 1] for x in range(len(b)))
-
-
-def is_null(ctx):
+def is_not_null(ctx):
if not util.safehasattr(ctx, "node"):
return False
return ctx.node() != nodemod.nullid
@@ -518,15 +513,13 @@
# merge flags if necessary
flags = localctx.flags()
- localflags = _bytes_to_set(flags)
- otherflags = _bytes_to_set(otherctx.flags())
- if is_null(basectx) and localflags != otherflags:
- baseflags = _bytes_to_set(basectx.flags())
- flags = localflags & otherflags
- for f in localflags.symmetric_difference(otherflags):
- if f not in baseflags:
- flags.add(f)
- flags = b''.join(sorted(flags))
+ localflags = set(pycompat.iterbytestr(flags))
+ otherflags = set(pycompat.iterbytestr(otherctx.flags()))
+ if is_not_null(basectx) and localflags != otherflags:
+ baseflags = set(pycompat.iterbytestr(basectx.flags()))
+ commonflags = localflags & otherflags
+ addedflags = (localflags ^ otherflags) - baseflags
+ flags = b''.join(sorted(commonflags | addedflags))
if not opts.get(b'print'):
localctx.write(mergedtext, flags)
--- a/mercurial/sparse.py Tue Jun 23 16:07:18 2020 +0200
+++ b/mercurial/sparse.py Thu Jun 25 10:32:51 2020 -0700
@@ -18,6 +18,7 @@
error,
match as matchmod,
merge as mergemod,
+ mergestate as mergestatemod,
pathutil,
pycompat,
scmutil,
@@ -406,7 +407,7 @@
elif file in wctx:
prunedactions[file] = (b'r', args, msg)
- if branchmerge and type == mergemod.ACTION_MERGE:
+ if branchmerge and type == mergestatemod.ACTION_MERGE:
f1, f2, fa, move, anc = args
if not sparsematch(f1):
temporaryfiles.append(f1)
--- a/mercurial/sshpeer.py Tue Jun 23 16:07:18 2020 +0200
+++ b/mercurial/sshpeer.py Thu Jun 25 10:32:51 2020 -0700
@@ -36,15 +36,16 @@
return b"'%s'" % s.replace(b"'", b"'\\''")
-def _forwardoutput(ui, pipe):
+def _forwardoutput(ui, pipe, warn=False):
"""display all data currently available on pipe as remote output.
This is non blocking."""
if pipe:
s = procutil.readpipe(pipe)
if s:
+ display = ui.warn if warn else ui.status
for l in s.splitlines():
- ui.status(_(b"remote: "), l, b'\n')
+ display(_(b"remote: "), l, b'\n')
class doublepipe(object):
@@ -178,7 +179,6 @@
)
ui.debug(b'running %s\n' % cmd)
- cmd = procutil.quotecommand(cmd)
# no buffer allow the use of 'select'
# feel free to remove buffering and select usage when we ultimately
@@ -204,8 +204,12 @@
def _performhandshake(ui, stdin, stdout, stderr):
def badresponse():
- # Flush any output on stderr.
- _forwardoutput(ui, stderr)
+ # Flush any output on stderr. In general, the stderr contains errors
+ # from the remote (ssh errors, some hg errors), and status indications
+ # (like "adding changes"), with no current way to tell them apart.
+ # Here we failed so early that it's almost certainly only errors, so
+ # use warn=True so -q doesn't hide them.
+ _forwardoutput(ui, stderr, warn=True)
msg = _(b'no suitable response from remote hg')
hint = ui.config(b'ui', b'ssherrorhint')
@@ -307,7 +311,7 @@
while lines[-1] and max_noise:
try:
l = stdout.readline()
- _forwardoutput(ui, stderr)
+ _forwardoutput(ui, stderr, warn=True)
# Look for reply to protocol upgrade request. It has a token
# in it, so there should be no false positives.
@@ -374,7 +378,7 @@
badresponse()
# Flush any output on stderr before proceeding.
- _forwardoutput(ui, stderr)
+ _forwardoutput(ui, stderr, warn=True)
return protoname, caps
--- a/mercurial/sslutil.py Tue Jun 23 16:07:18 2020 +0200
+++ b/mercurial/sslutil.py Thu Jun 25 10:32:51 2020 -0700
@@ -33,9 +33,8 @@
# support for TLS 1.1, TLS 1.2, SNI, system CA stores, etc. These features are
# all exposed via the "ssl" module.
#
-# Depending on the version of Python being used, SSL/TLS support is either
-# modern/secure or legacy/insecure. Many operations in this module have
-# separate code paths depending on support in Python.
+# We require in setup.py the presence of ssl.SSLContext, which indicates modern
+# SSL/TLS support.
configprotocols = {
b'tls1.0',
@@ -45,76 +44,19 @@
hassni = getattr(ssl, 'HAS_SNI', False)
-# TLS 1.1 and 1.2 may not be supported if the OpenSSL Python is compiled
-# against doesn't support them.
-supportedprotocols = {b'tls1.0'}
-if util.safehasattr(ssl, b'PROTOCOL_TLSv1_1'):
+# ssl.HAS_TLSv1* are preferred to check support but they were added in Python
+# 3.7. Prior to CPython commit 6e8cda91d92da72800d891b2fc2073ecbc134d98
+# (backported to the 3.7 branch), ssl.PROTOCOL_TLSv1_1 / ssl.PROTOCOL_TLSv1_2
+# were defined only if compiled against an OpenSSL version with TLS 1.1 / 1.2
+# support. At the mentioned commit, they were unconditionally defined.
+supportedprotocols = set()
+if getattr(ssl, 'HAS_TLSv1', util.safehasattr(ssl, 'PROTOCOL_TLSv1')):
+ supportedprotocols.add(b'tls1.0')
+if getattr(ssl, 'HAS_TLSv1_1', util.safehasattr(ssl, 'PROTOCOL_TLSv1_1')):
supportedprotocols.add(b'tls1.1')
-if util.safehasattr(ssl, b'PROTOCOL_TLSv1_2'):
+if getattr(ssl, 'HAS_TLSv1_2', util.safehasattr(ssl, 'PROTOCOL_TLSv1_2')):
supportedprotocols.add(b'tls1.2')
-try:
- # ssl.SSLContext was added in 2.7.9 and presence indicates modern
- # SSL/TLS features are available.
- SSLContext = ssl.SSLContext
- modernssl = True
- _canloaddefaultcerts = util.safehasattr(SSLContext, b'load_default_certs')
-except AttributeError:
- modernssl = False
- _canloaddefaultcerts = False
-
- # We implement SSLContext using the interface from the standard library.
- class SSLContext(object):
- def __init__(self, protocol):
- # From the public interface of SSLContext
- self.protocol = protocol
- self.check_hostname = False
- self.options = 0
- self.verify_mode = ssl.CERT_NONE
-
- # Used by our implementation.
- self._certfile = None
- self._keyfile = None
- self._certpassword = None
- self._cacerts = None
- self._ciphers = None
-
- def load_cert_chain(self, certfile, keyfile=None, password=None):
- self._certfile = certfile
- self._keyfile = keyfile
- self._certpassword = password
-
- def load_default_certs(self, purpose=None):
- pass
-
- def load_verify_locations(self, cafile=None, capath=None, cadata=None):
- if capath:
- raise error.Abort(_(b'capath not supported'))
- if cadata:
- raise error.Abort(_(b'cadata not supported'))
-
- self._cacerts = cafile
-
- def set_ciphers(self, ciphers):
- self._ciphers = ciphers
-
- def wrap_socket(self, socket, server_hostname=None, server_side=False):
- # server_hostname is unique to SSLContext.wrap_socket and is used
- # for SNI in that context. So there's nothing for us to do with it
- # in this legacy code since we don't support SNI.
-
- args = {
- 'keyfile': self._keyfile,
- 'certfile': self._certfile,
- 'server_side': server_side,
- 'cert_reqs': self.verify_mode,
- 'ssl_version': self.protocol,
- 'ca_certs': self._cacerts,
- 'ciphers': self._ciphers,
- }
-
- return ssl.wrap_socket(socket, **args)
-
def _hostsettings(ui, hostname):
"""Obtain security settings for a hostname.
@@ -135,15 +77,11 @@
b'disablecertverification': False,
# Whether the legacy [hostfingerprints] section has data for this host.
b'legacyfingerprint': False,
- # PROTOCOL_* constant to use for SSLContext.__init__.
- b'protocol': None,
# String representation of minimum protocol to be used for UI
# presentation.
- b'protocolui': None,
+ b'minimumprotocol': None,
# ssl.CERT_* constant used by SSLContext.verify_mode.
b'verifymode': None,
- # Defines extra ssl.OP* bitwise options to set.
- b'ctxoptions': None,
# OpenSSL Cipher List to use (instead of default).
b'ciphers': None,
}
@@ -158,45 +96,30 @@
% b' '.join(sorted(configprotocols)),
)
- # We default to TLS 1.1+ where we can because TLS 1.0 has known
- # vulnerabilities (like BEAST and POODLE). We allow users to downgrade to
- # TLS 1.0+ via config options in case a legacy server is encountered.
- if b'tls1.1' in supportedprotocols:
- defaultprotocol = b'tls1.1'
- else:
- # Let people know they are borderline secure.
- # We don't document this config option because we want people to see
- # the bold warnings on the web site.
- # internal config: hostsecurity.disabletls10warning
- if not ui.configbool(b'hostsecurity', b'disabletls10warning'):
- ui.warn(
- _(
- b'warning: connecting to %s using legacy security '
- b'technology (TLS 1.0); see '
- b'https://mercurial-scm.org/wiki/SecureConnections for '
- b'more info\n'
- )
- % bhostname
- )
- defaultprotocol = b'tls1.0'
+ # We default to TLS 1.1+ because TLS 1.0 has known vulnerabilities (like
+ # BEAST and POODLE). We allow users to downgrade to TLS 1.0+ via config
+ # options in case a legacy server is encountered.
+
+ # setup.py checks that TLS 1.1 or TLS 1.2 is present, so the following
+ # assert should not fail.
+ assert supportedprotocols - {b'tls1.0'}
+ defaultminimumprotocol = b'tls1.1'
key = b'minimumprotocol'
- protocol = ui.config(b'hostsecurity', key, defaultprotocol)
- validateprotocol(protocol, key)
+ minimumprotocol = ui.config(b'hostsecurity', key, defaultminimumprotocol)
+ validateprotocol(minimumprotocol, key)
key = b'%s:minimumprotocol' % bhostname
- protocol = ui.config(b'hostsecurity', key, protocol)
- validateprotocol(protocol, key)
+ minimumprotocol = ui.config(b'hostsecurity', key, minimumprotocol)
+ validateprotocol(minimumprotocol, key)
# If --insecure is used, we allow the use of TLS 1.0 despite config options.
# We always print a "connection security to %s is disabled..." message when
# --insecure is used. So no need to print anything more here.
if ui.insecureconnections:
- protocol = b'tls1.0'
+ minimumprotocol = b'tls1.0'
- s[b'protocol'], s[b'ctxoptions'], s[b'protocolui'] = protocolsettings(
- protocol
- )
+ s[b'minimumprotocol'] = minimumprotocol
ciphers = ui.config(b'hostsecurity', b'ciphers')
ciphers = ui.config(b'hostsecurity', b'%s:ciphers' % bhostname, ciphers)
@@ -288,7 +211,7 @@
# Require certificate validation if CA certs are being loaded and
# verification hasn't been disabled above.
- if cafile or (_canloaddefaultcerts and s[b'allowloaddefaultcerts']):
+ if cafile or s[b'allowloaddefaultcerts']:
s[b'verifymode'] = ssl.CERT_REQUIRED
else:
# At this point we don't have a fingerprint, aren't being
@@ -298,59 +221,26 @@
# user).
s[b'verifymode'] = ssl.CERT_NONE
- assert s[b'protocol'] is not None
- assert s[b'ctxoptions'] is not None
assert s[b'verifymode'] is not None
return s
-def protocolsettings(protocol):
- """Resolve the protocol for a config value.
-
- Returns a 3-tuple of (protocol, options, ui value) where the first
- 2 items are values used by SSLContext and the last is a string value
- of the ``minimumprotocol`` config option equivalent.
+def commonssloptions(minimumprotocol):
+ """Return SSLContext options common to servers and clients.
"""
- if protocol not in configprotocols:
- raise ValueError(b'protocol value not supported: %s' % protocol)
-
- # Despite its name, PROTOCOL_SSLv23 selects the highest protocol
- # that both ends support, including TLS protocols. On legacy stacks,
- # the highest it likely goes is TLS 1.0. On modern stacks, it can
- # support TLS 1.2.
- #
- # The PROTOCOL_TLSv* constants select a specific TLS version
- # only (as opposed to multiple versions). So the method for
- # supporting multiple TLS versions is to use PROTOCOL_SSLv23 and
- # disable protocols via SSLContext.options and OP_NO_* constants.
- # However, SSLContext.options doesn't work unless we have the
- # full/real SSLContext available to us.
- if supportedprotocols == {b'tls1.0'}:
- if protocol != b'tls1.0':
- raise error.Abort(
- _(b'current Python does not support protocol setting %s')
- % protocol,
- hint=_(
- b'upgrade Python or disable setting since '
- b'only TLS 1.0 is supported'
- ),
- )
-
- return ssl.PROTOCOL_TLSv1, 0, b'tls1.0'
-
- # WARNING: returned options don't work unless the modern ssl module
- # is available. Be careful when adding options here.
+ if minimumprotocol not in configprotocols:
+ raise ValueError(b'protocol value not supported: %s' % minimumprotocol)
# SSLv2 and SSLv3 are broken. We ban them outright.
options = ssl.OP_NO_SSLv2 | ssl.OP_NO_SSLv3
- if protocol == b'tls1.0':
+ if minimumprotocol == b'tls1.0':
# Defaults above are to use TLS 1.0+
pass
- elif protocol == b'tls1.1':
+ elif minimumprotocol == b'tls1.1':
options |= ssl.OP_NO_TLSv1
- elif protocol == b'tls1.2':
+ elif minimumprotocol == b'tls1.2':
options |= ssl.OP_NO_TLSv1 | ssl.OP_NO_TLSv1_1
else:
raise error.Abort(_(b'this should not happen'))
@@ -359,7 +249,7 @@
# There is no guarantee this attribute is defined on the module.
options |= getattr(ssl, 'OP_NO_COMPRESSION', 0)
- return ssl.PROTOCOL_SSLv23, options, protocol
+ return options
def wrapsocket(sock, keyfile, certfile, ui, serverhostname=None):
@@ -414,12 +304,12 @@
# bundle with a specific CA cert removed. If the system/default CA bundle
# is loaded and contains that removed CA, you've just undone the user's
# choice.
- sslcontext = SSLContext(settings[b'protocol'])
-
- # This is a no-op unless using modern ssl.
- sslcontext.options |= settings[b'ctxoptions']
-
- # This still works on our fake SSLContext.
+ #
+ # Despite its name, PROTOCOL_SSLv23 selects the highest protocol that both
+ # ends support, including TLS protocols. commonssloptions() restricts the
+ # set of allowed protocols.
+ sslcontext = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
+ sslcontext.options |= commonssloptions(settings[b'minimumprotocol'])
sslcontext.verify_mode = settings[b'verifymode']
if settings[b'ciphers']:
@@ -468,8 +358,6 @@
# If we're doing certificate verification and no CA certs are loaded,
# that is almost certainly the reason why verification failed. Provide
# a hint to the user.
- # Only modern ssl module exposes SSLContext.get_ca_certs() so we can
- # only show this warning if modern ssl is available.
# The exception handler is here to handle bugs around cert attributes:
# https://bugs.python.org/issue20916#msg213479. (See issues5313.)
# When the main 20916 bug occurs, 'sslcontext.get_ca_certs()' is a
@@ -478,7 +366,6 @@
if (
caloaded
and settings[b'verifymode'] == ssl.CERT_REQUIRED
- and modernssl
and not sslcontext.get_ca_certs()
):
ui.warn(
@@ -502,7 +389,7 @@
# reason, try to emit an actionable warning.
if e.reason == 'UNSUPPORTED_PROTOCOL':
# We attempted TLS 1.0+.
- if settings[b'protocolui'] == b'tls1.0':
+ if settings[b'minimumprotocol'] == b'tls1.0':
# We support more than just TLS 1.0+. If this happens,
# the likely scenario is either the client or the server
# is really old. (e.g. server doesn't support TLS 1.0+ or
@@ -547,7 +434,7 @@
b'to be more secure than the server can support)\n'
)
% (
- settings[b'protocolui'],
+ settings[b'minimumprotocol'],
pycompat.bytesurl(serverhostname),
)
)
@@ -618,12 +505,18 @@
_(b'referenced certificate file (%s) does not exist') % f
)
- protocol, options, _protocolui = protocolsettings(b'tls1.0')
+ # Despite its name, PROTOCOL_SSLv23 selects the highest protocol that both
+ # ends support, including TLS protocols. commonssloptions() restricts the
+ # set of allowed protocols.
+ protocol = ssl.PROTOCOL_SSLv23
+ options = commonssloptions(b'tls1.0')
# This config option is intended for use in tests only. It is a giant
# footgun to kill security. Don't define it.
exactprotocol = ui.config(b'devel', b'serverexactprotocol')
if exactprotocol == b'tls1.0':
+ if b'tls1.0' not in supportedprotocols:
+ raise error.Abort(_(b'TLS 1.0 not supported by this Python'))
protocol = ssl.PROTOCOL_TLSv1
elif exactprotocol == b'tls1.1':
if b'tls1.1' not in supportedprotocols:
@@ -638,23 +531,20 @@
_(b'invalid value for serverexactprotocol: %s') % exactprotocol
)
- if modernssl:
- # We /could/ use create_default_context() here since it doesn't load
- # CAs when configured for client auth. However, it is hard-coded to
- # use ssl.PROTOCOL_SSLv23 which may not be appropriate here.
- sslcontext = SSLContext(protocol)
- sslcontext.options |= options
+ # We /could/ use create_default_context() here since it doesn't load
+ # CAs when configured for client auth. However, it is hard-coded to
+ # use ssl.PROTOCOL_SSLv23 which may not be appropriate here.
+ sslcontext = ssl.SSLContext(protocol)
+ sslcontext.options |= options
- # Improve forward secrecy.
- sslcontext.options |= getattr(ssl, 'OP_SINGLE_DH_USE', 0)
- sslcontext.options |= getattr(ssl, 'OP_SINGLE_ECDH_USE', 0)
+ # Improve forward secrecy.
+ sslcontext.options |= getattr(ssl, 'OP_SINGLE_DH_USE', 0)
+ sslcontext.options |= getattr(ssl, 'OP_SINGLE_ECDH_USE', 0)
- # Use the list of more secure ciphers if found in the ssl module.
- if util.safehasattr(ssl, b'_RESTRICTED_SERVER_CIPHERS'):
- sslcontext.options |= getattr(ssl, 'OP_CIPHER_SERVER_PREFERENCE', 0)
- sslcontext.set_ciphers(ssl._RESTRICTED_SERVER_CIPHERS)
- else:
- sslcontext = SSLContext(ssl.PROTOCOL_TLSv1)
+ # Use the list of more secure ciphers if found in the ssl module.
+ if util.safehasattr(ssl, b'_RESTRICTED_SERVER_CIPHERS'):
+ sslcontext.options |= getattr(ssl, 'OP_CIPHER_SERVER_PREFERENCE', 0)
+ sslcontext.set_ciphers(ssl._RESTRICTED_SERVER_CIPHERS)
if requireclientcert:
sslcontext.verify_mode = ssl.CERT_REQUIRED
@@ -797,14 +687,6 @@
)
-_systemcacertpaths = [
- # RHEL, CentOS, and Fedora
- b'/etc/pki/tls/certs/ca-bundle.trust.crt',
- # Debian, Ubuntu, Gentoo
- b'/etc/ssl/certs/ca-certificates.crt',
-]
-
-
def _defaultcacerts(ui):
"""return path to default CA certificates or None.
@@ -827,23 +709,6 @@
except (ImportError, AttributeError):
pass
- # On Windows, only the modern ssl module is capable of loading the system
- # CA certificates. If we're not capable of doing that, emit a warning
- # because we'll get a certificate verification error later and the lack
- # of loaded CA certificates will be the reason why.
- # Assertion: this code is only called if certificates are being verified.
- if pycompat.iswindows:
- if not _canloaddefaultcerts:
- ui.warn(
- _(
- b'(unable to load Windows CA certificates; see '
- b'https://mercurial-scm.org/wiki/SecureConnections for '
- b'how to configure Mercurial to avoid this message)\n'
- )
- )
-
- return None
-
# Apple's OpenSSL has patches that allow a specially constructed certificate
# to load the system CA store. If we're running on Apple Python, use this
# trick.
@@ -854,58 +719,6 @@
if os.path.exists(dummycert):
return dummycert
- # The Apple OpenSSL trick isn't available to us. If Python isn't able to
- # load system certs, we're out of luck.
- if pycompat.isdarwin:
- # FUTURE Consider looking for Homebrew or MacPorts installed certs
- # files. Also consider exporting the keychain certs to a file during
- # Mercurial install.
- if not _canloaddefaultcerts:
- ui.warn(
- _(
- b'(unable to load CA certificates; see '
- b'https://mercurial-scm.org/wiki/SecureConnections for '
- b'how to configure Mercurial to avoid this message)\n'
- )
- )
- return None
-
- # / is writable on Windows. Out of an abundance of caution make sure
- # we're not on Windows because paths from _systemcacerts could be installed
- # by non-admin users.
- assert not pycompat.iswindows
-
- # Try to find CA certificates in well-known locations. We print a warning
- # when using a found file because we don't want too much silent magic
- # for security settings. The expectation is that proper Mercurial
- # installs will have the CA certs path defined at install time and the
- # installer/packager will make an appropriate decision on the user's
- # behalf. We only get here and perform this setting as a feature of
- # last resort.
- if not _canloaddefaultcerts:
- for path in _systemcacertpaths:
- if os.path.isfile(path):
- ui.warn(
- _(
- b'(using CA certificates from %s; if you see this '
- b'message, your Mercurial install is not properly '
- b'configured; see '
- b'https://mercurial-scm.org/wiki/SecureConnections '
- b'for how to configure Mercurial to avoid this '
- b'message)\n'
- )
- % path
- )
- return path
-
- ui.warn(
- _(
- b'(unable to load CA certificates; see '
- b'https://mercurial-scm.org/wiki/SecureConnections for '
- b'how to configure Mercurial to avoid this message)\n'
- )
- )
-
return None
--- a/mercurial/subrepo.py Tue Jun 23 16:07:18 2020 +0200
+++ b/mercurial/subrepo.py Thu Jun 25 10:32:51 2020 -0700
@@ -617,8 +617,8 @@
ui,
self._repo,
diffopts,
- node1,
- node2,
+ self._repo[node1],
+ self._repo[node2],
match,
prefix=prefix,
listsubrepos=True,
--- a/mercurial/templatekw.py Tue Jun 23 16:07:18 2020 +0200
+++ b/mercurial/templatekw.py Thu Jun 25 10:32:51 2020 -0700
@@ -419,9 +419,9 @@
else:
merge_nodes = cache.get(b'merge_nodes')
if merge_nodes is None:
- from . import merge
+ from . import mergestate as mergestatemod
- mergestate = merge.mergestate.read(repo)
+ mergestate = mergestatemod.mergestate.read(repo)
if mergestate.active():
merge_nodes = (mergestate.local, mergestate.other)
else:
--- a/mercurial/ui.py Tue Jun 23 16:07:18 2020 +0200
+++ b/mercurial/ui.py Thu Jun 25 10:32:51 2020 -0700
@@ -1200,7 +1200,7 @@
dest.write(msg)
# stderr may be buffered under win32 when redirected to files,
# including stdout.
- if dest is self._ferr and not getattr(self._ferr, 'closed', False):
+ if dest is self._ferr and not getattr(dest, 'closed', False):
dest.flush()
except IOError as err:
if dest is self._ferr and err.errno in (
--- a/mercurial/upgrade.py Tue Jun 23 16:07:18 2020 +0200
+++ b/mercurial/upgrade.py Thu Jun 25 10:32:51 2020 -0700
@@ -13,12 +13,12 @@
from .pycompat import getattr
from . import (
changelog,
- copies,
error,
filelog,
hg,
localrepo,
manifest,
+ metadata,
pycompat,
revlog,
scmutil,
@@ -78,6 +78,7 @@
localrepo.SPARSEREVLOG_REQUIREMENT,
localrepo.SIDEDATA_REQUIREMENT,
localrepo.COPIESSDC_REQUIREMENT,
+ localrepo.NODEMAP_REQUIREMENT,
}
for name in compression.compengines:
engine = compression.compengines[name]
@@ -105,6 +106,7 @@
localrepo.SPARSEREVLOG_REQUIREMENT,
localrepo.SIDEDATA_REQUIREMENT,
localrepo.COPIESSDC_REQUIREMENT,
+ localrepo.NODEMAP_REQUIREMENT,
}
for name in compression.compengines:
engine = compression.compengines[name]
@@ -132,6 +134,7 @@
localrepo.SPARSEREVLOG_REQUIREMENT,
localrepo.SIDEDATA_REQUIREMENT,
localrepo.COPIESSDC_REQUIREMENT,
+ localrepo.NODEMAP_REQUIREMENT,
}
for name in compression.compengines:
engine = compression.compengines[name]
@@ -374,6 +377,21 @@
@registerformatvariant
+class persistentnodemap(requirementformatvariant):
+ name = b'persistent-nodemap'
+
+ _requirement = localrepo.NODEMAP_REQUIREMENT
+
+ default = False
+
+ description = _(
+ b'persist the node -> rev mapping on disk to speedup lookup'
+ )
+
+ upgrademessage = _(b'Speedup revision lookup by node id.')
+
+
+@registerformatvariant
class copiessdc(requirementformatvariant):
name = b'copies-sdc'
@@ -716,9 +734,9 @@
return False, (), {}
elif localrepo.COPIESSDC_REQUIREMENT in addedreqs:
- sidedatacompanion = copies.getsidedataadder(srcrepo, dstrepo)
+ sidedatacompanion = metadata.getsidedataadder(srcrepo, dstrepo)
elif localrepo.COPIESSDC_REQUIREMENT in removedreqs:
- sidedatacompanion = copies.getsidedataremover(srcrepo, dstrepo)
+ sidedatacompanion = metadata.getsidedataremover(srcrepo, dstrepo)
return sidedatacompanion
@@ -807,14 +825,14 @@
if not revcount:
return
- ui.write(
+ ui.status(
_(
b'migrating %d total revisions (%d in filelogs, %d in manifests, '
b'%d in changelog)\n'
)
% (revcount, frevcount, mrevcount, crevcount)
)
- ui.write(
+ ui.status(
_(b'migrating %s in store; %s tracked data\n')
% ((util.bytecount(srcsize), util.bytecount(srcrawsize)))
)
@@ -837,7 +855,7 @@
oldrl = _revlogfrompath(srcrepo, unencoded)
if isinstance(oldrl, changelog.changelog) and b'c' not in seen:
- ui.write(
+ ui.status(
_(
b'finished migrating %d manifest revisions across %d '
b'manifests; change in size: %s\n'
@@ -845,7 +863,7 @@
% (mrevcount, mcount, util.bytecount(mdstsize - msrcsize))
)
- ui.write(
+ ui.status(
_(
b'migrating changelog containing %d revisions '
b'(%s in store; %s tracked data)\n'
@@ -861,7 +879,7 @@
_(b'changelog revisions'), total=crevcount
)
elif isinstance(oldrl, manifest.manifestrevlog) and b'm' not in seen:
- ui.write(
+ ui.status(
_(
b'finished migrating %d filelog revisions across %d '
b'filelogs; change in size: %s\n'
@@ -869,7 +887,7 @@
% (frevcount, fcount, util.bytecount(fdstsize - fsrcsize))
)
- ui.write(
+ ui.status(
_(
b'migrating %d manifests containing %d revisions '
b'(%s in store; %s tracked data)\n'
@@ -888,7 +906,7 @@
_(b'manifest revisions'), total=mrevcount
)
elif b'f' not in seen:
- ui.write(
+ ui.status(
_(
b'migrating %d filelogs containing %d revisions '
b'(%s in store; %s tracked data)\n'
@@ -941,7 +959,7 @@
progress.complete()
- ui.write(
+ ui.status(
_(
b'finished migrating %d changelog revisions; change in size: '
b'%s\n'
@@ -949,7 +967,7 @@
% (crevcount, util.bytecount(cdstsize - csrcsize))
)
- ui.write(
+ ui.status(
_(
b'finished migrating %d total revisions; total change in store '
b'size: %s\n'
@@ -975,7 +993,7 @@
Function should return ``True`` if the file is to be copied.
"""
# Skip revlogs.
- if path.endswith((b'.i', b'.d')):
+ if path.endswith((b'.i', b'.d', b'.n', b'.nd')):
return False
# Skip transaction related files.
if path.startswith(b'undo'):
@@ -1013,7 +1031,7 @@
assert srcrepo.currentwlock()
assert dstrepo.currentwlock()
- ui.write(
+ ui.status(
_(
b'(it is safe to interrupt this process any time before '
b'data migration completes)\n'
@@ -1048,14 +1066,14 @@
if not _filterstorefile(srcrepo, dstrepo, requirements, p, kind, st):
continue
- srcrepo.ui.write(_(b'copying %s\n') % p)
+ srcrepo.ui.status(_(b'copying %s\n') % p)
src = srcrepo.store.rawvfs.join(p)
dst = dstrepo.store.rawvfs.join(p)
util.copyfile(src, dst, copystat=True)
_finishdatamigration(ui, srcrepo, dstrepo, requirements)
- ui.write(_(b'data fully migrated to temporary repository\n'))
+ ui.status(_(b'data fully migrated to temporary repository\n'))
backuppath = pycompat.mkdtemp(prefix=b'upgradebackup.', dir=srcrepo.path)
backupvfs = vfsmod.vfs(backuppath)
@@ -1067,7 +1085,7 @@
# as a mechanism to lock out new clients during the data swap. This is
# better than allowing a client to continue while the repository is in
# an inconsistent state.
- ui.write(
+ ui.status(
_(
b'marking source repository as being upgraded; clients will be '
b'unable to read from repository\n'
@@ -1077,18 +1095,18 @@
srcrepo.vfs, srcrepo.requirements | {b'upgradeinprogress'}
)
- ui.write(_(b'starting in-place swap of repository data\n'))
- ui.write(_(b'replaced files will be backed up at %s\n') % backuppath)
+ ui.status(_(b'starting in-place swap of repository data\n'))
+ ui.status(_(b'replaced files will be backed up at %s\n') % backuppath)
# Now swap in the new store directory. Doing it as a rename should make
# the operation nearly instantaneous and atomic (at least in well-behaved
# environments).
- ui.write(_(b'replacing store...\n'))
+ ui.status(_(b'replacing store...\n'))
tstart = util.timer()
util.rename(srcrepo.spath, backupvfs.join(b'store'))
util.rename(dstrepo.spath, srcrepo.spath)
elapsed = util.timer() - tstart
- ui.write(
+ ui.status(
_(
b'store replacement complete; repository was inconsistent for '
b'%0.1fs\n'
@@ -1098,7 +1116,7 @@
# We first write the requirements file. Any new requirements will lock
# out legacy clients.
- ui.write(
+ ui.status(
_(
b'finalizing requirements file and making repository readable '
b'again\n'
@@ -1274,9 +1292,20 @@
ui.write((b'\n'))
ui.write(b'\n')
+ def printoptimisations():
+ optimisations = [a for a in actions if a.type == optimisation]
+ optimisations.sort(key=lambda a: a.name)
+ if optimisations:
+ ui.write(_(b'optimisations: '))
+ write_labeled(
+ [a.name for a in optimisations],
+ "upgrade-repo.optimisation.performed",
+ )
+ ui.write(b'\n\n')
+
def printupgradeactions():
for a in actions:
- ui.write(b'%s\n %s\n\n' % (a.name, a.upgrademessage))
+ ui.status(b'%s\n %s\n\n' % (a.name, a.upgrademessage))
if not run:
fromconfig = []
@@ -1291,35 +1320,35 @@
if fromconfig or onlydefault:
if fromconfig:
- ui.write(
+ ui.status(
_(
b'repository lacks features recommended by '
b'current config options:\n\n'
)
)
for i in fromconfig:
- ui.write(b'%s\n %s\n\n' % (i.name, i.description))
+ ui.status(b'%s\n %s\n\n' % (i.name, i.description))
if onlydefault:
- ui.write(
+ ui.status(
_(
b'repository lacks features used by the default '
b'config options:\n\n'
)
)
for i in onlydefault:
- ui.write(b'%s\n %s\n\n' % (i.name, i.description))
+ ui.status(b'%s\n %s\n\n' % (i.name, i.description))
- ui.write(b'\n')
+ ui.status(b'\n')
else:
- ui.write(
+ ui.status(
_(
b'(no feature deficiencies found in existing '
b'repository)\n'
)
)
- ui.write(
+ ui.status(
_(
b'performing an upgrade with "--run" will make the following '
b'changes:\n\n'
@@ -1327,31 +1356,33 @@
)
printrequirements()
+ printoptimisations()
printupgradeactions()
unusedoptimize = [i for i in alloptimizations if i not in actions]
if unusedoptimize:
- ui.write(
+ ui.status(
_(
b'additional optimizations are available by specifying '
b'"--optimize <name>":\n\n'
)
)
for i in unusedoptimize:
- ui.write(_(b'%s\n %s\n\n') % (i.name, i.description))
+ ui.status(_(b'%s\n %s\n\n') % (i.name, i.description))
return
# Else we're in the run=true case.
ui.write(_(b'upgrade will perform the following actions:\n\n'))
printrequirements()
+ printoptimisations()
printupgradeactions()
upgradeactions = [a.name for a in actions]
- ui.write(_(b'beginning upgrade...\n'))
+ ui.status(_(b'beginning upgrade...\n'))
with repo.wlock(), repo.lock():
- ui.write(_(b'repository locked and read-only\n'))
+ ui.status(_(b'repository locked and read-only\n'))
# Our strategy for upgrading the repository is to create a new,
# temporary repository, write data to it, then do a swap of the
# data. There are less heavyweight ways to do this, but it is easier
@@ -1360,7 +1391,7 @@
tmppath = pycompat.mkdtemp(prefix=b'upgrade.', dir=repo.path)
backuppath = None
try:
- ui.write(
+ ui.status(
_(
b'creating temporary repository to stage migrated '
b'data: %s\n'
@@ -1377,15 +1408,17 @@
ui, repo, dstrepo, newreqs, upgradeactions, revlogs=revlogs
)
if not (backup or backuppath is None):
- ui.write(_(b'removing old repository content%s\n') % backuppath)
+ ui.status(
+ _(b'removing old repository content%s\n') % backuppath
+ )
repo.vfs.rmtree(backuppath, forcibly=True)
backuppath = None
finally:
- ui.write(_(b'removing temporary repository %s\n') % tmppath)
+ ui.status(_(b'removing temporary repository %s\n') % tmppath)
repo.vfs.rmtree(tmppath, forcibly=True)
- if backuppath:
+ if backuppath and not ui.quiet:
ui.warn(
_(b'copy of old repository backed up at %s\n') % backuppath
)
--- a/mercurial/util.py Tue Jun 23 16:07:18 2020 +0200
+++ b/mercurial/util.py Thu Jun 25 10:32:51 2020 -0700
@@ -204,6 +204,8 @@
b" update your code.)"
) % version
warnings.warn(pycompat.sysstr(msg), DeprecationWarning, stacklevel + 1)
+ # on Python 3 with chg, we need to explicitly flush the output
+ sys.stderr.flush()
DIGESTS = {
@@ -2844,7 +2846,7 @@
# [1]: fixed by changeset 67dc99a989cd in the cpython hg repo.
#
# Here we workaround the EINTR issue for fileobj.__iter__. Other methods
- # like "read*" are ignored for now, as Python < 2.7.4 is a minority.
+ # like "read*" work fine, as we do not support Python < 2.7.4.
#
# Although we can workaround the EINTR issue for fp.__iter__, it is slower:
# "for x in fp" is 4x faster than "for x in iter(fp.readline, '')" in
@@ -2856,39 +2858,6 @@
# affects things like pipes, sockets, ttys etc. We treat "normal" (S_ISREG)
# files approximately as "fast" files and use the fast (unsafe) code path,
# to minimize the performance impact.
- if sys.version_info >= (2, 7, 4):
- # fp.readline deals with EINTR correctly, use it as a workaround.
- def _safeiterfile(fp):
- return iter(fp.readline, b'')
-
- else:
- # fp.read* are broken too, manually deal with EINTR in a stupid way.
- # note: this may block longer than necessary because of bufsize.
- def _safeiterfile(fp, bufsize=4096):
- fd = fp.fileno()
- line = b''
- while True:
- try:
- buf = os.read(fd, bufsize)
- except OSError as ex:
- # os.read only raises EINTR before any data is read
- if ex.errno == errno.EINTR:
- continue
- else:
- raise
- line += buf
- if b'\n' in buf:
- splitted = line.splitlines(True)
- line = b''
- for l in splitted:
- if l[-1] == b'\n':
- yield l
- else:
- line = l
- if not buf:
- break
- if line:
- yield line
def iterfile(fp):
fastpath = True
@@ -2897,7 +2866,8 @@
if fastpath:
return fp
else:
- return _safeiterfile(fp)
+ # fp.readline deals with EINTR correctly, use it as a workaround.
+ return iter(fp.readline, b'')
else:
--- a/mercurial/utils/procutil.py Tue Jun 23 16:07:18 2020 +0200
+++ b/mercurial/utils/procutil.py Thu Jun 25 10:32:51 2020 -0700
@@ -49,18 +49,40 @@
return False
-# Python 2 uses the C library's standard I/O streams. Glibc determines
-# buffering on first write to stdout - if we replace a TTY destined stdout with
-# a pipe destined stdout (e.g. pager), we want line buffering (or unbuffered,
-# on Windows).
-# Python 3 rolls its own standard I/O streams.
+if pycompat.ispy3:
+
+ class LineBufferedWrapper(object):
+ def __init__(self, orig):
+ self.orig = orig
+
+ def __getattr__(self, attr):
+ return getattr(self.orig, attr)
+
+ def write(self, s):
+ orig = self.orig
+ res = orig.write(s)
+ if s.endswith(b'\n'):
+ orig.flush()
+ return res
+
+ io.BufferedIOBase.register(LineBufferedWrapper)
+
+
+# glibc determines buffering on first write to stdout - if we replace a TTY
+# destined stdout with a pipe destined stdout (e.g. pager), we want line
+# buffering (or unbuffered, on Windows)
if isatty(stdout):
if pycompat.iswindows:
# Windows doesn't support line buffering
stdout = os.fdopen(stdout.fileno(), 'wb', 0)
- elif not pycompat.ispy3:
- # on Python 3, stdout (sys.stdout.buffer) is already line buffered and
- # buffering=1 is not handled in binary mode
+ elif pycompat.ispy3:
+ # On Python 3, buffered binary streams can't be set line-buffered.
+ # Therefore we have a wrapper that implements line buffering.
+ if isinstance(stdout, io.BufferedIOBase) and not isinstance(
+ stdout, LineBufferedWrapper
+ ):
+ stdout = LineBufferedWrapper(stdout)
+ else:
stdout = os.fdopen(stdout.fileno(), 'wb', 1)
if pycompat.iswindows:
@@ -75,7 +97,6 @@
getuser = platform.getuser
getpid = os.getpid
hidewindow = platform.hidewindow
-quotecommand = platform.quotecommand
readpipe = platform.readpipe
setbinary = platform.setbinary
setsignalhandler = platform.setsignalhandler
@@ -140,7 +161,7 @@
def _popenreader(cmd, bufsize):
p = subprocess.Popen(
- tonativestr(quotecommand(cmd)),
+ tonativestr(cmd),
shell=True,
bufsize=bufsize,
close_fds=closefds,
@@ -151,7 +172,7 @@
def _popenwriter(cmd, bufsize):
p = subprocess.Popen(
- tonativestr(quotecommand(cmd)),
+ tonativestr(cmd),
shell=True,
bufsize=bufsize,
close_fds=closefds,
@@ -397,7 +418,6 @@
stdout.flush()
except Exception:
pass
- cmd = quotecommand(cmd)
env = shellenviron(environ)
if out is None or isstdout(out):
rc = subprocess.call(
--- a/mercurial/windows.py Tue Jun 23 16:07:18 2020 +0200
+++ b/mercurial/windows.py Thu Jun 25 10:32:51 2020 -0700
@@ -474,14 +474,6 @@
return pycompat.maplist(_unquote, pycompat.shlexsplit(s, posix=False))
-def quotecommand(cmd):
- """Build a command string suitable for os.popen* calls."""
- if sys.version_info < (2, 7, 1):
- # Python versions since 2.7.1 do this extra quoting themselves
- return b'"' + cmd + b'"'
- return cmd
-
-
# if you change this stub into a real check, please try to implement the
# username and groupname functions above, too.
def isowner(st):
--- a/relnotes/next Tue Jun 23 16:07:18 2020 +0200
+++ b/relnotes/next Thu Jun 25 10:32:51 2020 -0700
@@ -3,9 +3,29 @@
== New Experimental Features ==
+ * The core of some hg operations have been (and are being)
+ implemented in rust, for speed. `hg status` on a repository with
+ 300k tracked files goes from 1.8s to 0.6s for instance.
+ This has currently been tested only on linux, and does not build on
+ windows. See rust/README.rst in the mercurial repository for
+ instructions to opt into this.
== Backwards Compatibility Changes ==
+* Mercurial now requires at least Python 2.7.9 or a Python version that
+ backported modern SSL/TLS features (as defined in PEP 466), and that Python
+ was compiled against an OpenSSL version supporting TLS 1.1 or TLS 1.2
+ (likely this requires the OpenSSL version to be at least 1.0.1).
+
+* The `hg perfwrite` command from contrib/perf.py was made more flexible and
+ changed its default behavior. To get the previous behavior, run `hg perfwrite
+ --nlines=100000 --nitems=1 --item='Testing write performance' --batch-line`.
+
== Internal API Changes ==
+ * logcmdutil.diffordiffstat() now takes contexts instead of nodes.
+
+ * The `mergestate` class along with some related methods and constants have
+ moved from `mercurial.merge` to a new `mercurial.mergestate` module.
+
--- a/rust/Cargo.lock Tue Jun 23 16:07:18 2020 +0200
+++ b/rust/Cargo.lock Thu Jun 25 10:32:51 2020 -0700
@@ -42,11 +42,6 @@
source = "registry+https://github.com/rust-lang/crates.io-index"
[[package]]
-name = "cc"
-version = "1.0.50"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-
-[[package]]
name = "cfg-if"
version = "0.1.10"
source = "registry+https://github.com/rust-lang/crates.io-index"
@@ -208,22 +203,20 @@
version = "0.1.0"
dependencies = [
"byteorder 1.3.4 (registry+https://github.com/rust-lang/crates.io-index)",
- "cc 1.0.50 (registry+https://github.com/rust-lang/crates.io-index)",
"clap 2.33.0 (registry+https://github.com/rust-lang/crates.io-index)",
"crossbeam 0.7.3 (registry+https://github.com/rust-lang/crates.io-index)",
"hex 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)",
"lazy_static 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)",
- "libc 0.2.67 (registry+https://github.com/rust-lang/crates.io-index)",
"log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)",
"memchr 2.3.3 (registry+https://github.com/rust-lang/crates.io-index)",
"memmap 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)",
- "micro-timer 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)",
+ "micro-timer 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)",
"pretty_assertions 0.6.1 (registry+https://github.com/rust-lang/crates.io-index)",
"rand 0.7.3 (registry+https://github.com/rust-lang/crates.io-index)",
"rand_distr 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)",
"rand_pcg 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)",
"rayon 1.3.0 (registry+https://github.com/rust-lang/crates.io-index)",
- "regex 1.3.6 (registry+https://github.com/rust-lang/crates.io-index)",
+ "regex 1.3.9 (registry+https://github.com/rust-lang/crates.io-index)",
"same-file 1.0.6 (registry+https://github.com/rust-lang/crates.io-index)",
"tempfile 3.1.0 (registry+https://github.com/rust-lang/crates.io-index)",
"twox-hash 1.5.0 (registry+https://github.com/rust-lang/crates.io-index)",
@@ -287,16 +280,16 @@
[[package]]
name = "micro-timer"
-version = "0.2.1"
+version = "0.3.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
- "micro-timer-macros 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)",
+ "micro-timer-macros 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)",
"scopeguard 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
name = "micro-timer-macros"
-version = "0.2.0"
+version = "0.3.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
"proc-macro2 1.0.9 (registry+https://github.com/rust-lang/crates.io-index)",
@@ -369,7 +362,7 @@
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
"libc 0.2.67 (registry+https://github.com/rust-lang/crates.io-index)",
- "regex 1.3.6 (registry+https://github.com/rust-lang/crates.io-index)",
+ "regex 1.3.9 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
@@ -378,7 +371,7 @@
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
"libc 0.2.67 (registry+https://github.com/rust-lang/crates.io-index)",
- "regex 1.3.6 (registry+https://github.com/rust-lang/crates.io-index)",
+ "regex 1.3.9 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
@@ -471,18 +464,18 @@
[[package]]
name = "regex"
-version = "1.3.6"
+version = "1.3.9"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
"aho-corasick 0.7.10 (registry+https://github.com/rust-lang/crates.io-index)",
"memchr 2.3.3 (registry+https://github.com/rust-lang/crates.io-index)",
- "regex-syntax 0.6.17 (registry+https://github.com/rust-lang/crates.io-index)",
+ "regex-syntax 0.6.18 (registry+https://github.com/rust-lang/crates.io-index)",
"thread_local 1.0.1 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
name = "regex-syntax"
-version = "0.6.17"
+version = "0.6.18"
source = "registry+https://github.com/rust-lang/crates.io-index"
[[package]]
@@ -494,6 +487,10 @@
]
[[package]]
+name = "rhg"
+version = "0.1.0"
+
+[[package]]
name = "rustc_version"
version = "0.2.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
@@ -655,7 +652,6 @@
"checksum autocfg 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)" = "f8aac770f1885fd7e387acedd76065302551364496e46b3dd00860b2f8359b9d"
"checksum bitflags 1.2.1 (registry+https://github.com/rust-lang/crates.io-index)" = "cf1de2fe8c75bc145a2f577add951f8134889b4795d47466a54a5c846d691693"
"checksum byteorder 1.3.4 (registry+https://github.com/rust-lang/crates.io-index)" = "08c48aae112d48ed9f069b33538ea9e3e90aa263cfa3d1c24309612b1f7472de"
-"checksum cc 1.0.50 (registry+https://github.com/rust-lang/crates.io-index)" = "95e28fa049fda1c330bcf9d723be7663a899c4679724b34c81e9f5a326aab8cd"
"checksum cfg-if 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)" = "4785bdd1c96b2a846b2bd7cc02e86b6b3dbf14e7e53446c4f54c92a361040822"
"checksum chrono 0.4.11 (registry+https://github.com/rust-lang/crates.io-index)" = "80094f509cf8b5ae86a4966a39b3ff66cd7e2a3e594accec3743ff3fabeab5b2"
"checksum clap 2.33.0 (registry+https://github.com/rust-lang/crates.io-index)" = "5067f5bb2d80ef5d68b4c87db81601f0b75bca627bc2ef76b141d7b846a3c6d9"
@@ -680,8 +676,8 @@
"checksum memchr 2.3.3 (registry+https://github.com/rust-lang/crates.io-index)" = "3728d817d99e5ac407411fa471ff9800a778d88a24685968b36824eaf4bee400"
"checksum memmap 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)" = "6585fd95e7bb50d6cc31e20d4cf9afb4e2ba16c5846fc76793f11218da9c475b"
"checksum memoffset 0.5.3 (registry+https://github.com/rust-lang/crates.io-index)" = "75189eb85871ea5c2e2c15abbdd541185f63b408415e5051f5cac122d8c774b9"
-"checksum micro-timer 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)" = "987429cd6162a80ed5ff44fc790f5090b1c6d617ac73a2e272965ed91201d79b"
-"checksum micro-timer-macros 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "43cec5c0b38783eb33ef7bccf4b250b7a085703e11f5f2238fa31969e629388a"
+"checksum micro-timer 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)" = "25b31d6cb9112984323d05d7a353f272ae5d7a307074f9ab9b25c00121b8c947"
+"checksum micro-timer-macros 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)" = "5694085dd384bb9e824207facc040c248d9df653f55e28c3ad0686958b448504"
"checksum num-integer 0.1.42 (registry+https://github.com/rust-lang/crates.io-index)" = "3f6ea62e9d81a77cd3ee9a2a5b9b609447857f3d358704331e4ef39eb247fcba"
"checksum num-traits 0.2.11 (registry+https://github.com/rust-lang/crates.io-index)" = "c62be47e61d1842b9170f0fdeec8eba98e60e90e5446449a0545e5152acd7096"
"checksum num_cpus 1.12.0 (registry+https://github.com/rust-lang/crates.io-index)" = "46203554f085ff89c235cd12f7075f3233af9b11ed7c9e16dfe2560d03313ce6"
@@ -701,8 +697,8 @@
"checksum rayon 1.3.0 (registry+https://github.com/rust-lang/crates.io-index)" = "db6ce3297f9c85e16621bb8cca38a06779ffc31bb8184e1be4bed2be4678a098"
"checksum rayon-core 1.7.0 (registry+https://github.com/rust-lang/crates.io-index)" = "08a89b46efaf957e52b18062fb2f4660f8b8a4dde1807ca002690868ef2c85a9"
"checksum redox_syscall 0.1.56 (registry+https://github.com/rust-lang/crates.io-index)" = "2439c63f3f6139d1b57529d16bc3b8bb855230c8efcc5d3a896c8bea7c3b1e84"
-"checksum regex 1.3.6 (registry+https://github.com/rust-lang/crates.io-index)" = "7f6946991529684867e47d86474e3a6d0c0ab9b82d5821e314b1ede31fa3a4b3"
-"checksum regex-syntax 0.6.17 (registry+https://github.com/rust-lang/crates.io-index)" = "7fe5bd57d1d7414c6b5ed48563a2c855d995ff777729dcd91c369ec7fea395ae"
+"checksum regex 1.3.9 (registry+https://github.com/rust-lang/crates.io-index)" = "9c3780fcf44b193bc4d09f36d2a3c87b251da4a046c87795a0d35f4f927ad8e6"
+"checksum regex-syntax 0.6.18 (registry+https://github.com/rust-lang/crates.io-index)" = "26412eb97c6b088a6997e05f69403a802a92d520de2f8e63c2b65f9e0f47c4e8"
"checksum remove_dir_all 0.5.2 (registry+https://github.com/rust-lang/crates.io-index)" = "4a83fa3702a688b9359eccba92d153ac33fd2e8462f9e0e3fdf155239ea7792e"
"checksum rustc_version 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)" = "138e3e0acb6c9fb258b19b67cb8abd63c00679d2851805ea151465464fe9030a"
"checksum same-file 1.0.6 (registry+https://github.com/rust-lang/crates.io-index)" = "93fc1dc3aaa9bfed95e02e6eadabb4baf7e3078b0bd1b4d7b6b0b68378900502"
--- a/rust/Cargo.toml Tue Jun 23 16:07:18 2020 +0200
+++ b/rust/Cargo.toml Thu Jun 25 10:32:51 2020 -0700
@@ -1,3 +1,3 @@
[workspace]
-members = ["hg-core", "hg-cpython"]
+members = ["hg-core", "hg-cpython", "rhg"]
exclude = ["chg", "hgcli"]
--- a/rust/README.rst Tue Jun 23 16:07:18 2020 +0200
+++ b/rust/README.rst Thu Jun 25 10:32:51 2020 -0700
@@ -8,9 +8,9 @@
There are currently three independent rust projects:
- chg. An implementation of chg, in rust instead of C.
-- hgcli. A experiment for starting hg in rust rather than in python,
- by linking with the python runtime. Probably meant to be replaced by
- PyOxidizer at some point.
+- hgcli. A project that provides a (mostly) self-contained "hg" binary,
+ for ease of deployment and a bit of speed, using PyOxidizer. See
+ hgcli/README.md.
- hg-core (and hg-cpython): implementation of some
functionality of mercurial in rust, e.g. ancestry computations in
revision graphs, status or pull discovery. The top-level ``Cargo.toml`` file
@@ -27,8 +27,6 @@
$ ./hg debuginstall | grep -i rust # to validate rust is in use
checking Rust extensions (installed)
checking module policy (rust+c-allow)
- checking "re2" regexp engine Rust bindings (installed)
-
If the environment variable ``HGWITHRUSTEXT=cpython`` is set, the Rust
extension will be used by default unless ``--no-rust``.
@@ -36,35 +34,20 @@
One day we may use this environment variable to switch to new experimental
binding crates like a hypothetical ``HGWITHRUSTEXT=hpy``.
-Using the fastest ``hg status``
--------------------------------
-
-The code for ``hg status`` needs to conform to ``.hgignore`` rules, which are
-all translated into regex.
-
-In the first version, for compatibility and ease of development reasons, the
-Re2 regex engine was chosen until we figured out if the ``regex`` crate had
-similar enough behavior.
-
-Now that that work has been done, the default behavior is to use the ``regex``
-crate, that provides a significant performance boost compared to the standard
-Python + C path in many commands such as ``status``, ``diff`` and ``commit``,
+Profiling
+=========
-However, the ``Re2`` path remains slightly faster for our use cases and remains
-a better option for getting the most speed out of your Mercurial.
+Setting the environment variable ``RUST_LOG=trace`` will make hg print
+a few high level rust-related performance numbers. It can also
+indicate why the rust code cannot be used (say, using lookarounds in
+hgignore).
-If you want to use ``Re2``, you need to install ``Re2`` following Google's
-guidelines: https://github.com/google/re2/wiki/Install.
-Then, use ``HG_RUST_FEATURES=with-re2`` and
-``HG_RE2_PATH=system|<path to your re2 install>`` when building ``hg`` to
-signal the use of Re2. Using the local path instead of the "system" RE2 links
-it statically.
-
-For example::
-
- $ HG_RUST_FEATURES=with-re2 HG_RE2_PATH=system make PURE=--rust
- $ # OR
- $ HG_RUST_FEATURES=with-re2 HG_RE2_PATH=/path/to/re2 make PURE=--rust
+``py-spy`` (https://github.com/benfred/py-spy) can be used to
+construct a single profile with rust functions and python functions
+(as opposed to ``hg --profile``, which attributes time spent in rust
+to some unlucky python code running shortly after the rust code, and
+as opposed to tools for native code like ``perf``, which attribute
+time to the python interpreter instead of python functions).
Developing Rust
===============
@@ -114,14 +97,3 @@
$ cargo +nightly fmt
This requires you to have the nightly toolchain installed.
-
-Additional features
--------------------
-
-As mentioned in the section about ``hg status``, code paths using ``re2`` are
-opt-in.
-
-For example::
-
- $ cargo check --features with-re2
-
--- a/rust/chg/Cargo.lock Tue Jun 23 16:07:18 2020 +0200
+++ b/rust/chg/Cargo.lock Thu Jun 25 10:32:51 2020 -0700
@@ -6,9 +6,14 @@
source = "registry+https://github.com/rust-lang/crates.io-index"
[[package]]
-name = "autocfg"
-version = "1.0.0"
+name = "async-trait"
+version = "0.1.30"
source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "proc-macro2 1.0.10 (registry+https://github.com/rust-lang/crates.io-index)",
+ "quote 1.0.3 (registry+https://github.com/rust-lang/crates.io-index)",
+ "syn 1.0.17 (registry+https://github.com/rust-lang/crates.io-index)",
+]
[[package]]
name = "bitflags"
@@ -16,20 +21,11 @@
source = "registry+https://github.com/rust-lang/crates.io-index"
[[package]]
-name = "byteorder"
-version = "1.3.4"
+name = "bytes"
+version = "0.5.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
[[package]]
-name = "bytes"
-version = "0.4.12"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-dependencies = [
- "byteorder 1.3.4 (registry+https://github.com/rust-lang/crates.io-index)",
- "iovec 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)",
-]
-
-[[package]]
name = "cc"
version = "1.0.50"
source = "registry+https://github.com/rust-lang/crates.io-index"
@@ -43,91 +39,17 @@
name = "chg"
version = "0.1.0"
dependencies = [
- "bytes 0.4.12 (registry+https://github.com/rust-lang/crates.io-index)",
+ "async-trait 0.1.30 (registry+https://github.com/rust-lang/crates.io-index)",
+ "bytes 0.5.4 (registry+https://github.com/rust-lang/crates.io-index)",
"cc 1.0.50 (registry+https://github.com/rust-lang/crates.io-index)",
- "futures 0.1.29 (registry+https://github.com/rust-lang/crates.io-index)",
- "libc 0.2.68 (registry+https://github.com/rust-lang/crates.io-index)",
+ "futures 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)",
+ "libc 0.2.69 (registry+https://github.com/rust-lang/crates.io-index)",
"log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)",
- "tokio 0.1.22 (registry+https://github.com/rust-lang/crates.io-index)",
- "tokio-hglib 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)",
- "tokio-process 0.2.5 (registry+https://github.com/rust-lang/crates.io-index)",
- "tokio-timer 0.2.13 (registry+https://github.com/rust-lang/crates.io-index)",
-]
-
-[[package]]
-name = "cloudabi"
-version = "0.0.3"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-dependencies = [
- "bitflags 1.2.1 (registry+https://github.com/rust-lang/crates.io-index)",
-]
-
-[[package]]
-name = "crossbeam-deque"
-version = "0.7.3"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-dependencies = [
- "crossbeam-epoch 0.8.2 (registry+https://github.com/rust-lang/crates.io-index)",
- "crossbeam-utils 0.7.2 (registry+https://github.com/rust-lang/crates.io-index)",
- "maybe-uninit 2.0.0 (registry+https://github.com/rust-lang/crates.io-index)",
-]
-
-[[package]]
-name = "crossbeam-epoch"
-version = "0.8.2"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-dependencies = [
- "autocfg 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)",
- "cfg-if 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)",
- "crossbeam-utils 0.7.2 (registry+https://github.com/rust-lang/crates.io-index)",
- "lazy_static 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)",
- "maybe-uninit 2.0.0 (registry+https://github.com/rust-lang/crates.io-index)",
- "memoffset 0.5.4 (registry+https://github.com/rust-lang/crates.io-index)",
- "scopeguard 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)",
+ "tokio 0.2.18 (registry+https://github.com/rust-lang/crates.io-index)",
+ "tokio-hglib 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
-name = "crossbeam-queue"
-version = "0.1.2"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-dependencies = [
- "crossbeam-utils 0.6.6 (registry+https://github.com/rust-lang/crates.io-index)",
-]
-
-[[package]]
-name = "crossbeam-queue"
-version = "0.2.1"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-dependencies = [
- "cfg-if 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)",
- "crossbeam-utils 0.7.2 (registry+https://github.com/rust-lang/crates.io-index)",
-]
-
-[[package]]
-name = "crossbeam-utils"
-version = "0.6.6"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-dependencies = [
- "cfg-if 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)",
- "lazy_static 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)",
-]
-
-[[package]]
-name = "crossbeam-utils"
-version = "0.7.2"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-dependencies = [
- "autocfg 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)",
- "cfg-if 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)",
- "lazy_static 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)",
-]
-
-[[package]]
-name = "fnv"
-version = "1.0.6"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-
-[[package]]
name = "fuchsia-zircon"
version = "0.3.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
@@ -143,15 +65,84 @@
[[package]]
name = "futures"
-version = "0.1.29"
+version = "0.3.4"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "futures-channel 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)",
+ "futures-core 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)",
+ "futures-executor 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)",
+ "futures-io 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)",
+ "futures-sink 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)",
+ "futures-task 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)",
+ "futures-util 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
+[[package]]
+name = "futures-channel"
+version = "0.3.4"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "futures-core 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)",
+ "futures-sink 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
+[[package]]
+name = "futures-core"
+version = "0.3.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
[[package]]
-name = "hermit-abi"
-version = "0.1.10"
+name = "futures-executor"
+version = "0.3.4"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "futures-core 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)",
+ "futures-task 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)",
+ "futures-util 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
+[[package]]
+name = "futures-io"
+version = "0.3.4"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+
+[[package]]
+name = "futures-macro"
+version = "0.3.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
- "libc 0.2.68 (registry+https://github.com/rust-lang/crates.io-index)",
+ "proc-macro-hack 0.5.15 (registry+https://github.com/rust-lang/crates.io-index)",
+ "proc-macro2 1.0.10 (registry+https://github.com/rust-lang/crates.io-index)",
+ "quote 1.0.3 (registry+https://github.com/rust-lang/crates.io-index)",
+ "syn 1.0.17 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
+[[package]]
+name = "futures-sink"
+version = "0.3.4"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+
+[[package]]
+name = "futures-task"
+version = "0.3.4"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+
+[[package]]
+name = "futures-util"
+version = "0.3.4"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "futures-channel 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)",
+ "futures-core 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)",
+ "futures-io 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)",
+ "futures-macro 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)",
+ "futures-sink 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)",
+ "futures-task 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)",
+ "memchr 2.3.3 (registry+https://github.com/rust-lang/crates.io-index)",
+ "pin-utils 0.1.0-alpha.4 (registry+https://github.com/rust-lang/crates.io-index)",
+ "proc-macro-hack 0.5.15 (registry+https://github.com/rust-lang/crates.io-index)",
+ "proc-macro-nested 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)",
+ "slab 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
@@ -159,7 +150,7 @@
version = "0.1.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
- "libc 0.2.68 (registry+https://github.com/rust-lang/crates.io-index)",
+ "libc 0.2.69 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
@@ -178,18 +169,10 @@
[[package]]
name = "libc"
-version = "0.2.68"
+version = "0.2.69"
source = "registry+https://github.com/rust-lang/crates.io-index"
[[package]]
-name = "lock_api"
-version = "0.3.3"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-dependencies = [
- "scopeguard 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)",
-]
-
-[[package]]
name = "log"
version = "0.4.8"
source = "registry+https://github.com/rust-lang/crates.io-index"
@@ -198,19 +181,11 @@
]
[[package]]
-name = "maybe-uninit"
-version = "2.0.0"
+name = "memchr"
+version = "2.3.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
[[package]]
-name = "memoffset"
-version = "0.5.4"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-dependencies = [
- "autocfg 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)",
-]
-
-[[package]]
name = "mio"
version = "0.6.21"
source = "registry+https://github.com/rust-lang/crates.io-index"
@@ -220,7 +195,7 @@
"fuchsia-zircon-sys 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)",
"iovec 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)",
"kernel32-sys 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)",
- "libc 0.2.68 (registry+https://github.com/rust-lang/crates.io-index)",
+ "libc 0.2.69 (registry+https://github.com/rust-lang/crates.io-index)",
"log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)",
"miow 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)",
"net2 0.2.33 (registry+https://github.com/rust-lang/crates.io-index)",
@@ -245,7 +220,7 @@
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
"iovec 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)",
- "libc 0.2.68 (registry+https://github.com/rust-lang/crates.io-index)",
+ "libc 0.2.69 (registry+https://github.com/rust-lang/crates.io-index)",
"mio 0.6.21 (registry+https://github.com/rust-lang/crates.io-index)",
]
@@ -265,7 +240,7 @@
version = "0.3.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
- "socket2 0.3.11 (registry+https://github.com/rust-lang/crates.io-index)",
+ "socket2 0.3.12 (registry+https://github.com/rust-lang/crates.io-index)",
"winapi 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)",
]
@@ -275,41 +250,44 @@
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
"cfg-if 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)",
- "libc 0.2.68 (registry+https://github.com/rust-lang/crates.io-index)",
+ "libc 0.2.69 (registry+https://github.com/rust-lang/crates.io-index)",
"winapi 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
-name = "num_cpus"
-version = "1.12.0"
+name = "pin-project-lite"
+version = "0.1.4"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+
+[[package]]
+name = "pin-utils"
+version = "0.1.0-alpha.4"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+
+[[package]]
+name = "proc-macro-hack"
+version = "0.5.15"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+
+[[package]]
+name = "proc-macro-nested"
+version = "0.1.4"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+
+[[package]]
+name = "proc-macro2"
+version = "1.0.10"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
- "hermit-abi 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)",
- "libc 0.2.68 (registry+https://github.com/rust-lang/crates.io-index)",
+ "unicode-xid 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
-name = "parking_lot"
-version = "0.9.0"
+name = "quote"
+version = "1.0.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
- "lock_api 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)",
- "parking_lot_core 0.6.2 (registry+https://github.com/rust-lang/crates.io-index)",
- "rustc_version 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)",
-]
-
-[[package]]
-name = "parking_lot_core"
-version = "0.6.2"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-dependencies = [
- "cfg-if 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)",
- "cloudabi 0.0.3 (registry+https://github.com/rust-lang/crates.io-index)",
- "libc 0.2.68 (registry+https://github.com/rust-lang/crates.io-index)",
- "redox_syscall 0.1.56 (registry+https://github.com/rust-lang/crates.io-index)",
- "rustc_version 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)",
- "smallvec 0.6.13 (registry+https://github.com/rust-lang/crates.io-index)",
- "winapi 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)",
+ "proc-macro2 1.0.10 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
@@ -318,38 +296,12 @@
source = "registry+https://github.com/rust-lang/crates.io-index"
[[package]]
-name = "rustc_version"
-version = "0.2.3"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-dependencies = [
- "semver 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)",
-]
-
-[[package]]
-name = "scopeguard"
-version = "1.1.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-
-[[package]]
-name = "semver"
-version = "0.9.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-dependencies = [
- "semver-parser 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)",
-]
-
-[[package]]
-name = "semver-parser"
-version = "0.7.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-
-[[package]]
name = "signal-hook-registry"
version = "1.2.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
"arc-swap 0.4.5 (registry+https://github.com/rust-lang/crates.io-index)",
- "libc 0.2.68 (registry+https://github.com/rust-lang/crates.io-index)",
+ "libc 0.2.69 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
@@ -358,240 +310,85 @@
source = "registry+https://github.com/rust-lang/crates.io-index"
[[package]]
-name = "smallvec"
-version = "0.6.13"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-dependencies = [
- "maybe-uninit 2.0.0 (registry+https://github.com/rust-lang/crates.io-index)",
-]
-
-[[package]]
name = "socket2"
-version = "0.3.11"
+version = "0.3.12"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
"cfg-if 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)",
- "libc 0.2.68 (registry+https://github.com/rust-lang/crates.io-index)",
+ "libc 0.2.69 (registry+https://github.com/rust-lang/crates.io-index)",
"redox_syscall 0.1.56 (registry+https://github.com/rust-lang/crates.io-index)",
"winapi 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
-name = "tokio"
-version = "0.1.22"
+name = "syn"
+version = "1.0.17"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
- "bytes 0.4.12 (registry+https://github.com/rust-lang/crates.io-index)",
- "futures 0.1.29 (registry+https://github.com/rust-lang/crates.io-index)",
- "mio 0.6.21 (registry+https://github.com/rust-lang/crates.io-index)",
- "num_cpus 1.12.0 (registry+https://github.com/rust-lang/crates.io-index)",
- "tokio-codec 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)",
- "tokio-current-thread 0.1.7 (registry+https://github.com/rust-lang/crates.io-index)",
- "tokio-executor 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)",
- "tokio-fs 0.1.7 (registry+https://github.com/rust-lang/crates.io-index)",
- "tokio-io 0.1.13 (registry+https://github.com/rust-lang/crates.io-index)",
- "tokio-reactor 0.1.12 (registry+https://github.com/rust-lang/crates.io-index)",
- "tokio-sync 0.1.8 (registry+https://github.com/rust-lang/crates.io-index)",
- "tokio-tcp 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)",
- "tokio-threadpool 0.1.18 (registry+https://github.com/rust-lang/crates.io-index)",
- "tokio-timer 0.2.13 (registry+https://github.com/rust-lang/crates.io-index)",
- "tokio-udp 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)",
- "tokio-uds 0.2.6 (registry+https://github.com/rust-lang/crates.io-index)",
-]
-
-[[package]]
-name = "tokio-codec"
-version = "0.1.2"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-dependencies = [
- "bytes 0.4.12 (registry+https://github.com/rust-lang/crates.io-index)",
- "futures 0.1.29 (registry+https://github.com/rust-lang/crates.io-index)",
- "tokio-io 0.1.13 (registry+https://github.com/rust-lang/crates.io-index)",
-]
-
-[[package]]
-name = "tokio-current-thread"
-version = "0.1.7"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-dependencies = [
- "futures 0.1.29 (registry+https://github.com/rust-lang/crates.io-index)",
- "tokio-executor 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)",
+ "proc-macro2 1.0.10 (registry+https://github.com/rust-lang/crates.io-index)",
+ "quote 1.0.3 (registry+https://github.com/rust-lang/crates.io-index)",
+ "unicode-xid 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
-name = "tokio-executor"
-version = "0.1.10"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-dependencies = [
- "crossbeam-utils 0.7.2 (registry+https://github.com/rust-lang/crates.io-index)",
- "futures 0.1.29 (registry+https://github.com/rust-lang/crates.io-index)",
-]
-
-[[package]]
-name = "tokio-fs"
-version = "0.1.7"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-dependencies = [
- "futures 0.1.29 (registry+https://github.com/rust-lang/crates.io-index)",
- "tokio-io 0.1.13 (registry+https://github.com/rust-lang/crates.io-index)",
- "tokio-threadpool 0.1.18 (registry+https://github.com/rust-lang/crates.io-index)",
-]
-
-[[package]]
-name = "tokio-hglib"
-version = "0.2.0"
+name = "tokio"
+version = "0.2.18"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
- "bytes 0.4.12 (registry+https://github.com/rust-lang/crates.io-index)",
- "futures 0.1.29 (registry+https://github.com/rust-lang/crates.io-index)",
- "tokio 0.1.22 (registry+https://github.com/rust-lang/crates.io-index)",
- "tokio-codec 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)",
- "tokio-io 0.1.13 (registry+https://github.com/rust-lang/crates.io-index)",
- "tokio-process 0.2.5 (registry+https://github.com/rust-lang/crates.io-index)",
- "tokio-uds 0.2.6 (registry+https://github.com/rust-lang/crates.io-index)",
-]
-
-[[package]]
-name = "tokio-io"
-version = "0.1.13"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-dependencies = [
- "bytes 0.4.12 (registry+https://github.com/rust-lang/crates.io-index)",
- "futures 0.1.29 (registry+https://github.com/rust-lang/crates.io-index)",
- "log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)",
-]
-
-[[package]]
-name = "tokio-process"
-version = "0.2.5"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-dependencies = [
- "crossbeam-queue 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)",
- "futures 0.1.29 (registry+https://github.com/rust-lang/crates.io-index)",
+ "bytes 0.5.4 (registry+https://github.com/rust-lang/crates.io-index)",
+ "futures-core 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)",
"lazy_static 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)",
- "libc 0.2.68 (registry+https://github.com/rust-lang/crates.io-index)",
- "log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)",
+ "libc 0.2.69 (registry+https://github.com/rust-lang/crates.io-index)",
+ "memchr 2.3.3 (registry+https://github.com/rust-lang/crates.io-index)",
"mio 0.6.21 (registry+https://github.com/rust-lang/crates.io-index)",
"mio-named-pipes 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)",
- "tokio-io 0.1.13 (registry+https://github.com/rust-lang/crates.io-index)",
- "tokio-reactor 0.1.12 (registry+https://github.com/rust-lang/crates.io-index)",
- "tokio-signal 0.2.9 (registry+https://github.com/rust-lang/crates.io-index)",
+ "mio-uds 0.6.7 (registry+https://github.com/rust-lang/crates.io-index)",
+ "pin-project-lite 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)",
+ "signal-hook-registry 1.2.0 (registry+https://github.com/rust-lang/crates.io-index)",
+ "slab 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)",
+ "tokio-macros 0.2.5 (registry+https://github.com/rust-lang/crates.io-index)",
"winapi 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
-name = "tokio-reactor"
-version = "0.1.12"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-dependencies = [
- "crossbeam-utils 0.7.2 (registry+https://github.com/rust-lang/crates.io-index)",
- "futures 0.1.29 (registry+https://github.com/rust-lang/crates.io-index)",
- "lazy_static 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)",
- "log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)",
- "mio 0.6.21 (registry+https://github.com/rust-lang/crates.io-index)",
- "num_cpus 1.12.0 (registry+https://github.com/rust-lang/crates.io-index)",
- "parking_lot 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)",
- "slab 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)",
- "tokio-executor 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)",
- "tokio-io 0.1.13 (registry+https://github.com/rust-lang/crates.io-index)",
- "tokio-sync 0.1.8 (registry+https://github.com/rust-lang/crates.io-index)",
-]
-
-[[package]]
-name = "tokio-signal"
-version = "0.2.9"
+name = "tokio-hglib"
+version = "0.3.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
- "futures 0.1.29 (registry+https://github.com/rust-lang/crates.io-index)",
- "libc 0.2.68 (registry+https://github.com/rust-lang/crates.io-index)",
- "mio 0.6.21 (registry+https://github.com/rust-lang/crates.io-index)",
- "mio-uds 0.6.7 (registry+https://github.com/rust-lang/crates.io-index)",
- "signal-hook-registry 1.2.0 (registry+https://github.com/rust-lang/crates.io-index)",
- "tokio-executor 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)",
- "tokio-io 0.1.13 (registry+https://github.com/rust-lang/crates.io-index)",
- "tokio-reactor 0.1.12 (registry+https://github.com/rust-lang/crates.io-index)",
- "winapi 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)",
-]
-
-[[package]]
-name = "tokio-sync"
-version = "0.1.8"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-dependencies = [
- "fnv 1.0.6 (registry+https://github.com/rust-lang/crates.io-index)",
- "futures 0.1.29 (registry+https://github.com/rust-lang/crates.io-index)",
-]
-
-[[package]]
-name = "tokio-tcp"
-version = "0.1.4"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-dependencies = [
- "bytes 0.4.12 (registry+https://github.com/rust-lang/crates.io-index)",
- "futures 0.1.29 (registry+https://github.com/rust-lang/crates.io-index)",
- "iovec 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)",
- "mio 0.6.21 (registry+https://github.com/rust-lang/crates.io-index)",
- "tokio-io 0.1.13 (registry+https://github.com/rust-lang/crates.io-index)",
- "tokio-reactor 0.1.12 (registry+https://github.com/rust-lang/crates.io-index)",
+ "async-trait 0.1.30 (registry+https://github.com/rust-lang/crates.io-index)",
+ "bytes 0.5.4 (registry+https://github.com/rust-lang/crates.io-index)",
+ "futures 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)",
+ "tokio 0.2.18 (registry+https://github.com/rust-lang/crates.io-index)",
+ "tokio-util 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
-name = "tokio-threadpool"
-version = "0.1.18"
+name = "tokio-macros"
+version = "0.2.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
- "crossbeam-deque 0.7.3 (registry+https://github.com/rust-lang/crates.io-index)",
- "crossbeam-queue 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)",
- "crossbeam-utils 0.7.2 (registry+https://github.com/rust-lang/crates.io-index)",
- "futures 0.1.29 (registry+https://github.com/rust-lang/crates.io-index)",
- "lazy_static 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)",
- "log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)",
- "num_cpus 1.12.0 (registry+https://github.com/rust-lang/crates.io-index)",
- "slab 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)",
- "tokio-executor 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)",
-]
-
-[[package]]
-name = "tokio-timer"
-version = "0.2.13"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-dependencies = [
- "crossbeam-utils 0.7.2 (registry+https://github.com/rust-lang/crates.io-index)",
- "futures 0.1.29 (registry+https://github.com/rust-lang/crates.io-index)",
- "slab 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)",
- "tokio-executor 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)",
+ "proc-macro2 1.0.10 (registry+https://github.com/rust-lang/crates.io-index)",
+ "quote 1.0.3 (registry+https://github.com/rust-lang/crates.io-index)",
+ "syn 1.0.17 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
-name = "tokio-udp"
-version = "0.1.6"
+name = "tokio-util"
+version = "0.3.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
- "bytes 0.4.12 (registry+https://github.com/rust-lang/crates.io-index)",
- "futures 0.1.29 (registry+https://github.com/rust-lang/crates.io-index)",
+ "bytes 0.5.4 (registry+https://github.com/rust-lang/crates.io-index)",
+ "futures-core 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)",
+ "futures-sink 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)",
"log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)",
- "mio 0.6.21 (registry+https://github.com/rust-lang/crates.io-index)",
- "tokio-codec 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)",
- "tokio-io 0.1.13 (registry+https://github.com/rust-lang/crates.io-index)",
- "tokio-reactor 0.1.12 (registry+https://github.com/rust-lang/crates.io-index)",
+ "pin-project-lite 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)",
+ "tokio 0.2.18 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
-name = "tokio-uds"
-version = "0.2.6"
+name = "unicode-xid"
+version = "0.2.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-dependencies = [
- "bytes 0.4.12 (registry+https://github.com/rust-lang/crates.io-index)",
- "futures 0.1.29 (registry+https://github.com/rust-lang/crates.io-index)",
- "iovec 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)",
- "libc 0.2.68 (registry+https://github.com/rust-lang/crates.io-index)",
- "log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)",
- "mio 0.6.21 (registry+https://github.com/rust-lang/crates.io-index)",
- "mio-uds 0.6.7 (registry+https://github.com/rust-lang/crates.io-index)",
- "tokio-codec 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)",
- "tokio-io 0.1.13 (registry+https://github.com/rust-lang/crates.io-index)",
- "tokio-reactor 0.1.12 (registry+https://github.com/rust-lang/crates.io-index)",
-]
[[package]]
name = "winapi"
@@ -633,66 +430,50 @@
[metadata]
"checksum arc-swap 0.4.5 (registry+https://github.com/rust-lang/crates.io-index)" = "d663a8e9a99154b5fb793032533f6328da35e23aac63d5c152279aa8ba356825"
-"checksum autocfg 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)" = "f8aac770f1885fd7e387acedd76065302551364496e46b3dd00860b2f8359b9d"
+"checksum async-trait 0.1.30 (registry+https://github.com/rust-lang/crates.io-index)" = "da71fef07bc806586090247e971229289f64c210a278ee5ae419314eb386b31d"
"checksum bitflags 1.2.1 (registry+https://github.com/rust-lang/crates.io-index)" = "cf1de2fe8c75bc145a2f577add951f8134889b4795d47466a54a5c846d691693"
-"checksum byteorder 1.3.4 (registry+https://github.com/rust-lang/crates.io-index)" = "08c48aae112d48ed9f069b33538ea9e3e90aa263cfa3d1c24309612b1f7472de"
-"checksum bytes 0.4.12 (registry+https://github.com/rust-lang/crates.io-index)" = "206fdffcfa2df7cbe15601ef46c813fce0965eb3286db6b56c583b814b51c81c"
+"checksum bytes 0.5.4 (registry+https://github.com/rust-lang/crates.io-index)" = "130aac562c0dd69c56b3b1cc8ffd2e17be31d0b6c25b61c96b76231aa23e39e1"
"checksum cc 1.0.50 (registry+https://github.com/rust-lang/crates.io-index)" = "95e28fa049fda1c330bcf9d723be7663a899c4679724b34c81e9f5a326aab8cd"
"checksum cfg-if 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)" = "4785bdd1c96b2a846b2bd7cc02e86b6b3dbf14e7e53446c4f54c92a361040822"
-"checksum cloudabi 0.0.3 (registry+https://github.com/rust-lang/crates.io-index)" = "ddfc5b9aa5d4507acaf872de71051dfd0e309860e88966e1051e462a077aac4f"
-"checksum crossbeam-deque 0.7.3 (registry+https://github.com/rust-lang/crates.io-index)" = "9f02af974daeee82218205558e51ec8768b48cf524bd01d550abe5573a608285"
-"checksum crossbeam-epoch 0.8.2 (registry+https://github.com/rust-lang/crates.io-index)" = "058ed274caafc1f60c4997b5fc07bf7dc7cca454af7c6e81edffe5f33f70dace"
-"checksum crossbeam-queue 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)" = "7c979cd6cfe72335896575c6b5688da489e420d36a27a0b9eb0c73db574b4a4b"
-"checksum crossbeam-queue 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)" = "c695eeca1e7173472a32221542ae469b3e9aac3a4fc81f7696bcad82029493db"
-"checksum crossbeam-utils 0.6.6 (registry+https://github.com/rust-lang/crates.io-index)" = "04973fa96e96579258a5091af6003abde64af786b860f18622b82e026cca60e6"
-"checksum crossbeam-utils 0.7.2 (registry+https://github.com/rust-lang/crates.io-index)" = "c3c7c73a2d1e9fc0886a08b93e98eb643461230d5f1925e4036204d5f2e261a8"
-"checksum fnv 1.0.6 (registry+https://github.com/rust-lang/crates.io-index)" = "2fad85553e09a6f881f739c29f0b00b0f01357c743266d478b68951ce23285f3"
"checksum fuchsia-zircon 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)" = "2e9763c69ebaae630ba35f74888db465e49e259ba1bc0eda7d06f4a067615d82"
"checksum fuchsia-zircon-sys 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)" = "3dcaa9ae7725d12cdb85b3ad99a434db70b468c09ded17e012d86b5c1010f7a7"
-"checksum futures 0.1.29 (registry+https://github.com/rust-lang/crates.io-index)" = "1b980f2816d6ee8673b6517b52cb0e808a180efc92e5c19d02cdda79066703ef"
-"checksum hermit-abi 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)" = "725cf19794cf90aa94e65050cb4191ff5d8fa87a498383774c47b332e3af952e"
+"checksum futures 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)" = "5c329ae8753502fb44ae4fc2b622fa2a94652c41e795143765ba0927f92ab780"
+"checksum futures-channel 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)" = "f0c77d04ce8edd9cb903932b608268b3fffec4163dc053b3b402bf47eac1f1a8"
+"checksum futures-core 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)" = "f25592f769825e89b92358db00d26f965761e094951ac44d3663ef25b7ac464a"
+"checksum futures-executor 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)" = "f674f3e1bcb15b37284a90cedf55afdba482ab061c407a9c0ebbd0f3109741ba"
+"checksum futures-io 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)" = "a638959aa96152c7a4cddf50fcb1e3fede0583b27157c26e67d6f99904090dc6"
+"checksum futures-macro 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)" = "9a5081aa3de1f7542a794a397cde100ed903b0630152d0973479018fd85423a7"
+"checksum futures-sink 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)" = "3466821b4bc114d95b087b850a724c6f83115e929bc88f1fa98a3304a944c8a6"
+"checksum futures-task 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)" = "7b0a34e53cf6cdcd0178aa573aed466b646eb3db769570841fda0c7ede375a27"
+"checksum futures-util 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)" = "22766cf25d64306bedf0384da004d05c9974ab104fcc4528f1236181c18004c5"
"checksum iovec 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)" = "b2b3ea6ff95e175473f8ffe6a7eb7c00d054240321b84c57051175fe3c1e075e"
"checksum kernel32-sys 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)" = "7507624b29483431c0ba2d82aece8ca6cdba9382bff4ddd0f7490560c056098d"
"checksum lazy_static 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)" = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646"
-"checksum libc 0.2.68 (registry+https://github.com/rust-lang/crates.io-index)" = "dea0c0405123bba743ee3f91f49b1c7cfb684eef0da0a50110f758ccf24cdff0"
-"checksum lock_api 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)" = "79b2de95ecb4691949fea4716ca53cdbcfccb2c612e19644a8bad05edcf9f47b"
+"checksum libc 0.2.69 (registry+https://github.com/rust-lang/crates.io-index)" = "99e85c08494b21a9054e7fe1374a732aeadaff3980b6990b94bfd3a70f690005"
"checksum log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)" = "14b6052be84e6b71ab17edffc2eeabf5c2c3ae1fdb464aae35ac50c67a44e1f7"
-"checksum maybe-uninit 2.0.0 (registry+https://github.com/rust-lang/crates.io-index)" = "60302e4db3a61da70c0cb7991976248362f30319e88850c487b9b95bbf059e00"
-"checksum memoffset 0.5.4 (registry+https://github.com/rust-lang/crates.io-index)" = "b4fc2c02a7e374099d4ee95a193111f72d2110197fe200272371758f6c3643d8"
+"checksum memchr 2.3.3 (registry+https://github.com/rust-lang/crates.io-index)" = "3728d817d99e5ac407411fa471ff9800a778d88a24685968b36824eaf4bee400"
"checksum mio 0.6.21 (registry+https://github.com/rust-lang/crates.io-index)" = "302dec22bcf6bae6dfb69c647187f4b4d0fb6f535521f7bc022430ce8e12008f"
"checksum mio-named-pipes 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)" = "f5e374eff525ce1c5b7687c4cef63943e7686524a387933ad27ca7ec43779cb3"
"checksum mio-uds 0.6.7 (registry+https://github.com/rust-lang/crates.io-index)" = "966257a94e196b11bb43aca423754d87429960a768de9414f3691d6957abf125"
"checksum miow 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)" = "8c1f2f3b1cf331de6896aabf6e9d55dca90356cc9960cca7eaaf408a355ae919"
"checksum miow 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)" = "396aa0f2003d7df8395cb93e09871561ccc3e785f0acb369170e8cc74ddf9226"
"checksum net2 0.2.33 (registry+https://github.com/rust-lang/crates.io-index)" = "42550d9fb7b6684a6d404d9fa7250c2eb2646df731d1c06afc06dcee9e1bcf88"
-"checksum num_cpus 1.12.0 (registry+https://github.com/rust-lang/crates.io-index)" = "46203554f085ff89c235cd12f7075f3233af9b11ed7c9e16dfe2560d03313ce6"
-"checksum parking_lot 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)" = "f842b1982eb6c2fe34036a4fbfb06dd185a3f5c8edfaacdf7d1ea10b07de6252"
-"checksum parking_lot_core 0.6.2 (registry+https://github.com/rust-lang/crates.io-index)" = "b876b1b9e7ac6e1a74a6da34d25c42e17e8862aa409cbbbdcfc8d86c6f3bc62b"
+"checksum pin-project-lite 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)" = "237844750cfbb86f67afe27eee600dfbbcb6188d734139b534cbfbf4f96792ae"
+"checksum pin-utils 0.1.0-alpha.4 (registry+https://github.com/rust-lang/crates.io-index)" = "5894c618ce612a3fa23881b152b608bafb8c56cfc22f434a3ba3120b40f7b587"
+"checksum proc-macro-hack 0.5.15 (registry+https://github.com/rust-lang/crates.io-index)" = "0d659fe7c6d27f25e9d80a1a094c223f5246f6a6596453e09d7229bf42750b63"
+"checksum proc-macro-nested 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)" = "8e946095f9d3ed29ec38de908c22f95d9ac008e424c7bcae54c75a79c527c694"
+"checksum proc-macro2 1.0.10 (registry+https://github.com/rust-lang/crates.io-index)" = "df246d292ff63439fea9bc8c0a270bed0e390d5ebd4db4ba15aba81111b5abe3"
+"checksum quote 1.0.3 (registry+https://github.com/rust-lang/crates.io-index)" = "2bdc6c187c65bca4260c9011c9e3132efe4909da44726bad24cf7572ae338d7f"
"checksum redox_syscall 0.1.56 (registry+https://github.com/rust-lang/crates.io-index)" = "2439c63f3f6139d1b57529d16bc3b8bb855230c8efcc5d3a896c8bea7c3b1e84"
-"checksum rustc_version 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)" = "138e3e0acb6c9fb258b19b67cb8abd63c00679d2851805ea151465464fe9030a"
-"checksum scopeguard 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "d29ab0c6d3fc0ee92fe66e2d99f700eab17a8d57d1c1d3b748380fb20baa78cd"
-"checksum semver 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)" = "1d7eb9ef2c18661902cc47e535f9bc51b78acd254da71d375c2f6720d9a40403"
-"checksum semver-parser 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)" = "388a1df253eca08550bef6c72392cfe7c30914bf41df5269b68cbd6ff8f570a3"
"checksum signal-hook-registry 1.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "94f478ede9f64724c5d173d7bb56099ec3e2d9fc2774aac65d34b8b890405f41"
"checksum slab 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)" = "c111b5bd5695e56cffe5129854aa230b39c93a305372fdbb2668ca2394eea9f8"
-"checksum smallvec 0.6.13 (registry+https://github.com/rust-lang/crates.io-index)" = "f7b0758c52e15a8b5e3691eae6cc559f08eee9406e548a4477ba4e67770a82b6"
-"checksum socket2 0.3.11 (registry+https://github.com/rust-lang/crates.io-index)" = "e8b74de517221a2cb01a53349cf54182acdc31a074727d3079068448c0676d85"
-"checksum tokio 0.1.22 (registry+https://github.com/rust-lang/crates.io-index)" = "5a09c0b5bb588872ab2f09afa13ee6e9dac11e10a0ec9e8e3ba39a5a5d530af6"
-"checksum tokio-codec 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)" = "25b2998660ba0e70d18684de5d06b70b70a3a747469af9dea7618cc59e75976b"
-"checksum tokio-current-thread 0.1.7 (registry+https://github.com/rust-lang/crates.io-index)" = "b1de0e32a83f131e002238d7ccde18211c0a5397f60cbfffcb112868c2e0e20e"
-"checksum tokio-executor 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)" = "fb2d1b8f4548dbf5e1f7818512e9c406860678f29c300cdf0ebac72d1a3a1671"
-"checksum tokio-fs 0.1.7 (registry+https://github.com/rust-lang/crates.io-index)" = "297a1206e0ca6302a0eed35b700d292b275256f596e2f3fea7729d5e629b6ff4"
-"checksum tokio-hglib 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "8a138c3cb866c8a95ceddae44634bb159eefeebcdba45aec2158f8ad6c201e6d"
-"checksum tokio-io 0.1.13 (registry+https://github.com/rust-lang/crates.io-index)" = "57fc868aae093479e3131e3d165c93b1c7474109d13c90ec0dda2a1bbfff0674"
-"checksum tokio-process 0.2.5 (registry+https://github.com/rust-lang/crates.io-index)" = "382d90f43fa31caebe5d3bc6cfd854963394fff3b8cb59d5146607aaae7e7e43"
-"checksum tokio-reactor 0.1.12 (registry+https://github.com/rust-lang/crates.io-index)" = "09bc590ec4ba8ba87652da2068d150dcada2cfa2e07faae270a5e0409aa51351"
-"checksum tokio-signal 0.2.9 (registry+https://github.com/rust-lang/crates.io-index)" = "d0c34c6e548f101053321cba3da7cbb87a610b85555884c41b07da2eb91aff12"
-"checksum tokio-sync 0.1.8 (registry+https://github.com/rust-lang/crates.io-index)" = "edfe50152bc8164fcc456dab7891fa9bf8beaf01c5ee7e1dd43a397c3cf87dee"
-"checksum tokio-tcp 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)" = "98df18ed66e3b72e742f185882a9e201892407957e45fbff8da17ae7a7c51f72"
-"checksum tokio-threadpool 0.1.18 (registry+https://github.com/rust-lang/crates.io-index)" = "df720b6581784c118f0eb4310796b12b1d242a7eb95f716a8367855325c25f89"
-"checksum tokio-timer 0.2.13 (registry+https://github.com/rust-lang/crates.io-index)" = "93044f2d313c95ff1cb7809ce9a7a05735b012288a888b62d4434fd58c94f296"
-"checksum tokio-udp 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)" = "e2a0b10e610b39c38b031a2fcab08e4b82f16ece36504988dcbd81dbba650d82"
-"checksum tokio-uds 0.2.6 (registry+https://github.com/rust-lang/crates.io-index)" = "5076db410d6fdc6523df7595447629099a1fdc47b3d9f896220780fa48faf798"
+"checksum socket2 0.3.12 (registry+https://github.com/rust-lang/crates.io-index)" = "03088793f677dce356f3ccc2edb1b314ad191ab702a5de3faf49304f7e104918"
+"checksum syn 1.0.17 (registry+https://github.com/rust-lang/crates.io-index)" = "0df0eb663f387145cab623dea85b09c2c5b4b0aef44e945d928e682fce71bb03"
+"checksum tokio 0.2.18 (registry+https://github.com/rust-lang/crates.io-index)" = "34ef16d072d2b6dc8b4a56c70f5c5ced1a37752116f8e7c1e80c659aa7cb6713"
+"checksum tokio-hglib 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)" = "8d7e2b5d44911ebf67a1044423604f5f69206c5cbbd7e911b4966e6831514bca"
+"checksum tokio-macros 0.2.5 (registry+https://github.com/rust-lang/crates.io-index)" = "f0c3acc6aa564495a0f2e1d59fab677cd7f81a19994cfc7f3ad0e64301560389"
+"checksum tokio-util 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)" = "be8242891f2b6cbef26a2d7e8605133c2c554cd35b3e4948ea892d6d68436499"
+"checksum unicode-xid 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "826e7639553986605ec5979c7dd957c7895e93eabed50ab2ffa7f6128a75097c"
"checksum winapi 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)" = "167dc9d6949a9b857f3451275e911c3f44255842c1f7a76f33c55103a909087a"
"checksum winapi 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)" = "8093091eeb260906a183e6ae1abdba2ef5ef2257a21801128899c3fc699229c6"
"checksum winapi-build 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "2d315eee3b34aca4797b2da6b13ed88266e6d612562a0c46390af8299fc699bc"
--- a/rust/chg/Cargo.toml Tue Jun 23 16:07:18 2020 +0200
+++ b/rust/chg/Cargo.toml Thu Jun 25 10:32:51 2020 -0700
@@ -7,14 +7,16 @@
edition = "2018"
[dependencies]
-bytes = "0.4"
-futures = "0.1"
+async-trait = "0.1"
+bytes = "0.5"
+futures = "0.3"
libc = "0.2"
log = { version = "0.4", features = ["std"] }
-tokio = "0.1"
-tokio-hglib = "0.2"
-tokio-process = "0.2.3"
-tokio-timer = "0.2"
+tokio-hglib = "0.3"
+
+[dependencies.tokio]
+version = "0.2"
+features = ["rt-core", "io-util", "time", "process", "macros"]
[build-dependencies]
cc = "1.0"
--- a/rust/chg/src/attachio.rs Tue Jun 23 16:07:18 2020 +0200
+++ b/rust/chg/src/attachio.rs Thu Jun 25 10:32:51 2020 -0700
@@ -5,17 +5,15 @@
//! Functions to send client-side fds over the command server channel.
-use futures::{try_ready, Async, Future, Poll};
use std::io;
use std::os::unix::io::AsRawFd;
use tokio_hglib::codec::ChannelMessage;
-use tokio_hglib::protocol::MessageLoop;
-use tokio_hglib::{Client, Connection};
+use tokio_hglib::{Connection, Protocol};
use crate::message;
use crate::procutil;
-/// Future to send client-side fds over the command server channel.
+/// Sends client-side fds over the command server channel.
///
/// This works as follows:
/// 1. Client sends "attachio" request.
@@ -23,92 +21,48 @@
/// 3. Client sends fds with 1-byte dummy payload in response.
/// 4. Server returns the number of the fds received.
///
-/// If the stderr is omitted, it will be redirected to the stdout. This
-/// allows us to attach the pager stdin to both stdout and stderr, and
-/// dispose of the client-side handle once attached.
-#[must_use = "futures do nothing unless polled"]
-pub struct AttachIo<C, I, O, E>
-where
- C: Connection,
-{
- msg_loop: MessageLoop<C>,
- stdin: I,
- stdout: O,
- stderr: Option<E>,
-}
-
-impl<C, I, O, E> AttachIo<C, I, O, E>
-where
- C: Connection + AsRawFd,
- I: AsRawFd,
- O: AsRawFd,
- E: AsRawFd,
-{
- pub fn with_client(
- client: Client<C>,
- stdin: I,
- stdout: O,
- stderr: Option<E>,
- ) -> AttachIo<C, I, O, E> {
- let msg_loop = MessageLoop::start(client, b"attachio");
- AttachIo {
- msg_loop,
- stdin,
- stdout,
- stderr,
- }
- }
-}
-
-impl<C, I, O, E> Future for AttachIo<C, I, O, E>
-where
- C: Connection + AsRawFd,
- I: AsRawFd,
- O: AsRawFd,
- E: AsRawFd,
-{
- type Item = Client<C>;
- type Error = io::Error;
-
- fn poll(&mut self) -> Poll<Self::Item, Self::Error> {
- loop {
- let (client, msg) = try_ready!(self.msg_loop.poll());
- match msg {
- ChannelMessage::Data(b'r', data) => {
- let fd_cnt = message::parse_result_code(data)?;
- if fd_cnt == 3 {
- return Ok(Async::Ready(client));
- } else {
- return Err(io::Error::new(
- io::ErrorKind::InvalidData,
- "unexpected attachio result",
- ));
- }
- }
- ChannelMessage::Data(..) => {
- // just ignore data sent to uninteresting (optional) channel
- self.msg_loop = MessageLoop::resume(client);
- }
- ChannelMessage::InputRequest(1) => {
- // this may fail with EWOULDBLOCK in theory, but the
- // payload is quite small, and the send buffer should
- // be empty so the operation will complete immediately
- let sock_fd = client.as_raw_fd();
- let ifd = self.stdin.as_raw_fd();
- let ofd = self.stdout.as_raw_fd();
- let efd = self.stderr.as_ref().map_or(ofd, |f| f.as_raw_fd());
- procutil::send_raw_fds(sock_fd, &[ifd, ofd, efd])?;
- self.msg_loop = MessageLoop::resume(client);
- }
- ChannelMessage::InputRequest(..)
- | ChannelMessage::LineRequest(..)
- | ChannelMessage::SystemRequest(..) => {
+/// The client-side fds may be dropped once duplicated to the server.
+pub async fn attach_io(
+ proto: &mut Protocol<impl Connection + AsRawFd>,
+ stdin: &impl AsRawFd,
+ stdout: &impl AsRawFd,
+ stderr: &impl AsRawFd,
+) -> io::Result<()> {
+ proto.send_command("attachio").await?;
+ loop {
+ match proto.fetch_response().await? {
+ ChannelMessage::Data(b'r', data) => {
+ let fd_cnt = message::parse_result_code(data)?;
+ if fd_cnt == 3 {
+ return Ok(());
+ } else {
return Err(io::Error::new(
io::ErrorKind::InvalidData,
- "unsupported request while attaching io",
+ "unexpected attachio result",
));
}
}
+ ChannelMessage::Data(..) => {
+ // just ignore data sent to uninteresting (optional) channel
+ }
+ ChannelMessage::InputRequest(1) => {
+ // this may fail with EWOULDBLOCK in theory, but the
+ // payload is quite small, and the send buffer should
+ // be empty so the operation will complete immediately
+ let sock_fd = proto.as_raw_fd();
+ let ifd = stdin.as_raw_fd();
+ let ofd = stdout.as_raw_fd();
+ let efd = stderr.as_raw_fd();
+ procutil::send_raw_fds(sock_fd, &[ifd, ofd, efd])?;
+ }
+ ChannelMessage::InputRequest(..)
+ | ChannelMessage::LineRequest(..)
+ | ChannelMessage::SystemRequest(..) => {
+ return Err(io::Error::new(
+ io::ErrorKind::InvalidData,
+ "unsupported request while attaching io",
+ ));
+ }
}
}
}
--- a/rust/chg/src/clientext.rs Tue Jun 23 16:07:18 2020 +0200
+++ b/rust/chg/src/clientext.rs Thu Jun 25 10:32:51 2020 -0700
@@ -5,55 +5,99 @@
//! cHg extensions to command server client.
-use bytes::{BufMut, Bytes, BytesMut};
+use bytes::{BufMut, BytesMut};
use std::ffi::OsStr;
use std::io;
use std::mem;
use std::os::unix::ffi::OsStrExt;
use std::os::unix::io::AsRawFd;
use std::path::Path;
-use tokio_hglib::protocol::{OneShotQuery, OneShotRequest};
-use tokio_hglib::{Client, Connection};
+use tokio_hglib::UnixClient;
-use crate::attachio::AttachIo;
-use crate::message::{self, Instruction};
-use crate::runcommand::ChgRunCommand;
+use crate::attachio;
+use crate::message::{self, Instruction, ServerSpec};
+use crate::runcommand;
use crate::uihandler::SystemHandler;
-pub trait ChgClientExt<C>
-where
- C: Connection + AsRawFd,
-{
+/// Command-server client that also supports cHg extensions.
+pub struct ChgClient {
+ client: UnixClient,
+}
+
+impl ChgClient {
+ /// Connects to a command server listening at the specified socket path.
+ pub async fn connect(path: impl AsRef<Path>) -> io::Result<Self> {
+ let client = UnixClient::connect(path).await?;
+ Ok(ChgClient { client })
+ }
+
+ /// Server capabilities, encoding, etc.
+ pub fn server_spec(&self) -> &ServerSpec {
+ self.client.server_spec()
+ }
+
/// Attaches the client file descriptors to the server.
- fn attach_io<I, O, E>(self, stdin: I, stdout: O, stderr: E) -> AttachIo<C, I, O, E>
- where
- I: AsRawFd,
- O: AsRawFd,
- E: AsRawFd;
+ pub async fn attach_io(
+ &mut self,
+ stdin: &impl AsRawFd,
+ stdout: &impl AsRawFd,
+ stderr: &impl AsRawFd,
+ ) -> io::Result<()> {
+ attachio::attach_io(self.client.borrow_protocol_mut(), stdin, stdout, stderr).await
+ }
/// Changes the working directory of the server.
- fn set_current_dir(self, dir: impl AsRef<Path>) -> OneShotRequest<C>;
+ pub async fn set_current_dir(&mut self, dir: impl AsRef<Path>) -> io::Result<()> {
+ let dir_bytes = dir.as_ref().as_os_str().as_bytes().to_owned();
+ self.client
+ .borrow_protocol_mut()
+ .send_command_with_args("chdir", dir_bytes)
+ .await
+ }
/// Updates the environment variables of the server.
- fn set_env_vars_os(
- self,
+ pub async fn set_env_vars_os(
+ &mut self,
vars: impl IntoIterator<Item = (impl AsRef<OsStr>, impl AsRef<OsStr>)>,
- ) -> OneShotRequest<C>;
+ ) -> io::Result<()> {
+ self.client
+ .borrow_protocol_mut()
+ .send_command_with_args("setenv", message::pack_env_vars_os(vars))
+ .await
+ }
/// Changes the process title of the server.
- fn set_process_name(self, name: impl AsRef<OsStr>) -> OneShotRequest<C>;
+ pub async fn set_process_name(&mut self, name: impl AsRef<OsStr>) -> io::Result<()> {
+ let name_bytes = name.as_ref().as_bytes().to_owned();
+ self.client
+ .borrow_protocol_mut()
+ .send_command_with_args("setprocname", name_bytes)
+ .await
+ }
/// Changes the umask of the server process.
- fn set_umask(self, mask: u32) -> OneShotRequest<C>;
+ pub async fn set_umask(&mut self, mask: u32) -> io::Result<()> {
+ let mut mask_bytes = BytesMut::with_capacity(mem::size_of_val(&mask));
+ mask_bytes.put_u32(mask);
+ self.client
+ .borrow_protocol_mut()
+ .send_command_with_args("setumask2", mask_bytes)
+ .await
+ }
/// Runs the specified Mercurial command with cHg extension.
- fn run_command_chg<H>(
- self,
- handler: H,
+ pub async fn run_command_chg(
+ &mut self,
+ handler: &mut impl SystemHandler,
args: impl IntoIterator<Item = impl AsRef<OsStr>>,
- ) -> ChgRunCommand<C, H>
- where
- H: SystemHandler;
+ ) -> io::Result<i32> {
+ runcommand::run_command(
+ self.client.borrow_protocol_mut(),
+ handler,
+ message::pack_args_os(args),
+ )
+ .await
+ }
/// Validates if the server can run Mercurial commands with the expected
/// configuration.
@@ -63,66 +107,15 @@
///
/// Client-side environment must be sent prior to this request, by
/// `set_current_dir()` and `set_env_vars_os()`.
- fn validate(
- self,
+ pub async fn validate(
+ &mut self,
args: impl IntoIterator<Item = impl AsRef<OsStr>>,
- ) -> OneShotQuery<C, fn(Bytes) -> io::Result<Vec<Instruction>>>;
-}
-
-impl<C> ChgClientExt<C> for Client<C>
-where
- C: Connection + AsRawFd,
-{
- fn attach_io<I, O, E>(self, stdin: I, stdout: O, stderr: E) -> AttachIo<C, I, O, E>
- where
- I: AsRawFd,
- O: AsRawFd,
- E: AsRawFd,
- {
- AttachIo::with_client(self, stdin, stdout, Some(stderr))
- }
-
- fn set_current_dir(self, dir: impl AsRef<Path>) -> OneShotRequest<C> {
- OneShotRequest::start_with_args(self, b"chdir", dir.as_ref().as_os_str().as_bytes())
- }
-
- fn set_env_vars_os(
- self,
- vars: impl IntoIterator<Item = (impl AsRef<OsStr>, impl AsRef<OsStr>)>,
- ) -> OneShotRequest<C> {
- OneShotRequest::start_with_args(self, b"setenv", message::pack_env_vars_os(vars))
- }
-
- fn set_process_name(self, name: impl AsRef<OsStr>) -> OneShotRequest<C> {
- OneShotRequest::start_with_args(self, b"setprocname", name.as_ref().as_bytes())
- }
-
- fn set_umask(self, mask: u32) -> OneShotRequest<C> {
- let mut args = BytesMut::with_capacity(mem::size_of_val(&mask));
- args.put_u32_be(mask);
- OneShotRequest::start_with_args(self, b"setumask2", args)
- }
-
- fn run_command_chg<H>(
- self,
- handler: H,
- args: impl IntoIterator<Item = impl AsRef<OsStr>>,
- ) -> ChgRunCommand<C, H>
- where
- H: SystemHandler,
- {
- ChgRunCommand::with_client(self, handler, message::pack_args_os(args))
- }
-
- fn validate(
- self,
- args: impl IntoIterator<Item = impl AsRef<OsStr>>,
- ) -> OneShotQuery<C, fn(Bytes) -> io::Result<Vec<Instruction>>> {
- OneShotQuery::start_with_args(
- self,
- b"validate",
- message::pack_args_os(args),
- message::parse_instructions,
- )
+ ) -> io::Result<Vec<Instruction>> {
+ let data = self
+ .client
+ .borrow_protocol_mut()
+ .query_with_args("validate", message::pack_args_os(args))
+ .await?;
+ message::parse_instructions(data)
}
}
--- a/rust/chg/src/lib.rs Tue Jun 23 16:07:18 2020 +0200
+++ b/rust/chg/src/lib.rs Thu Jun 25 10:32:51 2020 -0700
@@ -11,5 +11,5 @@
mod runcommand;
mod uihandler;
-pub use clientext::ChgClientExt;
+pub use clientext::ChgClient;
pub use uihandler::{ChgUiHandler, SystemHandler};
--- a/rust/chg/src/locator.rs Tue Jun 23 16:07:18 2020 +0200
+++ b/rust/chg/src/locator.rs Thu Jun 25 10:32:51 2020 -0700
@@ -5,7 +5,6 @@
//! Utility for locating command-server process.
-use futures::future::{self, Either, Loop};
use log::debug;
use std::env;
use std::ffi::{OsStr, OsString};
@@ -14,14 +13,11 @@
use std::os::unix::ffi::{OsStrExt, OsStringExt};
use std::os::unix::fs::{DirBuilderExt, MetadataExt};
use std::path::{Path, PathBuf};
-use std::process::{self, Command};
-use std::time::Duration;
-use tokio::prelude::*;
-use tokio_hglib::UnixClient;
-use tokio_process::{Child, CommandExt};
-use tokio_timer;
+use std::process::{self, Child, Command};
+use std::time::{Duration, Instant};
+use tokio::time;
-use crate::clientext::ChgClientExt;
+use crate::clientext::ChgClient;
use crate::message::{Instruction, ServerSpec};
use crate::procutil;
@@ -82,43 +78,33 @@
/// Connects to the server.
///
/// The server process will be spawned if not running.
- pub fn connect(self) -> impl Future<Item = (Self, UnixClient), Error = io::Error> {
- future::loop_fn((self, 0), |(loc, cnt)| {
- if cnt < 10 {
- let fut = loc
- .try_connect()
- .and_then(|(loc, client)| {
- client
- .validate(&loc.hg_early_args)
- .map(|(client, instructions)| (loc, client, instructions))
- })
- .and_then(move |(loc, client, instructions)| {
- loc.run_instructions(client, instructions, cnt)
- });
- Either::A(fut)
- } else {
- let msg = format!(
- concat!(
- "too many redirections.\n",
- "Please make sure {:?} is not a wrapper which ",
- "changes sensitive environment variables ",
- "before executing hg. If you have to use a ",
- "wrapper, wrap chg instead of hg.",
- ),
- loc.hg_command
- );
- Either::B(future::err(io::Error::new(io::ErrorKind::Other, msg)))
+ pub async fn connect(&mut self) -> io::Result<ChgClient> {
+ for _cnt in 0..10 {
+ let mut client = self.try_connect().await?;
+ let instructions = client.validate(&self.hg_early_args).await?;
+ let reconnect = self.run_instructions(&instructions)?;
+ if !reconnect {
+ return Ok(client);
}
- })
+ }
+
+ let msg = format!(
+ concat!(
+ "too many redirections.\n",
+ "Please make sure {:?} is not a wrapper which ",
+ "changes sensitive environment variables ",
+ "before executing hg. If you have to use a ",
+ "wrapper, wrap chg instead of hg.",
+ ),
+ self.hg_command
+ );
+ Err(io::Error::new(io::ErrorKind::Other, msg))
}
/// Runs instructions received from the server.
- fn run_instructions(
- mut self,
- client: UnixClient,
- instructions: Vec<Instruction>,
- cnt: usize,
- ) -> io::Result<Loop<(Self, UnixClient), (Self, usize)>> {
+ ///
+ /// Returns true if the client should try connecting to the other server.
+ fn run_instructions(&mut self, instructions: &[Instruction]) -> io::Result<bool> {
let mut reconnect = false;
for inst in instructions {
debug!("instruction: {:?}", inst);
@@ -126,7 +112,7 @@
Instruction::Exit(_) => {
// Just returns the current connection to run the
// unparsable command and report the error
- return Ok(Loop::Break((self, client)));
+ return Ok(false);
}
Instruction::Reconnect => {
reconnect = true;
@@ -139,7 +125,7 @@
);
return Err(io::Error::new(io::ErrorKind::InvalidData, msg));
}
- self.redirect_sock_path = Some(path);
+ self.redirect_sock_path = Some(path.to_owned());
reconnect = true;
}
Instruction::Unlink(path) => {
@@ -155,64 +141,44 @@
}
}
- if reconnect {
- Ok(Loop::Continue((self, cnt + 1)))
- } else {
- Ok(Loop::Break((self, client)))
- }
+ Ok(reconnect)
}
/// Tries to connect to the existing server, or spawns new if not running.
- fn try_connect(self) -> impl Future<Item = (Self, UnixClient), Error = io::Error> {
+ async fn try_connect(&mut self) -> io::Result<ChgClient> {
let sock_path = self
.redirect_sock_path
.as_ref()
.unwrap_or(&self.base_sock_path)
.clone();
debug!("try connect to {}", sock_path.display());
- UnixClient::connect(sock_path)
- .then(|res| {
- match res {
- Ok(client) => Either::A(future::ok((self, client))),
- Err(_) => {
- // Prevent us from being re-connected to the outdated
- // master server: We were told by the server to redirect
- // to redirect_sock_path, which didn't work. We do not
- // want to connect to the same master server again
- // because it would probably tell us the same thing.
- if self.redirect_sock_path.is_some() {
- fs::remove_file(&self.base_sock_path).unwrap_or(());
- // may race
- }
- Either::B(self.spawn_connect())
- }
+ let mut client = match ChgClient::connect(sock_path).await {
+ Ok(client) => client,
+ Err(_) => {
+ // Prevent us from being re-connected to the outdated
+ // master server: We were told by the server to redirect
+ // to redirect_sock_path, which didn't work. We do not
+ // want to connect to the same master server again
+ // because it would probably tell us the same thing.
+ if self.redirect_sock_path.is_some() {
+ fs::remove_file(&self.base_sock_path).unwrap_or(());
+ // may race
}
- })
- .and_then(|(loc, client)| {
- check_server_capabilities(client.server_spec())?;
- Ok((loc, client))
- })
- .and_then(|(loc, client)| {
- // It's purely optional, and the server might not support this command.
- if client.server_spec().capabilities.contains("setprocname") {
- let fut = client
- .set_process_name(format!("chg[worker/{}]", loc.process_id))
- .map(|client| (loc, client));
- Either::A(fut)
- } else {
- Either::B(future::ok((loc, client)))
- }
- })
- .and_then(|(loc, client)| {
- client
- .set_current_dir(&loc.current_dir)
- .map(|client| (loc, client))
- })
- .and_then(|(loc, client)| {
- client
- .set_env_vars_os(loc.env_vars.iter().cloned())
- .map(|client| (loc, client))
- })
+ self.spawn_connect().await?
+ }
+ };
+ check_server_capabilities(client.server_spec())?;
+ // It's purely optional, and the server might not support this command.
+ if client.server_spec().capabilities.contains("setprocname") {
+ client
+ .set_process_name(format!("chg[worker/{}]", self.process_id))
+ .await?;
+ }
+ client.set_current_dir(&self.current_dir).await?;
+ client
+ .set_env_vars_os(self.env_vars.iter().cloned())
+ .await?;
+ Ok(client)
}
/// Spawns new server process and connects to it.
@@ -220,10 +186,10 @@
/// The server will be spawned at the current working directory, then
/// chdir to "/", so that the server will load configs from the target
/// repository.
- fn spawn_connect(self) -> impl Future<Item = (Self, UnixClient), Error = io::Error> {
+ async fn spawn_connect(&mut self) -> io::Result<ChgClient> {
let sock_path = self.temp_sock_path();
debug!("start cmdserver at {}", sock_path.display());
- Command::new(&self.hg_command)
+ let server = Command::new(&self.hg_command)
.arg("serve")
.arg("--cmdserver")
.arg("chgunix")
@@ -236,68 +202,49 @@
.env_clear()
.envs(self.env_vars.iter().cloned())
.env("CHGINTERNALMARK", "")
- .spawn_async()
- .into_future()
- .and_then(|server| self.connect_spawned(server, sock_path))
- .and_then(|(loc, client, sock_path)| {
- debug!(
- "rename {} to {}",
- sock_path.display(),
- loc.base_sock_path.display()
- );
- fs::rename(&sock_path, &loc.base_sock_path)?;
- Ok((loc, client))
- })
+ .spawn()?;
+ let client = self.connect_spawned(server, &sock_path).await?;
+ debug!(
+ "rename {} to {}",
+ sock_path.display(),
+ self.base_sock_path.display()
+ );
+ fs::rename(&sock_path, &self.base_sock_path)?;
+ Ok(client)
}
/// Tries to connect to the just spawned server repeatedly until timeout
/// exceeded.
- fn connect_spawned(
- self,
- server: Child,
- sock_path: PathBuf,
- ) -> impl Future<Item = (Self, UnixClient, PathBuf), Error = io::Error> {
+ async fn connect_spawned(
+ &mut self,
+ mut server: Child,
+ sock_path: &Path,
+ ) -> io::Result<ChgClient> {
debug!("try connect to {} repeatedly", sock_path.display());
- let connect = future::loop_fn(sock_path, |sock_path| {
- UnixClient::connect(sock_path.clone()).then(|res| {
- match res {
- Ok(client) => Either::A(future::ok(Loop::Break((client, sock_path)))),
- Err(_) => {
- // try again with slight delay
- let fut = tokio_timer::sleep(Duration::from_millis(10))
- .map(|()| Loop::Continue(sock_path))
- .map_err(|err| io::Error::new(io::ErrorKind::Other, err));
- Either::B(fut)
- }
- }
- })
- });
-
// waits for either connection established or server failed to start
- connect
- .select2(server)
- .map_err(|res| res.split().0)
- .timeout(self.timeout)
- .map_err(|err| {
- err.into_inner().unwrap_or_else(|| {
- io::Error::new(
- io::ErrorKind::TimedOut,
- "timed out while connecting to server",
- )
- })
- })
- .and_then(|res| {
- match res {
- Either::A(((client, sock_path), server)) => {
- server.forget(); // continue to run in background
- Ok((self, client, sock_path))
- }
- Either::B((st, _)) => Err(io::Error::new(
- io::ErrorKind::Other,
- format!("server exited too early: {}", st),
- )),
- }
- })
+ let start_time = Instant::now();
+ while start_time.elapsed() < self.timeout {
+ if let Ok(client) = ChgClient::connect(&sock_path).await {
+ // server handle is dropped here, but the detached process
+ // will continue running in background
+ return Ok(client);
+ }
+
+ if let Some(st) = server.try_wait()? {
+ return Err(io::Error::new(
+ io::ErrorKind::Other,
+ format!("server exited too early: {}", st),
+ ));
+ }
+
+ // try again with slight delay
+ time::delay_for(Duration::from_millis(10)).await;
+ }
+
+ Err(io::Error::new(
+ io::ErrorKind::TimedOut,
+ "timed out while connecting to server",
+ ))
}
}
--- a/rust/chg/src/main.rs Tue Jun 23 16:07:18 2020 +0200
+++ b/rust/chg/src/main.rs Thu Jun 25 10:32:51 2020 -0700
@@ -5,13 +5,12 @@
use chg::locator::{self, Locator};
use chg::procutil;
-use chg::{ChgClientExt, ChgUiHandler};
-use futures::sync::oneshot;
+use chg::ChgUiHandler;
use std::env;
use std::io;
+use std::io::Write;
use std::process;
use std::time::Instant;
-use tokio::prelude::*;
struct DebugLogger {
start: Instant,
@@ -67,31 +66,23 @@
process::exit(code);
}
-fn run(umask: u32) -> io::Result<i32> {
+#[tokio::main]
+async fn run(umask: u32) -> io::Result<i32> {
let mut loc = Locator::prepare_from_env()?;
loc.set_early_args(locator::collect_early_args(env::args_os().skip(1)));
- let handler = ChgUiHandler::new();
- let (result_tx, result_rx) = oneshot::channel();
- let fut = loc
- .connect()
- .and_then(|(_, client)| client.attach_io(io::stdin(), io::stdout(), io::stderr()))
- .and_then(move |client| client.set_umask(umask))
- .and_then(|client| {
- let pid = client.server_spec().process_id.unwrap();
- let pgid = client.server_spec().process_group_id;
- procutil::setup_signal_handler_once(pid, pgid)?;
- Ok(client)
- })
- .and_then(|client| client.run_command_chg(handler, env::args_os().skip(1)))
- .map(|(_client, _handler, code)| {
- procutil::restore_signal_handler_once()?;
- Ok(code)
- })
- .or_else(|err| Ok(Err(err))) // pass back error to caller
- .map(|res| result_tx.send(res).unwrap());
- tokio::run(fut);
- result_rx.wait().unwrap_or(Err(io::Error::new(
- io::ErrorKind::Other,
- "no exit code set",
- )))
+ let mut handler = ChgUiHandler::new();
+ let mut client = loc.connect().await?;
+ client
+ .attach_io(&io::stdin(), &io::stdout(), &io::stderr())
+ .await?;
+ client.set_umask(umask).await?;
+ let pid = client.server_spec().process_id.unwrap();
+ let pgid = client.server_spec().process_group_id;
+ procutil::setup_signal_handler_once(pid, pgid)?;
+ let code = client
+ .run_command_chg(&mut handler, env::args_os().skip(1))
+ .await?;
+ procutil::restore_signal_handler_once()?;
+ handler.wait_pager().await?;
+ Ok(code)
}
--- a/rust/chg/src/runcommand.rs Tue Jun 23 16:07:18 2020 +0200
+++ b/rust/chg/src/runcommand.rs Thu Jun 25 10:32:51 2020 -0700
@@ -6,164 +6,56 @@
//! Functions to run Mercurial command in cHg-aware command server.
use bytes::Bytes;
-use futures::future::IntoFuture;
-use futures::{Async, Future, Poll};
use std::io;
-use std::mem;
use std::os::unix::io::AsRawFd;
use tokio_hglib::codec::ChannelMessage;
-use tokio_hglib::protocol::MessageLoop;
-use tokio_hglib::{Client, Connection};
+use tokio_hglib::{Connection, Protocol};
-use crate::attachio::AttachIo;
+use crate::attachio;
use crate::message::{self, CommandType};
use crate::uihandler::SystemHandler;
-enum AsyncS<R, S> {
- Ready(R),
- NotReady(S),
- PollAgain(S),
-}
-
-enum CommandState<C, H>
-where
- C: Connection,
- H: SystemHandler,
-{
- Running(MessageLoop<C>, H),
- SpawningPager(Client<C>, <H::SpawnPagerResult as IntoFuture>::Future),
- AttachingPager(AttachIo<C, io::Stdin, H::PagerStdin, H::PagerStdin>, H),
- WaitingSystem(Client<C>, <H::RunSystemResult as IntoFuture>::Future),
- Finished,
-}
-
-type CommandPoll<C, H> = io::Result<AsyncS<(Client<C>, H, i32), CommandState<C, H>>>;
-
-/// Future resolves to `(exit_code, client)`.
-#[must_use = "futures do nothing unless polled"]
-pub struct ChgRunCommand<C, H>
-where
- C: Connection,
- H: SystemHandler,
-{
- state: CommandState<C, H>,
-}
-
-impl<C, H> ChgRunCommand<C, H>
-where
- C: Connection + AsRawFd,
- H: SystemHandler,
-{
- pub fn with_client(client: Client<C>, handler: H, packed_args: Bytes) -> ChgRunCommand<C, H> {
- let msg_loop = MessageLoop::start_with_args(client, b"runcommand", packed_args);
- ChgRunCommand {
- state: CommandState::Running(msg_loop, handler),
- }
- }
-}
-
-impl<C, H> Future for ChgRunCommand<C, H>
-where
- C: Connection + AsRawFd,
- H: SystemHandler,
-{
- type Item = (Client<C>, H, i32);
- type Error = io::Error;
-
- fn poll(&mut self) -> Poll<Self::Item, Self::Error> {
- loop {
- let state = mem::replace(&mut self.state, CommandState::Finished);
- match state.poll()? {
- AsyncS::Ready((client, handler, code)) => {
- return Ok(Async::Ready((client, handler, code)));
- }
- AsyncS::NotReady(newstate) => {
- self.state = newstate;
- return Ok(Async::NotReady);
- }
- AsyncS::PollAgain(newstate) => {
- self.state = newstate;
- }
- }
- }
- }
-}
-
-impl<C, H> CommandState<C, H>
-where
- C: Connection + AsRawFd,
- H: SystemHandler,
-{
- fn poll(self) -> CommandPoll<C, H> {
- match self {
- CommandState::Running(mut msg_loop, handler) => {
- if let Async::Ready((client, msg)) = msg_loop.poll()? {
- process_message(client, handler, msg)
- } else {
- Ok(AsyncS::NotReady(CommandState::Running(msg_loop, handler)))
- }
- }
- CommandState::SpawningPager(client, mut fut) => {
- if let Async::Ready((handler, pin)) = fut.poll()? {
- let fut = AttachIo::with_client(client, io::stdin(), pin, None);
- Ok(AsyncS::PollAgain(CommandState::AttachingPager(
- fut, handler,
- )))
- } else {
- Ok(AsyncS::NotReady(CommandState::SpawningPager(client, fut)))
- }
- }
- CommandState::AttachingPager(mut fut, handler) => {
- if let Async::Ready(client) = fut.poll()? {
- let msg_loop = MessageLoop::start(client, b""); // terminator
- Ok(AsyncS::PollAgain(CommandState::Running(msg_loop, handler)))
- } else {
- Ok(AsyncS::NotReady(CommandState::AttachingPager(fut, handler)))
- }
- }
- CommandState::WaitingSystem(client, mut fut) => {
- if let Async::Ready((handler, code)) = fut.poll()? {
- let data = message::pack_result_code(code);
- let msg_loop = MessageLoop::resume_with_data(client, data);
- Ok(AsyncS::PollAgain(CommandState::Running(msg_loop, handler)))
- } else {
- Ok(AsyncS::NotReady(CommandState::WaitingSystem(client, fut)))
- }
- }
- CommandState::Finished => panic!("poll ChgRunCommand after it's done"),
- }
- }
-}
-
-fn process_message<C, H>(client: Client<C>, handler: H, msg: ChannelMessage) -> CommandPoll<C, H>
-where
- C: Connection,
- H: SystemHandler,
-{
- {
- match msg {
+/// Runs the given Mercurial command in cHg-aware command server, and
+/// fetches the result code.
+///
+/// This is a subset of tokio-hglib's `run_command()` with the additional
+/// SystemRequest support.
+pub async fn run_command(
+ proto: &mut Protocol<impl Connection + AsRawFd>,
+ handler: &mut impl SystemHandler,
+ packed_args: impl Into<Bytes>,
+) -> io::Result<i32> {
+ proto
+ .send_command_with_args("runcommand", packed_args)
+ .await?;
+ loop {
+ match proto.fetch_response().await? {
ChannelMessage::Data(b'r', data) => {
- let code = message::parse_result_code(data)?;
- Ok(AsyncS::Ready((client, handler, code)))
+ return message::parse_result_code(data);
}
ChannelMessage::Data(..) => {
// just ignores data sent to optional channel
- let msg_loop = MessageLoop::resume(client);
- Ok(AsyncS::PollAgain(CommandState::Running(msg_loop, handler)))
}
- ChannelMessage::InputRequest(..) | ChannelMessage::LineRequest(..) => Err(
- io::Error::new(io::ErrorKind::InvalidData, "unsupported request"),
- ),
+ ChannelMessage::InputRequest(..) | ChannelMessage::LineRequest(..) => {
+ return Err(io::Error::new(
+ io::ErrorKind::InvalidData,
+ "unsupported request",
+ ));
+ }
ChannelMessage::SystemRequest(data) => {
let (cmd_type, cmd_spec) = message::parse_command_spec(data)?;
match cmd_type {
CommandType::Pager => {
- let fut = handler.spawn_pager(cmd_spec).into_future();
- Ok(AsyncS::PollAgain(CommandState::SpawningPager(client, fut)))
+ // server spins new command loop while pager request is
+ // in progress, which can be terminated by "" command.
+ let pin = handler.spawn_pager(&cmd_spec).await?;
+ attachio::attach_io(proto, &io::stdin(), &pin, &pin).await?;
+ proto.send_command("").await?; // terminator
}
CommandType::System => {
- let fut = handler.run_system(cmd_spec).into_future();
- Ok(AsyncS::PollAgain(CommandState::WaitingSystem(client, fut)))
+ let code = handler.run_system(&cmd_spec).await?;
+ let data = message::pack_result_code(code);
+ proto.send_data(data).await?;
}
}
}
--- a/rust/chg/src/uihandler.rs Tue Jun 23 16:07:18 2020 +0200
+++ b/rust/chg/src/uihandler.rs Thu Jun 25 10:32:51 2020 -0700
@@ -3,76 +3,75 @@
// This software may be used and distributed according to the terms of the
// GNU General Public License version 2 or any later version.
-use futures::future::IntoFuture;
-use futures::Future;
+use async_trait::async_trait;
use std::io;
use std::os::unix::io::AsRawFd;
use std::os::unix::process::ExitStatusExt;
-use std::process::{Command, Stdio};
+use std::process::Stdio;
use tokio;
-use tokio_process::{ChildStdin, CommandExt};
+use tokio::process::{Child, ChildStdin, Command};
use crate::message::CommandSpec;
use crate::procutil;
/// Callback to process shell command requests received from server.
-pub trait SystemHandler: Sized {
+#[async_trait]
+pub trait SystemHandler {
type PagerStdin: AsRawFd;
- type SpawnPagerResult: IntoFuture<Item = (Self, Self::PagerStdin), Error = io::Error>;
- type RunSystemResult: IntoFuture<Item = (Self, i32), Error = io::Error>;
/// Handles pager command request.
///
/// Returns the pipe to be attached to the server if the pager is spawned.
- fn spawn_pager(self, spec: CommandSpec) -> Self::SpawnPagerResult;
+ async fn spawn_pager(&mut self, spec: &CommandSpec) -> io::Result<Self::PagerStdin>;
/// Handles system command request.
///
/// Returns command exit code (positive) or signal number (negative).
- fn run_system(self, spec: CommandSpec) -> Self::RunSystemResult;
+ async fn run_system(&mut self, spec: &CommandSpec) -> io::Result<i32>;
}
/// Default cHg implementation to process requests received from server.
-pub struct ChgUiHandler {}
+pub struct ChgUiHandler {
+ pager: Option<Child>,
+}
impl ChgUiHandler {
pub fn new() -> ChgUiHandler {
- ChgUiHandler {}
+ ChgUiHandler { pager: None }
+ }
+
+ /// Waits until the pager process exits.
+ pub async fn wait_pager(&mut self) -> io::Result<()> {
+ if let Some(p) = self.pager.take() {
+ p.await?;
+ }
+ Ok(())
}
}
+#[async_trait]
impl SystemHandler for ChgUiHandler {
type PagerStdin = ChildStdin;
- type SpawnPagerResult = io::Result<(Self, Self::PagerStdin)>;
- type RunSystemResult = Box<dyn Future<Item = (Self, i32), Error = io::Error> + Send>;
- fn spawn_pager(self, spec: CommandSpec) -> Self::SpawnPagerResult {
- let mut pager = new_shell_command(&spec)
- .stdin(Stdio::piped())
- .spawn_async()?;
- let pin = pager.stdin().take().unwrap();
+ async fn spawn_pager(&mut self, spec: &CommandSpec) -> io::Result<Self::PagerStdin> {
+ let mut pager = new_shell_command(&spec).stdin(Stdio::piped()).spawn()?;
+ let pin = pager.stdin.take().unwrap();
procutil::set_blocking_fd(pin.as_raw_fd())?;
// TODO: if pager exits, notify the server with SIGPIPE immediately.
// otherwise the server won't get SIGPIPE if it does not write
// anything. (issue5278)
// kill(peerpid, SIGPIPE);
- tokio::spawn(pager.map(|_| ()).map_err(|_| ())); // just ignore errors
- Ok((self, pin))
+ self.pager = Some(pager);
+ Ok(pin)
}
- fn run_system(self, spec: CommandSpec) -> Self::RunSystemResult {
- let fut = new_shell_command(&spec)
- .spawn_async()
- .into_future()
- .flatten()
- .map(|status| {
- let code = status
- .code()
- .or_else(|| status.signal().map(|n| -n))
- .expect("either exit code or signal should be set");
- (self, code)
- });
- Box::new(fut)
+ async fn run_system(&mut self, spec: &CommandSpec) -> io::Result<i32> {
+ let status = new_shell_command(&spec).spawn()?.await?;
+ let code = status
+ .code()
+ .or_else(|| status.signal().map(|n| -n))
+ .expect("either exit code or signal should be set");
+ Ok(code)
}
}
--- a/rust/hg-core/Cargo.toml Tue Jun 23 16:07:18 2020 +0200
+++ b/rust/hg-core/Cargo.toml Thu Jun 25 10:32:51 2020 -0700
@@ -4,7 +4,6 @@
authors = ["Georges Racinet <gracinet@anybox.fr>"]
description = "Mercurial pure Rust core library, with no assumption on Python bindings (FFI)"
edition = "2018"
-build = "build.rs"
[lib]
name = "hg"
@@ -13,17 +12,16 @@
byteorder = "1.3.4"
hex = "0.4.2"
lazy_static = "1.4.0"
-libc = { version = "0.2.66", optional = true }
memchr = "2.3.3"
rand = "0.7.3"
rand_pcg = "0.2.1"
rand_distr = "0.2.2"
rayon = "1.3.0"
-regex = "1.3.6"
+regex = "1.3.9"
twox-hash = "1.5.0"
same-file = "1.0.6"
crossbeam = "0.7.3"
-micro-timer = "0.2.1"
+micro-timer = "0.3.0"
log = "0.4.8"
[dev-dependencies]
@@ -31,10 +29,3 @@
memmap = "0.7.0"
pretty_assertions = "0.6.1"
tempfile = "3.1.0"
-
-[build-dependencies]
-cc = { version = "1.0.48", optional = true }
-
-[features]
-default = []
-with-re2 = ["cc", "libc"]
--- a/rust/hg-core/build.rs Tue Jun 23 16:07:18 2020 +0200
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,61 +0,0 @@
-// build.rs
-//
-// Copyright 2020 Raphaël Gomès <rgomes@octobus.net>
-//
-// This software may be used and distributed according to the terms of the
-// GNU General Public License version 2 or any later version.
-
-#[cfg(feature = "with-re2")]
-use cc;
-
-/// Uses either the system Re2 install as a dynamic library or the provided
-/// build as a static library
-#[cfg(feature = "with-re2")]
-fn compile_re2() {
- use cc;
- use std::path::Path;
- use std::process::exit;
-
- let msg = r"HG_RE2_PATH must be one of `system|<path to build source clone of Re2>`";
- let re2 = match std::env::var_os("HG_RE2_PATH") {
- None => {
- eprintln!("{}", msg);
- exit(1)
- }
- Some(v) => {
- if v == "system" {
- None
- } else {
- Some(v)
- }
- }
- };
-
- let mut options = cc::Build::new();
- options
- .cpp(true)
- .flag("-std=c++11")
- .file("src/re2/rust_re2.cpp");
-
- if let Some(ref source) = re2 {
- options.include(Path::new(source));
- };
-
- options.compile("librustre.a");
-
- if let Some(ref source) = &re2 {
- // Link the local source statically
- println!(
- "cargo:rustc-link-search=native={}",
- Path::new(source).join(Path::new("obj")).display()
- );
- println!("cargo:rustc-link-lib=static=re2");
- } else {
- println!("cargo:rustc-link-lib=re2");
- }
-}
-
-fn main() {
- #[cfg(feature = "with-re2")]
- compile_re2();
-}
--- a/rust/hg-core/src/ancestors.rs Tue Jun 23 16:07:18 2020 +0200
+++ b/rust/hg-core/src/ancestors.rs Thu Jun 25 10:32:51 2020 -0700
@@ -55,19 +55,19 @@
let filtered_initrevs = initrevs.into_iter().filter(|&r| r >= stoprev);
if inclusive {
let visit: BinaryHeap<Revision> = filtered_initrevs.collect();
- let seen = visit.iter().map(|&x| x).collect();
+ let seen = visit.iter().cloned().collect();
return Ok(AncestorsIterator {
- visit: visit,
- seen: seen,
- stoprev: stoprev,
- graph: graph,
+ visit,
+ seen,
+ stoprev,
+ graph,
});
}
let mut this = AncestorsIterator {
visit: BinaryHeap::new(),
seen: HashSet::new(),
- stoprev: stoprev,
- graph: graph,
+ stoprev,
+ graph,
};
this.seen.insert(NULL_REVISION);
for rev in filtered_initrevs {
@@ -107,7 +107,7 @@
}
pub fn peek(&self) -> Option<Revision> {
- self.visit.peek().map(|&r| r)
+ self.visit.peek().cloned()
}
/// Tell if the iterator is about an empty set
@@ -182,8 +182,8 @@
inclusive,
)?,
initrevs: v,
- stoprev: stoprev,
- inclusive: inclusive,
+ stoprev,
+ inclusive,
})
}
@@ -211,7 +211,7 @@
impl<G: Graph> MissingAncestors<G> {
pub fn new(graph: G, bases: impl IntoIterator<Item = Revision>) -> Self {
let mut created = MissingAncestors {
- graph: graph,
+ graph,
bases: HashSet::new(),
max_base: NULL_REVISION,
};
--- a/rust/hg-core/src/dagops.rs Tue Jun 23 16:07:18 2020 +0200
+++ b/rust/hg-core/src/dagops.rs Thu Jun 25 10:32:51 2020 -0700
@@ -16,10 +16,10 @@
use crate::ancestors::AncestorsIterator;
use std::collections::{BTreeSet, HashSet};
-fn remove_parents(
+fn remove_parents<S: std::hash::BuildHasher>(
graph: &impl Graph,
rev: Revision,
- set: &mut HashSet<Revision>,
+ set: &mut HashSet<Revision, S>,
) -> Result<(), GraphError> {
for parent in graph.parents(rev)?.iter() {
if *parent != NULL_REVISION {
@@ -65,9 +65,9 @@
///
/// # Performance notes
/// Internally, this function will store a full copy of `revs` in a `Vec`.
-pub fn retain_heads(
+pub fn retain_heads<S: std::hash::BuildHasher>(
graph: &impl Graph,
- revs: &mut HashSet<Revision>,
+ revs: &mut HashSet<Revision, S>,
) -> Result<(), GraphError> {
revs.remove(&NULL_REVISION);
// we need to construct an iterable copy of revs to avoid itering while
@@ -84,9 +84,9 @@
/// Roots of `revs`, passed as a `HashSet`
///
/// They are returned in arbitrary order
-pub fn roots<G: Graph>(
+pub fn roots<G: Graph, S: std::hash::BuildHasher>(
graph: &G,
- revs: &HashSet<Revision>,
+ revs: &HashSet<Revision, S>,
) -> Result<Vec<Revision>, GraphError> {
let mut roots: Vec<Revision> = Vec::new();
for rev in revs {
@@ -229,7 +229,8 @@
graph: &impl Graph,
revs: &[Revision],
) -> Result<Vec<Revision>, GraphError> {
- let mut as_vec = roots(graph, &revs.iter().cloned().collect())?;
+ let set: HashSet<_> = revs.iter().cloned().collect();
+ let mut as_vec = roots(graph, &set)?;
as_vec.sort();
Ok(as_vec)
}
--- a/rust/hg-core/src/dirstate/dirs_multiset.rs Tue Jun 23 16:07:18 2020 +0200
+++ b/rust/hg-core/src/dirstate/dirs_multiset.rs Thu Jun 25 10:32:51 2020 -0700
@@ -108,7 +108,7 @@
for subpath in files::find_dirs(path.as_ref()) {
match self.inner.entry(subpath.to_owned()) {
Entry::Occupied(mut entry) => {
- let val = entry.get().clone();
+ let val = *entry.get();
if val > 1 {
entry.insert(val - 1);
break;
@@ -137,6 +137,10 @@
pub fn len(&self) -> usize {
self.inner.len()
}
+
+ pub fn is_empty(&self) -> bool {
+ self.len() == 0
+ }
}
/// This is basically a reimplementation of `DirsMultiset` that stores the
@@ -156,7 +160,7 @@
let mut new = Self {
inner: HashMap::default(),
only_include: only_include
- .map(|s| s.iter().map(|p| p.as_ref()).collect()),
+ .map(|s| s.iter().map(AsRef::as_ref).collect()),
};
for path in paths {
--- a/rust/hg-core/src/dirstate/dirstate_map.rs Tue Jun 23 16:07:18 2020 +0200
+++ b/rust/hg-core/src/dirstate/dirstate_map.rs Thu Jun 25 10:32:51 2020 -0700
@@ -223,7 +223,7 @@
self.get_non_normal_other_parent_entries()
.0
.union(&other)
- .map(|e| e.to_owned())
+ .map(ToOwned::to_owned)
.collect()
}
--- a/rust/hg-core/src/dirstate/parsers.rs Tue Jun 23 16:07:18 2020 +0200
+++ b/rust/hg-core/src/dirstate/parsers.rs Thu Jun 25 10:32:51 2020 -0700
@@ -135,7 +135,7 @@
}
let mut new_filename = new_filename.into_vec();
if let Some(copy) = copy_map.get(filename) {
- new_filename.push('\0' as u8);
+ new_filename.push(b'\0');
new_filename.extend(copy.bytes());
}
--- a/rust/hg-core/src/dirstate/status.rs Tue Jun 23 16:07:18 2020 +0200
+++ b/rust/hg-core/src/dirstate/status.rs Thu Jun 25 10:32:51 2020 -0700
@@ -127,7 +127,7 @@
if skip_dot_hg && filename.as_bytes() == b".hg" && file_type.is_dir() {
return Ok(vec![]);
} else {
- results.push((HgPathBuf::from(filename), entry))
+ results.push((filename, entry))
}
}
@@ -164,14 +164,15 @@
(mode ^ st_mode as i32) & 0o100 != 0o000 && options.check_exec;
let metadata_changed = size >= 0 && (size_changed || mode_changed);
let other_parent = size == SIZE_FROM_OTHER_PARENT;
+
if metadata_changed
|| other_parent
|| copy_map.contains_key(filename.as_ref())
{
Dispatch::Modified
- } else if mod_compare(mtime, st_mtime as i32) {
- Dispatch::Unsure
- } else if st_mtime == options.last_normal_time {
+ } else if mod_compare(mtime, st_mtime as i32)
+ || st_mtime == options.last_normal_time
+ {
// the file may have just been marked as normal and
// it may have changed in the same second without
// changing its size. This can happen if we quickly
@@ -221,13 +222,14 @@
dmap: &'a DirstateMap,
root_dir: impl AsRef<Path> + Sync + Send + 'a,
options: StatusOptions,
+ traversed_sender: crossbeam::Sender<HgPathBuf>,
) -> impl ParallelIterator<Item = IoResult<(&'a HgPath, Dispatch)>> {
files
.unwrap_or(&DEFAULT_WORK)
.par_iter()
- .map(move |filename| {
+ .map(move |&filename| {
// TODO normalization
- let normalized = filename.as_ref();
+ let normalized = filename;
let buf = match hg_path_to_path_buf(normalized) {
Ok(x) => x,
@@ -253,28 +255,31 @@
)));
}
Some(Ok((normalized, Dispatch::Unknown)))
+ } else if file_type.is_dir() {
+ if options.collect_traversed_dirs {
+ traversed_sender
+ .send(normalized.to_owned())
+ .expect("receiver should outlive sender");
+ }
+ Some(Ok((
+ normalized,
+ Dispatch::Directory {
+ was_file: in_dmap.is_some(),
+ },
+ )))
} else {
- if file_type.is_dir() {
- Some(Ok((
- normalized,
- Dispatch::Directory {
- was_file: in_dmap.is_some(),
- },
- )))
- } else {
- Some(Ok((
- normalized,
- Dispatch::Bad(BadMatch::BadType(
- // TODO do more than unknown
- // Support for all `BadType` variant
- // varies greatly between platforms.
- // So far, no tests check the type and
- // this should be good enough for most
- // users.
- BadType::Unknown,
- )),
- )))
- }
+ Some(Ok((
+ normalized,
+ Dispatch::Bad(BadMatch::BadType(
+ // TODO do more than unknown
+ // Support for all `BadType` variant
+ // varies greatly between platforms.
+ // So far, no tests check the type and
+ // this should be good enough for most
+ // users.
+ BadType::Unknown,
+ )),
+ )))
};
}
Err(_) => {
@@ -302,6 +307,9 @@
pub list_clean: bool,
pub list_unknown: bool,
pub list_ignored: bool,
+ /// Whether to collect traversed dirs for applying a callback later.
+ /// Used by `hg purge` for example.
+ pub collect_traversed_dirs: bool,
}
/// Dispatch a single entry (file, folder, symlink...) found during `traverse`.
@@ -319,6 +327,7 @@
options: StatusOptions,
filename: HgPathBuf,
dir_entry: DirEntry,
+ traversed_sender: crossbeam::Sender<HgPathBuf>,
) -> IoResult<()> {
let file_type = dir_entry.file_type()?;
let entry_option = dmap.get(&filename);
@@ -341,6 +350,7 @@
options,
entry_option,
filename,
+ traversed_sender,
);
} else if file_type.is_file() || file_type.is_symlink() {
if let Some(entry) = entry_option {
@@ -370,7 +380,7 @@
.send(Ok((filename.to_owned(), Dispatch::Ignored)))
.unwrap();
}
- } else {
+ } else if options.list_unknown {
files_sender
.send(Ok((filename.to_owned(), Dispatch::Unknown)))
.unwrap();
@@ -405,6 +415,7 @@
options: StatusOptions,
entry_option: Option<&'a DirstateEntry>,
directory: HgPathBuf,
+ traversed_sender: crossbeam::Sender<HgPathBuf>,
) {
scope.spawn(move |_| {
// Nested `if` until `rust-lang/rust#53668` is stable
@@ -431,6 +442,7 @@
ignore_fn,
dir_ignore_fn,
options,
+ traversed_sender,
)
.unwrap_or_else(|e| files_sender.send(Err(e)).unwrap())
}
@@ -449,9 +461,16 @@
ignore_fn: &IgnoreFnType,
dir_ignore_fn: &IgnoreFnType,
options: StatusOptions,
+ traversed_sender: crossbeam::Sender<HgPathBuf>,
) -> IoResult<()> {
let directory = directory.as_ref();
+ if options.collect_traversed_dirs {
+ traversed_sender
+ .send(directory.to_owned())
+ .expect("receiver should outlive sender");
+ }
+
let visit_entries = match matcher.visit_children_set(directory) {
VisitChildrenSet::Empty => return Ok(()),
VisitChildrenSet::This | VisitChildrenSet::Recursive => None,
@@ -508,6 +527,7 @@
options,
filename,
dir_entry,
+ traversed_sender.clone(),
)?;
}
}
@@ -531,11 +551,12 @@
dir_ignore_fn: &IgnoreFnType,
options: StatusOptions,
results: &mut Vec<(Cow<'a, HgPath>, Dispatch)>,
+ traversed_sender: crossbeam::Sender<HgPathBuf>,
) -> IoResult<()> {
let root_dir = root_dir.as_ref();
// The traversal is done in parallel, so use a channel to gather entries.
- // `crossbeam::Sender` is `Send`, while `mpsc::Sender` is not.
+ // `crossbeam::Sender` is `Sync`, while `mpsc::Sender` is not.
let (files_transmitter, files_receiver) = crossbeam::channel::unbounded();
traverse_dir(
@@ -548,6 +569,7 @@
&ignore_fn,
&dir_ignore_fn,
options,
+ traversed_sender,
)?;
// Disconnect the channel so the receiver stops waiting
@@ -638,11 +660,14 @@
pub ignored: Vec<Cow<'a, HgPath>>,
pub unknown: Vec<Cow<'a, HgPath>>,
pub bad: Vec<(Cow<'a, HgPath>, BadMatch)>,
+ /// Only filled if `collect_traversed_dirs` is `true`
+ pub traversed: Vec<HgPathBuf>,
}
#[timed]
fn build_response<'a>(
results: impl IntoIterator<Item = (Cow<'a, HgPath>, Dispatch)>,
+ traversed: Vec<HgPathBuf>,
) -> (Vec<Cow<'a, HgPath>>, DirstateStatus<'a>) {
let mut lookup = vec![];
let mut modified = vec![];
@@ -681,6 +706,7 @@
ignored,
unknown,
bad,
+ traversed,
},
)
}
@@ -817,7 +843,7 @@
Vec<PatternFileWarning>,
)> {
// Needs to outlive `dir_ignore_fn` since it's captured.
- let mut ignore_fn: IgnoreFnType;
+ let ignore_fn: IgnoreFnType;
// Only involve real ignore mechanism if we're listing unknowns or ignored.
let (dir_ignore_fn, warnings): (IgnoreFnType, _) = if options.list_ignored
@@ -847,8 +873,17 @@
let files = matcher.file_set();
+ // `crossbeam::Sender` is `Sync`, while `mpsc::Sender` is not.
+ let (traversed_sender, traversed_recv) = crossbeam::channel::unbounded();
+
// Step 1: check the files explicitly mentioned by the user
- let explicit = walk_explicit(files, &dmap, root_dir, options);
+ let explicit = walk_explicit(
+ files,
+ &dmap,
+ root_dir,
+ options,
+ traversed_sender.clone(),
+ );
// Collect results into a `Vec` because we do very few lookups in most
// cases.
@@ -886,6 +921,7 @@
&dir_ignore_fn,
options,
&mut results,
+ traversed_sender.clone(),
)?;
}
}
@@ -909,5 +945,9 @@
}
}
- Ok((build_response(results), warnings))
+ // Close the channel
+ drop(traversed_sender);
+ let traversed_dirs = traversed_recv.into_iter().collect();
+
+ Ok((build_response(results, traversed_dirs), warnings))
}
--- a/rust/hg-core/src/discovery.rs Tue Jun 23 16:07:18 2020 +0200
+++ b/rust/hg-core/src/discovery.rs Thu Jun 25 10:32:51 2020 -0700
@@ -181,8 +181,8 @@
common: MissingAncestors::new(graph, vec![]),
missing: HashSet::new(),
rng: Rng::from_seed(seed),
- respect_size: respect_size,
- randomize: randomize,
+ respect_size,
+ randomize,
}
}
@@ -284,7 +284,7 @@
/// Did we acquire full knowledge of our Revisions that the peer has?
pub fn is_complete(&self) -> bool {
- self.undecided.as_ref().map_or(false, |s| s.is_empty())
+ self.undecided.as_ref().map_or(false, HashSet::is_empty)
}
/// Return the heads of the currently known common set of revisions.
@@ -332,7 +332,7 @@
FastHashMap::default();
for &rev in self.undecided.as_ref().unwrap() {
for p in ParentsIterator::graph_parents(&self.graph, rev)? {
- children.entry(p).or_insert_with(|| Vec::new()).push(rev);
+ children.entry(p).or_insert_with(Vec::new).push(rev);
}
}
self.children_cache = Some(children);
@@ -342,7 +342,7 @@
/// Provide statistics about the current state of the discovery process
pub fn stats(&self) -> DiscoveryStats {
DiscoveryStats {
- undecided: self.undecided.as_ref().map(|s| s.len()),
+ undecided: self.undecided.as_ref().map(HashSet::len),
}
}
--- a/rust/hg-core/src/filepatterns.rs Tue Jun 23 16:07:18 2020 +0200
+++ b/rust/hg-core/src/filepatterns.rs Thu Jun 25 10:32:51 2020 -0700
@@ -176,14 +176,15 @@
return vec![];
}
match syntax {
- // The `regex` crate adds `.*` to the start and end of expressions
- // if there are no anchors, so add them.
- PatternSyntax::Regexp => [b"^", &pattern[..], b"$"].concat(),
+ PatternSyntax::Regexp => pattern.to_owned(),
PatternSyntax::RelRegexp => {
// The `regex` crate accepts `**` while `re2` and Python's `re`
// do not. Checking for `*` correctly triggers the same error all
// engines.
- if pattern[0] == b'^' || pattern[0] == b'*' {
+ if pattern[0] == b'^'
+ || pattern[0] == b'*'
+ || pattern.starts_with(b".*")
+ {
return pattern.to_owned();
}
[&b".*"[..], pattern].concat()
@@ -196,15 +197,14 @@
}
PatternSyntax::RootFiles => {
let mut res = if pattern == b"." {
- vec![b'^']
+ vec![]
} else {
// Pattern is a directory name.
- [b"^", escape_pattern(pattern).as_slice(), b"/"].concat()
+ [escape_pattern(pattern).as_slice(), b"/"].concat()
};
// Anything after the pattern must be a non-directory.
res.extend(b"[^/]+$");
- res.push(b'$');
res
}
PatternSyntax::RelGlob => {
@@ -216,7 +216,7 @@
}
}
PatternSyntax::Glob | PatternSyntax::RootGlob => {
- [b"^", glob_to_re(pattern).as_slice(), GLOB_SUFFIX].concat()
+ [glob_to_re(pattern).as_slice(), GLOB_SUFFIX].concat()
}
PatternSyntax::Include | PatternSyntax::SubInclude => unreachable!(),
}
@@ -271,7 +271,7 @@
/// that don't need to be transformed into a regex.
pub fn build_single_regex(
entry: &IgnorePattern,
-) -> Result<Vec<u8>, PatternError> {
+) -> Result<Option<Vec<u8>>, PatternError> {
let IgnorePattern {
pattern, syntax, ..
} = entry;
@@ -288,16 +288,11 @@
if *syntax == PatternSyntax::RootGlob
&& !pattern.iter().any(|b| GLOB_SPECIAL_CHARACTERS.contains(b))
{
- // The `regex` crate adds `.*` to the start and end of expressions
- // if there are no anchors, so add the start anchor.
- let mut escaped = vec![b'^'];
- escaped.extend(escape_pattern(&pattern));
- escaped.extend(GLOB_SUFFIX);
- Ok(escaped)
+ Ok(None)
} else {
let mut entry = entry.clone();
entry.pattern = pattern;
- Ok(_build_single_regex(&entry))
+ Ok(Some(_build_single_regex(&entry)))
}
}
@@ -329,6 +324,8 @@
warn: bool,
) -> Result<(Vec<IgnorePattern>, Vec<PatternFileWarning>), PatternError> {
let comment_regex = Regex::new(r"((?:^|[^\\])(?:\\\\)*)#.*").unwrap();
+
+ #[allow(clippy::trivial_regex)]
let comment_escape_regex = Regex::new(r"\\#").unwrap();
let mut inputs: Vec<IgnorePattern> = vec![];
let mut warnings: Vec<PatternFileWarning> = vec![];
@@ -463,9 +460,7 @@
.into_iter()
.flat_map(|entry| -> PatternResult<_> {
let IgnorePattern {
- syntax,
- pattern,
- source: _,
+ syntax, pattern, ..
} = &entry;
Ok(match syntax {
PatternSyntax::Include => {
@@ -509,10 +504,11 @@
normalize_path_bytes(&get_bytes_from_path(source));
let source_root = get_path_from_bytes(&normalized_source);
- let source_root = source_root.parent().unwrap_or(source_root.deref());
+ let source_root =
+ source_root.parent().unwrap_or_else(|| source_root.deref());
let path = source_root.join(get_path_from_bytes(pattern));
- let new_root = path.parent().unwrap_or(path.deref());
+ let new_root = path.parent().unwrap_or_else(|| path.deref());
let prefix = canonical_path(&root_dir, &root_dir, new_root)?;
@@ -628,7 +624,16 @@
Path::new("")
))
.unwrap(),
- br"(?:.*/)?rust/target(?:/|$)".to_vec(),
+ Some(br"(?:.*/)?rust/target(?:/|$)".to_vec()),
+ );
+ assert_eq!(
+ build_single_regex(&IgnorePattern::new(
+ PatternSyntax::Regexp,
+ br"rust/target/\d+",
+ Path::new("")
+ ))
+ .unwrap(),
+ Some(br"rust/target/\d+".to_vec()),
);
}
@@ -641,7 +646,7 @@
Path::new("")
))
.unwrap(),
- br"^\.(?:/|$)".to_vec(),
+ None,
);
assert_eq!(
build_single_regex(&IgnorePattern::new(
@@ -650,7 +655,7 @@
Path::new("")
))
.unwrap(),
- br"^whatever(?:/|$)".to_vec(),
+ None,
);
assert_eq!(
build_single_regex(&IgnorePattern::new(
@@ -659,7 +664,7 @@
Path::new("")
))
.unwrap(),
- br"^[^/]*\.o(?:/|$)".to_vec(),
+ Some(br"[^/]*\.o(?:/|$)".to_vec()),
);
}
}
--- a/rust/hg-core/src/lib.rs Tue Jun 23 16:07:18 2020 +0200
+++ b/rust/hg-core/src/lib.rs Thu Jun 25 10:32:51 2020 -0700
@@ -23,8 +23,7 @@
pub mod matchers;
pub mod revlog;
pub use revlog::*;
-#[cfg(feature = "with-re2")]
-pub mod re2;
+pub mod operations;
pub mod utils;
// Remove this to see (potential) non-artificial compile failures. MacOS
@@ -141,9 +140,6 @@
/// Needed a pattern that can be turned into a regex but got one that
/// can't. This should only happen through programmer error.
NonRegexPattern(IgnorePattern),
- /// This is temporary, see `re2/mod.rs`.
- /// This will cause a fallback to Python.
- Re2NotInstalled,
}
impl ToString for PatternError {
@@ -166,10 +162,6 @@
PatternError::NonRegexPattern(pattern) => {
format!("'{:?}' cannot be turned into a regex", pattern)
}
- PatternError::Re2NotInstalled => {
- "Re2 is not installed, cannot use regex functionality."
- .to_string()
- }
}
}
}
--- a/rust/hg-core/src/matchers.rs Tue Jun 23 16:07:18 2020 +0200
+++ b/rust/hg-core/src/matchers.rs Thu Jun 25 10:32:51 2020 -0700
@@ -7,8 +7,6 @@
//! Structs and types for matching files and directories.
-#[cfg(feature = "with-re2")]
-use crate::re2::Re2;
use crate::{
dirstate::dirs_multiset::DirsChildrenMultiset,
filepatterns::{
@@ -24,6 +22,7 @@
PatternSyntax,
};
+use crate::filepatterns::normalize_path_bytes;
use std::borrow::ToOwned;
use std::collections::HashSet;
use std::fmt::{Display, Error, Formatter};
@@ -31,6 +30,8 @@
use std::ops::Deref;
use std::path::{Path, PathBuf};
+use micro_timer::timed;
+
#[derive(Debug, PartialEq)]
pub enum VisitChildrenSet<'a> {
/// Don't visit anything
@@ -163,7 +164,7 @@
files: &'a [impl AsRef<HgPath>],
) -> Result<Self, DirstateMapError> {
Ok(Self {
- files: HashSet::from_iter(files.iter().map(|f| f.as_ref())),
+ files: HashSet::from_iter(files.iter().map(AsRef::as_ref)),
dirs: DirsMultiset::from_manifest(files)?,
})
}
@@ -189,10 +190,10 @@
if self.files.is_empty() || !self.dirs.contains(&directory) {
return VisitChildrenSet::Empty;
}
- let dirs_as_set = self.dirs.iter().map(|k| k.deref()).collect();
+ let dirs_as_set = self.dirs.iter().map(Deref::deref).collect();
let mut candidates: HashSet<&HgPath> =
- self.files.union(&dirs_as_set).map(|k| *k).collect();
+ self.files.union(&dirs_as_set).cloned().collect();
candidates.remove(HgPath::new(b""));
if !directory.as_ref().is_empty() {
@@ -236,29 +237,24 @@
}
/// Matches files that are included in the ignore rules.
-#[cfg_attr(
- feature = "with-re2",
- doc = r##"
-```
-use hg::{
- matchers::{IncludeMatcher, Matcher},
- IgnorePattern,
- PatternSyntax,
- utils::hg_path::HgPath
-};
-use std::path::Path;
-///
-let ignore_patterns =
-vec![IgnorePattern::new(PatternSyntax::RootGlob, b"this*", Path::new(""))];
-let (matcher, _) = IncludeMatcher::new(ignore_patterns, "").unwrap();
-///
-assert_eq!(matcher.matches(HgPath::new(b"testing")), false);
-assert_eq!(matcher.matches(HgPath::new(b"this should work")), true);
-assert_eq!(matcher.matches(HgPath::new(b"this also")), true);
-assert_eq!(matcher.matches(HgPath::new(b"but not this")), false);
-```
-"##
-)]
+/// ```
+/// use hg::{
+/// matchers::{IncludeMatcher, Matcher},
+/// IgnorePattern,
+/// PatternSyntax,
+/// utils::hg_path::HgPath
+/// };
+/// use std::path::Path;
+/// ///
+/// let ignore_patterns =
+/// vec![IgnorePattern::new(PatternSyntax::RootGlob, b"this*", Path::new(""))];
+/// let (matcher, _) = IncludeMatcher::new(ignore_patterns, "").unwrap();
+/// ///
+/// assert_eq!(matcher.matches(HgPath::new(b"testing")), false);
+/// assert_eq!(matcher.matches(HgPath::new(b"this should work")), true);
+/// assert_eq!(matcher.matches(HgPath::new(b"this also")), true);
+/// assert_eq!(matcher.matches(HgPath::new(b"but not this")), false);
+/// ```
pub struct IncludeMatcher<'a> {
patterns: Vec<u8>,
match_fn: Box<dyn for<'r> Fn(&'r HgPath) -> bool + 'a + Sync>,
@@ -316,33 +312,21 @@
}
}
-#[cfg(feature = "with-re2")]
-/// Returns a function that matches an `HgPath` against the given regex
-/// pattern.
-///
-/// This can fail when the pattern is invalid or not supported by the
-/// underlying engine `Re2`, for instance anything with back-references.
-fn re_matcher(
- pattern: &[u8],
-) -> PatternResult<impl Fn(&HgPath) -> bool + Sync> {
- let regex = Re2::new(pattern);
- let regex = regex.map_err(|e| PatternError::UnsupportedSyntax(e))?;
- Ok(move |path: &HgPath| regex.is_match(path.as_bytes()))
-}
-
-#[cfg(not(feature = "with-re2"))]
/// Returns a function that matches an `HgPath` against the given regex
/// pattern.
///
/// This can fail when the pattern is invalid or not supported by the
/// underlying engine (the `regex` crate), for instance anything with
/// back-references.
+#[timed]
fn re_matcher(
pattern: &[u8],
) -> PatternResult<impl Fn(&HgPath) -> bool + Sync> {
use std::io::Write;
- let mut escaped_bytes = vec![];
+ // The `regex` crate adds `.*` to the start and end of expressions if there
+ // are no anchors, so add the start anchor.
+ let mut escaped_bytes = vec![b'^', b'(', b'?', b':'];
for byte in pattern {
if *byte > 127 {
write!(escaped_bytes, "\\x{:x}", *byte).unwrap();
@@ -350,6 +334,7 @@
escaped_bytes.push(*byte);
}
}
+ escaped_bytes.push(b')');
// Avoid the cost of UTF8 checking
//
@@ -373,15 +358,32 @@
fn build_regex_match<'a>(
ignore_patterns: &'a [&'a IgnorePattern],
) -> PatternResult<(Vec<u8>, Box<dyn Fn(&HgPath) -> bool + Sync>)> {
- let regexps: Result<Vec<_>, PatternError> = ignore_patterns
- .into_iter()
- .map(|k| build_single_regex(*k))
- .collect();
- let regexps = regexps?;
+ let mut regexps = vec![];
+ let mut exact_set = HashSet::new();
+
+ for pattern in ignore_patterns {
+ if let Some(re) = build_single_regex(pattern)? {
+ regexps.push(re);
+ } else {
+ let exact = normalize_path_bytes(&pattern.pattern);
+ exact_set.insert(HgPathBuf::from_bytes(&exact));
+ }
+ }
+
let full_regex = regexps.join(&b'|');
- let matcher = re_matcher(&full_regex)?;
- let func = Box::new(move |filename: &HgPath| matcher(filename));
+ // An empty pattern would cause the regex engine to incorrectly match the
+ // (empty) root directory
+ let func = if !(regexps.is_empty()) {
+ let matcher = re_matcher(&full_regex)?;
+ let func = move |filename: &HgPath| {
+ exact_set.contains(filename) || matcher(filename)
+ };
+ Box::new(func) as Box<dyn Fn(&HgPath) -> bool + Sync>
+ } else {
+ let func = move |filename: &HgPath| exact_set.contains(filename);
+ Box::new(func) as Box<dyn Fn(&HgPath) -> bool + Sync>
+ };
Ok((full_regex, func))
}
@@ -468,7 +470,7 @@
_ => unreachable!(),
})?
.iter()
- .map(|k| k.to_owned()),
+ .map(ToOwned::to_owned),
);
parents.extend(
DirsMultiset::from_manifest(&roots)
@@ -477,7 +479,7 @@
_ => unreachable!(),
})?
.iter()
- .map(|k| k.to_owned()),
+ .map(ToOwned::to_owned),
);
Ok(RootsDirsAndParents {
@@ -521,7 +523,7 @@
let match_subinclude = move |filename: &HgPath| {
for prefix in prefixes.iter() {
if let Some(rel) = filename.relative_to(prefix) {
- if (submatchers.get(prefix).unwrap())(rel) {
+ if (submatchers[prefix])(rel) {
return true;
}
}
@@ -652,6 +654,12 @@
impl<'a> Display for IncludeMatcher<'a> {
fn fmt(&self, f: &mut Formatter<'_>) -> Result<(), Error> {
+ // XXX What about exact matches?
+ // I'm not sure it's worth it to clone the HashSet and keep it
+ // around just in case someone wants to display the matcher, plus
+ // it's going to be unreadable after a few entries, but we need to
+ // inform in this display that exact matches are being used and are
+ // (on purpose) missing from the `includes`.
write!(
f,
"IncludeMatcher(includes='{}')",
@@ -813,7 +821,6 @@
);
}
- #[cfg(feature = "with-re2")]
#[test]
fn test_includematcher() {
// VisitchildrensetPrefix
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/rust/hg-core/src/operations/find_root.rs Thu Jun 25 10:32:51 2020 -0700
@@ -0,0 +1,124 @@
+use super::Operation;
+use std::fmt;
+use std::path::{Path, PathBuf};
+
+/// Kind of error encountered by FindRoot
+#[derive(Debug)]
+pub enum FindRootErrorKind {
+ /// Root of the repository has not been found
+ /// Contains the current directory used by FindRoot
+ RootNotFound(PathBuf),
+ /// The current directory does not exist or permissions are insufficient
+ /// to get access to it
+ GetCurrentDirError(std::io::Error),
+}
+
+/// A FindRoot error
+#[derive(Debug)]
+pub struct FindRootError {
+ /// Kind of error encountered by FindRoot
+ pub kind: FindRootErrorKind,
+}
+
+impl std::error::Error for FindRootError {}
+
+impl fmt::Display for FindRootError {
+ fn fmt(&self, _f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ unimplemented!()
+ }
+}
+
+/// Find the root of the repository
+/// by searching for a .hg directory in the current directory and its
+/// ancestors
+pub struct FindRoot<'a> {
+ current_dir: Option<&'a Path>,
+}
+
+impl<'a> FindRoot<'a> {
+ pub fn new() -> Self {
+ Self { current_dir: None }
+ }
+
+ pub fn new_from_path(current_dir: &'a Path) -> Self {
+ Self {
+ current_dir: Some(current_dir),
+ }
+ }
+}
+
+impl<'a> Operation<PathBuf> for FindRoot<'a> {
+ type Error = FindRootError;
+
+ fn run(&self) -> Result<PathBuf, Self::Error> {
+ let current_dir = match self.current_dir {
+ None => std::env::current_dir().or_else(|e| {
+ Err(FindRootError {
+ kind: FindRootErrorKind::GetCurrentDirError(e),
+ })
+ })?,
+ Some(path) => path.into(),
+ };
+
+ if current_dir.join(".hg").exists() {
+ return Ok(current_dir.into());
+ }
+ let mut ancestors = current_dir.ancestors();
+ while let Some(parent) = ancestors.next() {
+ if parent.join(".hg").exists() {
+ return Ok(parent.into());
+ }
+ }
+ Err(FindRootError {
+ kind: FindRootErrorKind::RootNotFound(current_dir.to_path_buf()),
+ })
+ }
+}
+
+#[cfg(test)]
+mod tests {
+ use super::*;
+ use std::fs;
+ use tempfile;
+
+ #[test]
+ fn dot_hg_not_found() {
+ let tmp_dir = tempfile::tempdir().unwrap();
+ let path = tmp_dir.path();
+
+ let err = FindRoot::new_from_path(&path).run().unwrap_err();
+
+ // TODO do something better
+ assert!(match err {
+ FindRootError { kind } => match kind {
+ FindRootErrorKind::RootNotFound(p) => p == path.to_path_buf(),
+ _ => false,
+ },
+ })
+ }
+
+ #[test]
+ fn dot_hg_in_current_path() {
+ let tmp_dir = tempfile::tempdir().unwrap();
+ let root = tmp_dir.path();
+ fs::create_dir_all(root.join(".hg")).unwrap();
+
+ let result = FindRoot::new_from_path(&root).run().unwrap();
+
+ assert_eq!(result, root)
+ }
+
+ #[test]
+ fn dot_hg_in_parent() {
+ let tmp_dir = tempfile::tempdir().unwrap();
+ let root = tmp_dir.path();
+ fs::create_dir_all(root.join(".hg")).unwrap();
+
+ let result =
+ FindRoot::new_from_path(&root.join("some/nested/directory"))
+ .run()
+ .unwrap();
+
+ assert_eq!(result, root)
+ }
+} /* tests */
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/rust/hg-core/src/operations/mod.rs Thu Jun 25 10:32:51 2020 -0700
@@ -0,0 +1,12 @@
+mod find_root;
+pub use find_root::{FindRoot, FindRootError, FindRootErrorKind};
+
+/// An interface for high-level hg operations.
+///
+/// A distinction is made between operation and commands.
+/// An operation is what can be done whereas a command is what is exposed by
+/// the cli. A single command can use several operations to achieve its goal.
+pub trait Operation<T> {
+ type Error;
+ fn run(&self) -> Result<T, Self::Error>;
+}
--- a/rust/hg-core/src/re2/mod.rs Tue Jun 23 16:07:18 2020 +0200
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,21 +0,0 @@
-/// re2 module
-///
-/// The Python implementation of Mercurial uses the Re2 regex engine when
-/// possible and if the bindings are installed, falling back to Python's `re`
-/// in case of unsupported syntax (Re2 is a non-backtracking engine).
-///
-/// Using it from Rust is not ideal. We need C++ bindings, a C++ compiler,
-/// Re2 needs to be installed... why not just use the `regex` crate?
-///
-/// Using Re2 from the Rust implementation guarantees backwards compatibility.
-/// We know it will work out of the box without needing to figure out the
-/// subtle differences in syntax. For example, `regex` currently does not
-/// support empty alternations (regex like `a||b`) which happens more often
-/// than we might think. Old benchmarks also showed worse performance from
-/// regex than with Re2, but the methodology and results were lost, so take
-/// this with a grain of salt.
-///
-/// The idea is to use Re2 for now as a temporary phase and then investigate
-/// how much work would be needed to use `regex`.
-mod re2;
-pub use re2::Re2;
--- a/rust/hg-core/src/re2/re2.rs Tue Jun 23 16:07:18 2020 +0200
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,66 +0,0 @@
-/*
-re2.rs
-
-Rust FFI bindings to Re2.
-
-Copyright 2020 Valentin Gatien-Baron
-
-This software may be used and distributed according to the terms of the
-GNU General Public License version 2 or any later version.
-*/
-use libc::{c_int, c_void};
-
-type Re2Ptr = *const c_void;
-
-pub struct Re2(Re2Ptr);
-
-/// `re2.h` says:
-/// "An "RE2" object is safe for concurrent use by multiple threads."
-unsafe impl Sync for Re2 {}
-
-/// These bind to the C ABI in `rust_re2.cpp`.
-extern "C" {
- fn rust_re2_create(data: *const u8, len: usize) -> Re2Ptr;
- fn rust_re2_destroy(re2: Re2Ptr);
- fn rust_re2_ok(re2: Re2Ptr) -> bool;
- fn rust_re2_error(
- re2: Re2Ptr,
- outdata: *mut *const u8,
- outlen: *mut usize,
- ) -> bool;
- fn rust_re2_match(
- re2: Re2Ptr,
- data: *const u8,
- len: usize,
- anchor: c_int,
- ) -> bool;
-}
-
-impl Re2 {
- pub fn new(pattern: &[u8]) -> Result<Re2, String> {
- unsafe {
- let re2 = rust_re2_create(pattern.as_ptr(), pattern.len());
- if rust_re2_ok(re2) {
- Ok(Re2(re2))
- } else {
- let mut data: *const u8 = std::ptr::null();
- let mut len: usize = 0;
- rust_re2_error(re2, &mut data, &mut len);
- Err(String::from_utf8_lossy(std::slice::from_raw_parts(
- data, len,
- ))
- .to_string())
- }
- }
- }
-
- pub fn is_match(&self, data: &[u8]) -> bool {
- unsafe { rust_re2_match(self.0, data.as_ptr(), data.len(), 1) }
- }
-}
-
-impl Drop for Re2 {
- fn drop(&mut self) {
- unsafe { rust_re2_destroy(self.0) }
- }
-}
--- a/rust/hg-core/src/re2/rust_re2.cpp Tue Jun 23 16:07:18 2020 +0200
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,49 +0,0 @@
-/*
-rust_re2.cpp
-
-C ABI export of Re2's C++ interface for Rust FFI.
-
-Copyright 2020 Valentin Gatien-Baron
-
-This software may be used and distributed according to the terms of the
-GNU General Public License version 2 or any later version.
-*/
-
-#include <re2/re2.h>
-using namespace re2;
-
-extern "C" {
- RE2* rust_re2_create(const char* data, size_t len) {
- RE2::Options o;
- o.set_encoding(RE2::Options::Encoding::EncodingLatin1);
- o.set_log_errors(false);
- o.set_max_mem(50000000);
-
- return new RE2(StringPiece(data, len), o);
- }
-
- void rust_re2_destroy(RE2* re) {
- delete re;
- }
-
- bool rust_re2_ok(RE2* re) {
- return re->ok();
- }
-
- void rust_re2_error(RE2* re, const char** outdata, size_t* outlen) {
- const std::string& e = re->error();
- *outdata = e.data();
- *outlen = e.length();
- }
-
- bool rust_re2_match(RE2* re, char* data, size_t len, int ianchor) {
- const StringPiece sp = StringPiece(data, len);
-
- RE2::Anchor anchor =
- ianchor == 0 ? RE2::Anchor::UNANCHORED :
- (ianchor == 1 ? RE2::Anchor::ANCHOR_START :
- RE2::Anchor::ANCHOR_BOTH);
-
- return re->Match(sp, 0, len, anchor, NULL, 0);
- }
-}
--- a/rust/hg-core/src/revlog.rs Tue Jun 23 16:07:18 2020 +0200
+++ b/rust/hg-core/src/revlog.rs Thu Jun 25 10:32:51 2020 -0700
@@ -25,6 +25,7 @@
///
/// This is also equal to `i32::max_value()`, but it's better to spell
/// it out explicitely, same as in `mercurial.node`
+#[allow(clippy::unreadable_literal)]
pub const WORKING_DIRECTORY_REVISION: Revision = 0x7fffffff;
/// The simplest expression of what we need of Mercurial DAGs.
@@ -49,6 +50,10 @@
/// Total number of Revisions referenced in this index
fn len(&self) -> usize;
+ fn is_empty(&self) -> bool {
+ self.len() == 0
+ }
+
/// Return a reference to the Node or `None` if rev is out of bounds
///
/// `NULL_REVISION` is not considered to be out of bounds.
--- a/rust/hg-core/src/revlog/node.rs Tue Jun 23 16:07:18 2020 +0200
+++ b/rust/hg-core/src/revlog/node.rs Thu Jun 25 10:32:51 2020 -0700
@@ -208,6 +208,10 @@
}
}
+ pub fn is_empty(&self) -> bool {
+ self.len() == 0
+ }
+
pub fn is_prefix_of(&self, node: &Node) -> bool {
if self.is_odd {
let buf = self.buf;
@@ -242,13 +246,13 @@
} else {
buf.len()
};
- for i in 0..until {
- if buf[i] != node.data[i] {
- if buf[i] & 0xf0 == node.data[i] & 0xf0 {
- return Some(2 * i + 1);
+ for (i, item) in buf.iter().enumerate().take(until) {
+ if *item != node.data[i] {
+ return if *item & 0xf0 == node.data[i] & 0xf0 {
+ Some(2 * i + 1)
} else {
- return Some(2 * i);
- }
+ Some(2 * i)
+ };
}
}
if self.is_odd && buf[until] & 0xf0 != node.data[until] & 0xf0 {
--- a/rust/hg-core/src/revlog/nodemap.rs Tue Jun 23 16:07:18 2020 +0200
+++ b/rust/hg-core/src/revlog/nodemap.rs Thu Jun 25 10:32:51 2020 -0700
@@ -218,7 +218,7 @@
/// Not derivable for arrays of length >32 until const generics are stable
impl PartialEq for Block {
fn eq(&self, other: &Self) -> bool {
- &self.0[..] == &other.0[..]
+ self.0[..] == other.0[..]
}
}
@@ -343,14 +343,11 @@
///
/// We keep `readonly` and clone its root block if it isn't empty.
fn new(readonly: Box<dyn Deref<Target = [Block]> + Send>) -> Self {
- let root = readonly
- .last()
- .map(|b| b.clone())
- .unwrap_or_else(|| Block::new());
+ let root = readonly.last().cloned().unwrap_or_else(Block::new);
NodeTree {
- readonly: readonly,
+ readonly,
growable: Vec::new(),
- root: root,
+ root,
masked_inner_blocks: 0,
}
}
@@ -461,7 +458,7 @@
) -> NodeTreeVisitor<'n, 'p> {
NodeTreeVisitor {
nt: self,
- prefix: prefix,
+ prefix,
visit: self.len() - 1,
nybble_idx: 0,
done: false,
@@ -486,8 +483,7 @@
let glen = self.growable.len();
if idx < ro_len {
self.masked_inner_blocks += 1;
- // TODO OPTIM I think this makes two copies
- self.growable.push(ro_blocks[idx].clone());
+ self.growable.push(ro_blocks[idx]);
(glen + ro_len, &mut self.growable[glen], glen + 1)
} else if glen + ro_len == idx {
(idx, &mut self.root, glen)
@@ -674,8 +670,8 @@
Some(NodeTreeVisitItem {
block_idx: visit,
- nybble: nybble,
- element: element,
+ nybble,
+ element,
})
}
}
--- a/rust/hg-core/src/utils.rs Tue Jun 23 16:07:18 2020 +0200
+++ b/rust/hg-core/src/utils.rs Thu Jun 25 10:32:51 2020 -0700
@@ -68,6 +68,7 @@
fn drop_prefix(&self, needle: &Self) -> Option<&Self>;
}
+#[allow(clippy::trivially_copy_pass_by_ref)]
fn is_not_whitespace(c: &u8) -> bool {
!(*c as char).is_whitespace()
}
@@ -75,7 +76,7 @@
impl SliceExt for [u8] {
fn trim_end(&self) -> &[u8] {
if let Some(last) = self.iter().rposition(is_not_whitespace) {
- &self[..last + 1]
+ &self[..=last]
} else {
&[]
}
@@ -151,7 +152,7 @@
impl<'a, T: Escaped> Escaped for &'a [T] {
fn escaped_bytes(&self) -> Vec<u8> {
- self.iter().flat_map(|item| item.escaped_bytes()).collect()
+ self.iter().flat_map(Escaped::escaped_bytes).collect()
}
}
--- a/rust/hg-core/src/utils/files.rs Tue Jun 23 16:07:18 2020 +0200
+++ b/rust/hg-core/src/utils/files.rs Thu Jun 25 10:32:51 2020 -0700
@@ -98,7 +98,7 @@
///
/// The path itself isn't included unless it is b"" (meaning the root
/// directory.)
-pub fn find_dirs<'a>(path: &'a HgPath) -> Ancestors<'a> {
+pub fn find_dirs(path: &HgPath) -> Ancestors {
let mut dirs = Ancestors { next: Some(path) };
if !path.is_empty() {
dirs.next(); // skip itself
@@ -113,9 +113,7 @@
///
/// The path itself isn't included unless it is b"" (meaning the root
/// directory.)
-pub(crate) fn find_dirs_with_base<'a>(
- path: &'a HgPath,
-) -> AncestorsWithBase<'a> {
+pub(crate) fn find_dirs_with_base(path: &HgPath) -> AncestorsWithBase {
let mut dirs = AncestorsWithBase {
next: Some((path, HgPath::new(b""))),
};
@@ -214,9 +212,9 @@
if name != root && name.starts_with(&root) {
let name = name.strip_prefix(&root).unwrap();
auditor.audit_path(path_to_hg_path_buf(name)?)?;
- return Ok(name.to_owned());
+ Ok(name.to_owned())
} else if name == root {
- return Ok("".into());
+ Ok("".into())
} else {
// Determine whether `name' is in the hierarchy at or beneath `root',
// by iterating name=name.parent() until it returns `None` (can't
--- a/rust/hg-core/src/utils/hg_path.rs Tue Jun 23 16:07:18 2020 +0200
+++ b/rust/hg-core/src/utils/hg_path.rs Thu Jun 25 10:32:51 2020 -0700
@@ -208,7 +208,7 @@
}
pub fn join<T: ?Sized + AsRef<Self>>(&self, other: &T) -> HgPathBuf {
let mut inner = self.inner.to_owned();
- if inner.len() != 0 && inner.last() != Some(&b'/') {
+ if !inner.is_empty() && inner.last() != Some(&b'/') {
inner.push(b'/');
}
inner.extend(other.as_ref().bytes());
@@ -315,7 +315,7 @@
/// This generates fine-grained errors useful for debugging.
/// To simply check if the path is valid during tests, use `is_valid`.
pub fn check_state(&self) -> Result<(), HgPathError> {
- if self.len() == 0 {
+ if self.is_empty() {
return Ok(());
}
let bytes = self.as_bytes();
@@ -366,14 +366,14 @@
}
}
-#[derive(Eq, Ord, Clone, PartialEq, PartialOrd, Hash)]
+#[derive(Default, Eq, Ord, Clone, PartialEq, PartialOrd, Hash)]
pub struct HgPathBuf {
inner: Vec<u8>,
}
impl HgPathBuf {
pub fn new() -> Self {
- Self { inner: Vec::new() }
+ Default::default()
}
pub fn push(&mut self, byte: u8) {
self.inner.push(byte);
@@ -384,9 +384,6 @@
pub fn into_vec(self) -> Vec<u8> {
self.inner
}
- pub fn as_ref(&self) -> &[u8] {
- self.inner.as_ref()
- }
}
impl fmt::Debug for HgPathBuf {
--- a/rust/hg-core/src/utils/path_auditor.rs Tue Jun 23 16:07:18 2020 +0200
+++ b/rust/hg-core/src/utils/path_auditor.rs Thu Jun 25 10:32:51 2020 -0700
@@ -112,7 +112,7 @@
// accidentally traverse a symlink into some other filesystem (which
// is potentially expensive to access).
for index in 0..parts.len() {
- let prefix = &parts[..index + 1].join(&b'/');
+ let prefix = &parts[..=index].join(&b'/');
let prefix = HgPath::new(prefix);
if self.audited_dirs.read().unwrap().contains(prefix) {
continue;
--- a/rust/hg-cpython/Cargo.toml Tue Jun 23 16:07:18 2020 +0200
+++ b/rust/hg-cpython/Cargo.toml Thu Jun 25 10:32:51 2020 -0700
@@ -10,7 +10,6 @@
[features]
default = ["python27"]
-with-re2 = ["hg-core/with-re2"]
# Features to build an extension module:
python27 = ["cpython/python27-sys", "cpython/extension-module-2-7"]
--- a/rust/hg-cpython/src/cindex.rs Tue Jun 23 16:07:18 2020 +0200
+++ b/rust/hg-cpython/src/cindex.rs Thu Jun 25 10:32:51 2020 -0700
@@ -90,10 +90,7 @@
),
));
}
- Ok(Index {
- index: index,
- capi: capi,
- })
+ Ok(Index { index, capi })
}
/// return a reference to the CPython Index object in this Struct
@@ -158,7 +155,7 @@
unsafe { (self.capi.index_length)(self.index.as_ptr()) as usize }
}
- fn node<'a>(&'a self, rev: Revision) -> Option<&'a Node> {
+ fn node(&self, rev: Revision) -> Option<&Node> {
let raw = unsafe {
(self.capi.index_node)(self.index.as_ptr(), rev as c_int)
};
--- a/rust/hg-cpython/src/debug.rs Tue Jun 23 16:07:18 2020 +0200
+++ b/rust/hg-cpython/src/debug.rs Thu Jun 25 10:32:51 2020 -0700
@@ -16,8 +16,6 @@
m.add(py, "__package__", package)?;
m.add(py, "__doc__", "Rust debugging information")?;
- m.add(py, "re2_installed", cfg!(feature = "with-re2"))?;
-
let sys = PyModule::import(py, "sys")?;
let sys_modules: PyDict = sys.get(py, "modules")?.extract(py)?;
sys_modules.set_item(py, dotted_name, &m)?;
--- a/rust/hg-cpython/src/dirstate.rs Tue Jun 23 16:07:18 2020 +0200
+++ b/rust/hg-cpython/src/dirstate.rs Thu Jun 25 10:32:51 2020 -0700
@@ -133,7 +133,8 @@
last_normal_time: i64,
list_clean: bool,
list_ignored: bool,
- list_unknown: bool
+ list_unknown: bool,
+ collect_traversed_dirs: bool
)
),
)?;
--- a/rust/hg-cpython/src/dirstate/copymap.rs Tue Jun 23 16:07:18 2020 +0200
+++ b/rust/hg-cpython/src/dirstate/copymap.rs Thu Jun 25 10:32:51 2020 -0700
@@ -89,7 +89,7 @@
py: Python,
res: (&HgPathBuf, &HgPathBuf),
) -> PyResult<Option<PyBytes>> {
- Ok(Some(PyBytes::new(py, res.0.as_ref())))
+ Ok(Some(PyBytes::new(py, res.0.as_bytes())))
}
fn translate_key_value(
py: Python,
@@ -97,8 +97,8 @@
) -> PyResult<Option<(PyBytes, PyBytes)>> {
let (k, v) = res;
Ok(Some((
- PyBytes::new(py, k.as_ref()),
- PyBytes::new(py, v.as_ref()),
+ PyBytes::new(py, k.as_bytes()),
+ PyBytes::new(py, v.as_bytes()),
)))
}
}
--- a/rust/hg-cpython/src/dirstate/dirs_multiset.rs Tue Jun 23 16:07:18 2020 +0200
+++ b/rust/hg-cpython/src/dirstate/dirs_multiset.rs Thu Jun 25 10:32:51 2020 -0700
@@ -128,7 +128,7 @@
py: Python,
res: &HgPathBuf,
) -> PyResult<Option<PyBytes>> {
- Ok(Some(PyBytes::new(py, res.as_ref())))
+ Ok(Some(PyBytes::new(py, res.as_bytes())))
}
}
--- a/rust/hg-cpython/src/dirstate/dirstate_map.rs Tue Jun 23 16:07:18 2020 +0200
+++ b/rust/hg-cpython/src/dirstate/dirstate_map.rs Thu Jun 25 10:32:51 2020 -0700
@@ -179,7 +179,7 @@
"other_parent",
other_parent
.iter()
- .map(|v| PyBytes::new(py, v.as_ref()))
+ .map(|v| PyBytes::new(py, v.as_bytes()))
.collect::<Vec<PyBytes>>()
.to_py_object(py),
)?;
@@ -348,7 +348,11 @@
for (key, value) in
self.inner(py).borrow_mut().build_file_fold_map().iter()
{
- dict.set_item(py, key.as_ref().to_vec(), value.as_ref().to_vec())?;
+ dict.set_item(
+ py,
+ key.as_bytes().to_vec(),
+ value.as_bytes().to_vec(),
+ )?;
}
Ok(dict)
}
@@ -440,8 +444,8 @@
for (key, value) in self.inner(py).borrow().copy_map.iter() {
dict.set_item(
py,
- PyBytes::new(py, key.as_ref()),
- PyBytes::new(py, value.as_ref()),
+ PyBytes::new(py, key.as_bytes()),
+ PyBytes::new(py, value.as_bytes()),
)?;
}
Ok(dict)
@@ -450,7 +454,7 @@
def copymapgetitem(&self, key: PyObject) -> PyResult<PyBytes> {
let key = key.extract::<PyBytes>(py)?;
match self.inner(py).borrow().copy_map.get(HgPath::new(key.data(py))) {
- Some(copy) => Ok(PyBytes::new(py, copy.as_ref())),
+ Some(copy) => Ok(PyBytes::new(py, copy.as_bytes())),
None => Err(PyErr::new::<exc::KeyError, _>(
py,
String::from_utf8_lossy(key.data(py)),
@@ -485,7 +489,7 @@
.get(HgPath::new(key.data(py)))
{
Some(copy) => Ok(Some(
- PyBytes::new(py, copy.as_ref()).into_object(),
+ PyBytes::new(py, copy.as_bytes()).into_object(),
)),
None => Ok(default),
}
@@ -549,7 +553,7 @@
py: Python,
res: (&HgPathBuf, &DirstateEntry),
) -> PyResult<Option<PyBytes>> {
- Ok(Some(PyBytes::new(py, res.0.as_ref())))
+ Ok(Some(PyBytes::new(py, res.0.as_bytes())))
}
fn translate_key_value(
py: Python,
@@ -557,7 +561,7 @@
) -> PyResult<Option<(PyBytes, PyObject)>> {
let (f, entry) = res;
Ok(Some((
- PyBytes::new(py, f.as_ref()),
+ PyBytes::new(py, f.as_bytes()),
make_dirstate_tuple(py, entry)?,
)))
}
--- a/rust/hg-cpython/src/dirstate/non_normal_entries.rs Tue Jun 23 16:07:18 2020 +0200
+++ b/rust/hg-cpython/src/dirstate/non_normal_entries.rs Thu Jun 25 10:32:51 2020 -0700
@@ -62,7 +62,7 @@
py: Python,
key: &HgPathBuf,
) -> PyResult<Option<PyBytes>> {
- Ok(Some(PyBytes::new(py, key.as_ref())))
+ Ok(Some(PyBytes::new(py, key.as_bytes())))
}
}
--- a/rust/hg-cpython/src/dirstate/status.rs Tue Jun 23 16:07:18 2020 +0200
+++ b/rust/hg-cpython/src/dirstate/status.rs Thu Jun 25 10:32:51 2020 -0700
@@ -104,6 +104,7 @@
list_clean: bool,
list_ignored: bool,
list_unknown: bool,
+ collect_traversed_dirs: bool,
) -> PyResult<PyTuple> {
let bytes = root_dir.extract::<PyBytes>(py)?;
let root_dir = get_path_from_bytes(bytes.data(py));
@@ -134,6 +135,7 @@
list_clean,
list_ignored,
list_unknown,
+ collect_traversed_dirs,
},
)
.map_err(|e| handle_fallback(py, e))?;
@@ -170,6 +172,7 @@
list_clean,
list_ignored,
list_unknown,
+ collect_traversed_dirs,
},
)
.map_err(|e| handle_fallback(py, e))?;
@@ -224,6 +227,7 @@
list_clean,
list_ignored,
list_unknown,
+ collect_traversed_dirs,
},
)
.map_err(|e| handle_fallback(py, e))?;
@@ -232,12 +236,10 @@
build_response(py, lookup, status_res, all_warnings)
}
- e => {
- return Err(PyErr::new::<ValueError, _>(
- py,
- format!("Unsupported matcher {}", e),
- ));
- }
+ e => Err(PyErr::new::<ValueError, _>(
+ py,
+ format!("Unsupported matcher {}", e),
+ )),
}
}
@@ -256,6 +258,7 @@
let unknown = collect_pybytes_list(py, status_res.unknown.as_ref());
let lookup = collect_pybytes_list(py, lookup.as_ref());
let bad = collect_bad_matches(py, status_res.bad.as_ref())?;
+ let traversed = collect_pybytes_list(py, status_res.traversed.as_ref());
let py_warnings = PyList::new(py, &[]);
for warning in warnings.iter() {
// We use duck-typing on the Python side for dispatch, good enough for
@@ -292,6 +295,7 @@
unknown.into_object(),
py_warnings.into_object(),
bad.into_object(),
+ traversed.into_object(),
][..],
))
}
--- a/rust/hg-cpython/src/parsers.rs Tue Jun 23 16:07:18 2020 +0200
+++ b/rust/hg-cpython/src/parsers.rs Thu Jun 25 10:32:51 2020 -0700
@@ -37,15 +37,15 @@
for (filename, entry) in &dirstate_map {
dmap.set_item(
py,
- PyBytes::new(py, filename.as_ref()),
+ PyBytes::new(py, filename.as_bytes()),
make_dirstate_tuple(py, entry)?,
)?;
}
for (path, copy_path) in copies {
copymap.set_item(
py,
- PyBytes::new(py, path.as_ref()),
- PyBytes::new(py, copy_path.as_ref()),
+ PyBytes::new(py, path.as_bytes()),
+ PyBytes::new(py, copy_path.as_bytes()),
)?;
}
Ok(
@@ -116,7 +116,7 @@
for (filename, entry) in &dirstate_map {
dmap.set_item(
py,
- PyBytes::new(py, filename.as_ref()),
+ PyBytes::new(py, filename.as_bytes()),
make_dirstate_tuple(py, entry)?,
)?;
}
--- a/rust/hg-cpython/src/utils.rs Tue Jun 23 16:07:18 2020 +0200
+++ b/rust/hg-cpython/src/utils.rs Thu Jun 25 10:32:51 2020 -0700
@@ -32,10 +32,7 @@
/// Clone incoming Python bytes given as `PyBytes` as a `Node`,
/// doing the necessary checks.
-pub fn node_from_py_bytes<'a>(
- py: Python,
- bytes: &'a PyBytes,
-) -> PyResult<Node> {
+pub fn node_from_py_bytes(py: Python, bytes: &PyBytes) -> PyResult<Node> {
<NodeData>::try_from(bytes.data(py))
.map_err(|_| {
PyErr::new::<ValueError, _>(
@@ -43,5 +40,5 @@
format!("{}-byte hash required", NODE_BYTES_LENGTH),
)
})
- .map(|n| n.into())
+ .map(Into::into)
}
--- a/rust/hgcli/pyoxidizer.bzl Tue Jun 23 16:07:18 2020 +0200
+++ b/rust/hgcli/pyoxidizer.bzl Thu Jun 25 10:32:51 2020 -0700
@@ -3,19 +3,16 @@
# Code to run in Python interpreter.
RUN_CODE = "import hgdemandimport; hgdemandimport.enable(); from mercurial import dispatch; dispatch.run()"
-
set_build_path(ROOT + "/build/pyoxidizer")
-
def make_distribution():
return default_python_distribution()
-
def make_distribution_windows():
- return default_python_distribution(flavor="standalone_dynamic")
-
+ return default_python_distribution(flavor = "standalone_dynamic")
def make_exe(dist):
+ """Builds a Rust-wrapped Mercurial binary."""
config = PythonInterpreterConfig(
raw_allocator = "system",
run_eval = RUN_CODE,
@@ -58,23 +55,20 @@
# On Windows, we install extra packages for convenience.
if "windows" in BUILD_TARGET_TRIPLE:
exe.add_python_resources(
- dist.pip_install(["-r", ROOT + "/contrib/packaging/requirements_win32.txt"])
+ dist.pip_install(["-r", ROOT + "/contrib/packaging/requirements_win32.txt"]),
)
return exe
-
def make_manifest(dist, exe):
m = FileManifest()
m.add_python_resource(".", exe)
return m
-
def make_embedded_resources(exe):
return exe.to_embedded_resources()
-
register_target("distribution_posix", make_distribution)
register_target("distribution_windows", make_distribution_windows)
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/rust/rhg/Cargo.toml Thu Jun 25 10:32:51 2020 -0700
@@ -0,0 +1,8 @@
+[package]
+name = "rhg"
+version = "0.1.0"
+authors = ["Antoine Cezar <antoine.cezar@octobus.net>"]
+edition = "2018"
+
+[dependencies]
+
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/rust/rhg/README.md Thu Jun 25 10:32:51 2020 -0700
@@ -0,0 +1,4 @@
+# rhg
+
+This project provides a fastpath Rust implementation of the Mercurial (`hg`)
+version control tool.
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/rust/rhg/rustfmt.toml Thu Jun 25 10:32:51 2020 -0700
@@ -0,0 +1,3 @@
+max_width = 79
+wrap_comments = true
+error_on_line_overflow = true
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/rust/rhg/src/commands.rs Thu Jun 25 10:32:51 2020 -0700
@@ -0,0 +1,8 @@
+use crate::error::CommandError;
+
+/// The common trait for rhg commands
+///
+/// Normalize the interface of the commands provided by rhg
+pub trait Command {
+ fn run(&self) -> Result<(), CommandError>;
+}
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/rust/rhg/src/error.rs Thu Jun 25 10:32:51 2020 -0700
@@ -0,0 +1,3 @@
+/// The error type for the Command trait
+#[derive(Debug, PartialEq)]
+pub struct CommandError {}
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/rust/rhg/src/exitcode.rs Thu Jun 25 10:32:51 2020 -0700
@@ -0,0 +1,4 @@
+pub type ExitCode = i32;
+
+/// Command not implemented by rhg
+pub const UNIMPLEMENTED_COMMAND: ExitCode = 252;
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/rust/rhg/src/main.rs Thu Jun 25 10:32:51 2020 -0700
@@ -0,0 +1,7 @@
+mod commands;
+mod error;
+mod exitcode;
+
+fn main() {
+ std::process::exit(exitcode::UNIMPLEMENTED_COMMAND)
+}
--- a/setup.py Tue Jun 23 16:07:18 2020 +0200
+++ b/setup.py Thu Jun 25 10:32:51 2020 -0700
@@ -83,6 +83,43 @@
printf(error, file=sys.stderr)
sys.exit(1)
+import ssl
+
+try:
+ ssl.SSLContext
+except AttributeError:
+ error = """
+The `ssl` module does not have the `SSLContext` class. This indicates an old
+Python version which does not support modern security features (which were
+added to Python 2.7 as part of "PEP 466"). Please make sure you have installed
+at least Python 2.7.9 or a Python version with backports of these security
+features.
+"""
+ printf(error, file=sys.stderr)
+ sys.exit(1)
+
+# ssl.HAS_TLSv1* are preferred to check support but they were added in Python
+# 3.7. Prior to CPython commit 6e8cda91d92da72800d891b2fc2073ecbc134d98
+# (backported to the 3.7 branch), ssl.PROTOCOL_TLSv1_1 / ssl.PROTOCOL_TLSv1_2
+# were defined only if compiled against a OpenSSL version with TLS 1.1 / 1.2
+# support. At the mentioned commit, they were unconditionally defined.
+_notset = object()
+has_tlsv1_1 = getattr(ssl, 'HAS_TLSv1_1', _notset)
+if has_tlsv1_1 is _notset:
+ has_tlsv1_1 = getattr(ssl, 'PROTOCOL_TLSv1_1', _notset) is not _notset
+has_tlsv1_2 = getattr(ssl, 'HAS_TLSv1_2', _notset)
+if has_tlsv1_2 is _notset:
+ has_tlsv1_2 = getattr(ssl, 'PROTOCOL_TLSv1_2', _notset) is not _notset
+if not (has_tlsv1_1 or has_tlsv1_2):
+ error = """
+The `ssl` module does not advertise support for TLS 1.1 or TLS 1.2.
+Please make sure that your Python installation was compiled against an OpenSSL
+version enabling these features (likely this requires the OpenSSL version to
+be at least 1.0.1).
+"""
+ printf(error, file=sys.stderr)
+ sys.exit(1)
+
if sys.version_info[0] >= 3:
DYLIB_SUFFIX = sysconfig.get_config_vars()['EXT_SUFFIX']
else:
@@ -1396,7 +1433,7 @@
env['HOME'] = pwd.getpwuid(os.getuid()).pw_dir
- cargocmd = ['cargo', 'rustc', '-vv', '--release']
+ cargocmd = ['cargo', 'rustc', '--release']
feature_flags = []
--- a/tests/fakemergerecord.py Tue Jun 23 16:07:18 2020 +0200
+++ b/tests/fakemergerecord.py Thu Jun 25 10:32:51 2020 -0700
@@ -5,7 +5,7 @@
from __future__ import absolute_import
from mercurial import (
- merge,
+ mergestate as mergestatemod,
registrar,
)
@@ -23,7 +23,7 @@
)
def fakemergerecord(ui, repo, *pats, **opts):
with repo.wlock():
- ms = merge.mergestate.read(repo)
+ ms = mergestatemod.mergestate.read(repo)
records = ms._makerecords()
if opts.get('mandatory'):
records.append((b'X', b'mandatory record'))
--- a/tests/hghave.py Tue Jun 23 16:07:18 2020 +0200
+++ b/tests/hghave.py Thu Jun 25 10:32:51 2020 -0700
@@ -645,35 +645,11 @@
return False
-@check("sslcontext", "python >= 2.7.9 ssl")
-def has_sslcontext():
- try:
- import ssl
-
- ssl.SSLContext
- return True
- except (ImportError, AttributeError):
- return False
-
-
-@check("defaultcacerts", "can verify SSL certs by system's CA certs store")
-def has_defaultcacerts():
- from mercurial import sslutil, ui as uimod
-
- ui = uimod.ui.load()
- return sslutil._defaultcacerts(ui) or sslutil._canloaddefaultcerts
-
-
@check("defaultcacertsloaded", "detected presence of loaded system CA certs")
def has_defaultcacertsloaded():
import ssl
from mercurial import sslutil, ui as uimod
- if not has_defaultcacerts():
- return False
- if not has_sslcontext():
- return False
-
ui = uimod.ui.load()
cafile = sslutil._defaultcacerts(ui)
ctx = ssl.create_default_context()
@@ -707,6 +683,17 @@
return True
+@check("setprocname", "whether osutil.setprocname is available or not")
+def has_setprocname():
+ try:
+ from mercurial.utils import procutil
+
+ procutil.setprocname
+ return True
+ except AttributeError:
+ return False
+
+
@check("test-repo", "running tests from repository")
def has_test_repo():
t = os.environ["TESTDIR"]
--- a/tests/run-tests.py Tue Jun 23 16:07:18 2020 +0200
+++ b/tests/run-tests.py Thu Jun 25 10:32:51 2020 -0700
@@ -2260,7 +2260,7 @@
'changes)'
)
else:
- self.stream.write('Accept this change? [n] ')
+ self.stream.write('Accept this change? [y/N] ')
self.stream.flush()
answer = sys.stdin.readline().strip()
if answer.lower() in ('y', 'yes'):
--- a/tests/test-absorb.t Tue Jun 23 16:07:18 2020 +0200
+++ b/tests/test-absorb.t Thu Jun 25 10:32:51 2020 -0700
@@ -97,7 +97,7 @@
84e5416 commit 5
ff5d556 commit 3
f548282 commit 1
- apply changes (yn)? y
+ apply changes (y/N)? y
saved backup bundle to * (glob)
3 of 3 chunk(s) applied
$ hg annotate a
@@ -525,3 +525,83 @@
a: 1 of 1 chunk(s) applied
$ hg id
bfafb49242db tip
+
+ $ cd ..
+ $ hg init repo6
+ $ cd repo6
+ $ echo a1 > a
+ $ touch b
+ $ hg commit -m a -A a b
+ $ hg branch foo -q
+ $ echo b > b
+ $ hg commit -m foo # will become empty
+ $ hg branch bar -q
+ $ hg commit -m bar # is already empty
+ $ echo a2 > a
+ $ printf '' > b
+ $ hg absorb --apply-changes --verbose | grep became
+ 0:0cde1ae39321: 1 file(s) changed, became 3:fc7fcdd90fdb
+ 1:795dfb1adcef: 2 file(s) changed, became 4:a8740537aa53
+ 2:b02935f68891: 2 file(s) changed, became 5:59533e01c707
+ $ hg log -T '{rev} (branch: {branch}) {desc}\n' -G --stat
+ @ 5 (branch: bar) bar
+ |
+ o 4 (branch: foo) foo
+ |
+ o 3 (branch: default) a
+ a | 1 +
+ b | 0
+ 2 files changed, 1 insertions(+), 0 deletions(-)
+
+
+ $ cd ..
+ $ hg init repo7
+ $ cd repo7
+ $ echo a1 > a
+ $ touch b
+ $ hg commit -m a -A a b
+ $ echo b > b
+ $ hg commit -m foo --close-branch # will become empty
+ $ echo c > c
+ $ hg commit -m reopen -A c -q
+ $ hg commit -m bar --close-branch # is already empty
+ $ echo a2 > a
+ $ printf '' > b
+ $ hg absorb --apply-changes --verbose | grep became
+ 0:0cde1ae39321: 1 file(s) changed, became 4:fc7fcdd90fdb
+ 1:651b953d5764: 2 file(s) changed, became 5:0c9de988ecdc
+ 2:76017bba73f6: 2 file(s) changed, became 6:d53ac896eb25
+ 3:c7c1d67efc1d: 2 file(s) changed, became 7:66520267fe96
+ $ hg up null -q # to make visible closed heads
+ $ hg log -T '{rev} {desc}\n' -G --stat
+ _ 7 bar
+ |
+ o 6 reopen
+ | c | 1 +
+ | 1 files changed, 1 insertions(+), 0 deletions(-)
+ |
+ _ 5 foo
+ |
+ o 4 a
+ a | 1 +
+ b | 0
+ 2 files changed, 1 insertions(+), 0 deletions(-)
+
+
+ $ cd ..
+ $ hg init repo8
+ $ cd repo8
+ $ echo a1 > a
+ $ hg commit -m a -A a
+ $ hg commit -m empty --config ui.allowemptycommit=True
+ $ echo a2 > a
+ $ hg absorb --apply-changes --verbose | grep became
+ 0:ecf99a8d6699: 1 file(s) changed, became 2:7e3ccf8e2fa5
+ 1:97f72456ae0d: 1 file(s) changed, became 3:2df488325d6f
+ $ hg log -T '{rev} {desc}\n' -G --stat
+ @ 3 empty
+ |
+ o 2 a
+ a | 1 +
+ 1 files changed, 1 insertions(+), 0 deletions(-)
+
--- a/tests/test-check-rust-format.t Tue Jun 23 16:07:18 2020 +0200
+++ b/tests/test-check-rust-format.t Thu Jun 25 10:32:51 2020 -0700
@@ -5,5 +5,5 @@
$ cd "$TESTDIR"/..
$ RUSTFMT=$(rustup which --toolchain nightly rustfmt)
$ for f in `testrepohg files 'glob:**/*.rs'` ; do
- > $RUSTFMT --check --unstable-features --color=never $f
+ > $RUSTFMT --check --edition=2018 --unstable-features --color=never $f
> done
--- a/tests/test-chg.t Tue Jun 23 16:07:18 2020 +0200
+++ b/tests/test-chg.t Thu Jun 25 10:32:51 2020 -0700
@@ -229,13 +229,13 @@
server.log.1
print only the last 10 lines, since we aren't sure how many records are
-preserved (since setprocname isn't available on py3, the 10th-most-recent line
-is different when using py3):
+preserved (since setprocname isn't available on py3 and the pure version,
+the 10th-most-recent line is different when using py3):
$ cat log/server.log.1 log/server.log | tail -10 | filterlog
- YYYY/MM/DD HH:MM:SS (PID)> confighash = ... mtimehash = ... (py3 !)
+ YYYY/MM/DD HH:MM:SS (PID)> confighash = ... mtimehash = ... (no-setprocname !)
YYYY/MM/DD HH:MM:SS (PID)> forked worker process (pid=...)
- YYYY/MM/DD HH:MM:SS (PID)> setprocname: ... (no-py3 !)
+ YYYY/MM/DD HH:MM:SS (PID)> setprocname: ... (setprocname !)
YYYY/MM/DD HH:MM:SS (PID)> received fds: ...
YYYY/MM/DD HH:MM:SS (PID)> chdir to '$TESTTMP/extreload'
YYYY/MM/DD HH:MM:SS (PID)> setumask 18
--- a/tests/test-clonebundles.t Tue Jun 23 16:07:18 2020 +0200
+++ b/tests/test-clonebundles.t Thu Jun 25 10:32:51 2020 -0700
@@ -255,7 +255,8 @@
added 2 changesets with 2 changes to 2 files
new changesets 53245c60e682:aaff8d2ffbbf
-URLs requiring SNI are filtered in Python <2.7.9
+We require a Python version that supports SNI. Therefore, URLs requiring SNI
+are not filtered.
$ cp full.hg sni.hg
$ cat > server/.hg/clonebundles.manifest << EOF
@@ -263,9 +264,6 @@
> http://localhost:$HGPORT1/full.hg
> EOF
-#if sslcontext
-Python 2.7.9+ support SNI
-
$ hg clone -U http://localhost:$HGPORT sni-supported
applying clone bundle from http://localhost:$HGPORT1/sni.hg
adding changesets
@@ -276,20 +274,6 @@
searching for changes
no changes found
2 local changesets published
-#else
-Python <2.7.9 will filter SNI URLs
-
- $ hg clone -U http://localhost:$HGPORT sni-unsupported
- applying clone bundle from http://localhost:$HGPORT1/full.hg
- adding changesets
- adding manifests
- adding file changes
- added 2 changesets with 2 changes to 2 files
- finished applying clone bundle
- searching for changes
- no changes found
- 2 local changesets published
-#endif
Stream clone bundles are supported
--- a/tests/test-contrib-perf.t Tue Jun 23 16:07:18 2020 +0200
+++ b/tests/test-contrib-perf.t Thu Jun 25 10:32:51 2020 -0700
@@ -180,7 +180,7 @@
perfvolatilesets
benchmark the computation of various volatile set
perfwalk (no help text available)
- perfwrite microbenchmark ui.write
+ perfwrite microbenchmark ui.write (and others)
(use 'hg help -v perf' to show built-in aliases and global options)
$ hg perfaddremove
--- a/tests/test-copies-chain-merge.t Tue Jun 23 16:07:18 2020 +0200
+++ b/tests/test-copies-chain-merge.t Thu Jun 25 10:32:51 2020 -0700
@@ -1,3 +1,5 @@
+#testcases filelog compatibility sidedata
+
=====================================================
Test Copy tracing for chain of copies involving merge
=====================================================
@@ -6,6 +8,7 @@
are involved. It cheks we do not have unwanted update of behavior and that the
different options to retrieve copies behave correctly.
+
Setup
=====
@@ -18,6 +21,22 @@
> logtemplate={rev} {desc}\n
> EOF
+#if compatibility
+ $ cat >> $HGRCPATH << EOF
+ > [experimental]
+ > copies.read-from = compatibility
+ > EOF
+#endif
+
+#if sidedata
+ $ cat >> $HGRCPATH << EOF
+ > [format]
+ > exp-use-side-data = yes
+ > exp-use-copies-side-data-changeset = yes
+ > EOF
+#endif
+
+
$ hg init repo-chain
$ cd repo-chain
@@ -453,17 +472,26 @@
0 4 0dd616bc7ab1 000000000000 000000000000
1 10 6da5a2eecb9c 000000000000 000000000000
2 19 eb806e34ef6b 0dd616bc7ab1 6da5a2eecb9c
+
+# Here the filelog based implementation is not looking at the rename
+# information (because the file exists on both sides). However the changelog
+# based one works fine. We have different output.
+
$ hg status --copies --rev 'desc("a-2")' --rev 'desc("mAEm-0")'
M f
+ b (no-filelog !)
R b
$ hg status --copies --rev 'desc("a-2")' --rev 'desc("mEAm-0")'
M f
+ b (no-filelog !)
R b
$ hg status --copies --rev 'desc("e-2")' --rev 'desc("mAEm-0")'
M f
+ d (no-filelog !)
R d
$ hg status --copies --rev 'desc("e-2")' --rev 'desc("mEAm-0")'
M f
+ d (no-filelog !)
R d
$ hg status --copies --rev 'desc("i-2")' --rev 'desc("a-2")'
A f
@@ -473,6 +501,18 @@
A f
b
R b
+
+# From here, we run status against revision where both source file exists.
+#
+# The filelog based implementation picks an arbitrary side based on revision
+# numbers. So the same side "wins" whatever the parents order is. This is
+# sub-optimal because depending on revision numbers means the result can be
+# different from one repository to the next.
+#
+# The changeset based algorithm uses the parent order to break ties on conflicting
+# information and will have a different order depending on who is p1 and p2.
+# That order is stable across repositories. (data from p1 prevails)
+
$ hg status --copies --rev 'desc("i-2")' --rev 'desc("mAEm-0")'
A f
d
@@ -480,7 +520,8 @@
R d
$ hg status --copies --rev 'desc("i-2")' --rev 'desc("mEAm-0")'
A f
- d
+ d (filelog !)
+ b (no-filelog !)
R b
R d
$ hg status --copies --rev 'desc("i-0")' --rev 'desc("mAEm-0")'
@@ -490,7 +531,8 @@
R b
$ hg status --copies --rev 'desc("i-0")' --rev 'desc("mEAm-0")'
A f
- a
+ a (filelog !)
+ b (no-filelog !)
R a
R b
@@ -563,21 +605,25 @@
R h
$ hg status --copies --rev 'desc("b-1")' --rev 'desc("mBFm-0")'
M d
+ h (no-filelog !)
R h
$ hg status --copies --rev 'desc("f-2")' --rev 'desc("mBFm-0")'
M b
$ hg status --copies --rev 'desc("f-1")' --rev 'desc("mBFm-0")'
M b
M d
+ i (no-filelog !)
R i
$ hg status --copies --rev 'desc("b-1")' --rev 'desc("mFBm-0")'
M d
+ h (no-filelog !)
R h
$ hg status --copies --rev 'desc("f-2")' --rev 'desc("mFBm-0")'
M b
$ hg status --copies --rev 'desc("f-1")' --rev 'desc("mFBm-0")'
M b
M d
+ i (no-filelog !)
R i
The following graphlog is wrong, the "a -> c -> d" chain was overwritten and should not appear.
@@ -645,9 +691,15 @@
|
o 0 i-0 initial commit: a b h
+One side of the merge has a long history with rename. The other side of the
+merge points to a new file with a smaller history. Each side is "valid".
+
+(and again the filelog based algorithm only explores one, with a pick based on
+revision numbers)
+
$ hg status --copies --rev 'desc("i-0")' --rev 'desc("mDGm-0")'
A d
- a
+ a (filelog !)
R a
$ hg status --copies --rev 'desc("i-0")' --rev 'desc("mGDm-0")'
A d
@@ -740,7 +792,8 @@
$ hg status --copies --rev 'desc("i-0")' --rev 'desc("mFGm-0")'
A d
- a
+ h (no-filelog !)
+ a (filelog !)
R a
R h
$ hg status --copies --rev 'desc("i-0")' --rev 'desc("mGFm-0")'
@@ -754,15 +807,19 @@
M d
$ hg status --copies --rev 'desc("f-1")' --rev 'desc("mFGm-0")'
M d
+ i (no-filelog !)
R i
$ hg status --copies --rev 'desc("f-1")' --rev 'desc("mGFm-0")'
M d
+ i (no-filelog !)
R i
$ hg status --copies --rev 'desc("g-1")' --rev 'desc("mFGm-0")'
M d
+ h (no-filelog !)
R h
$ hg status --copies --rev 'desc("g-1")' --rev 'desc("mGFm-0")'
M d
+ h (no-filelog !)
R h
$ hg log -Gfr 'desc("mFGm-0")' d
--- a/tests/test-copies-in-changeset.t Tue Jun 23 16:07:18 2020 +0200
+++ b/tests/test-copies-in-changeset.t Thu Jun 25 10:32:51 2020 -0700
@@ -33,28 +33,30 @@
$ cd repo
#if sidedata
$ hg debugformat -v
- format-variant repo config default
- fncache: yes yes yes
- dotencode: yes yes yes
- generaldelta: yes yes yes
- sparserevlog: yes yes yes
- sidedata: yes yes no
- copies-sdc: yes yes no
- plain-cl-delta: yes yes yes
- compression: zlib zlib zlib
- compression-level: default default default
+ format-variant repo config default
+ fncache: yes yes yes
+ dotencode: yes yes yes
+ generaldelta: yes yes yes
+ sparserevlog: yes yes yes
+ sidedata: yes yes no
+ persistent-nodemap: no no no
+ copies-sdc: yes yes no
+ plain-cl-delta: yes yes yes
+ compression: zlib zlib zlib
+ compression-level: default default default
#else
$ hg debugformat -v
- format-variant repo config default
- fncache: yes yes yes
- dotencode: yes yes yes
- generaldelta: yes yes yes
- sparserevlog: yes yes yes
- sidedata: no no no
- copies-sdc: no no no
- plain-cl-delta: yes yes yes
- compression: zlib zlib zlib
- compression-level: default default default
+ format-variant repo config default
+ fncache: yes yes yes
+ dotencode: yes yes yes
+ generaldelta: yes yes yes
+ sparserevlog: yes yes yes
+ sidedata: no no no
+ persistent-nodemap: no no no
+ copies-sdc: no no no
+ plain-cl-delta: yes yes yes
+ compression: zlib zlib zlib
+ compression-level: default default default
#endif
$ echo a > a
$ hg add a
@@ -424,16 +426,17 @@
downgrading (keeping some sidedata)
$ hg debugformat -v
- format-variant repo config default
- fncache: yes yes yes
- dotencode: yes yes yes
- generaldelta: yes yes yes
- sparserevlog: yes yes yes
- sidedata: yes yes no
- copies-sdc: yes yes no
- plain-cl-delta: yes yes yes
- compression: zlib zlib zlib
- compression-level: default default default
+ format-variant repo config default
+ fncache: yes yes yes
+ dotencode: yes yes yes
+ generaldelta: yes yes yes
+ sparserevlog: yes yes yes
+ sidedata: yes yes no
+ persistent-nodemap: no no no
+ copies-sdc: yes yes no
+ plain-cl-delta: yes yes yes
+ compression: zlib zlib zlib
+ compression-level: default default default
$ hg debugsidedata -c -- 0
1 sidedata entries
entry-0012 size 1
@@ -448,16 +451,17 @@
> EOF
$ hg debugupgraderepo --run --quiet --no-backup > /dev/null
$ hg debugformat -v
- format-variant repo config default
- fncache: yes yes yes
- dotencode: yes yes yes
- generaldelta: yes yes yes
- sparserevlog: yes yes yes
- sidedata: yes yes no
- copies-sdc: no no no
- plain-cl-delta: yes yes yes
- compression: zlib zlib zlib
- compression-level: default default default
+ format-variant repo config default
+ fncache: yes yes yes
+ dotencode: yes yes yes
+ generaldelta: yes yes yes
+ sparserevlog: yes yes yes
+ sidedata: yes yes no
+ persistent-nodemap: no no no
+ copies-sdc: no no no
+ plain-cl-delta: yes yes yes
+ compression: zlib zlib zlib
+ compression-level: default default default
$ hg debugsidedata -c -- 0
$ hg debugsidedata -c -- 1
$ hg debugsidedata -m -- 0
@@ -470,16 +474,17 @@
> EOF
$ hg debugupgraderepo --run --quiet --no-backup > /dev/null
$ hg debugformat -v
- format-variant repo config default
- fncache: yes yes yes
- dotencode: yes yes yes
- generaldelta: yes yes yes
- sparserevlog: yes yes yes
- sidedata: yes yes no
- copies-sdc: yes yes no
- plain-cl-delta: yes yes yes
- compression: zlib zlib zlib
- compression-level: default default default
+ format-variant repo config default
+ fncache: yes yes yes
+ dotencode: yes yes yes
+ generaldelta: yes yes yes
+ sparserevlog: yes yes yes
+ sidedata: yes yes no
+ persistent-nodemap: no no no
+ copies-sdc: yes yes no
+ plain-cl-delta: yes yes yes
+ compression: zlib zlib zlib
+ compression-level: default default default
$ hg debugsidedata -c -- 0
1 sidedata entries
entry-0012 size 1
--- a/tests/test-dirstate.t Tue Jun 23 16:07:18 2020 +0200
+++ b/tests/test-dirstate.t Thu Jun 25 10:32:51 2020 -0700
@@ -70,14 +70,15 @@
> from mercurial import (
> error,
> extensions,
- > merge,
+ > mergestate as mergestatemod,
> )
>
> def wraprecordupdates(*args):
> raise error.Abort("simulated error while recording dirstateupdates")
>
> def reposetup(ui, repo):
- > extensions.wrapfunction(merge, 'recordupdates', wraprecordupdates)
+ > extensions.wrapfunction(mergestatemod, 'recordupdates',
+ > wraprecordupdates)
> EOF
$ hg rm a
--- a/tests/test-extension.t Tue Jun 23 16:07:18 2020 +0200
+++ b/tests/test-extension.t Thu Jun 25 10:32:51 2020 -0700
@@ -152,21 +152,25 @@
> from __future__ import print_function
> import os
> from mercurial import exthelper
+ > from mercurial.utils import procutil
+ >
+ > write = procutil.stdout.write
> name = os.path.basename(__file__).rsplit('.', 1)[0]
- > print("1) %s imported" % name, flush=True)
+ > bytesname = name.encode('utf-8')
+ > write(b"1) %s imported\n" % bytesname)
> eh = exthelper.exthelper()
> @eh.uisetup
> def _uisetup(ui):
- > print("2) %s uisetup" % name, flush=True)
+ > write(b"2) %s uisetup\n" % bytesname)
> @eh.extsetup
> def _extsetup(ui):
- > print("3) %s extsetup" % name, flush=True)
+ > write(b"3) %s extsetup\n" % bytesname)
> @eh.uipopulate
> def _uipopulate(ui):
- > print("4) %s uipopulate" % name, flush=True)
+ > write(b"4) %s uipopulate\n" % bytesname)
> @eh.reposetup
> def _reposetup(ui, repo):
- > print("5) %s reposetup" % name, flush=True)
+ > write(b"5) %s reposetup\n" % bytesname)
>
> extsetup = eh.finalextsetup
> reposetup = eh.finalreposetup
@@ -174,7 +178,6 @@
> uisetup = eh.finaluisetup
> revsetpredicate = eh.revsetpredicate
>
- > bytesname = name.encode('utf-8')
> # custom predicate to check registration of functions at loading
> from mercurial import (
> smartset,
@@ -1852,17 +1855,6 @@
GREPME make sure that this is in the help!
$ cd ..
-Show deprecation warning for the use of cmdutil.command
-
- $ cat > nonregistrar.py <<EOF
- > from mercurial import cmdutil
- > cmdtable = {}
- > command = cmdutil.command(cmdtable)
- > @command(b'foo', [], norepo=True)
- > def foo(ui):
- > pass
- > EOF
-
Prohibit the use of unicode strings as the default value of options
$ hg init $TESTTMP/opt-unicode-default
--- a/tests/test-git-interop.t Tue Jun 23 16:07:18 2020 +0200
+++ b/tests/test-git-interop.t Thu Jun 25 10:32:51 2020 -0700
@@ -36,8 +36,12 @@
$ cd ..
Now globally enable extension for the rest of the test:
- $ echo "[extensions]" >> $HGRCPATH
- > echo "git=" >> $HGRCPATH
+ $ cat <<EOF >> $HGRCPATH
+ > [extensions]
+ > git=
+ > [git]
+ > log-index-cache-miss = yes
+ > EOF
Make a new repo with git:
$ mkdir foo
@@ -68,6 +72,7 @@
But if you run hg init --git, it works:
$ hg init --git
$ hg id --traceback
+ heads mismatch, rebuilding dagcache
3d9be8deba43 tip master
$ hg status
? gamma
@@ -167,9 +172,12 @@
$ hg ci -m 'more alpha' --traceback --date '1583522787 18000'
$ echo b >> beta
$ hg ci -m 'more beta'
+ heads mismatch, rebuilding dagcache
$ echo a >> alpha
$ hg ci -m 'even more alpha'
+ heads mismatch, rebuilding dagcache
$ hg log -G alpha
+ heads mismatch, rebuilding dagcache
@ changeset: 4:6626247b7dc8
: bookmark: master
: tag: tip
@@ -199,6 +207,9 @@
summary: Add beta
+ $ hg log -r "children(3d9be8deba43)" -T"{node|short} {children}\n"
+ a1983dd7fb19 3:d8ee22687733
+
hg annotate
$ hg annotate alpha
@@ -235,6 +246,7 @@
On branch master
nothing to commit, working tree clean
$ hg status
+ heads mismatch, rebuilding dagcache
node|shortest works correctly
@@ -248,3 +260,13 @@
$ hg log -r ae1ab744f95bfd5b07cf573baef98a778058537b --template "{shortest(node,1)}\n"
ae
+This covers changelog.findmissing()
+ $ hg merge --preview 3d9be8deba43
+
+This covers manifest.diff()
+ $ hg diff -c 3d9be8deba43
+ diff -r c5864c9d16fb -r 3d9be8deba43 beta
+ --- /dev/null Thu Jan 01 00:00:00 1970 +0000
+ +++ b/beta Mon Jan 01 00:00:11 2007 +0000
+ @@ -0,0 +1,1 @@
+ +beta
--- a/tests/test-githelp.t Tue Jun 23 16:07:18 2020 +0200
+++ b/tests/test-githelp.t Thu Jun 25 10:32:51 2020 -0700
@@ -318,3 +318,10 @@
hg journal --all
note: in hg commits can be deleted from repo but we always have backups
+
+ $ hg githelp -- git log -Gnarf
+ hg grep --diff narf
+ $ hg githelp -- git log -S narf
+ hg grep --diff narf
+ $ hg githelp -- git log --pickaxe-regex narf
+ hg grep --diff narf
--- a/tests/test-grep.t Tue Jun 23 16:07:18 2020 +0200
+++ b/tests/test-grep.t Thu Jun 25 10:32:51 2020 -0700
@@ -645,22 +645,45 @@
$ hg init sng
$ cd sng
$ echo "unmod" >> um
- $ hg ci -A -m "adds unmod to um"
- adding um
+ $ echo old > old
+ $ hg ci -q -A -m "adds unmod to um"
$ echo "something else" >> new
$ hg ci -A -m "second commit"
adding new
$ hg grep -r "." "unmod"
um:1:unmod
-Working directory is searched by default
+Existing tracked files in the working directory are searched by default
$ echo modified >> new
- $ hg grep mod
+ $ echo 'added' > added; hg add added
+ $ echo 'added, missing' > added-missing; hg add added-missing; rm added-missing
+ $ echo 'untracked' > untracked
+ $ hg rm old
+ $ hg grep ''
+ added:added
+ new:something else
new:modified
um:unmod
- which can be overridden by -rREV
+#if symlink
+Grepping a symlink greps its destination
+
+ $ rm -f added; ln -s symlink-added added
+ $ hg grep '' | grep added
+ added:symlink-added
+
+But we reject symlinks as directory components of a tracked file as
+usual:
+
+ $ mkdir dir; touch dir/f; hg add dir/f
+ $ rm -rf dir; ln -s / dir
+ $ hg grep ''
+ abort: path 'dir/f' traverses symbolic link 'dir'
+ [255]
+#endif
+
+But we can search files from some other revision with -rREV
$ hg grep -r. mod
um:1:unmod
@@ -670,17 +693,6 @@
$ cd ..
-Fix_Wdir(): test that passing wdir() t -r flag does greps on the
-files modified in the working directory
-
- $ cd a
- $ echo "abracadara" >> a
- $ hg add a
- $ hg grep -r "wdir()" "abra"
- a:2147483647:abracadara
-
- $ cd ..
-
Change Default of grep by ui.tweakdefaults, that is, the files not in current
working directory should not be grepp-ed on
--- a/tests/test-histedit-edit.t Tue Jun 23 16:07:18 2020 +0200
+++ b/tests/test-histedit-edit.t Thu Jun 25 10:32:51 2020 -0700
@@ -373,6 +373,7 @@
transaction abort!
rollback completed
note: commit message saved in .hg/last-message.txt
+ note: use 'hg commit --logfile .hg/last-message.txt --edit' to reuse it
abort: pretxncommit.unexpectedabort hook exited with status 1
[255]
$ cat .hg/last-message.txt
@@ -397,6 +398,7 @@
transaction abort!
rollback completed
note: commit message saved in .hg/last-message.txt
+ note: use 'hg commit --logfile .hg/last-message.txt --edit' to reuse it
abort: pretxncommit.unexpectedabort hook exited with status 1
[255]
--- a/tests/test-hook.t Tue Jun 23 16:07:18 2020 +0200
+++ b/tests/test-hook.t Thu Jun 25 10:32:51 2020 -0700
@@ -443,7 +443,7 @@
HG_PENDING=$TESTTMP/a
transaction abort!
- txnabort Python hook: txnid,txnname
+ txnabort Python hook: changes,txnid,txnname
txnabort hook: HG_HOOKNAME=txnabort.1
HG_HOOKTYPE=txnabort
HG_TXNID=TXN:$ID$
--- a/tests/test-https.t Tue Jun 23 16:07:18 2020 +0200
+++ b/tests/test-https.t Thu Jun 25 10:32:51 2020 -0700
@@ -34,7 +34,6 @@
cacert not found
$ hg in --config web.cacerts=no-such.pem https://localhost:$HGPORT/
- warning: connecting to localhost using legacy security technology (TLS 1.0); see https://mercurial-scm.org/wiki/SecureConnections for more info (?)
abort: could not find web.cacerts: no-such.pem
[255]
@@ -49,101 +48,47 @@
Our test cert is not signed by a trusted CA. It should fail to verify if
we are able to load CA certs.
-#if sslcontext defaultcacerts no-defaultcacertsloaded
+#if no-defaultcacertsloaded
$ hg clone https://localhost:$HGPORT/ copy-pull
(an attempt was made to load CA certificates but none were loaded; see https://mercurial-scm.org/wiki/SecureConnections for how to configure Mercurial to avoid this error)
abort: error: *certificate verify failed* (glob)
[255]
#endif
-#if no-sslcontext defaultcacerts
- $ hg clone https://localhost:$HGPORT/ copy-pull
- warning: connecting to localhost using legacy security technology (TLS 1.0); see https://mercurial-scm.org/wiki/SecureConnections for more info (?)
- (using CA certificates from *; if you see this message, your Mercurial install is not properly configured; see https://mercurial-scm.org/wiki/SecureConnections for how to configure Mercurial to avoid this message) (glob) (?)
- abort: error: *certificate verify failed* (glob)
- [255]
-#endif
-
-#if no-sslcontext windows
- $ hg clone https://localhost:$HGPORT/ copy-pull
- warning: connecting to localhost using legacy security technology (TLS 1.0); see https://mercurial-scm.org/wiki/SecureConnections for more info
- (unable to load Windows CA certificates; see https://mercurial-scm.org/wiki/SecureConnections for how to configure Mercurial to avoid this message)
- abort: error: *certificate verify failed* (glob)
- [255]
-#endif
-
-#if no-sslcontext osx
- $ hg clone https://localhost:$HGPORT/ copy-pull
- warning: connecting to localhost using legacy security technology (TLS 1.0); see https://mercurial-scm.org/wiki/SecureConnections for more info
- (unable to load CA certificates; see https://mercurial-scm.org/wiki/SecureConnections for how to configure Mercurial to avoid this message)
- abort: localhost certificate error: no certificate received
- (set hostsecurity.localhost:certfingerprints=sha256:20:de:b3:ad:b4:cd:a5:42:f0:74:41:1c:a2:70:1e:da:6e:c0:5c:16:9e:e7:22:0f:f1:b7:e5:6e:e4:92:af:7e config setting or use --insecure to connect insecurely)
- [255]
-#endif
-
#if defaultcacertsloaded
$ hg clone https://localhost:$HGPORT/ copy-pull
- warning: connecting to localhost using legacy security technology (TLS 1.0); see https://mercurial-scm.org/wiki/SecureConnections for more info (?)
- (using CA certificates from *; if you see this message, your Mercurial install is not properly configured; see https://mercurial-scm.org/wiki/SecureConnections for how to configure Mercurial to avoid this message) (glob) (?)
(the full certificate chain may not be available locally; see "hg help debugssl") (windows !)
abort: error: *certificate verify failed* (glob)
[255]
#endif
-#if no-defaultcacerts
- $ hg clone https://localhost:$HGPORT/ copy-pull
- warning: connecting to localhost using legacy security technology (TLS 1.0); see https://mercurial-scm.org/wiki/SecureConnections for more info (?)
- (unable to load * certificates; see https://mercurial-scm.org/wiki/SecureConnections for how to configure Mercurial to avoid this message) (glob) (?)
- abort: localhost certificate error: no certificate received
- (set hostsecurity.localhost:certfingerprints=sha256:20:de:b3:ad:b4:cd:a5:42:f0:74:41:1c:a2:70:1e:da:6e:c0:5c:16:9e:e7:22:0f:f1:b7:e5:6e:e4:92:af:7e config setting or use --insecure to connect insecurely)
- [255]
-#endif
-
Specifying a per-host certificate file that doesn't exist will abort. The full
C:/path/to/msysroot will print on Windows.
$ hg --config hostsecurity.localhost:verifycertsfile=/does/not/exist clone https://localhost:$HGPORT/
- warning: connecting to localhost using legacy security technology (TLS 1.0); see https://mercurial-scm.org/wiki/SecureConnections for more info (?)
abort: path specified by hostsecurity.localhost:verifycertsfile does not exist: */does/not/exist (glob)
[255]
A malformed per-host certificate file will raise an error
$ echo baddata > badca.pem
-#if sslcontext
$ hg --config hostsecurity.localhost:verifycertsfile=badca.pem clone https://localhost:$HGPORT/
- warning: connecting to localhost using legacy security technology (TLS 1.0); see https://mercurial-scm.org/wiki/SecureConnections for more info (?)
abort: error loading CA file badca.pem: * (glob)
(file is empty or malformed?)
[255]
-#else
- $ hg --config hostsecurity.localhost:verifycertsfile=badca.pem clone https://localhost:$HGPORT/
- warning: connecting to localhost using legacy security technology (TLS 1.0); see https://mercurial-scm.org/wiki/SecureConnections for more info (?)
- abort: error: * (glob)
- [255]
-#endif
A per-host certificate mismatching the server will fail verification
(modern ssl is able to discern whether the loaded cert is a CA cert)
-#if sslcontext
$ hg --config hostsecurity.localhost:verifycertsfile="$CERTSDIR/client-cert.pem" clone https://localhost:$HGPORT/
- warning: connecting to localhost using legacy security technology (TLS 1.0); see https://mercurial-scm.org/wiki/SecureConnections for more info (?)
(an attempt was made to load CA certificates but none were loaded; see https://mercurial-scm.org/wiki/SecureConnections for how to configure Mercurial to avoid this error)
(the full certificate chain may not be available locally; see "hg help debugssl") (windows !)
abort: error: *certificate verify failed* (glob)
[255]
-#else
- $ hg --config hostsecurity.localhost:verifycertsfile="$CERTSDIR/client-cert.pem" clone https://localhost:$HGPORT/
- warning: connecting to localhost using legacy security technology (TLS 1.0); see https://mercurial-scm.org/wiki/SecureConnections for more info (?)
- abort: error: *certificate verify failed* (glob)
- [255]
-#endif
A per-host certificate matching the server's cert will be accepted
$ hg --config hostsecurity.localhost:verifycertsfile="$CERTSDIR/pub.pem" clone -U https://localhost:$HGPORT/ perhostgood1
- warning: connecting to localhost using legacy security technology (TLS 1.0); see https://mercurial-scm.org/wiki/SecureConnections for more info (?)
requesting all changes
adding changesets
adding manifests
@@ -155,7 +100,6 @@
$ cat "$CERTSDIR/client-cert.pem" "$CERTSDIR/pub.pem" > perhost.pem
$ hg --config hostsecurity.localhost:verifycertsfile=perhost.pem clone -U https://localhost:$HGPORT/ perhostgood2
- warning: connecting to localhost using legacy security technology (TLS 1.0); see https://mercurial-scm.org/wiki/SecureConnections for more info (?)
requesting all changes
adding changesets
adding manifests
@@ -166,7 +110,6 @@
Defining both per-host certificate and a fingerprint will print a warning
$ hg --config hostsecurity.localhost:verifycertsfile="$CERTSDIR/pub.pem" --config hostsecurity.localhost:fingerprints=sha1:ecd87cd6b386d04fc1b8b41c9d8f5e168eef1c03 clone -U https://localhost:$HGPORT/ caandfingerwarning
- warning: connecting to localhost using legacy security technology (TLS 1.0); see https://mercurial-scm.org/wiki/SecureConnections for more info (?)
(hostsecurity.localhost:verifycertsfile ignored when host fingerprints defined; using host fingerprints for verification)
requesting all changes
adding changesets
@@ -180,13 +123,11 @@
Inability to verify peer certificate will result in abort
$ hg clone https://localhost:$HGPORT/ copy-pull $DISABLECACERTS
- warning: connecting to localhost using legacy security technology (TLS 1.0); see https://mercurial-scm.org/wiki/SecureConnections for more info (?)
abort: unable to verify security of localhost (no loaded CA certificates); refusing to connect
(see https://mercurial-scm.org/wiki/SecureConnections for how to configure Mercurial to avoid this error or set hostsecurity.localhost:fingerprints=sha256:20:de:b3:ad:b4:cd:a5:42:f0:74:41:1c:a2:70:1e:da:6e:c0:5c:16:9e:e7:22:0f:f1:b7:e5:6e:e4:92:af:7e to trust this server)
[255]
$ hg clone --insecure https://localhost:$HGPORT/ copy-pull
- warning: connecting to localhost using legacy security technology (TLS 1.0); see https://mercurial-scm.org/wiki/SecureConnections for more info (?)
warning: connection security to localhost is disabled per current settings; communication is susceptible to eavesdropping and tampering
requesting all changes
adding changesets
@@ -217,14 +158,12 @@
> EOF
$ hg pull $DISABLECACERTS
pulling from https://localhost:$HGPORT/
- warning: connecting to localhost using legacy security technology (TLS 1.0); see https://mercurial-scm.org/wiki/SecureConnections for more info (?)
abort: unable to verify security of localhost (no loaded CA certificates); refusing to connect
(see https://mercurial-scm.org/wiki/SecureConnections for how to configure Mercurial to avoid this error or set hostsecurity.localhost:fingerprints=sha256:20:de:b3:ad:b4:cd:a5:42:f0:74:41:1c:a2:70:1e:da:6e:c0:5c:16:9e:e7:22:0f:f1:b7:e5:6e:e4:92:af:7e to trust this server)
[255]
$ hg pull --insecure
pulling from https://localhost:$HGPORT/
- warning: connecting to localhost using legacy security technology (TLS 1.0); see https://mercurial-scm.org/wiki/SecureConnections for more info (?)
warning: connection security to localhost is disabled per current settings; communication is susceptible to eavesdropping and tampering
searching for changes
adding changesets
@@ -252,7 +191,6 @@
$ echo "cacerts=$CERTSDIR/pub.pem" >> copy-pull/.hg/hgrc
$ hg -R copy-pull pull
pulling from https://localhost:$HGPORT/
- warning: connecting to localhost using legacy security technology (TLS 1.0); see https://mercurial-scm.org/wiki/SecureConnections for more info (?)
searching for changes
no changes found
$ mv copy-pull/.hg/hgrc.bu copy-pull/.hg/hgrc
@@ -264,12 +202,10 @@
$ echo 'cacerts=$P/pub.pem' >> $HGRCPATH
$ P="$CERTSDIR" hg -R copy-pull pull
pulling from https://localhost:$HGPORT/
- warning: connecting to localhost using legacy security technology (TLS 1.0); see https://mercurial-scm.org/wiki/SecureConnections for more info (?)
searching for changes
no changes found
$ P="$CERTSDIR" hg -R copy-pull pull --insecure
pulling from https://localhost:$HGPORT/
- warning: connecting to localhost using legacy security technology (TLS 1.0); see https://mercurial-scm.org/wiki/SecureConnections for more info (?)
warning: connection security to localhost is disabled per current settings; communication is susceptible to eavesdropping and tampering
searching for changes
no changes found
@@ -278,47 +214,34 @@
$ touch emptycafile
-#if sslcontext
$ hg --config web.cacerts=emptycafile -R copy-pull pull
pulling from https://localhost:$HGPORT/
- warning: connecting to localhost using legacy security technology (TLS 1.0); see https://mercurial-scm.org/wiki/SecureConnections for more info (?)
abort: error loading CA file emptycafile: * (glob)
(file is empty or malformed?)
[255]
-#else
- $ hg --config web.cacerts=emptycafile -R copy-pull pull
- pulling from https://localhost:$HGPORT/
- warning: connecting to localhost using legacy security technology (TLS 1.0); see https://mercurial-scm.org/wiki/SecureConnections for more info (?)
- abort: error: * (glob)
- [255]
-#endif
cacert mismatch
$ hg -R copy-pull pull --config web.cacerts="$CERTSDIR/pub.pem" \
> https://$LOCALIP:$HGPORT/
pulling from https://*:$HGPORT/ (glob)
- warning: connecting to $LOCALIP using legacy security technology (TLS 1.0); see https://mercurial-scm.org/wiki/SecureConnections for more info (?)
abort: $LOCALIP certificate error: certificate is for localhost (glob)
(set hostsecurity.$LOCALIP:certfingerprints=sha256:20:de:b3:ad:b4:cd:a5:42:f0:74:41:1c:a2:70:1e:da:6e:c0:5c:16:9e:e7:22:0f:f1:b7:e5:6e:e4:92:af:7e config setting or use --insecure to connect insecurely)
[255]
$ hg -R copy-pull pull --config web.cacerts="$CERTSDIR/pub.pem" \
> https://$LOCALIP:$HGPORT/ --insecure
pulling from https://*:$HGPORT/ (glob)
- warning: connecting to $LOCALIP using legacy security technology (TLS 1.0); see https://mercurial-scm.org/wiki/SecureConnections for more info (?)
warning: connection security to $LOCALIP is disabled per current settings; communication is susceptible to eavesdropping and tampering (glob)
searching for changes
no changes found
$ hg -R copy-pull pull --config web.cacerts="$CERTSDIR/pub-other.pem"
pulling from https://localhost:$HGPORT/
- warning: connecting to localhost using legacy security technology (TLS 1.0); see https://mercurial-scm.org/wiki/SecureConnections for more info (?)
(the full certificate chain may not be available locally; see "hg help debugssl") (windows !)
abort: error: *certificate verify failed* (glob)
[255]
$ hg -R copy-pull pull --config web.cacerts="$CERTSDIR/pub-other.pem" \
> --insecure
pulling from https://localhost:$HGPORT/
- warning: connecting to localhost using legacy security technology (TLS 1.0); see https://mercurial-scm.org/wiki/SecureConnections for more info (?)
warning: connection security to localhost is disabled per current settings; communication is susceptible to eavesdropping and tampering
searching for changes
no changes found
@@ -330,7 +253,6 @@
$ hg -R copy-pull pull --config web.cacerts="$CERTSDIR/pub-not-yet.pem" \
> https://localhost:$HGPORT1/
pulling from https://localhost:$HGPORT1/
- warning: connecting to localhost using legacy security technology (TLS 1.0); see https://mercurial-scm.org/wiki/SecureConnections for more info (?)
(the full certificate chain may not be available locally; see "hg help debugssl") (windows !)
abort: error: *certificate verify failed* (glob)
[255]
@@ -342,40 +264,17 @@
$ hg -R copy-pull pull --config web.cacerts="$CERTSDIR/pub-expired.pem" \
> https://localhost:$HGPORT2/
pulling from https://localhost:$HGPORT2/
- warning: connecting to localhost using legacy security technology (TLS 1.0); see https://mercurial-scm.org/wiki/SecureConnections for more info (?)
(the full certificate chain may not be available locally; see "hg help debugssl") (windows !)
abort: error: *certificate verify failed* (glob)
[255]
-Disabling the TLS 1.0 warning works
- $ hg -R copy-pull id https://localhost:$HGPORT/ \
- > --config hostsecurity.localhost:fingerprints=sha1:ecd87cd6b386d04fc1b8b41c9d8f5e168eef1c03 \
- > --config hostsecurity.disabletls10warning=true
- 5fed3813f7f5
-
-Error message for setting ciphers is different depending on SSLContext support
-
-#if no-sslcontext
- $ P="$CERTSDIR" hg --config hostsecurity.ciphers=invalid -R copy-pull id https://localhost:$HGPORT/
- warning: connecting to localhost using legacy security technology (TLS 1.0); see https://mercurial-scm.org/wiki/SecureConnections for more info
- abort: *No cipher can be selected. (glob)
- [255]
-
- $ P="$CERTSDIR" hg --config hostsecurity.ciphers=HIGH -R copy-pull id https://localhost:$HGPORT/
- warning: connecting to localhost using legacy security technology (TLS 1.0); see https://mercurial-scm.org/wiki/SecureConnections for more info
- 5fed3813f7f5
-#endif
-
-#if sslcontext
Setting ciphers to an invalid value aborts
$ P="$CERTSDIR" hg --config hostsecurity.ciphers=invalid -R copy-pull id https://localhost:$HGPORT/
- warning: connecting to localhost using legacy security technology (TLS 1.0); see https://mercurial-scm.org/wiki/SecureConnections for more info (?)
abort: could not set ciphers: No cipher can be selected.
(change cipher string (invalid) in config)
[255]
$ P="$CERTSDIR" hg --config hostsecurity.localhost:ciphers=invalid -R copy-pull id https://localhost:$HGPORT/
- warning: connecting to localhost using legacy security technology (TLS 1.0); see https://mercurial-scm.org/wiki/SecureConnections for more info (?)
abort: could not set ciphers: No cipher can be selected.
(change cipher string (invalid) in config)
[255]
@@ -383,64 +282,52 @@
Changing the cipher string works
$ P="$CERTSDIR" hg --config hostsecurity.ciphers=HIGH -R copy-pull id https://localhost:$HGPORT/
- warning: connecting to localhost using legacy security technology (TLS 1.0); see https://mercurial-scm.org/wiki/SecureConnections for more info (?)
5fed3813f7f5
-#endif
Fingerprints
- works without cacerts (hostfingerprints)
$ hg -R copy-pull id https://localhost:$HGPORT/ --insecure --config hostfingerprints.localhost=ec:d8:7c:d6:b3:86:d0:4f:c1:b8:b4:1c:9d:8f:5e:16:8e:ef:1c:03
- warning: connecting to localhost using legacy security technology (TLS 1.0); see https://mercurial-scm.org/wiki/SecureConnections for more info (?)
(SHA-1 fingerprint for localhost found in legacy [hostfingerprints] section; if you trust this fingerprint, remove the old SHA-1 fingerprint from [hostfingerprints] and add the following entry to the new [hostsecurity] section: localhost:fingerprints=sha256:20:de:b3:ad:b4:cd:a5:42:f0:74:41:1c:a2:70:1e:da:6e:c0:5c:16:9e:e7:22:0f:f1:b7:e5:6e:e4:92:af:7e)
5fed3813f7f5
- works without cacerts (hostsecurity)
$ hg -R copy-pull id https://localhost:$HGPORT/ --config hostsecurity.localhost:fingerprints=sha1:ecd87cd6b386d04fc1b8b41c9d8f5e168eef1c03
- warning: connecting to localhost using legacy security technology (TLS 1.0); see https://mercurial-scm.org/wiki/SecureConnections for more info (?)
5fed3813f7f5
$ hg -R copy-pull id https://localhost:$HGPORT/ --config hostsecurity.localhost:fingerprints=sha256:20:de:b3:ad:b4:cd:a5:42:f0:74:41:1c:a2:70:1e:da:6e:c0:5c:16:9e:e7:22:0f:f1:b7:e5:6e:e4:92:af:7e
- warning: connecting to localhost using legacy security technology (TLS 1.0); see https://mercurial-scm.org/wiki/SecureConnections for more info (?)
5fed3813f7f5
- multiple fingerprints specified and first matches
$ hg --config 'hostfingerprints.localhost=ecd87cd6b386d04fc1b8b41c9d8f5e168eef1c03, deadbeefdeadbeefdeadbeefdeadbeefdeadbeef' -R copy-pull id https://localhost:$HGPORT/ --insecure
- warning: connecting to localhost using legacy security technology (TLS 1.0); see https://mercurial-scm.org/wiki/SecureConnections for more info (?)
(SHA-1 fingerprint for localhost found in legacy [hostfingerprints] section; if you trust this fingerprint, remove the old SHA-1 fingerprint from [hostfingerprints] and add the following entry to the new [hostsecurity] section: localhost:fingerprints=sha256:20:de:b3:ad:b4:cd:a5:42:f0:74:41:1c:a2:70:1e:da:6e:c0:5c:16:9e:e7:22:0f:f1:b7:e5:6e:e4:92:af:7e)
5fed3813f7f5
$ hg --config 'hostsecurity.localhost:fingerprints=sha1:ecd87cd6b386d04fc1b8b41c9d8f5e168eef1c03, sha1:deadbeefdeadbeefdeadbeefdeadbeefdeadbeef' -R copy-pull id https://localhost:$HGPORT/
- warning: connecting to localhost using legacy security technology (TLS 1.0); see https://mercurial-scm.org/wiki/SecureConnections for more info (?)
5fed3813f7f5
- multiple fingerprints specified and last matches
$ hg --config 'hostfingerprints.localhost=deadbeefdeadbeefdeadbeefdeadbeefdeadbeef, ecd87cd6b386d04fc1b8b41c9d8f5e168eef1c03' -R copy-pull id https://localhost:$HGPORT/ --insecure
- warning: connecting to localhost using legacy security technology (TLS 1.0); see https://mercurial-scm.org/wiki/SecureConnections for more info (?)
(SHA-1 fingerprint for localhost found in legacy [hostfingerprints] section; if you trust this fingerprint, remove the old SHA-1 fingerprint from [hostfingerprints] and add the following entry to the new [hostsecurity] section: localhost:fingerprints=sha256:20:de:b3:ad:b4:cd:a5:42:f0:74:41:1c:a2:70:1e:da:6e:c0:5c:16:9e:e7:22:0f:f1:b7:e5:6e:e4:92:af:7e)
5fed3813f7f5
$ hg --config 'hostsecurity.localhost:fingerprints=sha1:deadbeefdeadbeefdeadbeefdeadbeefdeadbeef, sha1:ecd87cd6b386d04fc1b8b41c9d8f5e168eef1c03' -R copy-pull id https://localhost:$HGPORT/
- warning: connecting to localhost using legacy security technology (TLS 1.0); see https://mercurial-scm.org/wiki/SecureConnections for more info (?)
5fed3813f7f5
- multiple fingerprints specified and none match
$ hg --config 'hostfingerprints.localhost=deadbeefdeadbeefdeadbeefdeadbeefdeadbeef, aeadbeefdeadbeefdeadbeefdeadbeefdeadbeef' -R copy-pull id https://localhost:$HGPORT/ --insecure
- warning: connecting to localhost using legacy security technology (TLS 1.0); see https://mercurial-scm.org/wiki/SecureConnections for more info (?)
abort: certificate for localhost has unexpected fingerprint ec:d8:7c:d6:b3:86:d0:4f:c1:b8:b4:1c:9d:8f:5e:16:8e:ef:1c:03
(check hostfingerprint configuration)
[255]
$ hg --config 'hostsecurity.localhost:fingerprints=sha1:deadbeefdeadbeefdeadbeefdeadbeefdeadbeef, sha1:aeadbeefdeadbeefdeadbeefdeadbeefdeadbeef' -R copy-pull id https://localhost:$HGPORT/
- warning: connecting to localhost using legacy security technology (TLS 1.0); see https://mercurial-scm.org/wiki/SecureConnections for more info (?)
abort: certificate for localhost has unexpected fingerprint sha1:ec:d8:7c:d6:b3:86:d0:4f:c1:b8:b4:1c:9d:8f:5e:16:8e:ef:1c:03
(check hostsecurity configuration)
[255]
- fails when cert doesn't match hostname (port is ignored)
$ hg -R copy-pull id https://localhost:$HGPORT1/ --config hostfingerprints.localhost=ecd87cd6b386d04fc1b8b41c9d8f5e168eef1c03
- warning: connecting to localhost using legacy security technology (TLS 1.0); see https://mercurial-scm.org/wiki/SecureConnections for more info (?)
abort: certificate for localhost has unexpected fingerprint f4:2f:5a:0c:3e:52:5b:db:e7:24:a8:32:1d:18:97:6d:69:b5:87:84
(check hostfingerprint configuration)
[255]
@@ -448,7 +335,6 @@
- ignores that certificate doesn't match hostname
$ hg -R copy-pull id https://$LOCALIP:$HGPORT/ --config hostfingerprints.$LOCALIP=ecd87cd6b386d04fc1b8b41c9d8f5e168eef1c03
- warning: connecting to $LOCALIP using legacy security technology (TLS 1.0); see https://mercurial-scm.org/wiki/SecureConnections for more info (?)
(SHA-1 fingerprint for $LOCALIP found in legacy [hostfingerprints] section; if you trust this fingerprint, remove the old SHA-1 fingerprint from [hostfingerprints] and add the following entry to the new [hostsecurity] section: $LOCALIP:fingerprints=sha256:20:de:b3:ad:b4:cd:a5:42:f0:74:41:1c:a2:70:1e:da:6e:c0:5c:16:9e:e7:22:0f:f1:b7:e5:6e:e4:92:af:7e)
5fed3813f7f5
@@ -458,7 +344,7 @@
$ killdaemons.py hg1.pid
$ killdaemons.py hg2.pid
-#if sslcontext tls1.2
+#if tls1.2
Start servers running supported TLS versions
$ cd test
@@ -572,7 +458,6 @@
$ http_proxy=http://localhost:$HGPORT1/ hg -R copy-pull pull --insecure
pulling from https://localhost:$HGPORT/
- warning: connecting to localhost using legacy security technology (TLS 1.0); see https://mercurial-scm.org/wiki/SecureConnections for more info (?)
warning: connection security to localhost is disabled per current settings; communication is susceptible to eavesdropping and tampering
searching for changes
no changes found
@@ -582,12 +467,10 @@
$ http_proxy=http://localhost:$HGPORT1/ hg -R copy-pull pull \
> --config web.cacerts="$CERTSDIR/pub.pem"
pulling from https://localhost:$HGPORT/
- warning: connecting to localhost using legacy security technology (TLS 1.0); see https://mercurial-scm.org/wiki/SecureConnections for more info (?)
searching for changes
no changes found
$ http_proxy=http://localhost:$HGPORT1/ hg -R copy-pull pull https://localhost:$HGPORT/ --config hostfingerprints.localhost=ecd87cd6b386d04fc1b8b41c9d8f5e168eef1c03 --trace
pulling from https://*:$HGPORT/ (glob)
- warning: connecting to localhost using legacy security technology (TLS 1.0); see https://mercurial-scm.org/wiki/SecureConnections for more info (?)
(SHA-1 fingerprint for localhost found in legacy [hostfingerprints] section; if you trust this fingerprint, remove the old SHA-1 fingerprint from [hostfingerprints] and add the following entry to the new [hostsecurity] section: localhost:fingerprints=sha256:20:de:b3:ad:b4:cd:a5:42:f0:74:41:1c:a2:70:1e:da:6e:c0:5c:16:9e:e7:22:0f:f1:b7:e5:6e:e4:92:af:7e)
searching for changes
no changes found
@@ -597,14 +480,12 @@
$ http_proxy=http://localhost:$HGPORT1/ hg -R copy-pull pull \
> --config web.cacerts="$CERTSDIR/pub-other.pem"
pulling from https://localhost:$HGPORT/
- warning: connecting to localhost using legacy security technology (TLS 1.0); see https://mercurial-scm.org/wiki/SecureConnections for more info (?)
(the full certificate chain may not be available locally; see "hg help debugssl") (windows !)
abort: error: *certificate verify failed* (glob)
[255]
$ http_proxy=http://localhost:$HGPORT1/ hg -R copy-pull pull \
> --config web.cacerts="$CERTSDIR/pub-expired.pem" https://localhost:$HGPORT2/
pulling from https://localhost:$HGPORT2/
- warning: connecting to localhost using legacy security technology (TLS 1.0); see https://mercurial-scm.org/wiki/SecureConnections for more info (?)
(the full certificate chain may not be available locally; see "hg help debugssl") (windows !)
abort: error: *certificate verify failed* (glob)
[255]
@@ -612,8 +493,6 @@
$ killdaemons.py hg0.pid
-#if sslcontext
-
$ cd test
Missing certificate file(s) are detected
@@ -638,7 +517,6 @@
without client certificate:
$ P="$CERTSDIR" hg id https://localhost:$HGPORT/
- warning: connecting to localhost using legacy security technology (TLS 1.0); see https://mercurial-scm.org/wiki/SecureConnections for more info (?)
abort: error: .*(\$ECONNRESET\$|certificate required|handshake failure).* (re)
[255]
@@ -653,16 +531,13 @@
$ P="$CERTSDIR" hg id https://localhost:$HGPORT/ \
> --config auth.l.key="$CERTSDIR/client-key-decrypted.pem"
- warning: connecting to localhost using legacy security technology (TLS 1.0); see https://mercurial-scm.org/wiki/SecureConnections for more info (?)
5fed3813f7f5
$ printf '1234\n' | env P="$CERTSDIR" hg id https://localhost:$HGPORT/ \
> --config ui.interactive=True --config ui.nontty=True
- warning: connecting to localhost using legacy security technology (TLS 1.0); see https://mercurial-scm.org/wiki/SecureConnections for more info (?)
passphrase for */client-key.pem: 5fed3813f7f5 (glob)
$ env P="$CERTSDIR" hg id https://localhost:$HGPORT/
- warning: connecting to localhost using legacy security technology (TLS 1.0); see https://mercurial-scm.org/wiki/SecureConnections for more info (?)
abort: error: * (glob)
[255]
@@ -677,5 +552,3 @@
abort: certificate file (*/missing/key) does not exist; cannot connect to localhost (glob)
(restore missing file or fix references in Mercurial config)
[255]
-
-#endif
--- a/tests/test-install.t Tue Jun 23 16:07:18 2020 +0200
+++ b/tests/test-install.t Thu Jun 25 10:32:51 2020 -0700
@@ -18,7 +18,6 @@
checking available compression engines (*zlib*) (glob)
checking available compression engines for wire protocol (*zlib*) (glob)
checking "re2" regexp engine \((available|missing)\) (re)
- checking "re2" regexp engine Rust bindings \((installed|missing)\) (re) (rust !)
checking templates (*mercurial?templates)... (glob)
checking default template (*mercurial?templates?map-cmdline.default) (glob)
checking commit editor... (*) (glob)
@@ -78,7 +77,6 @@
checking available compression engines (*zlib*) (glob)
checking available compression engines for wire protocol (*zlib*) (glob)
checking "re2" regexp engine \((available|missing)\) (re)
- checking "re2" regexp engine Rust bindings \((installed|missing)\) (re) (rust !)
checking templates (*mercurial?templates)... (glob)
checking default template (*mercurial?templates?map-cmdline.default) (glob)
checking commit editor... (*) (glob)
@@ -126,7 +124,6 @@
checking available compression engines (*zlib*) (glob)
checking available compression engines for wire protocol (*zlib*) (glob)
checking "re2" regexp engine \((available|missing)\) (re)
- checking "re2" regexp engine Rust bindings \((installed|missing)\) (re) (rust !)
checking templates (*mercurial?templates)... (glob)
checking default template (*mercurial?templates?map-cmdline.default) (glob)
checking commit editor... ($TESTTMP/tools/testeditor.exe)
@@ -154,7 +151,6 @@
checking available compression engines (*zlib*) (glob)
checking available compression engines for wire protocol (*zlib*) (glob)
checking "re2" regexp engine \((available|missing)\) (re)
- checking "re2" regexp engine Rust bindings \((installed|missing)\) (re) (rust !)
checking templates (*mercurial?templates)... (glob)
checking default template (*mercurial?templates?map-cmdline.default) (glob)
checking commit editor... (c:\foo\bar\baz.exe) (windows !)
@@ -211,7 +207,6 @@
checking available compression engines (*) (glob)
checking available compression engines for wire protocol (*) (glob)
checking "re2" regexp engine \((available|missing)\) (re)
- checking "re2" regexp engine Rust bindings \((installed|missing)\) (re) (rust !)
checking templates ($TESTTMP/installenv/*/site-packages/mercurial/templates)... (glob)
checking default template ($TESTTMP/installenv/*/site-packages/mercurial/templates/map-cmdline.default) (glob)
checking commit editor... (*) (glob)
@@ -252,7 +247,6 @@
checking available compression engines (*) (glob)
checking available compression engines for wire protocol (*) (glob)
checking "re2" regexp engine \((available|missing)\) (re)
- checking "re2" regexp engine Rust bindings \((installed|missing)\) (re) (rust !)
checking templates ($TESTTMP/installenv/*/site-packages/mercurial/templates)... (glob)
checking default template ($TESTTMP/installenv/*/site-packages/mercurial/templates/map-cmdline.default) (glob)
checking commit editor... (*) (glob)
--- a/tests/test-lfs-serve.t Tue Jun 23 16:07:18 2020 +0200
+++ b/tests/test-lfs-serve.t Thu Jun 25 10:32:51 2020 -0700
@@ -133,30 +133,6 @@
requirements
preserved: dotencode, fncache, generaldelta, revlogv1, sparserevlog, store
- beginning upgrade...
- repository locked and read-only
- creating temporary repository to stage migrated data: * (glob)
- (it is safe to interrupt this process any time before data migration completes)
- migrating 3 total revisions (1 in filelogs, 1 in manifests, 1 in changelog)
- migrating 324 bytes in store; 129 bytes tracked data
- migrating 1 filelogs containing 1 revisions (73 bytes in store; 8 bytes tracked data)
- finished migrating 1 filelog revisions across 1 filelogs; change in size: 0 bytes
- migrating 1 manifests containing 1 revisions (117 bytes in store; 52 bytes tracked data)
- finished migrating 1 manifest revisions across 1 manifests; change in size: 0 bytes
- migrating changelog containing 1 revisions (134 bytes in store; 69 bytes tracked data)
- finished migrating 1 changelog revisions; change in size: 0 bytes
- finished migrating 3 total revisions; total change in store size: 0 bytes
- copying phaseroots
- data fully migrated to temporary repository
- marking source repository as being upgraded; clients will be unable to read from repository
- starting in-place swap of repository data
- replaced files will be backed up at * (glob)
- replacing store...
- store replacement complete; repository was inconsistent for *s (glob)
- finalizing requirements file and making repository readable again
- removing temporary repository * (glob)
- copy of old repository backed up at * (glob)
- the old repository will not be deleted; remove it to free up disk space once the upgraded repository is verified
$ grep 'lfs' .hg/requires $SERVER_REQUIRES
[1]
--- a/tests/test-mq-qfold.t Tue Jun 23 16:07:18 2020 +0200
+++ b/tests/test-mq-qfold.t Thu Jun 25 10:32:51 2020 -0700
@@ -230,6 +230,7 @@
HG: changed a
====
note: commit message saved in .hg/last-message.txt
+ note: use 'hg commit --logfile .hg/last-message.txt --edit' to reuse it
transaction abort!
rollback completed
qrefresh interrupted while patch was popped! (revert --all, qpush to recover)
--- a/tests/test-mq-qnew.t Tue Jun 23 16:07:18 2020 +0200
+++ b/tests/test-mq-qnew.t Thu Jun 25 10:32:51 2020 -0700
@@ -308,6 +308,7 @@
transaction abort!
rollback completed
note: commit message saved in .hg/last-message.txt
+ note: use 'hg commit --logfile .hg/last-message.txt --edit' to reuse it
abort: pretxncommit.unexpectedabort hook exited with status 1
[255]
$ cat .hg/last-message.txt
--- a/tests/test-mq-qrefresh-replace-log-message.t Tue Jun 23 16:07:18 2020 +0200
+++ b/tests/test-mq-qrefresh-replace-log-message.t Thu Jun 25 10:32:51 2020 -0700
@@ -186,6 +186,7 @@
HG: added file2
====
note: commit message saved in .hg/last-message.txt
+ note: use 'hg commit --logfile .hg/last-message.txt --edit' to reuse it
transaction abort!
rollback completed
qrefresh interrupted while patch was popped! (revert --all, qpush to recover)
@@ -229,6 +230,7 @@
A file2
====
note: commit message saved in .hg/last-message.txt
+ note: use 'hg commit --logfile .hg/last-message.txt --edit' to reuse it
transaction abort!
rollback completed
qrefresh interrupted while patch was popped! (revert --all, qpush to recover)
--- a/tests/test-patchbomb-tls.t Tue Jun 23 16:07:18 2020 +0200
+++ b/tests/test-patchbomb-tls.t Thu Jun 25 10:32:51 2020 -0700
@@ -39,7 +39,7 @@
Our test cert is not signed by a trusted CA. It should fail to verify if
we are able to load CA certs:
-#if sslcontext defaultcacerts no-defaultcacertsloaded
+#if no-defaultcacertsloaded
$ try
this patch series consists of 1 patches.
@@ -49,41 +49,17 @@
[255]
#endif
-#if no-sslcontext defaultcacerts
- $ try
- this patch series consists of 1 patches.
-
-
- warning: connecting to localhost using legacy security technology (TLS 1.0); see https://mercurial-scm.org/wiki/SecureConnections for more info
- (using CA certificates from *; if you see this message, your Mercurial install is not properly configured; see https://mercurial-scm.org/wiki/SecureConnections for how to configure Mercurial to avoid this message) (glob) (?)
- (?i)abort: .*?certificate.verify.failed.* (re)
- [255]
-#endif
-
#if defaultcacertsloaded
$ try
this patch series consists of 1 patches.
- warning: connecting to localhost using legacy security technology (TLS 1.0); see https://mercurial-scm.org/wiki/SecureConnections for more info (?)
- (using CA certificates from *; if you see this message, your Mercurial install is not properly configured; see https://mercurial-scm.org/wiki/SecureConnections for how to configure Mercurial to avoid this message) (glob) (?)
(the full certificate chain may not be available locally; see "hg help debugssl") (windows !)
(?i)abort: .*?certificate.verify.failed.* (re)
[255]
#endif
-#if no-defaultcacerts
- $ try
- this patch series consists of 1 patches.
-
-
- (unable to load * certificates; see https://mercurial-scm.org/wiki/SecureConnections for how to configure Mercurial to avoid this message) (glob) (?)
- abort: localhost certificate error: no certificate received
- (set hostsecurity.localhost:certfingerprints=sha256:62:09:97:2f:97:60:e3:65:8f:12:5d:78:9e:35:a1:36:7a:65:4b:0e:9f:ac:db:c3:bc:6e:b6:a3:c0:16:e0:30 config setting or use --insecure to connect insecurely)
- [255]
-#endif
-
$ DISABLECACERTS="--config devel.disableloaddefaultcerts=true"
Without certificates:
@@ -94,7 +70,6 @@
(using smtps)
sending mail: smtp host localhost, port * (glob)
- warning: connecting to localhost using legacy security technology (TLS 1.0); see https://mercurial-scm.org/wiki/SecureConnections for more info (?)
(verifying remote certificate)
abort: unable to verify security of localhost (no loaded CA certificates); refusing to connect
(see https://mercurial-scm.org/wiki/SecureConnections for how to configure Mercurial to avoid this error or set hostsecurity.localhost:fingerprints=sha256:20:de:b3:ad:b4:cd:a5:42:f0:74:41:1c:a2:70:1e:da:6e:c0:5c:16:9e:e7:22:0f:f1:b7:e5:6e:e4:92:af:7e to trust this server)
@@ -108,7 +83,6 @@
(using smtps)
sending mail: smtp host localhost, port * (glob)
- warning: connecting to localhost using legacy security technology (TLS 1.0); see https://mercurial-scm.org/wiki/SecureConnections for more info (?)
(verifying remote certificate)
sending [PATCH] a ...
@@ -118,7 +92,6 @@
this patch series consists of 1 patches.
- warning: connecting to localhost using legacy security technology (TLS 1.0); see https://mercurial-scm.org/wiki/SecureConnections for more info (?)
(the full certificate chain may not be available locally; see "hg help debugssl") (windows !)
(?i)abort: .*?certificate.verify.failed.* (re)
[255]
--- a/tests/test-persistent-nodemap.t Tue Jun 23 16:07:18 2020 +0200
+++ b/tests/test-persistent-nodemap.t Thu Jun 25 10:32:51 2020 -0700
@@ -2,20 +2,34 @@
Test the persistent on-disk nodemap
===================================
- $ hg init test-repo
- $ cd test-repo
- $ cat << EOF >> .hg/hgrc
- > [experimental]
- > exp-persistent-nodemap=yes
+ $ cat << EOF >> $HGRCPATH
+ > [format]
+ > use-persistent-nodemap=yes
> [devel]
> persistent-nodemap=yes
> EOF
- $ hg debugbuilddag .+5000
+ $ hg init test-repo
+ $ cd test-repo
+ $ hg debugformat
+ format-variant repo
+ fncache: yes
+ dotencode: yes
+ generaldelta: yes
+ sparserevlog: yes
+ sidedata: no
+ persistent-nodemap: yes
+ copies-sdc: no
+ plain-cl-delta: yes
+ compression: zlib
+ compression-level: default
+ $ hg debugbuilddag .+5000 --new-file --config "storage.revlog.nodemap.mode=warn"
+ persistent nodemap in strict mode without efficient method (no-rust no-pure !)
+ persistent nodemap in strict mode without efficient method (no-rust no-pure !)
$ hg debugnodemap --metadata
uid: ???????????????? (glob)
tip-rev: 5000
- tip-node: 06ddac466af534d365326c13c3879f97caca3cb1
- data-length: 122880
+ tip-node: 6b02b8c7b96654c25e86ba69eda198d7e6ad8b3c
+ data-length: 121088
data-unused: 0
data-unused: 0.000%
$ f --size .hg/store/00changelog.n
@@ -31,53 +45,56 @@
#if rust
$ f --sha256 .hg/store/00changelog-*.nd
- .hg/store/00changelog-????????????????.nd: sha256=1e38e9ffaa45cad13f15c1a9880ad606f4241e8beea2f61b4d5365abadfb55f6 (glob)
+ .hg/store/00changelog-????????????????.nd: sha256=2e029d3200bd1a986b32784fc2ef1a3bd60dc331f025718bcf5ff44d93f026fd (glob)
+
+ $ f --sha256 .hg/store/00manifest-*.nd
+ .hg/store/00manifest-????????????????.nd: sha256=97117b1c064ea2f86664a124589e47db0e254e8d34739b5c5cc5bf31c9da2b51 (glob)
$ hg debugnodemap --dump-new | f --sha256 --size
- size=122880, sha256=1e38e9ffaa45cad13f15c1a9880ad606f4241e8beea2f61b4d5365abadfb55f6
+ size=121088, sha256=2e029d3200bd1a986b32784fc2ef1a3bd60dc331f025718bcf5ff44d93f026fd
$ hg debugnodemap --dump-disk | f --sha256 --bytes=256 --hexdump --size
- size=122880, sha256=1e38e9ffaa45cad13f15c1a9880ad606f4241e8beea2f61b4d5365abadfb55f6
- 0000: 00 00 00 76 00 00 01 65 00 00 00 95 00 00 01 34 |...v...e.......4|
- 0010: 00 00 00 19 00 00 01 69 00 00 00 ab 00 00 00 4b |.......i.......K|
- 0020: 00 00 00 07 00 00 01 4c 00 00 00 f8 00 00 00 8f |.......L........|
- 0030: 00 00 00 c0 00 00 00 a7 00 00 00 89 00 00 01 46 |...............F|
- 0040: 00 00 00 92 00 00 01 bc 00 00 00 71 00 00 00 ac |...........q....|
- 0050: 00 00 00 af 00 00 00 b4 00 00 00 34 00 00 01 ca |...........4....|
- 0060: 00 00 00 23 00 00 01 45 00 00 00 2d 00 00 00 b2 |...#...E...-....|
- 0070: 00 00 00 56 00 00 01 0f 00 00 00 4e 00 00 02 4c |...V.......N...L|
- 0080: 00 00 00 e7 00 00 00 cd 00 00 01 5b 00 00 00 78 |...........[...x|
- 0090: 00 00 00 e3 00 00 01 8e 00 00 00 4f 00 00 00 b1 |...........O....|
- 00a0: 00 00 00 30 00 00 00 11 00 00 00 25 00 00 00 d2 |...0.......%....|
- 00b0: 00 00 00 ec 00 00 00 69 00 00 01 2b 00 00 01 2e |.......i...+....|
- 00c0: 00 00 00 aa 00 00 00 15 00 00 00 3a 00 00 01 4e |...........:...N|
- 00d0: 00 00 00 4d 00 00 00 9d 00 00 00 8e 00 00 00 a4 |...M............|
- 00e0: 00 00 00 c3 00 00 00 eb 00 00 00 29 00 00 00 ad |...........)....|
- 00f0: 00 00 01 3a 00 00 01 32 00 00 00 04 00 00 00 53 |...:...2.......S|
+ size=121088, sha256=2e029d3200bd1a986b32784fc2ef1a3bd60dc331f025718bcf5ff44d93f026fd
+ 0000: 00 00 00 91 00 00 00 20 00 00 00 bb 00 00 00 e7 |....... ........|
+ 0010: 00 00 00 66 00 00 00 a1 00 00 01 13 00 00 01 22 |...f..........."|
+ 0020: 00 00 00 23 00 00 00 fc 00 00 00 ba 00 00 00 5e |...#...........^|
+ 0030: 00 00 00 df 00 00 01 4e 00 00 01 65 00 00 00 ab |.......N...e....|
+ 0040: 00 00 00 a9 00 00 00 95 00 00 00 73 00 00 00 38 |...........s...8|
+ 0050: 00 00 00 cc 00 00 00 92 00 00 00 90 00 00 00 69 |...............i|
+ 0060: 00 00 00 ec 00 00 00 8d 00 00 01 4f 00 00 00 12 |...........O....|
+ 0070: 00 00 02 0c 00 00 00 77 00 00 00 9c 00 00 00 8f |.......w........|
+ 0080: 00 00 00 d5 00 00 00 6b 00 00 00 48 00 00 00 b3 |.......k...H....|
+ 0090: 00 00 00 e5 00 00 00 b5 00 00 00 8e 00 00 00 ad |................|
+ 00a0: 00 00 00 7b 00 00 00 7c 00 00 00 0b 00 00 00 2b |...{...|.......+|
+ 00b0: 00 00 00 c6 00 00 00 1e 00 00 01 08 00 00 00 11 |................|
+ 00c0: 00 00 01 30 00 00 00 26 00 00 01 9c 00 00 00 35 |...0...&.......5|
+ 00d0: 00 00 00 b8 00 00 01 31 00 00 00 2c 00 00 00 55 |.......1...,...U|
+ 00e0: 00 00 00 8a 00 00 00 9a 00 00 00 0c 00 00 01 1e |................|
+ 00f0: 00 00 00 a4 00 00 00 83 00 00 00 c9 00 00 00 8c |................|
#else
$ f --sha256 .hg/store/00changelog-*.nd
- .hg/store/00changelog-????????????????.nd: sha256=b961925120e1c9bc345c199b2cc442abc477029fdece37ef9d99cbe59c0558b7 (glob)
+ .hg/store/00changelog-????????????????.nd: sha256=f544f5462ff46097432caf6d764091f6d8c46d6121be315ead8576d548c9dd79 (glob)
$ hg debugnodemap --dump-new | f --sha256 --size
- size=122880, sha256=b961925120e1c9bc345c199b2cc442abc477029fdece37ef9d99cbe59c0558b7
+ size=121088, sha256=f544f5462ff46097432caf6d764091f6d8c46d6121be315ead8576d548c9dd79
$ hg debugnodemap --dump-disk | f --sha256 --bytes=256 --hexdump --size
- size=122880, sha256=b961925120e1c9bc345c199b2cc442abc477029fdece37ef9d99cbe59c0558b7
+ size=121088, sha256=f544f5462ff46097432caf6d764091f6d8c46d6121be315ead8576d548c9dd79
0000: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff |................|
- 0010: ff ff ff ff ff ff ff ff ff ff fa c2 ff ff ff ff |................|
- 0020: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff |................|
- 0030: ff ff ff ff ff ff ed b3 ff ff ff ff ff ff ff ff |................|
- 0040: ff ff ff ff ff ff ee 34 00 00 00 00 ff ff ff ff |.......4........|
- 0050: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff |................|
- 0060: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff |................|
+ 0010: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff |................|
+ 0020: ff ff ff ff ff ff f5 06 ff ff ff ff ff ff f3 e7 |................|
+ 0030: ff ff ef ca ff ff ff ff ff ff ff ff ff ff ff ff |................|
+ 0040: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff |................|
+ 0050: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ed 08 |................|
+ 0060: ff ff ed 66 ff ff ff ff ff ff ff ff ff ff ff ff |...f............|
0070: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff |................|
- 0080: ff ff ff ff ff ff f8 50 ff ff ff ff ff ff ff ff |.......P........|
- 0090: ff ff ff ff ff ff ff ff ff ff ec c7 ff ff ff ff |................|
- 00a0: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff |................|
- 00b0: ff ff ff ff ff ff fa be ff ff f2 fc ff ff ff ff |................|
- 00c0: ff ff ff ff ff ff ef ea ff ff ff ff ff ff f9 17 |................|
+ 0080: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff |................|
+ 0090: ff ff ff ff ff ff ff ff ff ff ff ff ff ff f6 ed |................|
+ 00a0: ff ff ff ff ff ff fe 61 ff ff ff ff ff ff ff ff |.......a........|
+ 00b0: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff |................|
+ 00c0: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff |................|
00d0: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff |................|
- 00e0: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff |................|
- 00f0: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff |................|
+ 00e0: ff ff ff ff ff ff ff ff ff ff ff ff ff ff f1 02 |................|
+ 00f0: ff ff ff ff ff ff ed 1b ff ff ff ff ff ff ff ff |................|
#endif
@@ -88,27 +105,38 @@
add a new commit
$ hg up
- 0 files updated, 0 files merged, 0 files removed, 0 files unresolved
+ 5001 files updated, 0 files merged, 0 files removed, 0 files unresolved
$ echo foo > foo
$ hg add foo
+
+#if no-pure no-rust
+
+ $ hg ci -m 'foo' --config "storage.revlog.nodemap.mode=strict"
+ transaction abort!
+ rollback completed
+ abort: persistent nodemap in strict mode without efficient method
+ [255]
+
+#endif
+
$ hg ci -m 'foo'
#if no-pure no-rust
$ hg debugnodemap --metadata
uid: ???????????????? (glob)
tip-rev: 5001
- tip-node: 2dd9b5258caa46469ff07d4a3da1eb3529a51f49
- data-length: 122880
+ tip-node: 16395c3cf7e231394735e6b1717823ada303fb0c
+ data-length: 121088
data-unused: 0
data-unused: 0.000%
#else
$ hg debugnodemap --metadata
uid: ???????????????? (glob)
tip-rev: 5001
- tip-node: 2dd9b5258caa46469ff07d4a3da1eb3529a51f49
- data-length: 123072
- data-unused: 192
- data-unused: 0.156%
+ tip-node: 16395c3cf7e231394735e6b1717823ada303fb0c
+ data-length: 121344
+ data-unused: 256
+ data-unused: 0.211%
#endif
$ f --size .hg/store/00changelog.n
@@ -118,17 +146,17 @@
#if pure
$ f --sha256 .hg/store/00changelog-*.nd --size
- .hg/store/00changelog-????????????????.nd: size=123072, sha256=136472751566c8198ff09e306a7d2f9bd18bd32298d614752b73da4d6df23340 (glob)
+ .hg/store/00changelog-????????????????.nd: size=121344, sha256=cce54c5da5bde3ad72a4938673ed4064c86231b9c64376b082b163fdb20f8f66 (glob)
#endif
#if rust
$ f --sha256 .hg/store/00changelog-*.nd --size
- .hg/store/00changelog-????????????????.nd: size=123072, sha256=ccc8a43310ace13812fcc648683e259346754ef934c12dd238cf9b7fadfe9a4b (glob)
+ .hg/store/00changelog-????????????????.nd: size=121344, sha256=952b042fcf614ceb37b542b1b723e04f18f83efe99bee4e0f5ccd232ef470e58 (glob)
#endif
#if no-pure no-rust
$ f --sha256 .hg/store/00changelog-*.nd --size
- .hg/store/00changelog-????????????????.nd: size=122880, sha256=bfafebd751c4f6d116a76a37a1dee2a251747affe7efbcc4f4842ccc746d4db9 (glob)
+ .hg/store/00changelog-????????????????.nd: size=121088, sha256=df7c06a035b96cb28c7287d349d603baef43240be7736fe34eea419a49702e17 (glob)
#endif
$ hg debugnodemap --check
@@ -140,12 +168,12 @@
$ echo bar > bar
$ hg add bar
- $ hg ci -m 'bar' --config experimental.exp-persistent-nodemap.mmap=no
+ $ hg ci -m 'bar' --config storage.revlog.nodemap.mmap=no
- $ hg debugnodemap --check --config experimental.exp-persistent-nodemap.mmap=yes
+ $ hg debugnodemap --check --config storage.revlog.nodemap.mmap=yes
revision in index: 5003
revision in nodemap: 5003
- $ hg debugnodemap --check --config experimental.exp-persistent-nodemap.mmap=no
+ $ hg debugnodemap --check --config storage.revlog.nodemap.mmap=no
revision in index: 5003
revision in nodemap: 5003
@@ -154,34 +182,34 @@
$ hg debugnodemap --metadata
uid: ???????????????? (glob)
tip-rev: 5002
- tip-node: 6ce944fafcee85af91f29ea5b51654cc6101ad7e
- data-length: 123328
- data-unused: 384
- data-unused: 0.311%
+ tip-node: 880b18d239dfa9f632413a2071bfdbcc4806a4fd
+ data-length: 121600
+ data-unused: 512
+ data-unused: 0.421%
$ f --sha256 .hg/store/00changelog-*.nd --size
- .hg/store/00changelog-????????????????.nd: size=123328, sha256=10d26e9776b6596af0f89143a54eba8cc581e929c38242a02a7b0760698c6c70 (glob)
+ .hg/store/00changelog-????????????????.nd: size=121600, sha256=def52503d049ccb823974af313a98a935319ba61f40f3aa06a8be4d35c215054 (glob)
#endif
#if rust
$ hg debugnodemap --metadata
uid: ???????????????? (glob)
tip-rev: 5002
- tip-node: 6ce944fafcee85af91f29ea5b51654cc6101ad7e
- data-length: 123328
- data-unused: 384
- data-unused: 0.311%
+ tip-node: 880b18d239dfa9f632413a2071bfdbcc4806a4fd
+ data-length: 121600
+ data-unused: 512
+ data-unused: 0.421%
$ f --sha256 .hg/store/00changelog-*.nd --size
- .hg/store/00changelog-????????????????.nd: size=123328, sha256=081eec9eb6708f2bf085d939b4c97bc0b6762bc8336bc4b93838f7fffa1516bf (glob)
+ .hg/store/00changelog-????????????????.nd: size=121600, sha256=dacf5b5f1d4585fee7527d0e67cad5b1ba0930e6a0928f650f779aefb04ce3fb (glob)
#endif
#if no-pure no-rust
$ hg debugnodemap --metadata
uid: ???????????????? (glob)
tip-rev: 5002
- tip-node: 6ce944fafcee85af91f29ea5b51654cc6101ad7e
- data-length: 122944
+ tip-node: 880b18d239dfa9f632413a2071bfdbcc4806a4fd
+ data-length: 121088
data-unused: 0
data-unused: 0.000%
$ f --sha256 .hg/store/00changelog-*.nd --size
- .hg/store/00changelog-????????????????.nd: size=122944, sha256=755976b22b64ab680401b45395953504e64e7fa8c31ac570f58dee21e15f9bc0 (glob)
+ .hg/store/00changelog-????????????????.nd: size=121088, sha256=59fcede3e3cc587755916ceed29e3c33748cd1aa7d2f91828ac83e7979d935e8 (glob)
#endif
Test force warming the cache
@@ -193,16 +221,16 @@
$ hg debugnodemap --metadata
uid: ???????????????? (glob)
tip-rev: 5002
- tip-node: 6ce944fafcee85af91f29ea5b51654cc6101ad7e
- data-length: 122944
+ tip-node: 880b18d239dfa9f632413a2071bfdbcc4806a4fd
+ data-length: 121088
data-unused: 0
data-unused: 0.000%
#else
$ hg debugnodemap --metadata
uid: ???????????????? (glob)
tip-rev: 5002
- tip-node: 6ce944fafcee85af91f29ea5b51654cc6101ad7e
- data-length: 122944
+ tip-node: 880b18d239dfa9f632413a2071bfdbcc4806a4fd
+ data-length: 121088
data-unused: 0
data-unused: 0.000%
#endif
@@ -231,22 +259,22 @@
$ hg debugnodemap --metadata
uid: ???????????????? (glob)
tip-rev: 5003
- tip-node: 5c049e9c4a4af159bdcd65dce1b6bf303a0da6cf
- data-length: 123200 (pure !)
- data-length: 123200 (rust !)
- data-length: 122944 (no-rust no-pure !)
- data-unused: 256 (pure !)
- data-unused: 256 (rust !)
+ tip-node: c9329770f979ade2d16912267c38ba5f82fd37b3
+ data-length: 121344 (pure !)
+ data-length: 121344 (rust !)
+ data-length: 121152 (no-rust no-pure !)
+ data-unused: 192 (pure !)
+ data-unused: 192 (rust !)
data-unused: 0 (no-rust no-pure !)
- data-unused: 0.208% (pure !)
- data-unused: 0.208% (rust !)
+ data-unused: 0.158% (pure !)
+ data-unused: 0.158% (rust !)
data-unused: 0.000% (no-rust no-pure !)
$ cp -f ../tmp-copies/* .hg/store/
$ hg debugnodemap --metadata
uid: ???????????????? (glob)
tip-rev: 5002
- tip-node: 6ce944fafcee85af91f29ea5b51654cc6101ad7e
- data-length: 122944
+ tip-node: 880b18d239dfa9f632413a2071bfdbcc4806a4fd
+ data-length: 121088
data-unused: 0
data-unused: 0.000%
$ hg log -r "$NODE" -T '{rev}\n'
@@ -260,7 +288,7 @@
compatible with the persistent nodemap. We need to detect that.
$ hg up "$NODE~5"
- 0 files updated, 0 files merged, 2 files removed, 0 files unresolved
+ 0 files updated, 0 files merged, 4 files removed, 0 files unresolved
$ echo bar > babar
$ hg add babar
$ hg ci -m 'babar'
@@ -276,23 +304,23 @@
$ hg debugnodemap --metadata
uid: ???????????????? (glob)
tip-rev: 5002
- tip-node: 42bf3068c7ddfdfded53c4eb11d02266faeebfee
- data-length: 123456 (pure !)
- data-length: 123008 (rust !)
- data-length: 123008 (no-pure no-rust !)
+ tip-node: b355ef8adce0949b8bdf6afc72ca853740d65944
+ data-length: 121536 (pure !)
+ data-length: 121088 (rust !)
+ data-length: 121088 (no-pure no-rust !)
data-unused: 448 (pure !)
data-unused: 0 (rust !)
data-unused: 0 (no-pure no-rust !)
data-unused: 0.000% (rust !)
- data-unused: 0.363% (pure !)
+ data-unused: 0.369% (pure !)
data-unused: 0.000% (no-pure no-rust !)
$ cp -f ../tmp-copies/* .hg/store/
$ hg debugnodemap --metadata
uid: ???????????????? (glob)
tip-rev: 5002
- tip-node: 6ce944fafcee85af91f29ea5b51654cc6101ad7e
- data-length: 122944
+ tip-node: 880b18d239dfa9f632413a2071bfdbcc4806a4fd
+ data-length: 121088
data-unused: 0
data-unused: 0.000%
$ hg log -r "$OTHERNODE" -T '{rev}\n'
@@ -309,36 +337,36 @@
$ hg debugnodemap --metadata
uid: ???????????????? (glob)
tip-rev: 5003
- tip-node: c91af76d172f1053cca41b83f7c2e4e514fe2bcf
- data-length: 123008
+ tip-node: a52c5079765b5865d97b993b303a18740113bbb2
+ data-length: 121088
data-unused: 0
data-unused: 0.000%
$ echo babar2 > babar
$ hg ci -m 'babar2' --config "hooks.pretxnclose.nodemap-test=hg debugnodemap --metadata"
uid: ???????????????? (glob)
tip-rev: 5004
- tip-node: ba87cd9559559e4b91b28cb140d003985315e031
- data-length: 123328 (pure !)
- data-length: 123328 (rust !)
- data-length: 123136 (no-pure no-rust !)
+ tip-node: 2f5fb1c06a16834c5679d672e90da7c5f3b1a984
+ data-length: 121280 (pure !)
+ data-length: 121280 (rust !)
+ data-length: 121088 (no-pure no-rust !)
data-unused: 192 (pure !)
data-unused: 192 (rust !)
data-unused: 0 (no-pure no-rust !)
- data-unused: 0.156% (pure !)
- data-unused: 0.156% (rust !)
+ data-unused: 0.158% (pure !)
+ data-unused: 0.158% (rust !)
data-unused: 0.000% (no-pure no-rust !)
$ hg debugnodemap --metadata
uid: ???????????????? (glob)
tip-rev: 5004
- tip-node: ba87cd9559559e4b91b28cb140d003985315e031
- data-length: 123328 (pure !)
- data-length: 123328 (rust !)
- data-length: 123136 (no-pure no-rust !)
+ tip-node: 2f5fb1c06a16834c5679d672e90da7c5f3b1a984
+ data-length: 121280 (pure !)
+ data-length: 121280 (rust !)
+ data-length: 121088 (no-pure no-rust !)
data-unused: 192 (pure !)
data-unused: 192 (rust !)
data-unused: 0 (no-pure no-rust !)
- data-unused: 0.156% (pure !)
- data-unused: 0.156% (rust !)
+ data-unused: 0.158% (pure !)
+ data-unused: 0.158% (rust !)
data-unused: 0.000% (no-pure no-rust !)
Another process does not see the pending nodemap content during run.
@@ -356,28 +384,28 @@
> wait-on-file 20 sync-txn-close sync-repo-read
uid: ???????????????? (glob)
tip-rev: 5004
- tip-node: ba87cd9559559e4b91b28cb140d003985315e031
- data-length: 123328 (pure !)
- data-length: 123328 (rust !)
- data-length: 123136 (no-pure no-rust !)
+ tip-node: 2f5fb1c06a16834c5679d672e90da7c5f3b1a984
+ data-length: 121280 (pure !)
+ data-length: 121280 (rust !)
+ data-length: 121088 (no-pure no-rust !)
data-unused: 192 (pure !)
data-unused: 192 (rust !)
data-unused: 0 (no-pure no-rust !)
- data-unused: 0.156% (pure !)
- data-unused: 0.156% (rust !)
+ data-unused: 0.158% (pure !)
+ data-unused: 0.158% (rust !)
data-unused: 0.000% (no-pure no-rust !)
$ hg debugnodemap --metadata
uid: ???????????????? (glob)
tip-rev: 5005
- tip-node: bae4d45c759e30f1cb1a40e1382cf0e0414154db
- data-length: 123584 (pure !)
- data-length: 123584 (rust !)
- data-length: 123136 (no-pure no-rust !)
+ tip-node: 90d5d3ba2fc47db50f712570487cb261a68c8ffe
+ data-length: 121536 (pure !)
+ data-length: 121536 (rust !)
+ data-length: 121088 (no-pure no-rust !)
data-unused: 448 (pure !)
data-unused: 448 (rust !)
data-unused: 0 (no-pure no-rust !)
- data-unused: 0.363% (pure !)
- data-unused: 0.363% (rust !)
+ data-unused: 0.369% (pure !)
+ data-unused: 0.369% (rust !)
data-unused: 0.000% (no-pure no-rust !)
$ cat output.txt
@@ -386,9 +414,9 @@
$ echo plakfe > a
$ f --size --sha256 .hg/store/00changelog-*.nd
- .hg/store/00changelog-????????????????.nd: size=123584, sha256=8c6cef6fd3d3fac291968793ee19a4be6d0b8375e9508bd5c7d4a8879e8df180 (glob) (pure !)
- .hg/store/00changelog-????????????????.nd: size=123584, sha256=eb9e9a4bcafdb5e1344bc8a0cbb3288b2106413b8efae6265fb8a7973d7e97f9 (glob) (rust !)
- .hg/store/00changelog-????????????????.nd: size=123136, sha256=4f504f5a834db3811ced50ab3e9e80bcae3581bb0f9b13a7a9f94b7fc34bcebe (glob) (no-pure no-rust !)
+ .hg/store/00changelog-????????????????.nd: size=121536, sha256=bb414468d225cf52d69132e1237afba34d4346ee2eb81b505027e6197b107f03 (glob) (pure !)
+ .hg/store/00changelog-????????????????.nd: size=121536, sha256=909ac727bc4d1c0fda5f7bff3c620c98bd4a2967c143405a1503439e33b377da (glob) (rust !)
+ .hg/store/00changelog-????????????????.nd: size=121088, sha256=342d36d30d86dde67d3cb6c002606c4a75bcad665595d941493845066d9c8ee0 (glob) (no-pure no-rust !)
$ hg ci -m a3 --config "extensions.abort=$RUNTESTDIR/testlib/crash_transaction_late.py"
transaction abort!
rollback completed
@@ -397,20 +425,20 @@
$ hg debugnodemap --metadata
uid: ???????????????? (glob)
tip-rev: 5005
- tip-node: bae4d45c759e30f1cb1a40e1382cf0e0414154db
- data-length: 123584 (pure !)
- data-length: 123584 (rust !)
- data-length: 123136 (no-pure no-rust !)
+ tip-node: 90d5d3ba2fc47db50f712570487cb261a68c8ffe
+ data-length: 121536 (pure !)
+ data-length: 121536 (rust !)
+ data-length: 121088 (no-pure no-rust !)
data-unused: 448 (pure !)
data-unused: 448 (rust !)
data-unused: 0 (no-pure no-rust !)
- data-unused: 0.363% (pure !)
- data-unused: 0.363% (rust !)
+ data-unused: 0.369% (pure !)
+ data-unused: 0.369% (rust !)
data-unused: 0.000% (no-pure no-rust !)
$ f --size --sha256 .hg/store/00changelog-*.nd
.hg/store/00changelog-????????????????.nd: size=123584, sha256=8c6cef6fd3d3fac291968793ee19a4be6d0b8375e9508bd5c7d4a8879e8df180 (glob) (pure !)
.hg/store/00changelog-????????????????.nd: size=123584, sha256=eb9e9a4bcafdb5e1344bc8a0cbb3288b2106413b8efae6265fb8a7973d7e97f9 (glob) (rust !)
- .hg/store/00changelog-????????????????.nd: size=123136, sha256=4f504f5a834db3811ced50ab3e9e80bcae3581bb0f9b13a7a9f94b7fc34bcebe (glob) (no-pure no-rust !)
+ .hg/store/00changelog-????????????????.nd: size=121088, sha256=342d36d30d86dde67d3cb6c002606c4a75bcad665595d941493845066d9c8ee0 (glob) (no-pure no-rust !)
Check that removing content does not confuse the nodemap
--------------------------------------------------------
@@ -423,7 +451,7 @@
repository tip rolled back to revision 5005 (undo commit)
working directory now based on revision 5005
$ hg id -r .
- bae4d45c759e tip
+ 90d5d3ba2fc4 tip
removing data with strip
@@ -432,4 +460,100 @@
$ hg --config extensions.strip= strip -r . --no-backup
1 files updated, 0 files merged, 0 files removed, 0 files unresolved
$ hg id -r . --traceback
- bae4d45c759e tip
+ 90d5d3ba2fc4 tip
+
+Test upgrade / downgrade
+========================
+
+downgrading
+
+ $ cat << EOF >> .hg/hgrc
+ > [format]
+ > use-persistent-nodemap=no
+ > EOF
+ $ hg debugformat -v
+ format-variant repo config default
+ fncache: yes yes yes
+ dotencode: yes yes yes
+ generaldelta: yes yes yes
+ sparserevlog: yes yes yes
+ sidedata: no no no
+ persistent-nodemap: yes no no
+ copies-sdc: no no no
+ plain-cl-delta: yes yes yes
+ compression: zlib zlib zlib
+ compression-level: default default default
+ $ hg debugupgraderepo --run --no-backup --quiet
+ upgrade will perform the following actions:
+
+ requirements
+ preserved: dotencode, fncache, generaldelta, revlogv1, sparserevlog, store
+ removed: persistent-nodemap
+
+ $ ls -1 .hg/store/ | egrep '00(changelog|manifest)(\.n|-.*\.nd)'
+ [1]
+ $ hg debugnodemap --metadata
+
+
+upgrading
+
+ $ cat << EOF >> .hg/hgrc
+ > [format]
+ > use-persistent-nodemap=yes
+ > EOF
+ $ hg debugformat -v
+ format-variant repo config default
+ fncache: yes yes yes
+ dotencode: yes yes yes
+ generaldelta: yes yes yes
+ sparserevlog: yes yes yes
+ sidedata: no no no
+ persistent-nodemap: no yes no
+ copies-sdc: no no no
+ plain-cl-delta: yes yes yes
+ compression: zlib zlib zlib
+ compression-level: default default default
+ $ hg debugupgraderepo --run --no-backup --quiet
+ upgrade will perform the following actions:
+
+ requirements
+ preserved: dotencode, fncache, generaldelta, revlogv1, sparserevlog, store
+ added: persistent-nodemap
+
+ $ ls -1 .hg/store/ | egrep '00(changelog|manifest)(\.n|-.*\.nd)'
+ 00changelog-*.nd (glob)
+ 00changelog.n
+ 00manifest-*.nd (glob)
+ 00manifest.n
+
+ $ hg debugnodemap --metadata
+ uid: * (glob)
+ tip-rev: 5005
+ tip-node: 90d5d3ba2fc47db50f712570487cb261a68c8ffe
+ data-length: 121088
+ data-unused: 0
+ data-unused: 0.000%
+
+Running unrelated upgrade
+
+ $ hg debugupgraderepo --run --no-backup --quiet --optimize re-delta-all
+ upgrade will perform the following actions:
+
+ requirements
+ preserved: dotencode, fncache, generaldelta, persistent-nodemap, revlogv1, sparserevlog, store
+
+ optimisations: re-delta-all
+
+ $ ls -1 .hg/store/ | egrep '00(changelog|manifest)(\.n|-.*\.nd)'
+ 00changelog-*.nd (glob)
+ 00changelog.n
+ 00manifest-*.nd (glob)
+ 00manifest.n
+
+ $ hg debugnodemap --metadata
+ uid: * (glob)
+ tip-rev: 5005
+ tip-node: 90d5d3ba2fc47db50f712570487cb261a68c8ffe
+ data-length: 121088
+ data-unused: 0
+ data-unused: 0.000%
--- a/tests/test-phabricator.t Tue Jun 23 16:07:18 2020 +0200
+++ b/tests/test-phabricator.t Thu Jun 25 10:32:51 2020 -0700
@@ -670,7 +670,7 @@
NEW - a959a3f69d8d: one: first commit to review
NEW - 24a4438154ba: two: second commit to review
NEW - d235829e802c: 3: a commit with no detailed message
- Send the above changes to https://phab.mercurial-scm.org/ (yn)? y
+ Send the above changes to https://phab.mercurial-scm.org/ (Y/n)? y
D8387 - created - a959a3f69d8d: one: first commit to review
D8387 - created - 24a4438154ba: two: second commit to review
D8387 - created - d235829e802c: 3: a commit with no detailed message
@@ -734,7 +734,7 @@
D8387 - 602c4e738243: one: first commit to review
D8387 - 0124e5474c88: two: second commit to review
D8387 - e4edb1fe3565: 3: a commit with no detailed message
- Send the above changes to https://phab.mercurial-scm.org/ (yn)? y
+ Send the above changes to https://phab.mercurial-scm.org/ (Y/n)? y
D8387 - updated - 602c4e738243: one: first commit to review
D8387 - updated - 0124e5474c88: two: second commit to review
D8387 - updated - e4edb1fe3565: 3: a commit with no detailed message
--- a/tests/test-resolve.t Tue Jun 23 16:07:18 2020 +0200
+++ b/tests/test-resolve.t Thu Jun 25 10:32:51 2020 -0700
@@ -92,7 +92,7 @@
$ cat > $TESTTMP/markdriver.py << EOF
> '''mark and unmark files as driver-resolved'''
> from mercurial import (
- > merge,
+ > mergestate,
> pycompat,
> registrar,
> scmutil,
@@ -106,7 +106,7 @@
> wlock = repo.wlock()
> opts = pycompat.byteskwargs(opts)
> try:
- > ms = merge.mergestate.read(repo)
+ > ms = mergestate.mergestate.read(repo)
> m = scmutil.match(repo[None], pats, opts)
> for f in ms:
> if not m(f):
--- a/tests/test-revset.t Tue Jun 23 16:07:18 2020 +0200
+++ b/tests/test-revset.t Thu Jun 25 10:32:51 2020 -0700
@@ -1864,12 +1864,12 @@
$ log 'id(2)'
$ log 'id(8)'
3
- $ hg --config experimental.revisions.prefixhexnode=yes log --template '{rev}\n' -r 'id(x8)'
+ $ hg log --template '{rev}\n' -r 'id(x8)'
3
- $ hg --config experimental.revisions.prefixhexnode=yes log --template '{rev}\n' -r 'x8'
+ $ hg log --template '{rev}\n' -r 'x8'
3
- $ hg --config experimental.revisions.prefixhexnode=yes log --template '{rev}\n' -r 'id(x)'
- $ hg --config experimental.revisions.prefixhexnode=yes log --template '{rev}\n' -r 'x'
+ $ hg log --template '{rev}\n' -r 'id(x)'
+ $ hg log --template '{rev}\n' -r 'x'
abort: 00changelog.i@: ambiguous identifier!
[255]
$ log 'id(23268)'
--- a/tests/test-rollback.t Tue Jun 23 16:07:18 2020 +0200
+++ b/tests/test-rollback.t Thu Jun 25 10:32:51 2020 -0700
@@ -116,6 +116,7 @@
transaction abort!
rollback completed
note: commit message saved in .hg/last-message.txt
+ note: use 'hg commit --logfile .hg/last-message.txt --edit' to reuse it
abort: pretxncommit hook exited with status * (glob)
[255]
$ cat .hg/last-message.txt
--- a/tests/test-run-tests.t Tue Jun 23 16:07:18 2020 +0200
+++ b/tests/test-run-tests.t Thu Jun 25 10:32:51 2020 -0700
@@ -747,7 +747,7 @@
This is a noop statement so that
this test is still more bytes than success.
pad pad pad pad............................................................
- Accept this change? [n]
+ Accept this change? [y/N]
ERROR: test-failure.t output changed
!.
Failed test-failure.t: output changed
@@ -772,7 +772,7 @@
$ echo 'n' | rt -i --view echo
running 2 tests using 1 parallel processes
$TESTTMP/test-failure.t $TESTTMP/test-failure.t.err
- Accept this change? [n]* (glob)
+ Accept this change? [y/N]* (glob)
ERROR: test-failure.t output changed
!.
Failed test-failure.t: output changed
@@ -823,7 +823,7 @@
+ saved backup bundle to $TESTTMP/foo.hg
$ echo 'saved backup bundle to $TESTTMP/foo.hg'
saved backup bundle to $TESTTMP/*.hg (glob)
- Accept this change? [n] ..
+ Accept this change? [y/N] ..
# Ran 2 tests, 0 skipped, 0 failed.
$ sed -e 's,(glob)$,&<,g' test-failure.t
@@ -900,7 +900,7 @@
#endif
#if b
$ echo 2
- Accept this change? [n] .
+ Accept this change? [y/N] .
--- $TESTTMP/test-cases.t
+++ $TESTTMP/test-cases.t#b.err
@@ -5,4 +5,5 @@
@@ -909,7 +909,7 @@
$ echo 2
+ 2
#endif
- Accept this change? [n] .
+ Accept this change? [y/N] .
# Ran 2 tests, 0 skipped, 0 failed.
$ cat test-cases.t
@@ -1285,7 +1285,7 @@
This is a noop statement so that
this test is still more bytes than success.
pad pad pad pad............................................................
- Accept this change? [n] ..s
+ Accept this change? [y/N] ..s
Skipped test-skip.t: missing feature: nail clipper
# Ran 2 tests, 1 skipped, 0 failed.
--- a/tests/test-sidedata.t Tue Jun 23 16:07:18 2020 +0200
+++ b/tests/test-sidedata.t Thu Jun 25 10:32:51 2020 -0700
@@ -50,27 +50,29 @@
$ hg init up-no-side-data --config format.exp-use-side-data=no
$ hg debugformat -v -R up-no-side-data
- format-variant repo config default
- fncache: yes yes yes
- dotencode: yes yes yes
- generaldelta: yes yes yes
- sparserevlog: yes yes yes
- sidedata: no no no
- copies-sdc: no no no
- plain-cl-delta: yes yes yes
- compression: zlib zlib zlib
- compression-level: default default default
+ format-variant repo config default
+ fncache: yes yes yes
+ dotencode: yes yes yes
+ generaldelta: yes yes yes
+ sparserevlog: yes yes yes
+ sidedata: no no no
+ persistent-nodemap: no no no
+ copies-sdc: no no no
+ plain-cl-delta: yes yes yes
+ compression: zlib zlib zlib
+ compression-level: default default default
$ hg debugformat -v -R up-no-side-data --config format.exp-use-side-data=yes
- format-variant repo config default
- fncache: yes yes yes
- dotencode: yes yes yes
- generaldelta: yes yes yes
- sparserevlog: yes yes yes
- sidedata: no yes no
- copies-sdc: no no no
- plain-cl-delta: yes yes yes
- compression: zlib zlib zlib
- compression-level: default default default
+ format-variant repo config default
+ fncache: yes yes yes
+ dotencode: yes yes yes
+ generaldelta: yes yes yes
+ sparserevlog: yes yes yes
+ sidedata: no yes no
+ persistent-nodemap: no no no
+ copies-sdc: no no no
+ plain-cl-delta: yes yes yes
+ compression: zlib zlib zlib
+ compression-level: default default default
$ hg debugupgraderepo -R up-no-side-data --config format.exp-use-side-data=yes > /dev/null
Check that we can downgrade from sidedata
@@ -78,25 +80,27 @@
$ hg init up-side-data --config format.exp-use-side-data=yes
$ hg debugformat -v -R up-side-data
- format-variant repo config default
- fncache: yes yes yes
- dotencode: yes yes yes
- generaldelta: yes yes yes
- sparserevlog: yes yes yes
- sidedata: yes no no
- copies-sdc: no no no
- plain-cl-delta: yes yes yes
- compression: zlib zlib zlib
- compression-level: default default default
+ format-variant repo config default
+ fncache: yes yes yes
+ dotencode: yes yes yes
+ generaldelta: yes yes yes
+ sparserevlog: yes yes yes
+ sidedata: yes no no
+ persistent-nodemap: no no no
+ copies-sdc: no no no
+ plain-cl-delta: yes yes yes
+ compression: zlib zlib zlib
+ compression-level: default default default
$ hg debugformat -v -R up-side-data --config format.exp-use-side-data=no
- format-variant repo config default
- fncache: yes yes yes
- dotencode: yes yes yes
- generaldelta: yes yes yes
- sparserevlog: yes yes yes
- sidedata: yes no no
- copies-sdc: no no no
- plain-cl-delta: yes yes yes
- compression: zlib zlib zlib
- compression-level: default default default
+ format-variant repo config default
+ fncache: yes yes yes
+ dotencode: yes yes yes
+ generaldelta: yes yes yes
+ sparserevlog: yes yes yes
+ sidedata: yes no no
+ persistent-nodemap: no no no
+ copies-sdc: no no no
+ plain-cl-delta: yes yes yes
+ compression: zlib zlib zlib
+ compression-level: default default default
$ hg debugupgraderepo -R up-side-data --config format.exp-use-side-data=no > /dev/null
--- a/tests/test-ssh.t Tue Jun 23 16:07:18 2020 +0200
+++ b/tests/test-ssh.t Thu Jun 25 10:32:51 2020 -0700
@@ -46,6 +46,10 @@
remote: abort: repository nonexistent not found!
abort: no suitable response from remote hg!
[255]
+ $ hg clone -q -e "\"$PYTHON\" \"$TESTDIR/dummyssh\"" ssh://user@dummy/nonexistent local
+ remote: abort: repository nonexistent not found!
+ abort: no suitable response from remote hg!
+ [255]
non-existent absolute path
@@ -553,6 +557,7 @@
$ cat dummylog
Got arguments 1:user@dummy 2:hg -R nonexistent serve --stdio
+ Got arguments 1:user@dummy 2:hg -R nonexistent serve --stdio
Got arguments 1:user@dummy 2:hg -R $TESTTMP/nonexistent serve --stdio
Got arguments 1:user@dummy 2:hg -R remote serve --stdio
Got arguments 1:user@dummy 2:hg -R local-stream serve --stdio (no-reposimplestore !)
--- a/tests/test-tag.t Tue Jun 23 16:07:18 2020 +0200
+++ b/tests/test-tag.t Thu Jun 25 10:32:51 2020 -0700
@@ -323,6 +323,7 @@
transaction abort!
rollback completed
note: commit message saved in .hg/last-message.txt
+ note: use 'hg commit --logfile .hg/last-message.txt --edit' to reuse it
abort: pretxncommit.unexpectedabort hook exited with status 1
[255]
$ cat .hg/last-message.txt
--- a/tests/test-upgrade-repo.t Tue Jun 23 16:07:18 2020 +0200
+++ b/tests/test-upgrade-repo.t Thu Jun 25 10:32:51 2020 -0700
@@ -52,49 +52,53 @@
$ hg init empty
$ cd empty
$ hg debugformat
- format-variant repo
- fncache: yes
- dotencode: yes
- generaldelta: yes
- sparserevlog: yes
- sidedata: no
- copies-sdc: no
- plain-cl-delta: yes
- compression: zlib
- compression-level: default
+ format-variant repo
+ fncache: yes
+ dotencode: yes
+ generaldelta: yes
+ sparserevlog: yes
+ sidedata: no
+ persistent-nodemap: no
+ copies-sdc: no
+ plain-cl-delta: yes
+ compression: zlib
+ compression-level: default
$ hg debugformat --verbose
- format-variant repo config default
- fncache: yes yes yes
- dotencode: yes yes yes
- generaldelta: yes yes yes
- sparserevlog: yes yes yes
- sidedata: no no no
- copies-sdc: no no no
- plain-cl-delta: yes yes yes
- compression: zlib zlib zlib
- compression-level: default default default
+ format-variant repo config default
+ fncache: yes yes yes
+ dotencode: yes yes yes
+ generaldelta: yes yes yes
+ sparserevlog: yes yes yes
+ sidedata: no no no
+ persistent-nodemap: no no no
+ copies-sdc: no no no
+ plain-cl-delta: yes yes yes
+ compression: zlib zlib zlib
+ compression-level: default default default
$ hg debugformat --verbose --config format.usefncache=no
- format-variant repo config default
- fncache: yes no yes
- dotencode: yes no yes
- generaldelta: yes yes yes
- sparserevlog: yes yes yes
- sidedata: no no no
- copies-sdc: no no no
- plain-cl-delta: yes yes yes
- compression: zlib zlib zlib
- compression-level: default default default
+ format-variant repo config default
+ fncache: yes no yes
+ dotencode: yes no yes
+ generaldelta: yes yes yes
+ sparserevlog: yes yes yes
+ sidedata: no no no
+ persistent-nodemap: no no no
+ copies-sdc: no no no
+ plain-cl-delta: yes yes yes
+ compression: zlib zlib zlib
+ compression-level: default default default
$ hg debugformat --verbose --config format.usefncache=no --color=debug
- format-variant repo config default
- [formatvariant.name.mismatchconfig|fncache: ][formatvariant.repo.mismatchconfig| yes][formatvariant.config.special| no][formatvariant.default| yes]
- [formatvariant.name.mismatchconfig|dotencode: ][formatvariant.repo.mismatchconfig| yes][formatvariant.config.special| no][formatvariant.default| yes]
- [formatvariant.name.uptodate|generaldelta: ][formatvariant.repo.uptodate| yes][formatvariant.config.default| yes][formatvariant.default| yes]
- [formatvariant.name.uptodate|sparserevlog: ][formatvariant.repo.uptodate| yes][formatvariant.config.default| yes][formatvariant.default| yes]
- [formatvariant.name.uptodate|sidedata: ][formatvariant.repo.uptodate| no][formatvariant.config.default| no][formatvariant.default| no]
- [formatvariant.name.uptodate|copies-sdc: ][formatvariant.repo.uptodate| no][formatvariant.config.default| no][formatvariant.default| no]
- [formatvariant.name.uptodate|plain-cl-delta: ][formatvariant.repo.uptodate| yes][formatvariant.config.default| yes][formatvariant.default| yes]
- [formatvariant.name.uptodate|compression: ][formatvariant.repo.uptodate| zlib][formatvariant.config.default| zlib][formatvariant.default| zlib]
- [formatvariant.name.uptodate|compression-level:][formatvariant.repo.uptodate| default][formatvariant.config.default| default][formatvariant.default| default]
+ format-variant repo config default
+ [formatvariant.name.mismatchconfig|fncache: ][formatvariant.repo.mismatchconfig| yes][formatvariant.config.special| no][formatvariant.default| yes]
+ [formatvariant.name.mismatchconfig|dotencode: ][formatvariant.repo.mismatchconfig| yes][formatvariant.config.special| no][formatvariant.default| yes]
+ [formatvariant.name.uptodate|generaldelta: ][formatvariant.repo.uptodate| yes][formatvariant.config.default| yes][formatvariant.default| yes]
+ [formatvariant.name.uptodate|sparserevlog: ][formatvariant.repo.uptodate| yes][formatvariant.config.default| yes][formatvariant.default| yes]
+ [formatvariant.name.uptodate|sidedata: ][formatvariant.repo.uptodate| no][formatvariant.config.default| no][formatvariant.default| no]
+ [formatvariant.name.uptodate|persistent-nodemap:][formatvariant.repo.uptodate| no][formatvariant.config.default| no][formatvariant.default| no]
+ [formatvariant.name.uptodate|copies-sdc: ][formatvariant.repo.uptodate| no][formatvariant.config.default| no][formatvariant.default| no]
+ [formatvariant.name.uptodate|plain-cl-delta: ][formatvariant.repo.uptodate| yes][formatvariant.config.default| yes][formatvariant.default| yes]
+ [formatvariant.name.uptodate|compression: ][formatvariant.repo.uptodate| zlib][formatvariant.config.default| zlib][formatvariant.default| zlib]
+ [formatvariant.name.uptodate|compression-level: ][formatvariant.repo.uptodate| default][formatvariant.config.default| default][formatvariant.default| default]
$ hg debugformat -Tjson
[
{
@@ -130,6 +134,12 @@
{
"config": false,
"default": false,
+ "name": "persistent-nodemap",
+ "repo": false
+ },
+ {
+ "config": false,
+ "default": false,
"name": "copies-sdc",
"repo": false
},
@@ -174,6 +184,11 @@
every revision will be re-added as if it was new content. It will go through the full storage mechanism giving extensions a chance to process it (eg. lfs). This is similar to "re-delta-all" but even slower since more logic is involved.
+ $ hg debugupgraderepo --quiet
+ requirements
+ preserved: dotencode, fncache, generaldelta, revlogv1, sparserevlog, store
+
+
--optimize can be used to add optimizations
$ hg debugupgrade --optimize redeltaparent
@@ -183,6 +198,8 @@
requirements
preserved: dotencode, fncache, generaldelta, revlogv1, sparserevlog, store
+ optimisations: re-delta-parent
+
re-delta-parent
deltas within internal storage will choose a new base revision if needed
@@ -207,6 +224,8 @@
requirements
preserved: dotencode, fncache, generaldelta, revlogv1, sparserevlog, store
+ optimisations: re-delta-parent
+
re-delta-parent
deltas within internal storage will choose a new base revision if needed
@@ -221,6 +240,12 @@
re-delta-fulladd
every revision will be re-added as if it was new content. It will go through the full storage mechanism giving extensions a chance to process it (eg. lfs). This is similar to "re-delta-all" but even slower since more logic is involved.
+ $ hg debugupgrade --optimize re-delta-parent --quiet
+ requirements
+ preserved: dotencode, fncache, generaldelta, revlogv1, sparserevlog, store
+
+ optimisations: re-delta-parent
+
unknown optimization:
@@ -237,49 +262,53 @@
> EOF
$ hg debugformat
- format-variant repo
- fncache: no
- dotencode: no
- generaldelta: no
- sparserevlog: no
- sidedata: no
- copies-sdc: no
- plain-cl-delta: yes
- compression: zlib
- compression-level: default
+ format-variant repo
+ fncache: no
+ dotencode: no
+ generaldelta: no
+ sparserevlog: no
+ sidedata: no
+ persistent-nodemap: no
+ copies-sdc: no
+ plain-cl-delta: yes
+ compression: zlib
+ compression-level: default
$ hg debugformat --verbose
- format-variant repo config default
- fncache: no yes yes
- dotencode: no yes yes
- generaldelta: no yes yes
- sparserevlog: no yes yes
- sidedata: no no no
- copies-sdc: no no no
- plain-cl-delta: yes yes yes
- compression: zlib zlib zlib
- compression-level: default default default
+ format-variant repo config default
+ fncache: no yes yes
+ dotencode: no yes yes
+ generaldelta: no yes yes
+ sparserevlog: no yes yes
+ sidedata: no no no
+ persistent-nodemap: no no no
+ copies-sdc: no no no
+ plain-cl-delta: yes yes yes
+ compression: zlib zlib zlib
+ compression-level: default default default
$ hg debugformat --verbose --config format.usegeneraldelta=no
- format-variant repo config default
- fncache: no yes yes
- dotencode: no yes yes
- generaldelta: no no yes
- sparserevlog: no no yes
- sidedata: no no no
- copies-sdc: no no no
- plain-cl-delta: yes yes yes
- compression: zlib zlib zlib
- compression-level: default default default
+ format-variant repo config default
+ fncache: no yes yes
+ dotencode: no yes yes
+ generaldelta: no no yes
+ sparserevlog: no no yes
+ sidedata: no no no
+ persistent-nodemap: no no no
+ copies-sdc: no no no
+ plain-cl-delta: yes yes yes
+ compression: zlib zlib zlib
+ compression-level: default default default
$ hg debugformat --verbose --config format.usegeneraldelta=no --color=debug
- format-variant repo config default
- [formatvariant.name.mismatchconfig|fncache: ][formatvariant.repo.mismatchconfig| no][formatvariant.config.default| yes][formatvariant.default| yes]
- [formatvariant.name.mismatchconfig|dotencode: ][formatvariant.repo.mismatchconfig| no][formatvariant.config.default| yes][formatvariant.default| yes]
- [formatvariant.name.mismatchdefault|generaldelta: ][formatvariant.repo.mismatchdefault| no][formatvariant.config.special| no][formatvariant.default| yes]
- [formatvariant.name.mismatchdefault|sparserevlog: ][formatvariant.repo.mismatchdefault| no][formatvariant.config.special| no][formatvariant.default| yes]
- [formatvariant.name.uptodate|sidedata: ][formatvariant.repo.uptodate| no][formatvariant.config.default| no][formatvariant.default| no]
- [formatvariant.name.uptodate|copies-sdc: ][formatvariant.repo.uptodate| no][formatvariant.config.default| no][formatvariant.default| no]
- [formatvariant.name.uptodate|plain-cl-delta: ][formatvariant.repo.uptodate| yes][formatvariant.config.default| yes][formatvariant.default| yes]
- [formatvariant.name.uptodate|compression: ][formatvariant.repo.uptodate| zlib][formatvariant.config.default| zlib][formatvariant.default| zlib]
- [formatvariant.name.uptodate|compression-level:][formatvariant.repo.uptodate| default][formatvariant.config.default| default][formatvariant.default| default]
+ format-variant repo config default
+ [formatvariant.name.mismatchconfig|fncache: ][formatvariant.repo.mismatchconfig| no][formatvariant.config.default| yes][formatvariant.default| yes]
+ [formatvariant.name.mismatchconfig|dotencode: ][formatvariant.repo.mismatchconfig| no][formatvariant.config.default| yes][formatvariant.default| yes]
+ [formatvariant.name.mismatchdefault|generaldelta: ][formatvariant.repo.mismatchdefault| no][formatvariant.config.special| no][formatvariant.default| yes]
+ [formatvariant.name.mismatchdefault|sparserevlog: ][formatvariant.repo.mismatchdefault| no][formatvariant.config.special| no][formatvariant.default| yes]
+ [formatvariant.name.uptodate|sidedata: ][formatvariant.repo.uptodate| no][formatvariant.config.default| no][formatvariant.default| no]
+ [formatvariant.name.uptodate|persistent-nodemap:][formatvariant.repo.uptodate| no][formatvariant.config.default| no][formatvariant.default| no]
+ [formatvariant.name.uptodate|copies-sdc: ][formatvariant.repo.uptodate| no][formatvariant.config.default| no][formatvariant.default| no]
+ [formatvariant.name.uptodate|plain-cl-delta: ][formatvariant.repo.uptodate| yes][formatvariant.config.default| yes][formatvariant.default| yes]
+ [formatvariant.name.uptodate|compression: ][formatvariant.repo.uptodate| zlib][formatvariant.config.default| zlib][formatvariant.default| zlib]
+ [formatvariant.name.uptodate|compression-level: ][formatvariant.repo.uptodate| default][formatvariant.config.default| default][formatvariant.default| default]
$ hg debugupgraderepo
repository lacks features recommended by current config options:
@@ -328,6 +357,11 @@
re-delta-fulladd
every revision will be re-added as if it was new content. It will go through the full storage mechanism giving extensions a chance to process it (eg. lfs). This is similar to "re-delta-all" but even slower since more logic is involved.
+ $ hg debugupgraderepo --quiet
+ requirements
+ preserved: revlogv1, store
+ added: dotencode, fncache, generaldelta, sparserevlog
+
$ hg --config format.dotencode=false debugupgraderepo
repository lacks features recommended by current config options:
@@ -569,6 +603,8 @@
requirements
preserved: dotencode, fncache, generaldelta, revlogv1, sparserevlog, store
+ optimisations: re-delta-parent
+
re-delta-parent
deltas within internal storage will choose a new base revision if needed
@@ -643,6 +679,8 @@
requirements
preserved: dotencode, fncache, generaldelta, revlogv1, sparserevlog, store
+ optimisations: re-delta-parent
+
re-delta-parent
deltas within internal storage will choose a new base revision if needed
@@ -689,6 +727,8 @@
requirements
preserved: dotencode, fncache, generaldelta, revlogv1, sparserevlog, store
+ optimisations: re-delta-parent
+
re-delta-parent
deltas within internal storage will choose a new base revision if needed
@@ -735,6 +775,8 @@
requirements
preserved: dotencode, fncache, generaldelta, revlogv1, sparserevlog, store
+ optimisations: re-delta-parent
+
re-delta-parent
deltas within internal storage will choose a new base revision if needed
@@ -786,6 +828,8 @@
preserved: dotencode, fncache, generaldelta, revlogv1, store
removed: sparserevlog
+ optimisations: re-delta-parent
+
re-delta-parent
deltas within internal storage will choose a new base revision if needed
@@ -835,6 +879,8 @@
preserved: dotencode, fncache, generaldelta, revlogv1, store
added: sparserevlog
+ optimisations: re-delta-parent
+
sparserevlog
Revlog supports delta chain with more unused data between payload. These gaps will be skipped at read time. This allows for better delta chains, making a better compression and faster exchange with server.
@@ -923,6 +969,8 @@
requirements
preserved: dotencode, fncache, generaldelta, revlogv1, sparserevlog, store
+ optimisations: re-delta-fulladd
+
re-delta-fulladd
each revision will be added as new content to the internal storage; this will likely drastically slow down execution time, but some extensions might need it
@@ -1135,6 +1183,8 @@
requirements
preserved: dotencode, fncache, generaldelta, revlogv1, sparserevlog, store
+ optimisations: re-delta-all
+
re-delta-all
deltas within internal storage will be fully recomputed; this will likely drastically slow down execution time
@@ -1190,9 +1240,13 @@
store
Check that we can add the sparse-revlog format requirement
- $ hg --config format.sparse-revlog=yes debugupgraderepo --run >/dev/null
- copy of old repository backed up at $TESTTMP/sparserevlogrepo/.hg/upgradebackup.* (glob)
- the old repository will not be deleted; remove it to free up disk space once the upgraded repository is verified
+ $ hg --config format.sparse-revlog=yes debugupgraderepo --run --quiet
+ upgrade will perform the following actions:
+
+ requirements
+ preserved: dotencode, fncache, generaldelta, revlogv1, store
+ added: sparserevlog
+
$ cat .hg/requires
dotencode
fncache
@@ -1202,9 +1256,13 @@
store
Check that we can remove the sparse-revlog format requirement
- $ hg --config format.sparse-revlog=no debugupgraderepo --run >/dev/null
- copy of old repository backed up at $TESTTMP/sparserevlogrepo/.hg/upgradebackup.* (glob)
- the old repository will not be deleted; remove it to free up disk space once the upgraded repository is verified
+ $ hg --config format.sparse-revlog=no debugupgraderepo --run --quiet
+ upgrade will perform the following actions:
+
+ requirements
+ preserved: dotencode, fncache, generaldelta, revlogv1, store
+ removed: sparserevlog
+
$ cat .hg/requires
dotencode
fncache
@@ -1219,18 +1277,25 @@
upgrade
- $ hg --config format.revlog-compression=zstd debugupgraderepo --run --no-backup >/dev/null
+ $ hg --config format.revlog-compression=zstd debugupgraderepo --run --no-backup --quiet
+ upgrade will perform the following actions:
+
+ requirements
+ preserved: dotencode, fncache, generaldelta, revlogv1, store
+ added: revlog-compression-zstd, sparserevlog
+
$ hg debugformat -v
- format-variant repo config default
- fncache: yes yes yes
- dotencode: yes yes yes
- generaldelta: yes yes yes
- sparserevlog: yes yes yes
- sidedata: no no no
- copies-sdc: no no no
- plain-cl-delta: yes yes yes
- compression: zstd zlib zlib
- compression-level: default default default
+ format-variant repo config default
+ fncache: yes yes yes
+ dotencode: yes yes yes
+ generaldelta: yes yes yes
+ sparserevlog: yes yes yes
+ sidedata: no no no
+ persistent-nodemap: no no no
+ copies-sdc: no no no
+ plain-cl-delta: yes yes yes
+ compression: zstd zlib zlib
+ compression-level: default default default
$ cat .hg/requires
dotencode
fncache
@@ -1242,18 +1307,25 @@
downgrade
- $ hg debugupgraderepo --run --no-backup > /dev/null
+ $ hg debugupgraderepo --run --no-backup --quiet
+ upgrade will perform the following actions:
+
+ requirements
+ preserved: dotencode, fncache, generaldelta, revlogv1, sparserevlog, store
+ removed: revlog-compression-zstd
+
$ hg debugformat -v
- format-variant repo config default
- fncache: yes yes yes
- dotencode: yes yes yes
- generaldelta: yes yes yes
- sparserevlog: yes yes yes
- sidedata: no no no
- copies-sdc: no no no
- plain-cl-delta: yes yes yes
- compression: zlib zlib zlib
- compression-level: default default default
+ format-variant repo config default
+ fncache: yes yes yes
+ dotencode: yes yes yes
+ generaldelta: yes yes yes
+ sparserevlog: yes yes yes
+ sidedata: no no no
+ persistent-nodemap: no no no
+ copies-sdc: no no no
+ plain-cl-delta: yes yes yes
+ compression: zlib zlib zlib
+ compression-level: default default default
$ cat .hg/requires
dotencode
fncache
@@ -1268,18 +1340,25 @@
> [format]
> revlog-compression=zstd
> EOF
- $ hg debugupgraderepo --run --no-backup > /dev/null
+ $ hg debugupgraderepo --run --no-backup --quiet
+ upgrade will perform the following actions:
+
+ requirements
+ preserved: dotencode, fncache, generaldelta, revlogv1, sparserevlog, store
+ added: revlog-compression-zstd
+
$ hg debugformat -v
- format-variant repo config default
- fncache: yes yes yes
- dotencode: yes yes yes
- generaldelta: yes yes yes
- sparserevlog: yes yes yes
- sidedata: no no no
- copies-sdc: no no no
- plain-cl-delta: yes yes yes
- compression: zstd zstd zlib
- compression-level: default default default
+ format-variant repo config default
+ fncache: yes yes yes
+ dotencode: yes yes yes
+ generaldelta: yes yes yes
+ sparserevlog: yes yes yes
+ sidedata: no no no
+ persistent-nodemap: no no no
+ copies-sdc: no no no
+ plain-cl-delta: yes yes yes
+ compression: zstd zstd zlib
+ compression-level: default default default
$ cat .hg/requires
dotencode
fncache
@@ -1296,19 +1375,28 @@
upgrade
- $ hg --config format.exp-use-side-data=yes debugupgraderepo --run --no-backup --config "extensions.sidedata=$TESTDIR/testlib/ext-sidedata.py" >/dev/null
+ $ hg --config format.exp-use-side-data=yes debugupgraderepo --run --no-backup --config "extensions.sidedata=$TESTDIR/testlib/ext-sidedata.py" --quiet
+ upgrade will perform the following actions:
+
+ requirements
+ preserved: dotencode, fncache, generaldelta, revlogv1, store (no-zstd !)
+ preserved: dotencode, fncache, generaldelta, revlog-compression-zstd, revlogv1, sparserevlog, store (zstd !)
+ added: exp-sidedata-flag (zstd !)
+ added: exp-sidedata-flag, sparserevlog (no-zstd !)
+
$ hg debugformat -v
- format-variant repo config default
- fncache: yes yes yes
- dotencode: yes yes yes
- generaldelta: yes yes yes
- sparserevlog: yes yes yes
- sidedata: yes no no
- copies-sdc: no no no
- plain-cl-delta: yes yes yes
- compression: zstd zstd zlib (zstd !)
- compression: zlib zlib zlib (no-zstd !)
- compression-level: default default default
+ format-variant repo config default
+ fncache: yes yes yes
+ dotencode: yes yes yes
+ generaldelta: yes yes yes
+ sparserevlog: yes yes yes
+ sidedata: yes no no
+ persistent-nodemap: no no no
+ copies-sdc: no no no
+ plain-cl-delta: yes yes yes
+ compression: zlib zlib zlib (no-zstd !)
+ compression: zstd zstd zlib (zstd !)
+ compression-level: default default default
$ cat .hg/requires
dotencode
exp-sidedata-flag
@@ -1325,19 +1413,27 @@
downgrade
- $ hg debugupgraderepo --config format.exp-use-side-data=no --run --no-backup > /dev/null
+ $ hg debugupgraderepo --config format.exp-use-side-data=no --run --no-backup --quiet
+ upgrade will perform the following actions:
+
+ requirements
+ preserved: dotencode, fncache, generaldelta, revlogv1, sparserevlog, store (no-zstd !)
+ preserved: dotencode, fncache, generaldelta, revlog-compression-zstd, revlogv1, sparserevlog, store (zstd !)
+ removed: exp-sidedata-flag
+
$ hg debugformat -v
- format-variant repo config default
- fncache: yes yes yes
- dotencode: yes yes yes
- generaldelta: yes yes yes
- sparserevlog: yes yes yes
- sidedata: no no no
- copies-sdc: no no no
- plain-cl-delta: yes yes yes
- compression: zstd zstd zlib (zstd !)
- compression: zlib zlib zlib (no-zstd !)
- compression-level: default default default
+ format-variant repo config default
+ fncache: yes yes yes
+ dotencode: yes yes yes
+ generaldelta: yes yes yes
+ sparserevlog: yes yes yes
+ sidedata: no no no
+ persistent-nodemap: no no no
+ copies-sdc: no no no
+ plain-cl-delta: yes yes yes
+ compression: zlib zlib zlib (no-zstd !)
+ compression: zstd zstd zlib (zstd !)
+ compression-level: default default default
$ cat .hg/requires
dotencode
fncache
@@ -1354,19 +1450,27 @@
> [format]
> exp-use-side-data=yes
> EOF
- $ hg debugupgraderepo --run --no-backup > /dev/null
+ $ hg debugupgraderepo --run --no-backup --quiet
+ upgrade will perform the following actions:
+
+ requirements
+ preserved: dotencode, fncache, generaldelta, revlogv1, sparserevlog, store (no-zstd !)
+ preserved: dotencode, fncache, generaldelta, revlog-compression-zstd, revlogv1, sparserevlog, store (zstd !)
+ added: exp-sidedata-flag
+
$ hg debugformat -v
- format-variant repo config default
- fncache: yes yes yes
- dotencode: yes yes yes
- generaldelta: yes yes yes
- sparserevlog: yes yes yes
- sidedata: yes yes no
- copies-sdc: no no no
- plain-cl-delta: yes yes yes
- compression: zstd zstd zlib (zstd !)
- compression: zlib zlib zlib (no-zstd !)
- compression-level: default default default
+ format-variant repo config default
+ fncache: yes yes yes
+ dotencode: yes yes yes
+ generaldelta: yes yes yes
+ sparserevlog: yes yes yes
+ sidedata: yes yes no
+ persistent-nodemap: no no no
+ copies-sdc: no no no
+ plain-cl-delta: yes yes yes
+ compression: zlib zlib zlib (no-zstd !)
+ compression: zstd zstd zlib (zstd !)
+ compression-level: default default default
$ cat .hg/requires
dotencode
exp-sidedata-flag