--- a/.hgignore Tue Jul 14 10:25:41 2020 +0200
+++ b/.hgignore Mon Jul 20 21:56:27 2020 +0530
@@ -52,6 +52,7 @@
.idea/*
.asv/*
.pytype/*
+.mypy_cache
i18n/hg.pot
locale/*/LC_MESSAGES/hg.mo
hgext/__index__.py
--- a/contrib/chg/chg.c Tue Jul 14 10:25:41 2020 +0200
+++ b/contrib/chg/chg.c Mon Jul 20 21:56:27 2020 +0530
@@ -232,7 +232,7 @@
abortmsgerrno("failed to putenv CHG_CLEAR_LC_CTYPE");
} else {
if (setenv("CHGORIG_LC_CTYPE", lc_ctype_env, 1) != 0) {
- abortmsgerrno("failed to setenv CHGORIG_LC_CTYYPE");
+ abortmsgerrno("failed to setenv CHGORIG_LC_CTYPE");
}
}
--- a/contrib/dumprevlog Tue Jul 14 10:25:41 2020 +0200
+++ b/contrib/dumprevlog Mon Jul 20 21:56:27 2020 +0530
@@ -28,7 +28,7 @@
def printb(data, end=b'\n'):
sys.stdout.flush()
- pycompat.stdout.write(data + end)
+ procutil.stdout.write(data + end)
for f in sys.argv[1:]:
--- a/contrib/fuzz/Makefile Tue Jul 14 10:25:41 2020 +0200
+++ b/contrib/fuzz/Makefile Mon Jul 20 21:56:27 2020 +0530
@@ -11,6 +11,7 @@
LIB_FUZZING_ENGINE ?= standalone_fuzz_target_runner.o
PYTHON_CONFIG ?= $$OUT/sanpy/bin/python-config
+PYTHON_CONFIG_FLAGS ?= --ldflags
CXXFLAGS += -Wno-deprecated-register
@@ -67,7 +68,7 @@
-Wno-register -Wno-macro-redefined \
-I../../mercurial dirs.cc \
pyutil.o $(PARSERS_OBJS) \
- $(LIB_FUZZING_ENGINE) `$(PYTHON_CONFIG) --ldflags` \
+ $(LIB_FUZZING_ENGINE) `$(PYTHON_CONFIG) $(PYTHON_CONFIG_FLAGS)` \
-o $$OUT/dirs_fuzzer
fncache_fuzzer: fncache.cc
@@ -75,7 +76,7 @@
-Wno-register -Wno-macro-redefined \
-I../../mercurial fncache.cc \
pyutil.o $(PARSERS_OBJS) \
- $(LIB_FUZZING_ENGINE) `$(PYTHON_CONFIG) --ldflags` \
+ $(LIB_FUZZING_ENGINE) `$(PYTHON_CONFIG) $(PYTHON_CONFIG_FLAGS)` \
-o $$OUT/fncache_fuzzer
jsonescapeu8fast_fuzzer: jsonescapeu8fast.cc pyutil.o $(PARSERS_OBJS)
@@ -83,7 +84,7 @@
-Wno-register -Wno-macro-redefined \
-I../../mercurial jsonescapeu8fast.cc \
pyutil.o $(PARSERS_OBJS) \
- $(LIB_FUZZING_ENGINE) `$(PYTHON_CONFIG) --ldflags` \
+ $(LIB_FUZZING_ENGINE) `$(PYTHON_CONFIG) $(PYTHON_CONFIG_FLAGS)` \
-o $$OUT/jsonescapeu8fast_fuzzer
manifest_fuzzer: manifest.cc pyutil.o $(PARSERS_OBJS) $$OUT/manifest_fuzzer_seed_corpus.zip
@@ -91,7 +92,7 @@
-Wno-register -Wno-macro-redefined \
-I../../mercurial manifest.cc \
pyutil.o $(PARSERS_OBJS) \
- $(LIB_FUZZING_ENGINE) `$(PYTHON_CONFIG) --ldflags` \
+ $(LIB_FUZZING_ENGINE) `$(PYTHON_CONFIG) $(PYTHON_CONFIG_FLAGS)` \
-o $$OUT/manifest_fuzzer
revlog_fuzzer: revlog.cc pyutil.o $(PARSERS_OBJS) $$OUT/revlog_fuzzer_seed_corpus.zip
@@ -99,7 +100,7 @@
-Wno-register -Wno-macro-redefined \
-I../../mercurial revlog.cc \
pyutil.o $(PARSERS_OBJS) \
- $(LIB_FUZZING_ENGINE) `$(PYTHON_CONFIG) --ldflags` \
+ $(LIB_FUZZING_ENGINE) `$(PYTHON_CONFIG) $(PYTHON_CONFIG_FLAGS)` \
-o $$OUT/revlog_fuzzer
dirstate_fuzzer: dirstate.cc pyutil.o $(PARSERS_OBJS) $$OUT/dirstate_fuzzer_seed_corpus.zip
@@ -107,7 +108,7 @@
-Wno-register -Wno-macro-redefined \
-I../../mercurial dirstate.cc \
pyutil.o $(PARSERS_OBJS) \
- $(LIB_FUZZING_ENGINE) `$(PYTHON_CONFIG) --ldflags` \
+ $(LIB_FUZZING_ENGINE) `$(PYTHON_CONFIG) $(PYTHON_CONFIG_FLAGS)` \
-o $$OUT/dirstate_fuzzer
fm1readmarkers_fuzzer: fm1readmarkers.cc pyutil.o $(PARSERS_OBJS) $$OUT/fm1readmarkers_fuzzer_seed_corpus.zip
@@ -115,7 +116,7 @@
-Wno-register -Wno-macro-redefined \
-I../../mercurial fm1readmarkers.cc \
pyutil.o $(PARSERS_OBJS) \
- $(LIB_FUZZING_ENGINE) `$(PYTHON_CONFIG) --ldflags` \
+ $(LIB_FUZZING_ENGINE) `$(PYTHON_CONFIG) $(PYTHON_CONFIG_FLAGS)` \
-o $$OUT/fm1readmarkers_fuzzer
clean:
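
PYTHON_CONFIG_FLAGS makes the previously hard-coded `--ldflags` overridable at
build time. One plausible use, stated here as an assumption rather than
anything the patch spells out: from Python 3.8 onward (note the pyutil.cc bump
below), python-config only emits -lpython3.x when --embed is also passed, so an
embedding build would be invoked as:

    $ make -C contrib/fuzz PYTHON_CONFIG_FLAGS='--ldflags --embed' dirs_fuzzer
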
--- a/contrib/fuzz/manifest.cc Tue Jul 14 10:25:41 2020 +0200
+++ b/contrib/fuzz/manifest.cc Mon Jul 20 21:56:27 2020 +0530
@@ -3,6 +3,7 @@
#include <stdlib.h>
#include <unistd.h>
+#include "FuzzedDataProvider.h"
#include "pyutil.h"
#include <string>
@@ -24,7 +25,7 @@
lm[e]
e in lm
(e + 'nope') in lm
- lm[b'xyzzy'] = (b'\0' * 20, 'x')
+ lm[b'xyzzy'] = (b'\0' * nlen, 'x')
# do an insert, text should change
assert lm.text() != mdata, "insert should change text and didn't: %r %r" % (lm.text(), mdata)
cloned = lm.filtercopy(lambda x: x != 'xyzzy')
@@ -51,10 +52,14 @@
if (Size > 100000) {
return 0;
}
+ FuzzedDataProvider provider(Data, Size);
+ Py_ssize_t nodelength = provider.ConsumeBool() ? 20 : 32;
+ PyObject *nlen = PyLong_FromSsize_t(nodelength);
PyObject *mtext =
PyBytes_FromStringAndSize((const char *)Data, (Py_ssize_t)Size);
PyObject *locals = PyDict_New();
PyDict_SetItemString(locals, "mdata", mtext);
+ PyDict_SetItemString(locals, "nlen", nlen);
PyObject *res = PyEval_EvalCode(code, contrib::pyglobals(), locals);
if (!res) {
PyErr_Print();
--- a/contrib/fuzz/manifest_corpus.py Tue Jul 14 10:25:41 2020 +0200
+++ b/contrib/fuzz/manifest_corpus.py Mon Jul 20 21:56:27 2020 +0530
@@ -10,7 +10,7 @@
with zipfile.ZipFile(args.out[0], "w", zipfile.ZIP_STORED) as zf:
zf.writestr(
"manifest_zero",
- '''PKG-INFO\09b3ed8f2b81095a13064402e930565f083346e9a
+ '''\0PKG-INFO\09b3ed8f2b81095a13064402e930565f083346e9a
README\080b6e76643dcb44d4bc729e932fc464b3e36dbe3
hg\0b6444347c629cc058d478023905cfb83b7f5bb9d
mercurial/__init__.py\0b80de5d138758541c5f05265ad144ab9fa86d1db
@@ -25,9 +25,14 @@
tkmerge\03c922edb43a9c143682f7bc7b00f98b3c756ebe7
''',
)
- zf.writestr("badmanifest_shorthashes", "narf\0aa\nnarf2\0aaa\n")
+ zf.writestr("badmanifest_shorthashes", "\0narf\0aa\nnarf2\0aaa\n")
zf.writestr(
"badmanifest_nonull",
- "narf\0cccccccccccccccccccccccccccccccccccccccc\n"
+ "\0narf\0cccccccccccccccccccccccccccccccccccccccc\n"
"narf2aaaaaaaaaaaaaaaaaaaa\n",
)
+
+ zf.writestr(
+ "manifest_long_nodes",
+ "\1a\0ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff\n",
+ )
--- a/contrib/fuzz/pyutil.cc Tue Jul 14 10:25:41 2020 +0200
+++ b/contrib/fuzz/pyutil.cc Mon Jul 20 21:56:27 2020 +0530
@@ -21,7 +21,7 @@
void initpy(const char *cselfpath)
{
#ifdef HG_FUZZER_PY3
- const std::string subdir = "/sanpy/lib/python3.7";
+ const std::string subdir = "/sanpy/lib/python3.8";
#else
const std::string subdir = "/sanpy/lib/python2.7";
#endif
--- a/contrib/heptapod-ci.yml Tue Jul 14 10:25:41 2020 +0200
+++ b/contrib/heptapod-ci.yml Mon Jul 20 21:56:27 2020 +0530
@@ -5,6 +5,8 @@
before_script:
- hg clone . /tmp/mercurial-ci/ --noupdate
- hg -R /tmp/mercurial-ci/ update `hg log --rev '.' --template '{node}'`
+ - cd /tmp/mercurial-ci/rust/rhg
+ - cargo build
- cd /tmp/mercurial-ci/
- ls -1 tests/test-check-*.* > /tmp/check-tests.txt
@@ -79,3 +81,9 @@
RUNTEST_ARGS: "--rust --blacklist /tmp/check-tests.txt"
PYTHON: python3
TEST_HGMODULEPOLICY: "rust+c"
+
+test-py2-chg:
+ <<: *runtests
+ variables:
+ RUNTEST_ARGS: "--blacklist /tmp/check-tests.txt --chg"
+ TEST_HGMODULEPOLICY: "c"
--- a/contrib/packaging/debian/rules Tue Jul 14 10:25:41 2020 +0200
+++ b/contrib/packaging/debian/rules Mon Jul 20 21:56:27 2020 +0530
@@ -2,14 +2,51 @@
# Uncomment this to turn on verbose mode.
# export DH_VERBOSE=1
+# By default we build a .deb where the native components are built with the
+# current "default" version of py3 on the build machine. If you wish to build a
+# .deb that has native components built for multiple versions of py3:
+#
+# 1. install python3.x and python3.x-dev for each version you want
+# 2. set DEB_HG_MULTI_VERSION=1 or DEB_HG_PYTHON_VERSIONS in your environment
+# (if both are set, DEB_HG_PYTHON_VERSIONS has precedence)
+#
+# If you choose `DEB_HG_MULTI_VERSION=1`, it will build for every "supported"
+# version of py3 that's installed on the build machine. This may not be equal to
+# the actual versions that are installed; see the comment where we set
+# DEB_HG_PYTHON_VERSIONS below. If you choose to set `DEB_HG_PYTHON_VERSIONS`
+# yourself, set it to a space-separated string of python version numbers, like:
+# DEB_HG_PYTHON_VERSIONS="3.7 3.8" make deb
+DEB_HG_MULTI_VERSION?=0
+
CPUS=$(shell cat /proc/cpuinfo | grep -E ^processor | wc -l)
+# By default, only build for the version of python3 that the system considers
+# the 'default' (which should be the one invoked by just running 'python3'
+# without a minor version). If DEB_HG_PYTHON_VERSIONS is set, this is ignored.
+ifeq ($(DEB_HG_MULTI_VERSION), 1)
+ # If we're building for multiple versions, use all of the "supported" versions
+ # on the build machine. Note: the mechanism in use here (`py3versions`) is the
+ # recommended one, but it relies on a file written by the python3-minimal
+ # package, and this file is not dynamic and does not account for manual
+ # installations, just the ones that would be installed by `python3-all`. This
+  # includes the `-i` flag, which claims to list all "installed" versions,
+ # but it doesn't. This was quite confusing, hence this tale of woe. :)
+ DEB_HG_PYTHON_VERSIONS?=$(shell py3versions -vs)
+else
+ # If we're building for only one version, identify the "default" version on
+ # the build machine and use that when building; this is just so that we don't
+ # have to duplicate the rules below for multi-version vs. single-version. The
+ # shebang line will still be /usr/bin/python3 (no minor version).
+ DEB_HG_PYTHON_VERSIONS?=$(shell py3versions -vd)
+endif
+
export HGPYTHON3=1
export PYTHON=python3
%:
dh $@ --with python3
+# Note: testing can be disabled using the standard `DEB_BUILD_OPTIONS=nocheck`
override_dh_auto_test:
http_proxy='' dh_auto_test -- TESTFLAGS="-j$(CPUS)"
@@ -24,8 +61,15 @@
$(MAKE) all
$(MAKE) -C contrib/chg all
-override_dh_auto_install:
- python3 setup.py install --root "$(CURDIR)"/debian/mercurial --install-layout=deb
+# Build the native extensions for a specific python3 version (which must be
+# installed on the build machine).
+install-python%:
+ python$* setup.py install --root "$(CURDIR)"/debian/mercurial --install-layout=deb
+
+# Build the final package. This rule's prerequisites cause the native
+# extensions to be compiled for every version of python3 listed in
+# DEB_HG_PYTHON_VERSIONS.
+override_dh_auto_install: $(DEB_HG_PYTHON_VERSIONS:%=install-python%)
# chg
make -C contrib/chg \
DESTDIR="$(CURDIR)"/debian/mercurial \
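
For example, with python3.7, python3.8 and their -dev packages installed, a
single multi-version package could be built with (a sketch; `make deb` is the
entry point the comments above already reference):

    $ DEB_HG_MULTI_VERSION=1 make deb

which expands the override_dh_auto_install prerequisites to install-python3.7
and install-python3.8.
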
--- a/contrib/perf.py Tue Jul 14 10:25:41 2020 +0200
+++ b/contrib/perf.py Mon Jul 20 21:56:27 2020 +0530
@@ -3794,19 +3794,47 @@
fm.end()
-@command(b'perfwrite', formatteropts)
+@command(
+ b'perfwrite',
+ formatteropts
+ + [
+ (b'', b'write-method', b'write', b'ui write method'),
+ (b'', b'nlines', 100, b'number of lines'),
+ (b'', b'nitems', 100, b'number of items (per line)'),
+ (b'', b'item', b'x', b'item that is written'),
+ (b'', b'batch-line', None, b'pass whole line to write method at once'),
+ (b'', b'flush-line', None, b'flush after each line'),
+ ],
+)
def perfwrite(ui, repo, **opts):
- """microbenchmark ui.write
+ """microbenchmark ui.write (and others)
"""
opts = _byteskwargs(opts)
+ write = getattr(ui, _sysstr(opts[b'write_method']))
+ nlines = int(opts[b'nlines'])
+ nitems = int(opts[b'nitems'])
+ item = opts[b'item']
+ batch_line = opts.get(b'batch_line')
+ flush_line = opts.get(b'flush_line')
+
+ if batch_line:
+ line = item * nitems + b'\n'
+
+ def benchmark():
+ for i in pycompat.xrange(nlines):
+ if batch_line:
+ write(line)
+ else:
+ for i in pycompat.xrange(nitems):
+ write(item)
+ write(b'\n')
+ if flush_line:
+ ui.flush()
+ ui.flush()
+
timer, fm = gettimer(ui, opts)
-
- def write():
- for i in range(100000):
- ui.writenoi18n(b'Testing write performance\n')
-
- timer(write)
+ timer(benchmark)
fm.end()
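
A usage sketch for the extended benchmark, assuming the perf extension is
loaded from a source checkout; the flag spellings are the ones declared above:

    $ hg --config extensions.perf=contrib/perf.py perfwrite \
          --write-method writenoi18n --nlines 1000 --nitems 50 \
          --batch-line --flush-line

`--write-method` accepts any ui method name, so buffered (`--batch-line`) and
per-item writes can now be compared on the same method.
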
--- a/contrib/simplemerge Tue Jul 14 10:25:41 2020 +0200
+++ b/contrib/simplemerge Mon Jul 20 21:56:27 2020 +0530
@@ -45,8 +45,8 @@
def showhelp():
- pycompat.stdout.write(usage)
- pycompat.stdout.write(b'\noptions:\n')
+ procutil.stdout.write(usage)
+ procutil.stdout.write(b'\noptions:\n')
out_opts = []
for shortopt, longopt, default, desc in options:
@@ -62,11 +62,11 @@
)
opts_len = max([len(opt[0]) for opt in out_opts])
for first, second in out_opts:
- pycompat.stdout.write(b' %-*s %s\n' % (opts_len, first, second))
+ procutil.stdout.write(b' %-*s %s\n' % (opts_len, first, second))
try:
- for fp in (sys.stdin, pycompat.stdout, sys.stderr):
+ for fp in (sys.stdin, procutil.stdout, sys.stderr):
procutil.setbinary(fp)
opts = {}
@@ -92,11 +92,11 @@
)
except ParseError as e:
e = stringutil.forcebytestr(e)
- pycompat.stdout.write(b"%s: %s\n" % (sys.argv[0].encode('utf8'), e))
+ procutil.stdout.write(b"%s: %s\n" % (sys.argv[0].encode('utf8'), e))
showhelp()
sys.exit(1)
except error.Abort as e:
- pycompat.stderr.write(b"abort: %s\n" % e)
+ procutil.stderr.write(b"abort: %s\n" % e)
sys.exit(255)
except KeyboardInterrupt:
sys.exit(255)
--- a/contrib/undumprevlog Tue Jul 14 10:25:41 2020 +0200
+++ b/contrib/undumprevlog Mon Jul 20 21:56:27 2020 +0530
@@ -9,7 +9,6 @@
from mercurial import (
encoding,
node,
- pycompat,
revlog,
transaction,
vfs as vfsmod,
@@ -30,7 +29,7 @@
if l.startswith("file:"):
f = encoding.strtolocal(l[6:-1])
r = revlog.revlog(opener, f)
- pycompat.stdout.write(b'%s\n' % f)
+ procutil.stdout.write(b'%s\n' % f)
elif l.startswith("node:"):
n = node.bin(l[6:-1])
elif l.startswith("linkrev:"):
--- a/hgext/absorb.py Tue Jul 14 10:25:41 2020 +0200
+++ b/hgext/absorb.py Mon Jul 20 21:56:27 2020 +0530
@@ -50,6 +50,7 @@
phases,
pycompat,
registrar,
+ rewriteutil,
scmutil,
util,
)
@@ -782,7 +783,10 @@
# nothing changed, nothing committed
nextp1 = ctx
continue
- if self._willbecomenoop(memworkingcopy, ctx, nextp1):
+ willbecomenoop = ctx.files() and self._willbecomenoop(
+ memworkingcopy, ctx, nextp1
+ )
+ if self.skip_empty_successor and willbecomenoop:
# changeset is no longer necessary
self.replacemap[ctx.node()] = None
msg = _(b'became empty and was dropped')
@@ -793,7 +797,11 @@
nextp1 = lastcommitted
self.replacemap[ctx.node()] = lastcommitted.node()
if memworkingcopy:
- msg = _(b'%d file(s) changed, became %s') % (
+ if willbecomenoop:
+ msg = _(b'%d file(s) changed, became empty as %s')
+ else:
+ msg = _(b'%d file(s) changed, became %s')
+ msg = msg % (
len(memworkingcopy),
self._ctx2str(lastcommitted),
)
@@ -887,6 +895,10 @@
if len(parents) != 1:
return False
pctx = parents[0]
+ if ctx.branch() != pctx.branch():
+ return False
+ if ctx.extra().get(b'close'):
+ return False
# ctx changes more files (not a subset of memworkingcopy)
if not set(ctx.files()).issubset(set(memworkingcopy)):
return False
@@ -929,6 +941,10 @@
self.repo, replacements, operation=b'absorb', fixphase=True
)
+ @util.propertycache
+ def skip_empty_successor(self):
+ return rewriteutil.skip_empty_successor(self.ui, b'absorb')
+
def _parsechunk(hunk):
"""(crecord.uihunk or patch.recordhunk) -> (path, (a1, a2, [bline]))"""
@@ -1045,7 +1061,7 @@
not opts.get(b'apply_changes')
and state.ctxaffected
and ui.promptchoice(
- b"apply changes (yn)? $$ &Yes $$ &No", default=1
+ b"apply changes (y/N)? $$ &Yes $$ &No", default=1
)
):
raise error.Abort(_(b'absorb cancelled\n'))
--- a/hgext/convert/cvs.py Tue Jul 14 10:25:41 2020 +0200
+++ b/hgext/convert/cvs.py Mon Jul 20 21:56:27 2020 +0530
@@ -226,8 +226,7 @@
cmd = [rsh, host] + cmd
# popen2 does not support argument lists under Windows
- cmd = [procutil.shellquote(arg) for arg in cmd]
- cmd = procutil.quotecommand(b' '.join(cmd))
+ cmd = b' '.join(procutil.shellquote(arg) for arg in cmd)
self.writep, self.readp = procutil.popen2(cmd)
self.realroot = root
--- a/hgext/convert/gnuarch.py Tue Jul 14 10:25:41 2020 +0200
+++ b/hgext/convert/gnuarch.py Mon Jul 20 21:56:27 2020 +0530
@@ -217,7 +217,7 @@
cmdline = [procutil.shellquote(arg) for arg in cmdline]
bdevnull = pycompat.bytestr(os.devnull)
cmdline += [b'>', bdevnull, b'2>', bdevnull]
- cmdline = procutil.quotecommand(b' '.join(cmdline))
+ cmdline = b' '.join(cmdline)
self.ui.debug(cmdline, b'\n')
return os.system(pycompat.rapply(procutil.tonativestr, cmdline))
--- a/hgext/convert/subversion.py Tue Jul 14 10:25:41 2020 +0200
+++ b/hgext/convert/subversion.py Mon Jul 20 21:56:27 2020 +0530
@@ -1366,7 +1366,7 @@
arg = encodeargs(args)
hgexe = procutil.hgexecutable()
cmd = b'%s debugsvnlog' % procutil.shellquote(hgexe)
- stdin, stdout = procutil.popen2(procutil.quotecommand(cmd))
+ stdin, stdout = procutil.popen2(cmd)
stdin.write(arg)
try:
stdin.close()
--- a/hgext/extdiff.py Tue Jul 14 10:25:41 2020 +0200
+++ b/hgext/extdiff.py Mon Jul 20 21:56:27 2020 +0530
@@ -233,7 +233,6 @@
''' like 'procutil.system', but returns the Popen object directly
so we don't have to wait on it.
'''
- cmd = procutil.quotecommand(cmd)
env = procutil.shellenviron(environ)
proc = subprocess.Popen(
procutil.tonativestr(cmd),
@@ -351,6 +350,187 @@
proc.wait()
+def diffpatch(ui, repo, node1, node2, tmproot, matcher, cmdline):
+ template = b'hg-%h.patch'
+ # write patches to temporary files
+ with formatter.nullformatter(ui, b'extdiff', {}) as fm:
+ cmdutil.export(
+ repo,
+ [repo[node1].rev(), repo[node2].rev()],
+ fm,
+ fntemplate=repo.vfs.reljoin(tmproot, template),
+ match=matcher,
+ )
+ label1 = cmdutil.makefilename(repo[node1], template)
+ label2 = cmdutil.makefilename(repo[node2], template)
+ file1 = repo.vfs.reljoin(tmproot, label1)
+ file2 = repo.vfs.reljoin(tmproot, label2)
+ cmdline = formatcmdline(
+ cmdline,
+ repo.root,
+ # no 3way while comparing patches
+ do3way=False,
+ parent1=file1,
+ plabel1=label1,
+ # while comparing patches, there is no second parent
+ parent2=None,
+ plabel2=None,
+ child=file2,
+ clabel=label2,
+ )
+ ui.debug(b'running %r in %s\n' % (pycompat.bytestr(cmdline), tmproot))
+ ui.system(cmdline, cwd=tmproot, blockedtag=b'extdiff')
+ return 1
+
+
+def diffrevs(
+ ui,
+ repo,
+ node1a,
+ node1b,
+ node2,
+ matcher,
+ tmproot,
+ cmdline,
+ do3way,
+ guitool,
+ opts,
+):
+
+ subrepos = opts.get(b'subrepos')
+
+ # calculate list of files changed between both revs
+ st = repo.status(node1a, node2, matcher, listsubrepos=subrepos)
+ mod_a, add_a, rem_a = set(st.modified), set(st.added), set(st.removed)
+ if do3way:
+ stb = repo.status(node1b, node2, matcher, listsubrepos=subrepos)
+ mod_b, add_b, rem_b = (
+ set(stb.modified),
+ set(stb.added),
+ set(stb.removed),
+ )
+ else:
+ mod_b, add_b, rem_b = set(), set(), set()
+ modadd = mod_a | add_a | mod_b | add_b
+ common = modadd | rem_a | rem_b
+ if not common:
+ return 0
+
+ # Always make a copy of node1a (and node1b, if applicable)
+ # dir1a should contain files which are:
+ # * modified or removed from node1a to node2
+ # * modified or added from node1b to node2
+    # (except files added from node1a to node2, as they were not present in
+ # node1a)
+ dir1a_files = mod_a | rem_a | ((mod_b | add_b) - add_a)
+ dir1a = snapshot(ui, repo, dir1a_files, node1a, tmproot, subrepos)[0]
+ rev1a = b'@%d' % repo[node1a].rev()
+ if do3way:
+ # file calculation criteria same as dir1a
+ dir1b_files = mod_b | rem_b | ((mod_a | add_a) - add_b)
+ dir1b = snapshot(ui, repo, dir1b_files, node1b, tmproot, subrepos)[0]
+ rev1b = b'@%d' % repo[node1b].rev()
+ else:
+ dir1b = None
+ rev1b = b''
+
+ fnsandstat = []
+
+    # If node2 is not the wc or there is >1 change, copy it
+ dir2root = b''
+ rev2 = b''
+ if node2:
+ dir2 = snapshot(ui, repo, modadd, node2, tmproot, subrepos)[0]
+ rev2 = b'@%d' % repo[node2].rev()
+ elif len(common) > 1:
+ # we only actually need to get the files to copy back to
+ # the working dir in this case (because the other cases
+ # are: diffing 2 revisions or single file -- in which case
+ # the file is already directly passed to the diff tool).
+ dir2, fnsandstat = snapshot(ui, repo, modadd, None, tmproot, subrepos)
+ else:
+ # This lets the diff tool open the changed file directly
+ dir2 = b''
+ dir2root = repo.root
+
+ label1a = rev1a
+ label1b = rev1b
+ label2 = rev2
+
+ # If only one change, diff the files instead of the directories
+ # Handle bogus modifies correctly by checking if the files exist
+ if len(common) == 1:
+ common_file = util.localpath(common.pop())
+ dir1a = os.path.join(tmproot, dir1a, common_file)
+ label1a = common_file + rev1a
+ if not os.path.isfile(dir1a):
+ dir1a = pycompat.osdevnull
+ if do3way:
+ dir1b = os.path.join(tmproot, dir1b, common_file)
+ label1b = common_file + rev1b
+ if not os.path.isfile(dir1b):
+ dir1b = pycompat.osdevnull
+ dir2 = os.path.join(dir2root, dir2, common_file)
+ label2 = common_file + rev2
+
+ if not opts.get(b'per_file'):
+ # Run the external tool on the 2 temp directories or the patches
+ cmdline = formatcmdline(
+ cmdline,
+ repo.root,
+ do3way=do3way,
+ parent1=dir1a,
+ plabel1=label1a,
+ parent2=dir1b,
+ plabel2=label1b,
+ child=dir2,
+ clabel=label2,
+ )
+ ui.debug(b'running %r in %s\n' % (pycompat.bytestr(cmdline), tmproot))
+ ui.system(cmdline, cwd=tmproot, blockedtag=b'extdiff')
+ else:
+ # Run the external tool once for each pair of files
+ _runperfilediff(
+ cmdline,
+ repo.root,
+ ui,
+ guitool=guitool,
+ do3way=do3way,
+ confirm=opts.get(b'confirm'),
+ commonfiles=common,
+ tmproot=tmproot,
+ dir1a=dir1a,
+ dir1b=dir1b,
+ dir2root=dir2root,
+ dir2=dir2,
+ rev1a=rev1a,
+ rev1b=rev1b,
+ rev2=rev2,
+ )
+
+ for copy_fn, working_fn, st in fnsandstat:
+ cpstat = os.lstat(copy_fn)
+ # Some tools copy the file and attributes, so mtime may not detect
+ # all changes. A size check will detect more cases, but not all.
+ # The only certain way to detect every case is to diff all files,
+ # which could be expensive.
+ # copyfile() carries over the permission, so the mode check could
+ # be in an 'elif' branch, but for the case where the file has
+ # changed without affecting mtime or size.
+ if (
+ cpstat[stat.ST_MTIME] != st[stat.ST_MTIME]
+ or cpstat.st_size != st.st_size
+ or (cpstat.st_mode & 0o100) != (st.st_mode & 0o100)
+ ):
+ ui.debug(
+ b'file changed while diffing. '
+ b'Overwriting: %s (src: %s)\n' % (working_fn, copy_fn)
+ )
+ util.copyfile(copy_fn, working_fn)
+
+ return 1
+
+
def dodiff(ui, repo, cmdline, pats, opts, guitool=False):
'''Do the actual diff:
@@ -360,14 +540,12 @@
- just invoke the diff for a single file in the working dir
'''
+ cmdutil.check_at_most_one_arg(opts, b'rev', b'change')
revs = opts.get(b'rev')
change = opts.get(b'change')
do3way = b'$parent2' in cmdline
- if revs and change:
- msg = _(b'cannot specify --rev and --change at the same time')
- raise error.Abort(msg)
- elif change:
+ if change:
ctx2 = scmutil.revsingle(repo, change, None)
ctx1a, ctx1b = ctx2.p1(), ctx2.p2()
else:
@@ -377,9 +555,6 @@
else:
ctx1b = repo[nullid]
- perfile = opts.get(b'per_file')
- confirm = opts.get(b'confirm')
-
node1a = ctx1a.node()
node1b = ctx1b.node()
node2 = ctx2.node()
@@ -389,169 +564,35 @@
if node1b == nullid:
do3way = False
- subrepos = opts.get(b'subrepos')
-
matcher = scmutil.match(repo[node2], pats, opts)
if opts.get(b'patch'):
- if subrepos:
+ if opts.get(b'subrepos'):
raise error.Abort(_(b'--patch cannot be used with --subrepos'))
- if perfile:
+ if opts.get(b'per_file'):
raise error.Abort(_(b'--patch cannot be used with --per-file'))
if node2 is None:
raise error.Abort(_(b'--patch requires two revisions'))
- else:
- st = repo.status(node1a, node2, matcher, listsubrepos=subrepos)
- mod_a, add_a, rem_a = set(st.modified), set(st.added), set(st.removed)
- if do3way:
- stb = repo.status(node1b, node2, matcher, listsubrepos=subrepos)
- mod_b, add_b, rem_b = (
- set(stb.modified),
- set(stb.added),
- set(stb.removed),
- )
- else:
- mod_b, add_b, rem_b = set(), set(), set()
- modadd = mod_a | add_a | mod_b | add_b
- common = modadd | rem_a | rem_b
- if not common:
- return 0
tmproot = pycompat.mkdtemp(prefix=b'extdiff.')
try:
- if not opts.get(b'patch'):
- # Always make a copy of node1a (and node1b, if applicable)
- dir1a_files = mod_a | rem_a | ((mod_b | add_b) - add_a)
- dir1a = snapshot(ui, repo, dir1a_files, node1a, tmproot, subrepos)[
- 0
- ]
- rev1a = b'@%d' % repo[node1a].rev()
- if do3way:
- dir1b_files = mod_b | rem_b | ((mod_a | add_a) - add_b)
- dir1b = snapshot(
- ui, repo, dir1b_files, node1b, tmproot, subrepos
- )[0]
- rev1b = b'@%d' % repo[node1b].rev()
- else:
- dir1b = None
- rev1b = b''
-
- fnsandstat = []
-
- # If node2 in not the wc or there is >1 change, copy it
- dir2root = b''
- rev2 = b''
- if node2:
- dir2 = snapshot(ui, repo, modadd, node2, tmproot, subrepos)[0]
- rev2 = b'@%d' % repo[node2].rev()
- elif len(common) > 1:
- # we only actually need to get the files to copy back to
- # the working dir in this case (because the other cases
- # are: diffing 2 revisions or single file -- in which case
- # the file is already directly passed to the diff tool).
- dir2, fnsandstat = snapshot(
- ui, repo, modadd, None, tmproot, subrepos
- )
- else:
- # This lets the diff tool open the changed file directly
- dir2 = b''
- dir2root = repo.root
-
- label1a = rev1a
- label1b = rev1b
- label2 = rev2
+ if opts.get(b'patch'):
+ return diffpatch(ui, repo, node1a, node2, tmproot, matcher, cmdline)
- # If only one change, diff the files instead of the directories
- # Handle bogus modifies correctly by checking if the files exist
- if len(common) == 1:
- common_file = util.localpath(common.pop())
- dir1a = os.path.join(tmproot, dir1a, common_file)
- label1a = common_file + rev1a
- if not os.path.isfile(dir1a):
- dir1a = pycompat.osdevnull
- if do3way:
- dir1b = os.path.join(tmproot, dir1b, common_file)
- label1b = common_file + rev1b
- if not os.path.isfile(dir1b):
- dir1b = pycompat.osdevnull
- dir2 = os.path.join(dir2root, dir2, common_file)
- label2 = common_file + rev2
- else:
- template = b'hg-%h.patch'
- with formatter.nullformatter(ui, b'extdiff', {}) as fm:
- cmdutil.export(
- repo,
- [repo[node1a].rev(), repo[node2].rev()],
- fm,
- fntemplate=repo.vfs.reljoin(tmproot, template),
- match=matcher,
- )
- label1a = cmdutil.makefilename(repo[node1a], template)
- label2 = cmdutil.makefilename(repo[node2], template)
- dir1a = repo.vfs.reljoin(tmproot, label1a)
- dir2 = repo.vfs.reljoin(tmproot, label2)
- dir1b = None
- label1b = None
- fnsandstat = []
+ return diffrevs(
+ ui,
+ repo,
+ node1a,
+ node1b,
+ node2,
+ matcher,
+ tmproot,
+ cmdline,
+ do3way,
+ guitool,
+ opts,
+ )
- if not perfile:
- # Run the external tool on the 2 temp directories or the patches
- cmdline = formatcmdline(
- cmdline,
- repo.root,
- do3way=do3way,
- parent1=dir1a,
- plabel1=label1a,
- parent2=dir1b,
- plabel2=label1b,
- child=dir2,
- clabel=label2,
- )
- ui.debug(
- b'running %r in %s\n' % (pycompat.bytestr(cmdline), tmproot)
- )
- ui.system(cmdline, cwd=tmproot, blockedtag=b'extdiff')
- else:
- # Run the external tool once for each pair of files
- _runperfilediff(
- cmdline,
- repo.root,
- ui,
- guitool=guitool,
- do3way=do3way,
- confirm=confirm,
- commonfiles=common,
- tmproot=tmproot,
- dir1a=dir1a,
- dir1b=dir1b,
- dir2root=dir2root,
- dir2=dir2,
- rev1a=rev1a,
- rev1b=rev1b,
- rev2=rev2,
- )
-
- for copy_fn, working_fn, st in fnsandstat:
- cpstat = os.lstat(copy_fn)
- # Some tools copy the file and attributes, so mtime may not detect
- # all changes. A size check will detect more cases, but not all.
- # The only certain way to detect every case is to diff all files,
- # which could be expensive.
- # copyfile() carries over the permission, so the mode check could
- # be in an 'elif' branch, but for the case where the file has
- # changed without affecting mtime or size.
- if (
- cpstat[stat.ST_MTIME] != st[stat.ST_MTIME]
- or cpstat.st_size != st.st_size
- or (cpstat.st_mode & 0o100) != (st.st_mode & 0o100)
- ):
- ui.debug(
- b'file changed while diffing. '
- b'Overwriting: %s (src: %s)\n' % (working_fn, copy_fn)
- )
- util.copyfile(copy_fn, working_fn)
-
- return 1
finally:
ui.note(_(b'cleaning up temp directory\n'))
shutil.rmtree(tmproot)
--- a/hgext/fix.py Tue Jul 14 10:25:41 2020 +0200
+++ b/hgext/fix.py Mon Jul 20 21:56:27 2020 +0530
@@ -144,6 +144,7 @@
match as matchmod,
mdiff,
merge,
+ mergestate as mergestatemod,
pycompat,
registrar,
rewriteutil,
@@ -267,8 +268,14 @@
workqueue, numitems = getworkqueue(
ui, repo, pats, opts, revstofix, basectxs
)
+ basepaths = getbasepaths(repo, opts, workqueue, basectxs)
fixers = getfixers(ui)
+ # Rather than letting each worker independently fetch the files
+ # (which also would add complications for shared/keepalive
+ # connections), prefetch them all first.
+ _prefetchfiles(repo, workqueue, basepaths)
+
# There are no data dependencies between the workers fixing each file
# revision, so we can use all available parallelism.
def getfixes(items):
@@ -276,7 +283,7 @@
ctx = repo[rev]
olddata = ctx[path].data()
metadata, newdata = fixfile(
- ui, repo, opts, fixers, ctx, path, basectxs[rev]
+ ui, repo, opts, fixers, ctx, path, basepaths, basectxs[rev]
)
# Don't waste memory/time passing unchanged content back, but
# produce one result per item either way.
@@ -426,7 +433,9 @@
if not (len(revs) == 1 and wdirrev in revs):
cmdutil.checkunfinished(repo)
rewriteutil.precheck(repo, revs, b'fix')
- if wdirrev in revs and list(merge.mergestate.read(repo).unresolved()):
+ if wdirrev in revs and list(
+ mergestatemod.mergestate.read(repo).unresolved()
+ ):
raise error.Abort(b'unresolved conflicts', hint=b"use 'hg resolve'")
if not revs:
raise error.Abort(
@@ -470,7 +479,7 @@
return files
-def lineranges(opts, path, basectxs, fixctx, content2):
+def lineranges(opts, path, basepaths, basectxs, fixctx, content2):
"""Returns the set of line ranges that should be fixed in a file
Of the form [(10, 20), (30, 40)].
@@ -489,7 +498,8 @@
rangeslist = []
for basectx in basectxs:
- basepath = copies.pathcopies(basectx, fixctx).get(path, path)
+ basepath = basepaths.get((basectx.rev(), fixctx.rev(), path), path)
+
if basepath in basectx:
content1 = basectx[basepath].data()
else:
@@ -498,6 +508,21 @@
return unionranges(rangeslist)
+def getbasepaths(repo, opts, workqueue, basectxs):
+ if opts.get(b'whole'):
+ # Base paths will never be fetched for line range determination.
+ return {}
+
+ basepaths = {}
+ for rev, path in workqueue:
+ fixctx = repo[rev]
+ for basectx in basectxs[rev]:
+ basepath = copies.pathcopies(basectx, fixctx).get(path, path)
+ if basepath in basectx:
+ basepaths[(basectx.rev(), fixctx.rev(), path)] = basepath
+ return basepaths
+
+
def unionranges(rangeslist):
"""Return the union of some closed intervals
@@ -610,7 +635,30 @@
return basectxs
-def fixfile(ui, repo, opts, fixers, fixctx, path, basectxs):
+def _prefetchfiles(repo, workqueue, basepaths):
+ toprefetch = set()
+
+ # Prefetch the files that will be fixed.
+ for rev, path in workqueue:
+ if rev == wdirrev:
+ continue
+ toprefetch.add((rev, path))
+
+ # Prefetch the base contents for lineranges().
+ for (baserev, fixrev, path), basepath in basepaths.items():
+ toprefetch.add((baserev, basepath))
+
+ if toprefetch:
+ scmutil.prefetchfiles(
+ repo,
+ [
+ (rev, scmutil.matchfiles(repo, [path]))
+ for rev, path in toprefetch
+ ],
+ )
+
+
+def fixfile(ui, repo, opts, fixers, fixctx, path, basepaths, basectxs):
"""Run any configured fixers that should affect the file in this context
Returns the file content that results from applying the fixers in some order
@@ -626,7 +674,9 @@
newdata = fixctx[path].data()
for fixername, fixer in pycompat.iteritems(fixers):
if fixer.affects(opts, fixctx, path):
- ranges = lineranges(opts, path, basectxs, fixctx, newdata)
+ ranges = lineranges(
+ opts, path, basepaths, basectxs, fixctx, newdata
+ )
command = fixer.command(ui, path, ranges)
if command is None:
continue
--- a/hgext/git/__init__.py Tue Jul 14 10:25:41 2020 +0200
+++ b/hgext/git/__init__.py Mon Jul 20 21:56:27 2020 +0530
@@ -16,6 +16,7 @@
extensions,
localrepo,
pycompat,
+ registrar,
scmutil,
store,
util,
@@ -28,6 +29,13 @@
index,
)
+configtable = {}
+configitem = registrar.configitem(configtable)
+# git.log-index-cache-miss: internal knob for testing
+configitem(
+ b"git", b"log-index-cache-miss", default=False,
+)
+
# TODO: extract an interface for this in core
class gitstore(object): # store.basicstore):
def __init__(self, path, vfstype):
@@ -41,13 +49,14 @@
os.path.normpath(os.path.join(path, b'..', b'.git'))
)
self._progress_factory = lambda *args, **kwargs: None
+ self._logfn = lambda x: None
@util.propertycache
def _db(self):
# We lazy-create the database because we want to thread a
# progress callback down to the indexing process if it's
# required, and we don't have a ui handle in makestore().
- return index.get_index(self.git, self._progress_factory)
+ return index.get_index(self.git, self._logfn, self._progress_factory)
def join(self, f):
"""Fake store.join method for git repositories.
@@ -276,6 +285,8 @@
if repo.local() and isinstance(repo.store, gitstore):
orig = repo.__class__
repo.store._progress_factory = repo.ui.makeprogress
+ if ui.configbool(b'git', b'log-index-cache-miss'):
+ repo.store._logfn = repo.ui.warn
class gitlocalrepo(orig):
def _makedirstate(self):
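
A sketch of exercising the new knob, in hgrc syntax (it is, per the comment
above, an internal testing aid):

    [git]
    log-index-cache-miss = yes

With this set, any operation that finds the dag cache stale warns 'heads
mismatch, rebuilding dagcache' (see the index.py hunk below), which tests can
match against.
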
--- a/hgext/git/dirstate.py Tue Jul 14 10:25:41 2020 +0200
+++ b/hgext/git/dirstate.py Mon Jul 20 21:56:27 2020 +0530
@@ -288,6 +288,10 @@
# TODO: track copies?
return None
+ def prefetch_parents(self):
+ # TODO
+ pass
+
@contextlib.contextmanager
def parentchange(self):
# TODO: track this maybe?
--- a/hgext/git/gitlog.py Tue Jul 14 10:25:41 2020 +0200
+++ b/hgext/git/gitlog.py Mon Jul 20 21:56:27 2020 +0530
@@ -247,6 +247,60 @@
def descendants(self, revs):
return dagop.descendantrevs(revs, self.revs, self.parentrevs)
+ def incrementalmissingrevs(self, common=None):
+ """Return an object that can be used to incrementally compute the
+ revision numbers of the ancestors of arbitrary sets that are not
+ ancestors of common. This is an ancestor.incrementalmissingancestors
+ object.
+
+ 'common' is a list of revision numbers. If common is not supplied, uses
+ nullrev.
+ """
+ if common is None:
+ common = [nodemod.nullrev]
+
+ return ancestor.incrementalmissingancestors(self.parentrevs, common)
+
+ def findmissing(self, common=None, heads=None):
+ """Return the ancestors of heads that are not ancestors of common.
+
+ More specifically, return a list of nodes N such that every N
+ satisfies the following constraints:
+
+ 1. N is an ancestor of some node in 'heads'
+ 2. N is not an ancestor of any node in 'common'
+
+ The list is sorted by revision number, meaning it is
+ topologically sorted.
+
+ 'heads' and 'common' are both lists of node IDs. If heads is
+ not supplied, uses all of the revlog's heads. If common is not
+ supplied, uses nullid."""
+ if common is None:
+ common = [nodemod.nullid]
+ if heads is None:
+ heads = self.heads()
+
+ common = [self.rev(n) for n in common]
+ heads = [self.rev(n) for n in heads]
+
+ inc = self.incrementalmissingrevs(common=common)
+ return [self.node(r) for r in inc.missingancestors(heads)]
+
+ def children(self, node):
+ """find the children of a given node"""
+ c = []
+ p = self.rev(node)
+ for r in self.revs(start=p + 1):
+ prevs = [pr for pr in self.parentrevs(r) if pr != nodemod.nullrev]
+ if prevs:
+ for pr in prevs:
+ if pr == p:
+ c.append(self.node(r))
+ elif p == nodemod.nullrev:
+ c.append(self.node(r))
+ return c
+
def reachableroots(self, minroot, heads, roots, includepath=False):
return dagop._reachablerootspure(
self.parentrevs, minroot, roots, heads, includepath
@@ -270,7 +324,10 @@
def parentrevs(self, rev):
n = self.node(rev)
hn = gitutil.togitnode(n)
- c = self.gitrepo[hn]
+ if hn != gitutil.nullgit:
+ c = self.gitrepo[hn]
+ else:
+ return nodemod.nullrev, nodemod.nullrev
p1 = p2 = nodemod.nullrev
if c.parents:
p1 = self.rev(c.parents[0].id.raw)
@@ -342,7 +399,7 @@
'refs/hg/internal/latest-commit', oid, force=True
)
# Reindex now to pick up changes. We omit the progress
- # callback because this will be very quick.
+ # and log callbacks because this will be very quick.
index._index_repo(self.gitrepo, self._db)
return oid.raw
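
These additions mirror the revlog ancestry API, so discovery-style code can run
unchanged against a gitlog. An illustrative use, with `cl` standing for this
changelog and the node values hypothetical:

    inc = cl.incrementalmissingrevs(common=[cl.rev(commonnode)])
    missing = inc.missingancestors([cl.rev(h) for h in cl.heads()])
    # or directly in node terms:
    missingnodes = cl.findmissing(common=[commonnode])
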
--- a/hgext/git/index.py Tue Jul 14 10:25:41 2020 +0200
+++ b/hgext/git/index.py Mon Jul 20 21:56:27 2020 +0530
@@ -216,7 +216,12 @@
db.commit()
-def _index_repo(gitrepo, db, progress_factory=lambda *args, **kwargs: None):
+def _index_repo(
+ gitrepo,
+ db,
+ logfn=lambda x: None,
+ progress_factory=lambda *args, **kwargs: None,
+):
# Identify all references so we can tell the walker to visit all of them.
all_refs = gitrepo.listall_references()
possible_heads = set()
@@ -245,11 +250,15 @@
# TODO: we should figure out how to incrementally index history
# (preferably by detecting rewinds!) so that we don't have to do a
# full changelog walk every time a new commit is created.
- cache_heads = {x[0] for x in db.execute('SELECT node FROM possible_heads')}
+ cache_heads = {
+ pycompat.sysstr(x[0])
+ for x in db.execute('SELECT node FROM possible_heads')
+ }
walker = None
cur_cache_heads = {h.hex for h in possible_heads}
if cur_cache_heads == cache_heads:
return
+ logfn(b'heads mismatch, rebuilding dagcache\n')
for start in possible_heads:
if walker is None:
walker = gitrepo.walk(start, _OUR_ORDER)
@@ -336,7 +345,9 @@
prog.complete()
-def get_index(gitrepo, progress_factory=lambda *args, **kwargs: None):
+def get_index(
+ gitrepo, logfn=lambda x: None, progress_factory=lambda *args, **kwargs: None
+):
cachepath = os.path.join(
pycompat.fsencode(gitrepo.path), b'..', b'.hg', b'cache'
)
@@ -346,5 +357,5 @@
db = _createdb(dbpath)
# TODO check against gitrepo heads before doing a full index
# TODO thread a ui.progress call into this layer
- _index_repo(gitrepo, db, progress_factory)
+ _index_repo(gitrepo, db, logfn, progress_factory)
return db
--- a/hgext/git/manifest.py Tue Jul 14 10:25:41 2020 +0200
+++ b/hgext/git/manifest.py Mon Jul 20 21:56:27 2020 +0530
@@ -56,8 +56,9 @@
return val
t = self._tree
comps = upath.split('/')
+ te = self._tree
for comp in comps[:-1]:
- te = self._tree[comp]
+ te = te[comp]
t = self._git_repo[te.id]
ent = t[comps[-1]]
if ent.filemode == pygit2.GIT_FILEMODE_BLOB:
@@ -125,9 +126,79 @@
def hasdir(self, dir):
return dir in self._dirs
- def diff(self, other, match=None, clean=False):
- # TODO
- assert False
+ def diff(self, other, match=lambda x: True, clean=False):
+        '''Finds changes between the current manifest and the other manifest.
+
+ The result is returned as a dict with filename as key and
+ values of the form ((n1,fl1),(n2,fl2)), where n1/n2 is the
+ nodeid in the current/other manifest and fl1/fl2 is the flag
+ in the current/other manifest. Where the file does not exist,
+ the nodeid will be None and the flags will be the empty
+ string.
+ '''
+ result = {}
+
+ def _iterativediff(t1, t2, subdir):
+ """compares two trees and appends new tree nodes to examine to
+ the stack"""
+ if t1 is None:
+ t1 = {}
+ if t2 is None:
+ t2 = {}
+
+ for e1 in t1:
+ realname = subdir + pycompat.fsencode(e1.name)
+
+ if e1.type == pygit2.GIT_OBJ_TREE:
+ try:
+ e2 = t2[e1.name]
+ if e2.type != pygit2.GIT_OBJ_TREE:
+ e2 = None
+ except KeyError:
+ e2 = None
+
+ stack.append((realname + b'/', e1, e2))
+ else:
+ n1, fl1 = self.find(realname)
+
+ try:
+ e2 = t2[e1.name]
+ n2, fl2 = other.find(realname)
+ except KeyError:
+ e2 = None
+ n2, fl2 = (None, b'')
+
+ if e2 is not None and e2.type == pygit2.GIT_OBJ_TREE:
+ stack.append((realname + b'/', None, e2))
+
+ if not match(realname):
+ continue
+
+ if n1 != n2 or fl1 != fl2:
+ result[realname] = ((n1, fl1), (n2, fl2))
+ elif clean:
+ result[realname] = None
+
+ for e2 in t2:
+ if e2.name in t1:
+ continue
+
+ realname = subdir + pycompat.fsencode(e2.name)
+
+ if e2.type == pygit2.GIT_OBJ_TREE:
+ stack.append((realname + b'/', None, e2))
+ elif match(realname):
+ n2, fl2 = other.find(realname)
+ result[realname] = ((None, b''), (n2, fl2))
+
+ stack = []
+ _iterativediff(self._tree, other._tree, b'')
+ while stack:
+ subdir, t1, t2 = stack.pop()
+ # stack is populated in the function call
+ _iterativediff(t1, t2, subdir)
+
+ return result
def setflag(self, path, flag):
node, unused_flag = self._resolve_entry(path)
@@ -168,14 +239,13 @@
for te in tree:
# TODO: can we prune dir walks with the matcher?
realname = subdir + pycompat.fsencode(te.name)
- if te.type == r'tree':
+ if te.type == pygit2.GIT_OBJ_TREE:
for inner in self._walkonetree(
self._git_repo[te.id], match, realname + b'/'
):
yield inner
- if not match(realname):
- continue
- yield pycompat.fsencode(realname)
+ elif match(realname):
+ yield pycompat.fsencode(realname)
def walk(self, match):
# TODO: this is a very lazy way to merge in the pending
@@ -205,7 +275,7 @@
return memgittreemanifestctx(self._repo, self._tree)
def find(self, path):
- self.read()[path]
+ return self.read()[path]
@interfaceutil.implementer(repository.imanifestrevisionwritable)
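
The new diff() follows the manifestdict.diff() contract described in its
docstring; an illustrative consumer, with m1 and m2 standing for two
gittreemanifest instances (default clean=False, so no None values appear):

    for path, ((n1, fl1), (n2, fl2)) in sorted(m1.diff(m2).items()):
        if n1 is None:
            state = b'added'
        elif n2 is None:
            state = b'removed'
        else:
            state = b'modified'
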
--- a/hgext/githelp.py Tue Jul 14 10:25:41 2020 +0200
+++ b/hgext/githelp.py Mon Jul 20 21:56:27 2020 +0530
@@ -628,8 +628,17 @@
(b'', b'stat', None, b''),
(b'', b'graph', None, b''),
(b'p', b'patch', None, b''),
+ (b'G', b'grep-diff', b'', b''),
+ (b'S', b'pickaxe-regex', b'', b''),
]
args, opts = parseoptions(ui, cmdoptions, args)
+ grep_pat = opts.get(b'grep_diff') or opts.get(b'pickaxe_regex')
+ if grep_pat:
+ cmd = Command(b'grep')
+ cmd[b'--diff'] = grep_pat
+ ui.status(b'%s\n' % bytes(cmd))
+ return
+
ui.status(
_(
b'note: -v prints the entire commit message like Git does. To '
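
With the new options, `git log -Gfrob` (and likewise `-Sfrob`) now
short-circuits to the closest Mercurial equivalent instead of the usual log
translation:

    $ hg githelp -- log -Gfrob
    hg grep --diff frob
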
--- a/hgext/histedit.py Tue Jul 14 10:25:41 2020 +0200
+++ b/hgext/histedit.py Mon Jul 20 21:56:27 2020 +0530
@@ -223,6 +223,7 @@
hg,
logcmdutil,
merge as mergemod,
+ mergestate as mergestatemod,
mergeutil,
node,
obsolete,
@@ -2285,7 +2286,7 @@
def bootstrapcontinue(ui, state, opts):
repo = state.repo
- ms = mergemod.mergestate.read(repo)
+ ms = mergestatemod.mergestate.read(repo)
mergeutil.checkunresolved(ms)
if state.actions:
--- a/hgext/hooklib/changeset_obsoleted.py Tue Jul 14 10:25:41 2020 +0200
+++ b/hgext/hooklib/changeset_obsoleted.py Mon Jul 20 21:56:27 2020 +0530
@@ -122,10 +122,18 @@
)
+def has_successor(repo, rev):
+ return any(
+ r for r in obsutil.allsuccessors(repo.obsstore, [rev]) if r != rev
+ )
+
+
def hook(ui, repo, hooktype, node=None, **kwargs):
- if hooktype != b"pretxnclose":
+ if hooktype != b"txnclose":
raise error.Abort(
_(b'Unsupported hook type %r') % pycompat.bytestr(hooktype)
)
- for rev in obsutil.getobsoleted(repo, repo.currenttransaction()):
- _report_commit(ui, repo, repo.unfiltered()[rev])
+ for rev in obsutil.getobsoleted(repo, changes=kwargs['changes']):
+ ctx = repo.unfiltered()[rev]
+ if not has_successor(repo, ctx.node()):
+ _report_commit(ui, repo, ctx)
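
With the hook type check switched, the hook must now be wired as txnclose
rather than pretxnclose; a sketch of the hgrc registration, assuming the usual
hooklib entry point:

    [hooks]
    txnclose.changeset_obsoleted = python:hgext.hooklib.changeset_obsoleted.hook

The move to txnclose matches getobsoleted() now reading the transaction's
changes from the hook's kwargs instead of an open transaction.
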
--- a/hgext/infinitepush/__init__.py Tue Jul 14 10:25:41 2020 +0200
+++ b/hgext/infinitepush/__init__.py Mon Jul 20 21:56:27 2020 +0530
@@ -466,7 +466,7 @@
version = b'02'
outgoing = discovery.outgoing(
- bundlerepo, commonheads=bundleroots, missingheads=[unknownhead]
+ bundlerepo, commonheads=bundleroots, ancestorsof=[unknownhead]
)
cgstream = changegroup.makestream(bundlerepo, outgoing, version, b'pull')
cgstream = util.chunkbuffer(cgstream).read()
--- a/hgext/largefiles/lfcommands.py Tue Jul 14 10:25:41 2020 +0200
+++ b/hgext/largefiles/lfcommands.py Mon Jul 20 21:56:27 2020 +0530
@@ -163,7 +163,7 @@
# to the destination repository's requirements.
if lfiles:
rdst.requirements.add(b'largefiles')
- rdst._writerequirements()
+ scmutil.writereporequirements(rdst)
else:
class lfsource(filemap.filemap_source):
--- a/hgext/largefiles/overrides.py Tue Jul 14 10:25:41 2020 +0200
+++ b/hgext/largefiles/overrides.py Mon Jul 20 21:56:27 2020 +0530
@@ -31,6 +31,7 @@
logcmdutil,
match as matchmod,
merge,
+ mergestate as mergestatemod,
pathutil,
pycompat,
scmutil,
@@ -622,7 +623,7 @@
return actions, diverge, renamedelete
-@eh.wrapfunction(merge, b'recordupdates')
+@eh.wrapfunction(mergestatemod, b'recordupdates')
def mergerecordupdates(orig, repo, actions, branchmerge, getfiledata):
if b'lfmr' in actions:
lfdirstate = lfutil.openlfdirstate(repo.ui, repo)
--- a/hgext/largefiles/reposetup.py Tue Jul 14 10:25:41 2020 +0200
+++ b/hgext/largefiles/reposetup.py Mon Jul 20 21:56:27 2020 +0530
@@ -448,7 +448,7 @@
lfutil.shortname + b'/' in f[0] for f in repo.store.datafiles()
):
repo.requirements.add(b'largefiles')
- repo._writerequirements()
+ scmutil.writereporequirements(repo)
ui.setconfig(
b'hooks', b'changegroup.lfiles', checkrequireslfiles, b'largefiles'
--- a/hgext/lfs/__init__.py Tue Jul 14 10:25:41 2020 +0200
+++ b/hgext/lfs/__init__.py Mon Jul 20 21:56:27 2020 +0530
@@ -255,7 +255,7 @@
):
repo.requirements.add(b'lfs')
repo.features.add(repository.REPO_FEATURE_LFS)
- repo._writerequirements()
+ scmutil.writereporequirements(repo)
repo.prepushoutgoinghooks.add(b'lfs', wrapper.prepush)
break
--- a/hgext/lfs/wrapper.py Tue Jul 14 10:25:41 2020 +0200
+++ b/hgext/lfs/wrapper.py Mon Jul 20 21:56:27 2020 +0530
@@ -312,7 +312,7 @@
# membership before assuming it is in the context.
if any(f in ctx and ctx[f].islfs() for f, n in files):
self.repo.requirements.add(b'lfs')
- self.repo._writerequirements()
+ scmutil.writereporequirements(self.repo)
return node
@@ -337,7 +337,7 @@
setattr(self, name, getattr(othervfs, name))
-def _prefetchfiles(repo, revs, match):
+def _prefetchfiles(repo, revmatches):
"""Ensure that required LFS blobs are present, fetching them as a group if
needed."""
if not util.safehasattr(repo.svfs, b'lfslocalblobstore'):
@@ -347,7 +347,7 @@
oids = set()
localstore = repo.svfs.lfslocalblobstore
- for rev in revs:
+ for rev, match in revmatches:
ctx = repo[rev]
for f in ctx.walk(match):
p = pointerfromctx(ctx, f)
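
The prefetch hook now receives (revision, matcher) pairs, so callers can vary
the matcher per revision; the calling convention, as the archival.py and fix.py
hunks in this series show:

    scmutil.prefetchfiles(
        repo, [(rev, scmutil.matchfiles(repo, [path])) for rev, path in wanted]
    )

where `wanted` is an illustrative list of (rev, path) pairs.
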
--- a/hgext/mq.py Tue Jul 14 10:25:41 2020 +0200
+++ b/hgext/mq.py Mon Jul 20 21:56:27 2020 +0530
@@ -836,7 +836,15 @@
stat = opts.get(b'stat')
m = scmutil.match(repo[node1], files, opts)
logcmdutil.diffordiffstat(
- self.ui, repo, diffopts, node1, node2, m, changes, stat, fp
+ self.ui,
+ repo,
+ diffopts,
+ repo[node1],
+ repo[node2],
+ m,
+ changes,
+ stat,
+ fp,
)
def mergeone(self, repo, mergeq, head, patch, rev, diffopts):
--- a/hgext/narrow/narrowbundle2.py Tue Jul 14 10:25:41 2020 +0200
+++ b/hgext/narrow/narrowbundle2.py Mon Jul 20 21:56:27 2020 +0530
@@ -20,6 +20,7 @@
localrepo,
narrowspec,
repair,
+ scmutil,
util,
wireprototypes,
)
@@ -179,7 +180,7 @@
if not repository.NARROW_REQUIREMENT in op.repo.requirements:
op.repo.requirements.add(repository.NARROW_REQUIREMENT)
- op.repo._writerequirements()
+ scmutil.writereporequirements(op.repo)
op.repo.setnarrowpats(includepats, excludepats)
narrowspec.copytoworkingcopy(op.repo)
@@ -195,7 +196,7 @@
if repository.NARROW_REQUIREMENT not in op.repo.requirements:
op.repo.requirements.add(repository.NARROW_REQUIREMENT)
- op.repo._writerequirements()
+ scmutil.writereporequirements(op.repo)
op.repo.setnarrowpats(includepats, excludepats)
narrowspec.copytoworkingcopy(op.repo)
--- a/hgext/phabricator.py Tue Jul 14 10:25:41 2020 +0200
+++ b/hgext/phabricator.py Mon Jul 20 21:56:27 2020 +0530
@@ -238,8 +238,8 @@
def decorate(fn):
def inner(*args, **kwargs):
- cassette = pycompat.fsdecode(kwargs.pop('test_vcr', None))
- if cassette:
+ if kwargs.get('test_vcr'):
+ cassette = pycompat.fsdecode(kwargs.pop('test_vcr'))
import hgdemandimport
with hgdemandimport.deactivated():
@@ -1311,8 +1311,8 @@
# --fold option implies this, and the auto restacking of orphans requires
# it. Otherwise A+C in A->B->C will cause B to be orphaned, and C' to
# get A' as a parent.
- def _fail_nonlinear_revs(revs, skiprev, revtype):
- badnodes = [repo[r].node() for r in revs if r != skiprev]
+ def _fail_nonlinear_revs(revs, revtype):
+ badnodes = [repo[r].node() for r in revs]
raise error.Abort(
_(b"cannot phabsend multiple %s revisions: %s")
% (revtype, scmutil.nodesummaries(repo, badnodes)),
@@ -1321,11 +1321,11 @@
heads = repo.revs(b'heads(%ld)', revs)
if len(heads) > 1:
- _fail_nonlinear_revs(heads, heads.max(), b"head")
+ _fail_nonlinear_revs(heads, b"head")
roots = repo.revs(b'roots(%ld)', revs)
if len(roots) > 1:
- _fail_nonlinear_revs(roots, roots.min(), b"root")
+ _fail_nonlinear_revs(roots, b"root")
fold = opts.get(b'fold')
if fold:
@@ -1650,7 +1650,7 @@
)
if ui.promptchoice(
- _(b'Send the above changes to %s (yn)?$$ &Yes $$ &No') % url
+ _(b'Send the above changes to %s (Y/n)?$$ &Yes $$ &No') % url
):
return False
@@ -2162,8 +2162,14 @@
[
(b'', b'accept', False, _(b'accept revisions')),
(b'', b'reject', False, _(b'reject revisions')),
+ (b'', b'request-review', False, _(b'request review on revisions')),
(b'', b'abandon', False, _(b'abandon revisions')),
(b'', b'reclaim', False, _(b'reclaim revisions')),
+ (b'', b'close', False, _(b'close revisions')),
+ (b'', b'reopen', False, _(b'reopen revisions')),
+ (b'', b'plan-changes', False, _(b'plan changes for revisions')),
+ (b'', b'resign', False, _(b'resign as a reviewer from revisions')),
+ (b'', b'commandeer', False, _(b'commandeer revisions')),
(b'm', b'comment', b'', _(b'comment on the last revision')),
],
_(b'DREVSPEC... [OPTIONS]'),
@@ -2176,7 +2182,19 @@
DREVSPEC selects revisions. See :hg:`help phabread` for its usage.
"""
opts = pycompat.byteskwargs(opts)
- flags = [n for n in b'accept reject abandon reclaim'.split() if opts.get(n)]
+ transactions = [
+ b'abandon',
+ b'accept',
+ b'close',
+ b'commandeer',
+ b'plan-changes',
+ b'reclaim',
+ b'reject',
+ b'reopen',
+ b'request-review',
+ b'resign',
+ ]
+ flags = [n for n in transactions if opts.get(n.replace(b'-', b'_'))]
if len(flags) > 1:
raise error.Abort(_(b'%s cannot be used together') % b', '.join(flags))
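
A usage sketch for one of the new transaction flags (D123 is an illustrative
revision spec, as in :hg:`help phabread`):

    $ hg phabupdate --request-review D123 -m 'rebased, please take another look'
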
--- a/hgext/purge.py Tue Jul 14 10:25:41 2020 +0200
+++ b/hgext/purge.py Mon Jul 20 21:56:27 2020 +0530
@@ -64,7 +64,7 @@
]
+ cmdutil.walkopts,
_(b'hg purge [OPTION]... [DIR]...'),
- helpcategory=command.CATEGORY_MAINTENANCE,
+ helpcategory=command.CATEGORY_WORKING_DIRECTORY,
)
def purge(ui, repo, *dirs, **opts):
'''removes files not tracked by Mercurial
--- a/hgext/rebase.py Tue Jul 14 10:25:41 2020 +0200
+++ b/hgext/rebase.py Mon Jul 20 21:56:27 2020 +0530
@@ -36,6 +36,7 @@
extensions,
hg,
merge as mergemod,
+ mergestate as mergestatemod,
mergeutil,
node as nodemod,
obsolete,
@@ -205,6 +206,9 @@
self.backupf = ui.configbool(b'rewrite', b'backup-bundle')
self.keepf = opts.get(b'keep', False)
self.keepbranchesf = opts.get(b'keepbranches', False)
+ self.skipemptysuccessorf = rewriteutil.skip_empty_successor(
+ repo.ui, b'rebase'
+ )
self.obsoletenotrebased = {}
self.obsoletewithoutsuccessorindestination = set()
self.inmemory = inmemory
@@ -528,11 +532,11 @@
extra = {b'rebase_source': ctx.hex()}
for c in self.extrafns:
c(ctx, extra)
- keepbranch = self.keepbranchesf and repo[p1].branch() != ctx.branch()
destphase = max(ctx.phase(), phases.draft)
- overrides = {(b'phases', b'new-commit'): destphase}
- if keepbranch:
- overrides[(b'ui', b'allowemptycommit')] = True
+ overrides = {
+ (b'phases', b'new-commit'): destphase,
+ (b'ui', b'allowemptycommit'): not self.skipemptysuccessorf,
+ }
with repo.ui.configoverride(overrides, b'rebase'):
if self.inmemory:
newnode = commitmemorynode(
@@ -544,7 +548,7 @@
user=ctx.user(),
date=date,
)
- mergemod.mergestate.clean(repo)
+ mergestatemod.mergestate.clean(repo)
else:
newnode = commitnode(
repo,
@@ -626,12 +630,7 @@
if self.inmemory:
raise error.InMemoryMergeConflictsError()
else:
- raise error.InterventionRequired(
- _(
- b'unresolved conflicts (see hg '
- b'resolve, then hg rebase --continue)'
- )
- )
+ raise error.ConflictResolutionRequired(b'rebase')
if not self.collapsef:
merging = p2 != nullrev
editform = cmdutil.mergeeditform(merging, b'rebase')
@@ -652,6 +651,14 @@
if newnode is not None:
self.state[rev] = repo[newnode].rev()
ui.debug(b'rebased as %s\n' % short(newnode))
+ if repo[newnode].isempty():
+ ui.warn(
+ _(
+ b'note: created empty successor for %s, its '
+ b'destination already has all its changes\n'
+ )
+ % desc
+ )
else:
if not self.collapsef:
ui.warn(
@@ -1084,7 +1091,7 @@
)
# TODO: Make in-memory merge not use the on-disk merge state, so
# we don't have to clean it here
- mergemod.mergestate.clean(repo)
+ mergestatemod.mergestate.clean(repo)
clearstatus(repo)
clearcollapsemsg(repo)
return _dorebase(ui, repo, action, opts, inmemory=False)
@@ -1191,7 +1198,7 @@
if action == b'abort' and opts.get(b'tool', False):
ui.warn(_(b'tool option will be ignored\n'))
if action == b'continue':
- ms = mergemod.mergestate.read(repo)
+ ms = mergestatemod.mergestate.read(repo)
mergeutil.checkunresolved(ms)
retcode = rbsrt._prepareabortorcontinue(
@@ -1429,10 +1436,6 @@
def commitmemorynode(repo, wctx, editor, extra, user, date, commitmsg):
'''Commit the memory changes with parents p1 and p2.
Return node of committed revision.'''
- # Replicates the empty check in ``repo.commit``.
- if wctx.isempty() and not repo.ui.configbool(b'ui', b'allowemptycommit'):
- return None
-
# By convention, ``extra['branch']`` (set by extrafn) clobbers
# ``branch`` (used when passing ``--keepbranches``).
branch = None
@@ -1447,6 +1450,8 @@
branch=branch,
editor=editor,
)
+ if memctx.isempty() and not repo.ui.configbool(b'ui', b'allowemptycommit'):
+ return None
commitres = repo.commitctx(memctx)
wctx.clean() # Might be reused
return commitres
@@ -2201,7 +2206,7 @@
def continuerebase(ui, repo):
with repo.wlock(), repo.lock():
rbsrt = rebaseruntime(repo, ui)
- ms = mergemod.mergestate.read(repo)
+ ms = mergestatemod.mergestate.read(repo)
mergeutil.checkunresolved(ms)
retcode = rbsrt._prepareabortorcontinue(isabort=False)
if retcode is not None:
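
Both this skipemptysuccessorf flag and absorb's skip_empty_successor property
above read a shared rewriteutil helper; assuming that companion change exposes
a `rewrite.empty-successor` config (values skip/keep, with skip preserving the
old drop-empty behavior), keeping empties would be selected with:

    [rewrite]
    empty-successor = keep

in which case rebase creates the empty commit and prints the 'created empty
successor' note added above.
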
--- a/hgext/releasenotes.py Tue Jul 14 10:25:41 2020 +0200
+++ b/hgext/releasenotes.py Mon Jul 20 21:56:27 2020 +0530
@@ -30,7 +30,10 @@
scmutil,
util,
)
-from mercurial.utils import stringutil
+from mercurial.utils import (
+ procutil,
+ stringutil,
+)
cmdtable = {}
command = registrar.command(cmdtable)
@@ -689,7 +692,7 @@
def debugparsereleasenotes(ui, path, repo=None):
"""parse release notes and print resulting data structure"""
if path == b'-':
- text = pycompat.stdin.read()
+ text = procutil.stdin.read()
else:
with open(path, b'rb') as fh:
text = fh.read()
--- a/hgext/remotefilelog/__init__.py Tue Jul 14 10:25:41 2020 +0200
+++ b/hgext/remotefilelog/__init__.py Mon Jul 20 21:56:27 2020 +0530
@@ -148,7 +148,7 @@
extensions,
hg,
localrepo,
- match,
+ match as matchmod,
merge,
node as nodemod,
patch,
@@ -361,7 +361,7 @@
self.unfiltered().__class__,
)
self.requirements.add(constants.SHALLOWREPO_REQUIREMENT)
- self._writerequirements()
+ scmutil.writereporequirements(self)
# Since setupclient hadn't been called, exchange.pull was not
# wrapped. So we need to manually invoke our version of it.
@@ -824,12 +824,12 @@
# i18n: "filelog" is a keyword
pat = revset.getstring(x, _(b"filelog requires a pattern"))
- m = match.match(
+ m = matchmod.match(
repo.root, repo.getcwd(), [pat], default=b'relpath', ctx=repo[None]
)
s = set()
- if not match.patkind(pat):
+ if not matchmod.patkind(pat):
# slow
for r in subset:
ctx = repo[r]
@@ -1118,10 +1118,10 @@
return orig(repo, remote, *args, **kwargs)
-def _fileprefetchhook(repo, revs, match):
+def _fileprefetchhook(repo, revmatches):
if isenabled(repo):
allfiles = []
- for rev in revs:
+ for rev, match in revmatches:
if rev == nodemod.wdirrev or rev is None:
continue
ctx = repo[rev]
--- a/hgext/strip.py Tue Jul 14 10:25:41 2020 +0200
+++ b/hgext/strip.py Mon Jul 20 21:56:27 2020 +0530
@@ -13,7 +13,7 @@
error,
hg,
lock as lockmod,
- merge,
+ mergestate as mergestatemod,
node as nodemod,
pycompat,
registrar,
@@ -269,7 +269,7 @@
repo.dirstate.write(repo.currenttransaction())
# clear resolve state
- merge.mergestate.clean(repo, repo[b'.'].node())
+ mergestatemod.mergestate.clean(repo, repo[b'.'].node())
update = False
--- a/mercurial/archival.py Tue Jul 14 10:25:41 2020 +0200
+++ b/mercurial/archival.py Mon Jul 20 21:56:27 2020 +0530
@@ -369,7 +369,7 @@
if total:
files.sort()
scmutil.prefetchfiles(
- repo, [ctx.rev()], scmutil.matchfiles(repo, files)
+ repo, [(ctx.rev(), scmutil.matchfiles(repo, files))]
)
progress = repo.ui.makeprogress(
_(b'archiving'), unit=_(b'files'), total=total
--- a/mercurial/bundle2.py Tue Jul 14 10:25:41 2020 +0200
+++ b/mercurial/bundle2.py Mon Jul 20 21:56:27 2020 +0530
@@ -166,6 +166,7 @@
phases,
pushkey,
pycompat,
+ scmutil,
streamclone,
tags,
url,
@@ -1710,7 +1711,7 @@
b'nbchanges', b'%d' % cg.extras[b'clcount'], mandatory=False
)
if opts.get(b'phases') and repo.revs(
- b'%ln and secret()', outgoing.missingheads
+ b'%ln and secret()', outgoing.ancestorsof
):
part.addparam(
b'targetphase', b'%d' % phases.secret, mandatory=False
@@ -1752,7 +1753,7 @@
# consume little memory (1M heads is 40MB) b) we don't want to send the
# part if we don't have entries and knowing if we have entries requires
# cache lookups.
- for node in outgoing.missingheads:
+ for node in outgoing.ancestorsof:
# Don't compute missing, as this may slow down serving.
fnode = cache.getfnode(node, computemissing=False)
if fnode is not None:
@@ -1977,7 +1978,7 @@
op.repo.svfs.options = localrepo.resolvestorevfsoptions(
op.repo.ui, op.repo.requirements, op.repo.features
)
- op.repo._writerequirements()
+ scmutil.writereporequirements(op.repo)
bundlesidedata = bool(b'exp-sidedata' in inpart.params)
reposidedata = bool(b'exp-sidedata-flag' in op.repo.requirements)
@@ -2207,7 +2208,7 @@
b'remote repository changed while pushing - please try again '
b'(%s is %s expected %s)'
)
- for expectedphase, nodes in enumerate(phasetonodes):
+ for expectedphase, nodes in pycompat.iteritems(phasetonodes):
for n in nodes:
actualphase = phasecache.phase(unfi, cl.rev(n))
if actualphase != expectedphase:
--- a/mercurial/cext/manifest.c Tue Jul 14 10:25:41 2020 +0200
+++ b/mercurial/cext/manifest.c Mon Jul 20 21:56:27 2020 +0530
@@ -49,23 +49,35 @@
}
/* get the node value of a single line */
-static PyObject *nodeof(line *l)
+static PyObject *nodeof(line *l, char *flag)
{
char *s = l->start;
Py_ssize_t llen = pathlen(l);
Py_ssize_t hlen = l->len - llen - 2;
- Py_ssize_t hlen_raw = 20;
+ Py_ssize_t hlen_raw;
PyObject *hash;
if (llen + 1 + 40 + 1 > l->len) { /* path '\0' hash '\n' */
PyErr_SetString(PyExc_ValueError, "manifest line too short");
return NULL;
}
+ /* Detect flags after the hash first. */
+ switch (s[llen + hlen]) {
+ case 'l':
+ case 't':
+ case 'x':
+ *flag = s[llen + hlen];
+ --hlen;
+ break;
+ default:
+ *flag = '\0';
+ break;
+ }
+
switch (hlen) {
case 40: /* sha1 */
- case 41: /* sha1 with cruft for a merge */
+ hlen_raw = 20;
break;
case 64: /* new hash */
- case 65: /* new hash with cruft for a merge */
hlen_raw = 32;
break;
default:
@@ -89,24 +101,14 @@
/* get the node hash and flags of a line as a tuple */
static PyObject *hashflags(line *l)
{
- char *s = l->start;
- Py_ssize_t plen = pathlen(l);
- PyObject *hash = nodeof(l);
- ssize_t hlen;
- Py_ssize_t hplen, flen;
+ char flag;
+ PyObject *hash = nodeof(l, &flag);
PyObject *flags;
PyObject *tup;
if (!hash)
return NULL;
- /* hash is either 20 or 21 bytes for an old hash, so we use a
- ternary here to get the "real" hexlified sha length. */
- hlen = PyBytes_GET_SIZE(hash) < 22 ? 40 : 64;
- /* 1 for null byte, 1 for newline */
- hplen = plen + hlen + 2;
- flen = l->len - hplen;
-
- flags = PyBytes_FromStringAndSize(s + hplen - 1, flen);
+ flags = PyBytes_FromStringAndSize(&flag, flag ? 1 : 0);
if (!flags) {
Py_DECREF(hash);
return NULL;
@@ -291,7 +293,7 @@
{
Py_ssize_t pl;
line *l;
- Py_ssize_t consumed;
+ char flag;
PyObject *ret = NULL, *path = NULL, *hash = NULL, *flags = NULL;
l = lmiter_nextline((lmIter *)o);
if (!l) {
@@ -299,13 +301,11 @@
}
pl = pathlen(l);
path = PyBytes_FromStringAndSize(l->start, pl);
- hash = nodeof(l);
+ hash = nodeof(l, &flag);
if (!path || !hash) {
goto done;
}
- consumed = pl + 41;
- flags = PyBytes_FromStringAndSize(l->start + consumed,
- l->len - consumed - 1);
+ flags = PyBytes_FromStringAndSize(&flag, flag ? 1 : 0);
if (!flags) {
goto done;
}
@@ -568,19 +568,13 @@
pyhash = PyTuple_GetItem(value, 0);
if (!PyBytes_Check(pyhash)) {
PyErr_Format(PyExc_TypeError,
- "node must be a 20-byte string");
+ "node must be a 20 or 32 bytes string");
return -1;
}
hlen = PyBytes_Size(pyhash);
- /* Some parts of the codebase try and set 21 or 22
- * byte "hash" values in order to perturb things for
- * status. We have to preserve at least the 21st
- * byte. Sigh. If there's a 22nd byte, we drop it on
- * the floor, which works fine.
- */
- if (hlen != 20 && hlen != 21 && hlen != 22) {
+ if (hlen != 20 && hlen != 32) {
PyErr_Format(PyExc_TypeError,
- "node must be a 20-byte string");
+ "node must be a 20 or 32 bytes string");
return -1;
}
hash = PyBytes_AsString(pyhash);
@@ -588,28 +582,39 @@
pyflags = PyTuple_GetItem(value, 1);
if (!PyBytes_Check(pyflags) || PyBytes_Size(pyflags) > 1) {
PyErr_Format(PyExc_TypeError,
- "flags must a 0 or 1 byte string");
+ "flags must a 0 or 1 bytes string");
return -1;
}
if (PyBytes_AsStringAndSize(pyflags, &flags, &flen) == -1) {
return -1;
}
+ if (flen == 1) {
+ switch (*flags) {
+ case 'l':
+ case 't':
+ case 'x':
+ break;
+ default:
+ PyErr_Format(PyExc_TypeError, "invalid manifest flag");
+ return -1;
+ }
+ }
/* one null byte and one newline */
- dlen = plen + 41 + flen + 1;
+ dlen = plen + hlen * 2 + 1 + flen + 1;
dest = malloc(dlen);
if (!dest) {
PyErr_NoMemory();
return -1;
}
memcpy(dest, path, plen + 1);
- for (i = 0; i < 20; i++) {
+ for (i = 0; i < hlen; i++) {
/* Cast to unsigned, so it will not get sign-extended when promoted
* to int (as is done when passing to a variadic function)
*/
sprintf(dest + plen + 1 + (i * 2), "%02x", (unsigned char)hash[i]);
}
- memcpy(dest + plen + 41, flags, flen);
- dest[plen + 41 + flen] = '\n';
+ memcpy(dest + plen + 2 * hlen + 1, flags, flen);
+ dest[plen + 2 * hlen + 1 + flen] = '\n';
new.start = dest;
new.len = dlen;
new.hash_suffix = '\0';
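
After this change, nodeof() looks for an optional flag byte ('l', 't' or 'x') following the hex hash before validating the hash length, rather than tolerating 41/65-byte "hash plus cruft" forms; that is what lets a manifest line carry either a 20-byte sha1 or a 32-byte hash. A pure-Python rendering of the new parsing order (parse_manifest_line is an illustrative name, not Mercurial API):

    def parse_manifest_line(line):
        # Parse "path NUL hexhash [flag] LF" the way the new C code does.
        assert line.endswith(b'\n')
        path, rest = line[:-1].split(b'\0', 1)
        flag = b''
        if rest[-1:] in (b'l', b't', b'x'):    # detect the flag first
            flag = rest[-1:]
            rest = rest[:-1]
        if len(rest) not in (40, 64):          # sha1 or new 32-byte hash
            raise ValueError('unexpected manifest hash length')
        return path, bytes.fromhex(rest.decode('ascii')), flag

    node = b'1' * 40
    assert parse_manifest_line(b'foo\0' + node + b'x\n') == (
        b'foo', b'\x11' * 20, b'x')
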
--- a/mercurial/cext/osutil.c Tue Jul 14 10:25:41 2020 +0200
+++ b/mercurial/cext/osutil.c Mon Jul 20 21:56:27 2020 +0530
@@ -336,7 +336,7 @@
static PyObject *_listdir_stat(char *path, int pathlen, int keepstat,
char *skip)
{
- PyObject *list, *elem, *stat = NULL, *ret = NULL;
+ PyObject *list, *elem, *ret = NULL;
char fullpath[PATH_MAX + 10];
int kind, err;
struct stat st;
@@ -409,7 +409,7 @@
}
if (keepstat) {
- stat = makestat(&st);
+ PyObject *stat = makestat(&st);
if (!stat)
goto error;
elem = Py_BuildValue(PY23("siN", "yiN"), ent->d_name,
@@ -419,7 +419,6 @@
kind);
if (!elem)
goto error;
- stat = NULL;
PyList_Append(list, elem);
Py_DECREF(elem);
@@ -430,7 +429,6 @@
error:
Py_DECREF(list);
- Py_XDECREF(stat);
error_list:
closedir(dir);
/* closedir also closes its dirfd */
@@ -480,7 +478,7 @@
static PyObject *_listdir_batch(char *path, int pathlen, int keepstat,
char *skip, bool *fallback)
{
- PyObject *list, *elem, *stat = NULL, *ret = NULL;
+ PyObject *list, *elem, *ret = NULL;
int kind, err;
unsigned long index;
unsigned int count, old_state, new_state;
@@ -586,6 +584,7 @@
}
if (keepstat) {
+ PyObject *stat = NULL;
/* from the getattrlist(2) man page: "Only the
permission bits ... are valid". */
st.st_mode = (entry->access_mask & ~S_IFMT) | kind;
@@ -601,7 +600,6 @@
filename, kind);
if (!elem)
goto error;
- stat = NULL;
PyList_Append(list, elem);
Py_DECREF(elem);
@@ -615,7 +613,6 @@
error:
Py_DECREF(list);
- Py_XDECREF(stat);
error_dir:
close(dfd);
error_value:
--- a/mercurial/cext/parsers.c Tue Jul 14 10:25:41 2020 +0200
+++ b/mercurial/cext/parsers.c Mon Jul 20 21:56:27 2020 +0530
@@ -667,7 +667,7 @@
void manifest_module_init(PyObject *mod);
void revlog_module_init(PyObject *mod);
-static const int version = 16;
+static const int version = 17;
static void module_init(PyObject *mod)
{
--- a/mercurial/cext/revlog.c Tue Jul 14 10:25:41 2020 +0200
+++ b/mercurial/cext/revlog.c Mon Jul 20 21:56:27 2020 +0530
@@ -109,6 +109,9 @@
static Py_ssize_t inline_scan(indexObject *self, const char **offsets);
+static int index_find_node(indexObject *self, const char *node,
+ Py_ssize_t nodelen);
+
#if LONG_MAX == 0x7fffffffL
static const char *const tuple_format = PY23("Kiiiiiis#", "Kiiiiiiy#");
#else
@@ -577,34 +580,6 @@
}
}
-static Py_ssize_t add_roots_get_min(indexObject *self, PyObject *list,
- Py_ssize_t marker, char *phases)
-{
- PyObject *iter = NULL;
- PyObject *iter_item = NULL;
- Py_ssize_t min_idx = index_length(self) + 2;
- long iter_item_long;
-
- if (PyList_GET_SIZE(list) != 0) {
- iter = PyObject_GetIter(list);
- if (iter == NULL)
- return -2;
- while ((iter_item = PyIter_Next(iter))) {
- if (!pylong_to_long(iter_item, &iter_item_long)) {
- Py_DECREF(iter_item);
- return -2;
- }
- Py_DECREF(iter_item);
- if (iter_item_long < min_idx)
- min_idx = iter_item_long;
- phases[iter_item_long] = (char)marker;
- }
- Py_DECREF(iter);
- }
-
- return min_idx;
-}
-
static inline void set_phase_from_parents(char *phases, int parent_1,
int parent_2, Py_ssize_t i)
{
@@ -773,99 +748,164 @@
return NULL;
}
+static int add_roots_get_min(indexObject *self, PyObject *roots, char *phases,
+ char phase)
+{
+ Py_ssize_t len = index_length(self);
+ PyObject *item;
+ PyObject *iterator;
+ int rev, minrev = -1;
+ char *node;
+
+ if (!PySet_Check(roots)) {
+ PyErr_SetString(PyExc_TypeError,
+ "roots must be a set of nodes");
+ return -2;
+ }
+ iterator = PyObject_GetIter(roots);
+ if (iterator == NULL)
+ return -2;
+ while ((item = PyIter_Next(iterator))) {
+ if (node_check(item, &node) == -1)
+ goto failed;
+ rev = index_find_node(self, node, 20);
+ /* null is implicitly public, so negative is invalid */
+ if (rev < 0 || rev >= len)
+ goto failed;
+ phases[rev] = phase;
+ if (minrev == -1 || minrev > rev)
+ minrev = rev;
+ Py_DECREF(item);
+ }
+ Py_DECREF(iterator);
+ return minrev;
+failed:
+ Py_DECREF(iterator);
+ Py_DECREF(item);
+ return -2;
+}
+
static PyObject *compute_phases_map_sets(indexObject *self, PyObject *args)
{
+ /* 0: public (untracked), 1: draft, 2: secret, 32: archive,
+ 96: internal */
+ static const char trackedphases[] = {1, 2, 32, 96};
PyObject *roots = Py_None;
- PyObject *ret = NULL;
- PyObject *phasessize = NULL;
- PyObject *phaseroots = NULL;
- PyObject *phaseset = NULL;
- PyObject *phasessetlist = NULL;
- PyObject *rev = NULL;
+ PyObject *phasesetsdict = NULL;
+ PyObject *phasesets[4] = {NULL, NULL, NULL, NULL};
Py_ssize_t len = index_length(self);
- Py_ssize_t numphase = 0;
- Py_ssize_t minrevallphases = 0;
- Py_ssize_t minrevphase = 0;
- Py_ssize_t i = 0;
char *phases = NULL;
- long phase;
+ int minphaserev = -1, rev, i;
+ const int numphases = (int)(sizeof(phasesets) / sizeof(phasesets[0]));
if (!PyArg_ParseTuple(args, "O", &roots))
- goto done;
- if (roots == NULL || !PyList_Check(roots)) {
- PyErr_SetString(PyExc_TypeError, "roots must be a list");
- goto done;
+ return NULL;
+ if (roots == NULL || !PyDict_Check(roots)) {
+ PyErr_SetString(PyExc_TypeError, "roots must be a dictionary");
+ return NULL;
}
- phases = calloc(
- len, 1); /* phase per rev: {0: public, 1: draft, 2: secret} */
+ phases = calloc(len, 1);
if (phases == NULL) {
PyErr_NoMemory();
- goto done;
+ return NULL;
}
- /* Put the phase information of all the roots in phases */
- numphase = PyList_GET_SIZE(roots) + 1;
- minrevallphases = len + 1;
- phasessetlist = PyList_New(numphase);
- if (phasessetlist == NULL)
- goto done;
+
+ for (i = 0; i < numphases; ++i) {
+ PyObject *pyphase = PyInt_FromLong(trackedphases[i]);
+ PyObject *phaseroots = NULL;
+ if (pyphase == NULL)
+ goto release;
+ phaseroots = PyDict_GetItem(roots, pyphase);
+ Py_DECREF(pyphase);
+ if (phaseroots == NULL)
+ continue;
+ rev = add_roots_get_min(self, phaseroots, phases,
+ trackedphases[i]);
+ if (rev == -2)
+ goto release;
+ if (rev != -1 && (minphaserev == -1 || rev < minphaserev))
+ minphaserev = rev;
+ }
+
+ for (i = 0; i < numphases; ++i) {
+ phasesets[i] = PySet_New(NULL);
+ if (phasesets[i] == NULL)
+ goto release;
+ }
- PyList_SET_ITEM(phasessetlist, 0, Py_None);
- Py_INCREF(Py_None);
-
- for (i = 0; i < numphase - 1; i++) {
- phaseroots = PyList_GET_ITEM(roots, i);
- phaseset = PySet_New(NULL);
- if (phaseset == NULL)
+ if (minphaserev == -1)
+ minphaserev = len;
+ for (rev = minphaserev; rev < len; ++rev) {
+ PyObject *pyphase = NULL;
+ PyObject *pyrev = NULL;
+ int parents[2];
+ /*
+ * The parent lookup could be skipped for phaseroots, but
+ * phase --force would historically not recompute them
+ * correctly, leaving descendants with a lower phase around.
+ * As such, unconditionally recompute the phase.
+ */
+ if (index_get_parents(self, rev, parents, (int)len - 1) < 0)
goto release;
- PyList_SET_ITEM(phasessetlist, i + 1, phaseset);
- if (!PyList_Check(phaseroots)) {
- PyErr_SetString(PyExc_TypeError,
- "roots item must be a list");
+ set_phase_from_parents(phases, parents[0], parents[1], rev);
+ switch (phases[rev]) {
+ case 0:
+ continue;
+ case 1:
+ pyphase = phasesets[0];
+ break;
+ case 2:
+ pyphase = phasesets[1];
+ break;
+ case 32:
+ pyphase = phasesets[2];
+ break;
+ case 96:
+ pyphase = phasesets[3];
+ break;
+ default:
+ /* this should never happen since the phase number is
+ * specified by this function. */
+ PyErr_SetString(PyExc_SystemError,
+ "bad phase number in internal list");
goto release;
}
- minrevphase =
- add_roots_get_min(self, phaseroots, i + 1, phases);
- if (minrevphase == -2) /* Error from add_roots_get_min */
+ pyrev = PyInt_FromLong(rev);
+ if (pyrev == NULL)
goto release;
- minrevallphases = MIN(minrevallphases, minrevphase);
- }
- /* Propagate the phase information from the roots to the revs */
- if (minrevallphases != -1) {
- int parents[2];
- for (i = minrevallphases; i < len; i++) {
- if (index_get_parents(self, i, parents, (int)len - 1) <
- 0)
- goto release;
- set_phase_from_parents(phases, parents[0], parents[1],
- i);
+ if (PySet_Add(pyphase, pyrev) == -1) {
+ Py_DECREF(pyrev);
+ goto release;
}
+ Py_DECREF(pyrev);
}
- /* Transform phase list to a python list */
- phasessize = PyInt_FromSsize_t(len);
- if (phasessize == NULL)
+
+ phasesetsdict = _dict_new_presized(numphases);
+ if (phasesetsdict == NULL)
goto release;
- for (i = 0; i < len; i++) {
- phase = phases[i];
- /* We only store the sets of phase for non public phase, the
- * public phase is computed as a difference */
- if (phase != 0) {
- phaseset = PyList_GET_ITEM(phasessetlist, phase);
- rev = PyInt_FromSsize_t(i);
- if (rev == NULL)
- goto release;
- PySet_Add(phaseset, rev);
- Py_XDECREF(rev);
+ for (i = 0; i < numphases; ++i) {
+ PyObject *pyphase = PyInt_FromLong(trackedphases[i]);
+ if (pyphase == NULL)
+ goto release;
+ if (PyDict_SetItem(phasesetsdict, pyphase, phasesets[i]) ==
+ -1) {
+ Py_DECREF(pyphase);
+ goto release;
}
+ Py_DECREF(phasesets[i]);
+ phasesets[i] = NULL;
}
- ret = PyTuple_Pack(2, phasessize, phasessetlist);
+
+ return Py_BuildValue("nN", len, phasesetsdict);
release:
- Py_XDECREF(phasessize);
- Py_XDECREF(phasessetlist);
-done:
+ for (i = 0; i < numphases; ++i)
+ Py_XDECREF(phasesets[i]);
+ Py_XDECREF(phasesetsdict);
+
free(phases);
- return ret;
+ return NULL;
}
static PyObject *index_headrevs(indexObject *self, PyObject *args)
@@ -2847,7 +2887,7 @@
*/
PyObject *parse_index2(PyObject *self, PyObject *args)
{
- PyObject *tuple = NULL, *cache = NULL;
+ PyObject *cache = NULL;
indexObject *idx;
int ret;
@@ -2868,15 +2908,11 @@
Py_INCREF(cache);
}
- tuple = Py_BuildValue("NN", idx, cache);
- if (!tuple)
- goto bail;
- return tuple;
+ return Py_BuildValue("NN", idx, cache);
bail:
Py_XDECREF(idx);
Py_XDECREF(cache);
- Py_XDECREF(tuple);
return NULL;
}
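
compute_phases_map_sets now receives its roots as a dictionary keyed by phase number (1 draft, 2 secret, 32 archived, 96 internal) and returns per-phase revision sets, propagating a parent's higher phase to its descendants in a single sweep from the lowest root. The same algorithm in compact Python; this toy takes root revs directly, whereas the C code resolves 20-byte nodes through index_find_node:

    TRACKED = (1, 2, 32, 96)  # draft, secret, archived, internal

    def compute_phases(parents, roots):
        """parents: list of (p1, p2) revs with -1 for null;
        roots: {phase: set(revs)}, keys assumed to be in TRACKED."""
        n = len(parents)
        phases = [0] * n          # 0 = public, never stored explicitly
        minrev = n
        for phase, revs in roots.items():
            for r in revs:
                phases[r] = phase
                minrev = min(minrev, r)
        sets = {p: set() for p in TRACKED}
        for rev in range(minrev, n):
            for p in parents[rev]:
                if p != -1 and phases[p] > phases[rev]:
                    phases[rev] = phases[p]   # inherit the higher phase
            if phases[rev]:
                sets[phases[rev]].add(rev)
        return n, sets

    # linear history 0-1-2-3, draft root at rev 1, secret root at rev 3
    size, sets = compute_phases([(-1, -1), (0, -1), (1, -1), (2, -1)],
                                {1: {1}, 2: {3}})
    assert size == 4 and sets[1] == {1, 2} and sets[2] == {3}
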
--- a/mercurial/changegroup.py Tue Jul 14 10:25:41 2020 +0200
+++ b/mercurial/changegroup.py Mon Jul 20 21:56:27 2020 +0530
@@ -1629,7 +1629,7 @@
repo = repo.unfiltered()
commonrevs = outgoing.common
csets = outgoing.missing
- heads = outgoing.missingheads
+ heads = outgoing.ancestorsof
# We go through the fast path if we get told to, or if all (unfiltered
# heads have been requested (since we then know there all linkrevs will
# be pulled by the client).
--- a/mercurial/changelog.py Tue Jul 14 10:25:41 2020 +0200
+++ b/mercurial/changelog.py Mon Jul 20 21:56:27 2020 +0530
@@ -16,9 +16,9 @@
from .thirdparty import attr
from . import (
- copies,
encoding,
error,
+ metadata,
pycompat,
revlog,
)
@@ -318,7 +318,7 @@
rawindices = self.extra.get(b'filesadded')
if rawindices is None:
return None
- return copies.decodefileindices(self.files, rawindices)
+ return metadata.decodefileindices(self.files, rawindices)
@property
def filesremoved(self):
@@ -330,7 +330,7 @@
rawindices = self.extra.get(b'filesremoved')
if rawindices is None:
return None
- return copies.decodefileindices(self.files, rawindices)
+ return metadata.decodefileindices(self.files, rawindices)
@property
def p1copies(self):
@@ -342,7 +342,7 @@
rawcopies = self.extra.get(b'p1copies')
if rawcopies is None:
return None
- return copies.decodecopies(self.files, rawcopies)
+ return metadata.decodecopies(self.files, rawcopies)
@property
def p2copies(self):
@@ -354,7 +354,7 @@
rawcopies = self.extra.get(b'p2copies')
if rawcopies is None:
return None
- return copies.decodecopies(self.files, rawcopies)
+ return metadata.decodecopies(self.files, rawcopies)
@property
def description(self):
@@ -385,9 +385,7 @@
datafile=datafile,
checkambig=True,
mmaplargeindex=True,
- persistentnodemap=opener.options.get(
- b'exp-persistent-nodemap', False
- ),
+ persistentnodemap=opener.options.get(b'persistent-nodemap', False),
)
if self._initempty and (self.version & 0xFFFF == revlog.REVLOGV1):
@@ -572,13 +570,13 @@
):
extra.pop(name, None)
if p1copies is not None:
- p1copies = copies.encodecopies(sortedfiles, p1copies)
+ p1copies = metadata.encodecopies(sortedfiles, p1copies)
if p2copies is not None:
- p2copies = copies.encodecopies(sortedfiles, p2copies)
+ p2copies = metadata.encodecopies(sortedfiles, p2copies)
if filesadded is not None:
- filesadded = copies.encodefileindices(sortedfiles, filesadded)
+ filesadded = metadata.encodefileindices(sortedfiles, filesadded)
if filesremoved is not None:
- filesremoved = copies.encodefileindices(sortedfiles, filesremoved)
+ filesremoved = metadata.encodefileindices(sortedfiles, filesremoved)
if self._copiesstorage == b'extra':
extrasentries = p1copies, p2copies, filesadded, filesremoved
if extra is None and any(x is not None for x in extrasentries):
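
changelog.py now reaches these helpers through the new metadata module (their bodies move out of copies.py later in this patch); the wire format is unchanged, with file indices encoded as newline-separated decimal offsets into the sorted file list. A trimmed round-trip toy, omitting the real helpers' range checks:

    def encodefileindices(files, subset):
        subset = set(subset)
        return b'\n'.join(b'%d' % i
                          for i, f in enumerate(files) if f in subset)

    def decodefileindices(files, data):
        return [files[int(i)] for i in data.split(b'\n')] if data else []

    files = [b'a', b'b', b'c']
    data = encodefileindices(files, {b'a', b'c'})
    assert data == b'0\n2'
    assert decodefileindices(files, data) == [b'a', b'c']
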
--- a/mercurial/chgserver.py Tue Jul 14 10:25:41 2020 +0200
+++ b/mercurial/chgserver.py Mon Jul 20 21:56:27 2020 +0530
@@ -320,7 +320,7 @@
self.channel = channel
def __call__(self, cmd, environ, cwd=None, type=b'system', cmdtable=None):
- args = [type, procutil.quotecommand(cmd), os.path.abspath(cwd or b'.')]
+ args = [type, cmd, os.path.abspath(cwd or b'.')]
args.extend(b'%s=%s' % (k, v) for k, v in pycompat.iteritems(environ))
data = b'\0'.join(args)
self.out.write(struct.pack(b'>cI', self.channel, len(data)))
@@ -442,7 +442,20 @@
if newfp is not fp:
newfp.close()
# restore original fd: fp is open again
- os.dup2(fd, fp.fileno())
+ try:
+ os.dup2(fd, fp.fileno())
+ except OSError as err:
+ # According to issue6330, running chg on heavily loaded systems
+ # can lead to EBUSY. [man dup2] indicates that, on Linux,
+ # EBUSY comes from a race condition between open() and dup2().
+ # However it's not clear why the open() race occurred for
+ # newfd=stdin/out/err.
+ self.ui.log(
+ b'chgserver',
+ b'got %s while duplicating %s\n',
+ stringutil.forcebytestr(err),
+ fn,
+ )
os.close(fd)
setattr(self, cn, ch)
setattr(ui, fn, fp)
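
The chg hunk above keeps a failed dup2() from killing the server: on heavily loaded Linux systems the open()/dup2() race described in issue6330 can surface as EBUSY, which is now logged rather than raised. The restore-and-log shape in isolation (the logger is a stand-in; real chg logs through ui.log):

    import os

    def restorefd(savedfd, targetfd, log):
        """Restore targetfd from a saved duplicate, tolerating dup2 failure."""
        try:
            os.dup2(savedfd, targetfd)
        except OSError as err:
            # e.g. EBUSY from the open()/dup2() race of issue6330
            log(b'got %s while duplicating fd %d\n'
                % (str(err).encode(), targetfd))
        finally:
            os.close(savedfd)

    saved = os.dup(1)                      # save stdout...
    restorefd(saved, 1, lambda msg: None)  # ...and restore it
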
--- a/mercurial/cmdutil.py Tue Jul 14 10:25:41 2020 +0200
+++ b/mercurial/cmdutil.py Mon Jul 20 21:56:27 2020 +0530
@@ -38,6 +38,7 @@
logcmdutil,
match as matchmod,
merge as mergemod,
+ mergestate as mergestatemod,
mergeutil,
obsolete,
patch,
@@ -890,7 +891,7 @@
def readmorestatus(repo):
"""Returns a morestatus object if the repo has unfinished state."""
statetuple = statemod.getrepostate(repo)
- mergestate = mergemod.mergestate.read(repo)
+ mergestate = mergestatemod.mergestate.read(repo)
activemerge = mergestate.active()
if not statetuple and not activemerge:
return None
@@ -2137,7 +2138,9 @@
for file in repo[rev].files():
if not match or match(file):
allfiles.add(file)
- scmutil.prefetchfiles(repo, revs, scmutil.matchfiles(repo, allfiles))
+ match = scmutil.matchfiles(repo, allfiles)
+ revmatches = [(rev, match) for rev in revs]
+ scmutil.prefetchfiles(repo, revmatches)
def export(
@@ -2751,15 +2754,28 @@
ret = 1
needsfctx = ui.verbose or {b'size', b'flags'} & fm.datahint()
- for f in ctx.matches(m):
- fm.startitem()
- fm.context(ctx=ctx)
- if needsfctx:
- fc = ctx[f]
- fm.write(b'size flags', b'% 10d % 1s ', fc.size(), fc.flags())
- fm.data(path=f)
- fm.plain(fmt % uipathfn(f))
- ret = 0
+ if fm.isplain() and not needsfctx:
+ # Fast path. The speed-up comes from skipping the formatter, and batching
+ # calls to ui.write.
+ buf = []
+ for f in ctx.matches(m):
+ buf.append(fmt % uipathfn(f))
+ if len(buf) > 100:
+ ui.write(b''.join(buf))
+ del buf[:]
+ ret = 0
+ if buf:
+ ui.write(b''.join(buf))
+ else:
+ for f in ctx.matches(m):
+ fm.startitem()
+ fm.context(ctx=ctx)
+ if needsfctx:
+ fc = ctx[f]
+ fm.write(b'size flags', b'% 10d % 1s ', fc.size(), fc.flags())
+ fm.data(path=f)
+ fm.plain(fmt % uipathfn(f))
+ ret = 0
for subpath in sorted(ctx.substate):
submatch = matchmod.subdirmatcher(subpath, m)
@@ -2983,14 +2999,14 @@
try:
if mfnode and mfl[mfnode].find(file)[0]:
if _catfmtneedsdata(basefm):
- scmutil.prefetchfiles(repo, [ctx.rev()], matcher)
+ scmutil.prefetchfiles(repo, [(ctx.rev(), matcher)])
write(file)
return 0
except KeyError:
pass
if _catfmtneedsdata(basefm):
- scmutil.prefetchfiles(repo, [ctx.rev()], matcher)
+ scmutil.prefetchfiles(repo, [(ctx.rev(), matcher)])
for abs in ctx.walk(matcher):
write(abs)
@@ -3127,7 +3143,7 @@
if subs:
subrepoutil.writestate(repo, newsubstate)
- ms = mergemod.mergestate.read(repo)
+ ms = mergestatemod.mergestate.read(repo)
mergeutil.checkunresolved(ms)
filestoamend = {f for f in wctx.files() if matcher(f)}
@@ -3423,9 +3439,9 @@
not opts.get(b'amend')
and bheads
and node not in bheads
- and not [
- x for x in parents if x.node() in bheads and x.branch() == branch
- ]
+ and not any(
+ p.node() in bheads and p.branch() == branch for p in parents
+ )
):
repo.ui.status(_(b'created new head\n'))
# The message is not printed for initial roots. For the other
@@ -3755,11 +3771,11 @@
needdata = (b'revert', b'add', b'undelete')
oplist = [actions[name][0] for name in needdata]
prefetch = scmutil.prefetchfiles
- matchfiles = scmutil.matchfiles
+ matchfiles = scmutil.matchfiles(
+ repo, [f for sublist in oplist for f in sublist]
+ )
prefetch(
- repo,
- [ctx.rev()],
- matchfiles(repo, [f for sublist in oplist for f in sublist]),
+ repo, [(ctx.rev(), matchfiles)],
)
match = scmutil.match(repo[None], pats)
_performrevert(
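
The new hg files fast path skips the formatter machinery and batches output, flushing roughly every 100 paths rather than issuing one write per file. The same batching pattern in isolation:

    import io

    def writepaths(write, paths, fmt=b'%s\n'):
        buf = []
        for p in paths:
            buf.append(fmt % p)
            if len(buf) > 100:       # flush in chunks, as in the patch
                write(b''.join(buf))
                del buf[:]
        if buf:                      # flush the remainder
            write(b''.join(buf))

    out = io.BytesIO()
    writepaths(out.write, [b'file%d' % i for i in range(250)])
    assert out.getvalue().count(b'\n') == 250
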
--- a/mercurial/commands.py Tue Jul 14 10:25:41 2020 +0200
+++ b/mercurial/commands.py Mon Jul 20 21:56:27 2020 +0530
@@ -46,6 +46,7 @@
hg,
logcmdutil,
merge as mergemod,
+ mergestate as mergestatemod,
narrowspec,
obsolete,
obsutil,
@@ -2183,7 +2184,8 @@
"""
opts = pycompat.byteskwargs(opts)
- if opts.get(b'edit') or opts.get(b'local') or opts.get(b'global'):
+ editopts = (b'edit', b'local', b'global')
+ if any(opts.get(o) for o in editopts):
if opts.get(b'local') and opts.get(b'global'):
raise error.Abort(_(b"can't use --local and --global together"))
@@ -2350,7 +2352,7 @@
Returns 0 on success, 1 if errors are encountered.
"""
opts = pycompat.byteskwargs(opts)
- with repo.wlock(False):
+ with repo.wlock():
return cmdutil.copy(ui, repo, pats, opts)
@@ -2475,26 +2477,27 @@
Returns 0 on success.
"""
+ cmdutil.check_at_most_one_arg(opts, 'rev', 'change')
opts = pycompat.byteskwargs(opts)
revs = opts.get(b'rev')
change = opts.get(b'change')
stat = opts.get(b'stat')
reverse = opts.get(b'reverse')
- if revs and change:
- msg = _(b'cannot specify --rev and --change at the same time')
- raise error.Abort(msg)
- elif change:
+ if change:
repo = scmutil.unhidehashlikerevs(repo, [change], b'nowarn')
ctx2 = scmutil.revsingle(repo, change, None)
ctx1 = ctx2.p1()
else:
repo = scmutil.unhidehashlikerevs(repo, revs, b'nowarn')
ctx1, ctx2 = scmutil.revpair(repo, revs)
- node1, node2 = ctx1.node(), ctx2.node()
if reverse:
- node1, node2 = node2, node1
+ ctxleft = ctx2
+ ctxright = ctx1
+ else:
+ ctxleft = ctx1
+ ctxright = ctx2
diffopts = patch.diffallopts(ui, opts)
m = scmutil.match(ctx2, pats, opts)
@@ -2504,8 +2507,8 @@
ui,
repo,
diffopts,
- node1,
- node2,
+ ctxleft,
+ ctxright,
m,
stat=stat,
listsubrepos=opts.get(b'subrepos'),
@@ -2980,68 +2983,47 @@
editform=b'graft', **pycompat.strkwargs(opts)
)
+ cmdutil.check_at_most_one_arg(opts, b'abort', b'stop', b'continue')
+
cont = False
if opts.get(b'no_commit'):
- if opts.get(b'edit'):
- raise error.Abort(
- _(b"cannot specify --no-commit and --edit together")
- )
- if opts.get(b'currentuser'):
- raise error.Abort(
- _(b"cannot specify --no-commit and --currentuser together")
- )
- if opts.get(b'currentdate'):
- raise error.Abort(
- _(b"cannot specify --no-commit and --currentdate together")
- )
- if opts.get(b'log'):
- raise error.Abort(
- _(b"cannot specify --no-commit and --log together")
- )
+ cmdutil.check_incompatible_arguments(
+ opts,
+ b'no_commit',
+ [b'edit', b'currentuser', b'currentdate', b'log'],
+ )
graftstate = statemod.cmdstate(repo, b'graftstate')
if opts.get(b'stop'):
- if opts.get(b'continue'):
- raise error.Abort(
- _(b"cannot use '--continue' and '--stop' together")
- )
- if opts.get(b'abort'):
- raise error.Abort(_(b"cannot use '--abort' and '--stop' together"))
-
- if any(
- (
- opts.get(b'edit'),
- opts.get(b'log'),
- opts.get(b'user'),
- opts.get(b'date'),
- opts.get(b'currentdate'),
- opts.get(b'currentuser'),
- opts.get(b'rev'),
- )
- ):
- raise error.Abort(_(b"cannot specify any other flag with '--stop'"))
+ cmdutil.check_incompatible_arguments(
+ opts,
+ b'stop',
+ [
+ b'edit',
+ b'log',
+ b'user',
+ b'date',
+ b'currentdate',
+ b'currentuser',
+ b'rev',
+ ],
+ )
return _stopgraft(ui, repo, graftstate)
elif opts.get(b'abort'):
- if opts.get(b'continue'):
- raise error.Abort(
- _(b"cannot use '--continue' and '--abort' together")
- )
- if any(
- (
- opts.get(b'edit'),
- opts.get(b'log'),
- opts.get(b'user'),
- opts.get(b'date'),
- opts.get(b'currentdate'),
- opts.get(b'currentuser'),
- opts.get(b'rev'),
- )
- ):
- raise error.Abort(
- _(b"cannot specify any other flag with '--abort'")
- )
-
+ cmdutil.check_incompatible_arguments(
+ opts,
+ b'abort',
+ [
+ b'edit',
+ b'log',
+ b'user',
+ b'date',
+ b'currentdate',
+ b'currentuser',
+ b'rev',
+ ],
+ )
return cmdutil.abortgraft(ui, repo, graftstate)
elif opts.get(b'continue'):
cont = True
@@ -3431,8 +3413,11 @@
m = regexp.search(self.line, p)
if not m:
break
- yield m.span()
- p = m.end()
+ if m.end() == p:
+ p += 1
+ else:
+ yield m.span()
+ p = m.end()
matches = {}
copies = {}
@@ -3578,56 +3563,68 @@
getrenamed = scmutil.getrenamedfn(repo)
- def get_file_content(filename, filelog, filenode, context, revision):
- try:
- content = filelog.read(filenode)
- except error.WdirUnsupported:
- content = context[filename].data()
- except error.CensoredNodeError:
- content = None
- ui.warn(
- _(b'cannot search in censored file: %(filename)s:%(revnum)s\n')
- % {b'filename': filename, b'revnum': pycompat.bytestr(revision)}
- )
- return content
+ def readfile(ctx, fn):
+ rev = ctx.rev()
+ if rev is None:
+ fctx = ctx[fn]
+ try:
+ return fctx.data()
+ except IOError as e:
+ if e.errno != errno.ENOENT:
+ raise
+ else:
+ flog = getfile(fn)
+ fnode = ctx.filenode(fn)
+ try:
+ return flog.read(fnode)
+ except error.CensoredNodeError:
+ ui.warn(
+ _(
+ b'cannot search in censored file: %(filename)s:%(revnum)s\n'
+ )
+ % {b'filename': fn, b'revnum': pycompat.bytestr(rev),}
+ )
def prep(ctx, fns):
rev = ctx.rev()
pctx = ctx.p1()
- parent = pctx.rev()
matches.setdefault(rev, {})
- matches.setdefault(parent, {})
+ if diff:
+ parent = pctx.rev()
+ matches.setdefault(parent, {})
files = revfiles.setdefault(rev, [])
- for fn in fns:
- flog = getfile(fn)
- try:
- fnode = ctx.filenode(fn)
- except error.LookupError:
- continue
-
- copy = None
- if follow:
- copy = getrenamed(fn, rev)
- if copy:
- copies.setdefault(rev, {})[fn] = copy
- if fn in skip:
- skip.add(copy)
- if fn in skip:
- continue
- files.append(fn)
-
- if fn not in matches[rev]:
- content = get_file_content(fn, flog, fnode, ctx, rev)
- grepbody(fn, rev, content)
-
- pfn = copy or fn
- if pfn not in matches[parent]:
- try:
- pfnode = pctx.filenode(pfn)
- pcontent = get_file_content(pfn, flog, pfnode, pctx, parent)
- grepbody(pfn, parent, pcontent)
- except error.LookupError:
- pass
+ if rev is None:
+ # in `hg grep pattern`, 2/3 of the time is spent in pathauditor
+ # checks without this caching (measured on mozilla-central)
+ contextmanager = repo.wvfs.audit.cached
+ else:
+ contextmanager = util.nullcontextmanager
+ with contextmanager():
+ for fn in fns:
+ # fn might not exist in the revision (could be a file removed by
+ # the revision). We could check `fn not in ctx` even when rev is
+ # None, but it's less racy to protect against that in readfile.
+ if rev is not None and fn not in ctx:
+ continue
+
+ copy = None
+ if follow:
+ copy = getrenamed(fn, rev)
+ if copy:
+ copies.setdefault(rev, {})[fn] = copy
+ if fn in skip:
+ skip.add(copy)
+ if fn in skip:
+ continue
+ files.append(fn)
+
+ if fn not in matches[rev]:
+ grepbody(fn, rev, readfile(ctx, fn))
+
+ if diff:
+ pfn = copy or fn
+ if pfn not in matches[parent] and pfn in pctx:
+ grepbody(pfn, parent, readfile(pctx, pfn))
ui.pager(b'grep')
fm = ui.formatter(b'grep', opts)
@@ -5812,7 +5809,7 @@
Returns 0 on success, 1 if errors are encountered.
"""
opts = pycompat.byteskwargs(opts)
- with repo.wlock(False):
+ with repo.wlock():
return cmdutil.copy(ui, repo, pats, opts, rename=True)
@@ -5934,7 +5931,7 @@
if show:
ui.pager(b'resolve')
fm = ui.formatter(b'resolve', opts)
- ms = mergemod.mergestate.read(repo)
+ ms = mergestatemod.mergestate.read(repo)
wctx = repo[None]
m = scmutil.match(wctx, pats, opts)
@@ -5942,14 +5939,20 @@
# as 'P'. Resolved path conflicts show as 'R', the same as normal
# resolved conflicts.
mergestateinfo = {
- mergemod.MERGE_RECORD_UNRESOLVED: (b'resolve.unresolved', b'U'),
- mergemod.MERGE_RECORD_RESOLVED: (b'resolve.resolved', b'R'),
- mergemod.MERGE_RECORD_UNRESOLVED_PATH: (
+ mergestatemod.MERGE_RECORD_UNRESOLVED: (
+ b'resolve.unresolved',
+ b'U',
+ ),
+ mergestatemod.MERGE_RECORD_RESOLVED: (b'resolve.resolved', b'R'),
+ mergestatemod.MERGE_RECORD_UNRESOLVED_PATH: (
b'resolve.unresolved',
b'P',
),
- mergemod.MERGE_RECORD_RESOLVED_PATH: (b'resolve.resolved', b'R'),
- mergemod.MERGE_RECORD_DRIVER_RESOLVED: (
+ mergestatemod.MERGE_RECORD_RESOLVED_PATH: (
+ b'resolve.resolved',
+ b'R',
+ ),
+ mergestatemod.MERGE_RECORD_DRIVER_RESOLVED: (
b'resolve.driverresolved',
b'D',
),
@@ -5959,7 +5962,7 @@
if not m(f):
continue
- if ms[f] == mergemod.MERGE_RECORD_MERGED_OTHER:
+ if ms[f] == mergestatemod.MERGE_RECORD_MERGED_OTHER:
continue
label, key = mergestateinfo[ms[f]]
fm.startitem()
@@ -5971,7 +5974,7 @@
return 0
with repo.wlock():
- ms = mergemod.mergestate.read(repo)
+ ms = mergestatemod.mergestate.read(repo)
if not (ms.active() or repo.dirstate.p2() != nullid):
raise error.Abort(
@@ -5982,7 +5985,7 @@
if (
ms.mergedriver
- and ms.mdstate() == mergemod.MERGE_DRIVER_STATE_UNMARKED
+ and ms.mdstate() == mergestatemod.MERGE_DRIVER_STATE_UNMARKED
):
proceed = mergemod.driverpreprocess(repo, ms, wctx)
ms.commit()
@@ -6008,12 +6011,12 @@
didwork = True
- if ms[f] == mergemod.MERGE_RECORD_MERGED_OTHER:
+ if ms[f] == mergestatemod.MERGE_RECORD_MERGED_OTHER:
continue
# don't let driver-resolved files be marked, and run the conclude
# step if asked to resolve
- if ms[f] == mergemod.MERGE_RECORD_DRIVER_RESOLVED:
+ if ms[f] == mergestatemod.MERGE_RECORD_DRIVER_RESOLVED:
exact = m.exact(f)
if mark:
if exact:
@@ -6033,14 +6036,14 @@
# path conflicts must be resolved manually
if ms[f] in (
- mergemod.MERGE_RECORD_UNRESOLVED_PATH,
- mergemod.MERGE_RECORD_RESOLVED_PATH,
+ mergestatemod.MERGE_RECORD_UNRESOLVED_PATH,
+ mergestatemod.MERGE_RECORD_RESOLVED_PATH,
):
if mark:
- ms.mark(f, mergemod.MERGE_RECORD_RESOLVED_PATH)
+ ms.mark(f, mergestatemod.MERGE_RECORD_RESOLVED_PATH)
elif unmark:
- ms.mark(f, mergemod.MERGE_RECORD_UNRESOLVED_PATH)
- elif ms[f] == mergemod.MERGE_RECORD_UNRESOLVED_PATH:
+ ms.mark(f, mergestatemod.MERGE_RECORD_UNRESOLVED_PATH)
+ elif ms[f] == mergestatemod.MERGE_RECORD_UNRESOLVED_PATH:
ui.warn(
_(b'%s: path conflict must be resolved manually\n')
% uipathfn(f)
@@ -6052,12 +6055,12 @@
fdata = repo.wvfs.tryread(f)
if (
filemerge.hasconflictmarkers(fdata)
- and ms[f] != mergemod.MERGE_RECORD_RESOLVED
+ and ms[f] != mergestatemod.MERGE_RECORD_RESOLVED
):
hasconflictmarkers.append(f)
- ms.mark(f, mergemod.MERGE_RECORD_RESOLVED)
+ ms.mark(f, mergestatemod.MERGE_RECORD_RESOLVED)
elif unmark:
- ms.mark(f, mergemod.MERGE_RECORD_UNRESOLVED)
+ ms.mark(f, mergestatemod.MERGE_RECORD_UNRESOLVED)
else:
# backup pre-resolve (merge uses .orig for its own purposes)
a = repo.wjoin(f)
@@ -6126,7 +6129,8 @@
raise
ms.commit()
- ms.recordactions()
+ branchmerge = repo.dirstate.p2() != nullid
+ mergestatemod.recordupdates(repo, ms.actions(), branchmerge, None)
if not didwork and pats:
hint = None
@@ -6660,7 +6664,7 @@
(b'm', b'modified', None, _(b'show only modified files')),
(b'a', b'added', None, _(b'show only added files')),
(b'r', b'removed', None, _(b'show only removed files')),
- (b'd', b'deleted', None, _(b'show only deleted (but tracked) files')),
+ (b'd', b'deleted', None, _(b'show only missing files')),
(b'c', b'clean', None, _(b'show only files without changes')),
(b'u', b'unknown', None, _(b'show only unknown (not tracked) files')),
(b'i', b'ignored', None, _(b'show only ignored files')),
@@ -6791,6 +6795,7 @@
"""
+ cmdutil.check_at_most_one_arg(opts, 'rev', 'change')
opts = pycompat.byteskwargs(opts)
revs = opts.get(b'rev')
change = opts.get(b'change')
@@ -6801,10 +6806,7 @@
else:
terse = ui.config(b'commands', b'status.terse')
- if revs and change:
- msg = _(b'cannot specify --rev and --change at the same time')
- raise error.Abort(msg)
- elif revs and terse:
+ if revs and terse:
msg = _(b'cannot use --terse with --rev')
raise error.Abort(msg)
elif change:
@@ -6940,7 +6942,7 @@
marks = []
try:
- ms = mergemod.mergestate.read(repo)
+ ms = mergestatemod.mergestate.read(repo)
except error.UnsupportedMergeRecords as e:
s = b' '.join(e.recordtypes)
ui.warn(
@@ -7809,7 +7811,7 @@
names = []
vers = []
isinternals = []
- for name, module in extensions.extensions():
+ for name, module in sorted(extensions.extensions()):
names.append(name)
vers.append(extensions.moduleversion(module) or None)
isinternals.append(extensions.ismoduleinternal(module))
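
One hunk above hardens grep's match loop against zero-width matches: a pattern like 'a|' matches the empty string, and without advancing the position such a match would pin the loop forever. The fixed loop as a standalone generator:

    import re

    def findpos(regexp, line):
        p = 0
        while p <= len(line):
            m = regexp.search(line, p)
            if not m:
                break
            if m.end() == p:     # zero-width match: step past it
                p += 1
            else:
                yield m.span()
                p = m.end()

    assert list(findpos(re.compile(b'a|'), b'banana')) == [
        (1, 2), (3, 4), (5, 6)]
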
--- a/mercurial/commandserver.py Tue Jul 14 10:25:41 2020 +0200
+++ b/mercurial/commandserver.py Mon Jul 20 21:56:27 2020 +0530
@@ -191,7 +191,6 @@
def _selectmessageencoder(ui):
- # experimental config: cmdserver.message-encodings
encnames = ui.configlist(b'cmdserver', b'message-encodings')
for n in encnames:
f = _messageencoders.get(n)
@@ -234,9 +233,6 @@
self.ui = self.ui.copy()
setuplogging(self.ui, repo=None, fp=self.cdebug)
- # TODO: add this to help/config.txt when stabilized
- # ``channel``
- # Use separate channel for structured output. (Command-server only)
self.cmsg = None
if ui.config(b'ui', b'message-output') == b'channel':
encname, encfn = _selectmessageencoder(ui)
@@ -244,8 +240,23 @@
self.client = fin
+ # If shutdown-on-interrupt is off, the default SIGINT handler is
+ # removed so that client-server communication won't be interrupted.
+ # For example, the 'runcommand' handler issues three short read()s.
+ # If one of the first two read()s were interrupted, the communication
+ # channel would be left in a dirty state and the subsequent request
+ # couldn't be parsed. So catching KeyboardInterrupt isn't enough.
+ self._shutdown_on_interrupt = ui.configbool(
+ b'cmdserver', b'shutdown-on-interrupt'
+ )
+ self._old_inthandler = None
+ if not self._shutdown_on_interrupt:
+ self._old_inthandler = signal.signal(signal.SIGINT, signal.SIG_IGN)
+
def cleanup(self):
"""release and restore resources taken during server session"""
+ if not self._shutdown_on_interrupt:
+ signal.signal(signal.SIGINT, self._old_inthandler)
def _read(self, size):
if not size:
@@ -278,6 +289,32 @@
else:
return []
+ def _dispatchcommand(self, req):
+ from . import dispatch # avoid cycle
+
+ if self._shutdown_on_interrupt:
+ # no need to restore SIGINT handler as it is unmodified.
+ return dispatch.dispatch(req)
+
+ try:
+ signal.signal(signal.SIGINT, self._old_inthandler)
+ return dispatch.dispatch(req)
+ except error.SignalInterrupt:
+ # propagate SIGBREAK, SIGHUP, or SIGTERM.
+ raise
+ except KeyboardInterrupt:
+ # SIGINT may be received outside the try-except block of dispatch(),
+ # so catch it here as a last resort. Another KeyboardInterrupt may be
+ # raised while handling exceptions here, but there's no way to
+ # avoid that except for doing everything in C.
+ pass
+ finally:
+ signal.signal(signal.SIGINT, signal.SIG_IGN)
+ # On KeyboardInterrupt, print error message and exit *after* SIGINT
+ # handler removed.
+ req.ui.error(_(b'interrupted!\n'))
+ return -1
+
def runcommand(self):
""" reads a list of \0 terminated arguments, executes
and writes the return code to the result channel """
@@ -318,7 +355,10 @@
)
try:
- ret = dispatch.dispatch(req) & 255
+ ret = self._dispatchcommand(req) & 255
+ # If shutdown-on-interrupt is off, it's important to write the
+ # result code *after* SIGINT handler removed. If the result code
+ # were lost, the client wouldn't be able to continue processing.
self.cresult.write(struct.pack(b'>i', int(ret)))
finally:
# restore old cwd
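
With cmdserver.shutdown-on-interrupt disabled, the server keeps SIGINT ignored except while a command is actually dispatching, so a stray Ctrl-C cannot corrupt the protocol mid-read. A simplified, runnable sketch of that handler dance (channel writes and error output are omitted):

    import signal

    def runcommand(dispatch):
        # baseline: ignore SIGINT while talking on the protocol channel
        old = signal.signal(signal.SIGINT, signal.SIG_IGN)
        try:
            try:
                signal.signal(signal.SIGINT, old)  # interruptible here only
                return dispatch()
            except KeyboardInterrupt:
                return -1      # reported after SIGINT is ignored again
            finally:
                signal.signal(signal.SIGINT, signal.SIG_IGN)
        finally:
            signal.signal(signal.SIGINT, old)      # server shutdown path

    assert runcommand(lambda: 0) == 0
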
--- a/mercurial/configitems.py Tue Jul 14 10:25:41 2020 +0200
+++ b/mercurial/configitems.py Mon Jul 20 21:56:27 2020 +0530
@@ -204,7 +204,7 @@
b'cmdserver', b'max-repo-cache', default=0, experimental=True,
)
coreconfigitem(
- b'cmdserver', b'message-encodings', default=list, experimental=True,
+ b'cmdserver', b'message-encodings', default=list,
)
coreconfigitem(
b'cmdserver',
@@ -212,6 +212,9 @@
default=lambda: [b'chgserver', b'cmdserver', b'repocache'],
)
coreconfigitem(
+ b'cmdserver', b'shutdown-on-interrupt', default=True,
+)
+coreconfigitem(
b'color', b'.*', default=None, generic=True,
)
coreconfigitem(
@@ -405,18 +408,6 @@
coreconfigitem(
b'devel', b'legacy.exchange', default=list,
)
-# TODO before getting `persistent-nodemap` out of experimental
-#
-# * decide for a "status" of the persistent nodemap and associated location
-# - part of the store next the revlog itself (new requirements)
-# - part of the cache directory
-# - part of an `index` directory
-# (https://www.mercurial-scm.org/wiki/ComputedIndexPlan)
-# * do we want to use this for more than just changelog? if so we need:
-# - simpler "pending" logic for them
-# - double check the memory story (we dont want to keep all revlog in memory)
-# - think about the naming scheme if we are in "cache"
-# * increment the version format to "1" and freeze it.
coreconfigitem(
b'devel', b'persistent-nodemap', default=False,
)
@@ -675,12 +666,6 @@
b'experimental', b'rust.index', default=False,
)
coreconfigitem(
- b'experimental', b'exp-persistent-nodemap', default=False,
-)
-coreconfigitem(
- b'experimental', b'exp-persistent-nodemap.mmap', default=True,
-)
-coreconfigitem(
b'experimental', b'server.filesdata.recommended-batch-size', default=50000,
)
coreconfigitem(
@@ -783,6 +768,12 @@
coreconfigitem(
b'format', b'usestore', default=True,
)
+# Right now, the only efficient implementation of the nodemap logic is in Rust, so
+# the persistent nodemap feature needs to stay experimental as long as the Rust
+# extensions are an experimental feature.
+coreconfigitem(
+ b'format', b'use-persistent-nodemap', default=False, experimental=True
+)
coreconfigitem(
b'format',
b'exp-use-copies-side-data-changeset',
@@ -820,9 +811,6 @@
b'hostsecurity', b'ciphers', default=None,
)
coreconfigitem(
- b'hostsecurity', b'disabletls10warning', default=False,
-)
-coreconfigitem(
b'hostsecurity', b'minimumprotocol', default=dynamicdefault,
)
coreconfigitem(
@@ -1080,6 +1068,9 @@
b'rewrite', b'update-timestamp', default=False,
)
coreconfigitem(
+ b'rewrite', b'empty-successor', default=b'skip', experimental=True,
+)
+coreconfigitem(
b'storage', b'new-repo-backend', default=b'revlogv1', experimental=True,
)
coreconfigitem(
@@ -1088,6 +1079,14 @@
default=True,
alias=[(b'format', b'aggressivemergedeltas')],
)
+# experimental as long as rust is experimental (or a C version is implemented)
+coreconfigitem(
+ b'storage', b'revlog.nodemap.mmap', default=True, experimental=True
+)
+# experimental as long as format.use-persistent-nodemap is.
+coreconfigitem(
+ b'storage', b'revlog.nodemap.mode', default=b'compat', experimental=True
+)
coreconfigitem(
b'storage', b'revlog.reuse-external-delta', default=True,
)
@@ -1235,6 +1234,10 @@
b'ui', b'askusername', default=False,
)
coreconfigitem(
+ b'ui', b'available-memory', default=None,
+)
+
+coreconfigitem(
b'ui', b'clonebundlefallback', default=False,
)
coreconfigitem(
@@ -1391,6 +1394,9 @@
b'ui', b'timeout.warn', default=0,
)
coreconfigitem(
+ b'ui', b'timestamp-output', default=False,
+)
+coreconfigitem(
b'ui', b'traceback', default=False,
)
coreconfigitem(
--- a/mercurial/context.py Tue Jul 14 10:25:41 2020 +0200
+++ b/mercurial/context.py Mon Jul 20 21:56:27 2020 +0530
@@ -28,12 +28,13 @@
open,
)
from . import (
- copies,
dagop,
encoding,
error,
fileset,
match as matchmod,
+ mergestate as mergestatemod,
+ metadata,
obsolete as obsmod,
patch,
pathutil,
@@ -299,7 +300,7 @@
@propertycache
def _copies(self):
- return copies.computechangesetcopies(self)
+ return metadata.computechangesetcopies(self)
def p1copies(self):
return self._copies[0]
@@ -474,6 +475,20 @@
return r
+ def mergestate(self, clean=False):
+ """Get a mergestate object for this context."""
+ raise NotImplementedError(
+ '%s does not implement mergestate()' % self.__class__
+ )
+
+ def isempty(self):
+ return not (
+ len(self.parents()) > 1
+ or self.branch() != self.p1().branch()
+ or self.closesbranch()
+ or self.files()
+ )
+
class changectx(basectx):
"""A changecontext object makes access to data related to a particular
@@ -582,7 +597,7 @@
filesadded = None
if filesadded is None:
if compute_on_none:
- filesadded = copies.computechangesetfilesadded(self)
+ filesadded = metadata.computechangesetfilesadded(self)
else:
filesadded = []
return filesadded
@@ -601,7 +616,7 @@
filesremoved = None
if filesremoved is None:
if compute_on_none:
- filesremoved = copies.computechangesetfilesremoved(self)
+ filesremoved = metadata.computechangesetfilesremoved(self)
else:
filesremoved = []
return filesremoved
@@ -2009,6 +2024,11 @@
sparse.aftercommit(self._repo, node)
+ def mergestate(self, clean=False):
+ if clean:
+ return mergestatemod.mergestate.clean(self._repo)
+ return mergestatemod.mergestate.read(self._repo)
+
class committablefilectx(basefilectx):
"""A committablefilectx provides common functionality for a file context
@@ -2310,7 +2330,7 @@
return self._cache[path][b'flags']
else:
raise error.ProgrammingError(
- b"No such file or directory: %s" % self._path
+ b"No such file or directory: %s" % path
)
else:
return self._wrappedctx[path].flags()
@@ -2427,7 +2447,7 @@
return len(self._cache[path][b'data'])
else:
raise error.ProgrammingError(
- b"No such file or directory: %s" % self._path
+ b"No such file or directory: %s" % path
)
return self._wrappedctx[path].size()
@@ -2507,48 +2527,9 @@
def isdirty(self, path):
return path in self._cache
- def isempty(self):
- # We need to discard any keys that are actually clean before the empty
- # commit check.
- self._compact()
- return len(self._cache) == 0
-
def clean(self):
self._cache = {}
- def _compact(self):
- """Removes keys from the cache that are actually clean, by comparing
- them with the underlying context.
-
- This can occur during the merge process, e.g. by passing --tool :local
- to resolve a conflict.
- """
- keys = []
- # This won't be perfect, but can help performance significantly when
- # using things like remotefilelog.
- scmutil.prefetchfiles(
- self.repo(),
- [self.p1().rev()],
- scmutil.matchfiles(self.repo(), self._cache.keys()),
- )
-
- for path in self._cache.keys():
- cache = self._cache[path]
- try:
- underlying = self._wrappedctx[path]
- if (
- underlying.data() == cache[b'data']
- and underlying.flags() == cache[b'flags']
- ):
- keys.append(path)
- except error.ManifestLookupError:
- # Path not in the underlying manifest (created).
- continue
-
- for path in keys:
- del self._cache[path]
- return keys
-
def _markdirty(
self, path, exists, data=None, date=None, flags=b'', copied=None
):
@@ -2867,6 +2848,11 @@
return scmutil.status(modified, added, removed, [], [], [], [])
+ def parents(self):
+ if self._parents[1].node() == nullid:
+ return [self._parents[0]]
+ return self._parents
+
class memfilectx(committablefilectx):
"""memfilectx represents an in-memory file to commit.
--- a/mercurial/copies.py Tue Jul 14 10:25:41 2020 +0200
+++ b/mercurial/copies.py Mon Jul 20 21:56:27 2020 +0530
@@ -8,7 +8,6 @@
from __future__ import absolute_import
import collections
-import multiprocessing
import os
from .i18n import _
@@ -17,7 +16,6 @@
from .revlogutils.flagutil import REVIDX_SIDEDATA
from . import (
- error,
match as matchmod,
node,
pathutil,
@@ -25,7 +23,6 @@
util,
)
-from .revlogutils import sidedata as sidedatamod
from .utils import stringutil
@@ -183,10 +180,27 @@
* p1copies: mapping of copies from p1
* p2copies: mapping of copies from p2
* removed: a list of removed files
+ * ismerged: a callback to know if a file was merged in that revision
"""
cl = repo.changelog
parents = cl.parentrevs
+ def get_ismerged(rev):
+ ctx = repo[rev]
+
+ def ismerged(path):
+ if path not in ctx.files():
+ return False
+ fctx = ctx[path]
+ parents = fctx._filelog.parents(fctx._filenode)
+ nb_parents = 0
+ for n in parents:
+ if n != node.nullid:
+ nb_parents += 1
+ return nb_parents >= 2
+
+ return ismerged
+
if repo.filecopiesmode == b'changeset-sidedata':
changelogrevision = cl.changelogrevision
flags = cl.flags
@@ -218,6 +232,7 @@
def revinfo(rev):
p1, p2 = parents(rev)
+ value = None
if flags(rev) & REVIDX_SIDEDATA:
e = merge_caches.pop(rev, None)
if e is not None:
@@ -228,12 +243,22 @@
removed = c.filesremoved
if p1 != node.nullrev and p2 != node.nullrev:
# XXX some case we over cache, IGNORE
- merge_caches[rev] = (p1, p2, p1copies, p2copies, removed)
+ value = merge_caches[rev] = (
+ p1,
+ p2,
+ p1copies,
+ p2copies,
+ removed,
+ get_ismerged(rev),
+ )
else:
p1copies = {}
p2copies = {}
removed = []
- return p1, p2, p1copies, p2copies, removed
+
+ if value is None:
+ value = (p1, p2, p1copies, p2copies, removed, get_ismerged(rev))
+ return value
else:
@@ -242,7 +267,7 @@
ctx = repo[rev]
p1copies, p2copies = ctx._copies
removed = ctx.filesremoved()
- return p1, p2, p1copies, p2copies, removed
+ return p1, p2, p1copies, p2copies, removed, get_ismerged(rev)
return revinfo
@@ -256,6 +281,7 @@
revinfo = _revinfogetter(repo)
cl = repo.changelog
+ isancestor = cl.isancestorrev # XXX we should add caching to this.
missingrevs = cl.findmissingrevs(common=[a.rev()], heads=[b.rev()])
mrset = set(missingrevs)
roots = set()
@@ -283,10 +309,14 @@
iterrevs.update(roots)
iterrevs.remove(b.rev())
revs = sorted(iterrevs)
- return _combinechangesetcopies(revs, children, b.rev(), revinfo, match)
+ return _combinechangesetcopies(
+ revs, children, b.rev(), revinfo, match, isancestor
+ )
-def _combinechangesetcopies(revs, children, targetrev, revinfo, match):
+def _combinechangesetcopies(
+ revs, children, targetrev, revinfo, match, isancestor
+):
"""combine the copies information for each item of iterrevs
revs: sorted iterable of revision to visit
@@ -305,7 +335,7 @@
# this is a root
copies = {}
for i, c in enumerate(children[r]):
- p1, p2, p1copies, p2copies, removed = revinfo(c)
+ p1, p2, p1copies, p2copies, removed, ismerged = revinfo(c)
if r == p1:
parent = 1
childcopies = p1copies
@@ -319,9 +349,12 @@
}
newcopies = copies
if childcopies:
- newcopies = _chain(newcopies, childcopies)
- # _chain makes a copies, we can avoid doing so in some
- # simple/linear cases.
+ newcopies = copies.copy()
+ for dest, source in pycompat.iteritems(childcopies):
+ prev = copies.get(source)
+ if prev is not None and prev[1] is not None:
+ source = prev[1]
+ newcopies[dest] = (c, source)
assert newcopies is not copies
for f in removed:
if f in newcopies:
@@ -330,7 +363,7 @@
# branches. when there are no other branches, this
# could be avoided.
newcopies = copies.copy()
- del newcopies[f]
+ newcopies[f] = (c, None)
othercopies = all_copies.get(c)
if othercopies is None:
all_copies[c] = newcopies
@@ -338,21 +371,55 @@
# we are the second parent to work on c, we need to merge our
# work with the other.
#
- # Unlike when copies are stored in the filelog, we consider
- # it a copy even if the destination already existed on the
- # other branch. It's simply too expensive to check if the
- # file existed in the manifest.
- #
# In case of conflict, parent 1 take precedence over parent 2.
# This is an arbitrary choice made anew when implementing
# changeset based copies. It was made without regards with
# potential filelog related behavior.
if parent == 1:
- othercopies.update(newcopies)
+ _merge_copies_dict(
+ othercopies, newcopies, isancestor, ismerged
+ )
else:
- newcopies.update(othercopies)
+ _merge_copies_dict(
+ newcopies, othercopies, isancestor, ismerged
+ )
all_copies[c] = newcopies
- return all_copies[targetrev]
+
+ final_copies = {}
+ for dest, (tt, source) in all_copies[targetrev].items():
+ if source is not None:
+ final_copies[dest] = source
+ return final_copies
+
+
+def _merge_copies_dict(minor, major, isancestor, ismerged):
+ """merge two copies-mapping together, minor and major
+
+ In case of conflict, value from "major" will be picked.
+
+ - `isancestor(low_rev, high_rev)`: callable returning True if `low_rev` is
+ an ancestor of `high_rev`,
+
+ - `ismerged(path)`: callable returning True if `path` has been merged in
+ the current revision,
+ """
+ for dest, value in major.items():
+ other = minor.get(dest)
+ if other is None:
+ minor[dest] = value
+ else:
+ new_tt = value[0]
+ other_tt = other[0]
+ if value[1] == other[1]:
+ continue
+ # content from "major" wins, unless it is older
+ # than the branch point or there is a merge
+ if (
+ new_tt == other_tt
+ or not isancestor(new_tt, other_tt)
+ or ismerged(dest)
+ ):
+ minor[dest] = value
def _forwardcopies(a, b, base=None, match=None):
@@ -569,6 +636,12 @@
self.dirmove = {} if dirmove is None else dirmove
self.movewithdir = {} if movewithdir is None else movewithdir
+ def __repr__(self):
+ return (
+ '<branch_copies\n copy=%r\n renamedelete=%r\n dirmove=%r\n movewithdir=%r\n>'
+ % (self.copy, self.renamedelete, self.dirmove, self.movewithdir,)
+ )
+
def _fullcopytracing(repo, c1, c2, base):
""" The full copytracing algorithm which finds all the new files that were
@@ -922,250 +995,3 @@
_filter(wctx.p1(), wctx, new_copies)
for dst, src in pycompat.iteritems(new_copies):
wctx[dst].markcopied(src)
-
-
-def computechangesetfilesadded(ctx):
- """return the list of files added in a changeset
- """
- added = []
- for f in ctx.files():
- if not any(f in p for p in ctx.parents()):
- added.append(f)
- return added
-
-
-def computechangesetfilesremoved(ctx):
- """return the list of files removed in a changeset
- """
- removed = []
- for f in ctx.files():
- if f not in ctx:
- removed.append(f)
- return removed
-
-
-def computechangesetcopies(ctx):
- """return the copies data for a changeset
-
- The copies data are returned as a pair of dictionnary (p1copies, p2copies).
-
- Each dictionnary are in the form: `{newname: oldname}`
- """
- p1copies = {}
- p2copies = {}
- p1 = ctx.p1()
- p2 = ctx.p2()
- narrowmatch = ctx._repo.narrowmatch()
- for dst in ctx.files():
- if not narrowmatch(dst) or dst not in ctx:
- continue
- copied = ctx[dst].renamed()
- if not copied:
- continue
- src, srcnode = copied
- if src in p1 and p1[src].filenode() == srcnode:
- p1copies[dst] = src
- elif src in p2 and p2[src].filenode() == srcnode:
- p2copies[dst] = src
- return p1copies, p2copies
-
-
-def encodecopies(files, copies):
- items = []
- for i, dst in enumerate(files):
- if dst in copies:
- items.append(b'%d\0%s' % (i, copies[dst]))
- if len(items) != len(copies):
- raise error.ProgrammingError(
- b'some copy targets missing from file list'
- )
- return b"\n".join(items)
-
-
-def decodecopies(files, data):
- try:
- copies = {}
- if not data:
- return copies
- for l in data.split(b'\n'):
- strindex, src = l.split(b'\0')
- i = int(strindex)
- dst = files[i]
- copies[dst] = src
- return copies
- except (ValueError, IndexError):
- # Perhaps someone had chosen the same key name (e.g. "p1copies") and
- # used different syntax for the value.
- return None
-
-
-def encodefileindices(files, subset):
- subset = set(subset)
- indices = []
- for i, f in enumerate(files):
- if f in subset:
- indices.append(b'%d' % i)
- return b'\n'.join(indices)
-
-
-def decodefileindices(files, data):
- try:
- subset = []
- if not data:
- return subset
- for strindex in data.split(b'\n'):
- i = int(strindex)
- if i < 0 or i >= len(files):
- return None
- subset.append(files[i])
- return subset
- except (ValueError, IndexError):
- # Perhaps someone had chosen the same key name (e.g. "added") and
- # used different syntax for the value.
- return None
-
-
-def _getsidedata(srcrepo, rev):
- ctx = srcrepo[rev]
- filescopies = computechangesetcopies(ctx)
- filesadded = computechangesetfilesadded(ctx)
- filesremoved = computechangesetfilesremoved(ctx)
- sidedata = {}
- if any([filescopies, filesadded, filesremoved]):
- sortedfiles = sorted(ctx.files())
- p1copies, p2copies = filescopies
- p1copies = encodecopies(sortedfiles, p1copies)
- p2copies = encodecopies(sortedfiles, p2copies)
- filesadded = encodefileindices(sortedfiles, filesadded)
- filesremoved = encodefileindices(sortedfiles, filesremoved)
- if p1copies:
- sidedata[sidedatamod.SD_P1COPIES] = p1copies
- if p2copies:
- sidedata[sidedatamod.SD_P2COPIES] = p2copies
- if filesadded:
- sidedata[sidedatamod.SD_FILESADDED] = filesadded
- if filesremoved:
- sidedata[sidedatamod.SD_FILESREMOVED] = filesremoved
- return sidedata
-
-
-def getsidedataadder(srcrepo, destrepo):
- use_w = srcrepo.ui.configbool(b'experimental', b'worker.repository-upgrade')
- if pycompat.iswindows or not use_w:
- return _get_simple_sidedata_adder(srcrepo, destrepo)
- else:
- return _get_worker_sidedata_adder(srcrepo, destrepo)
-
-
-def _sidedata_worker(srcrepo, revs_queue, sidedata_queue, tokens):
- """The function used by worker precomputing sidedata
-
- It read an input queue containing revision numbers
- It write in an output queue containing (rev, <sidedata-map>)
-
- The `None` input value is used as a stop signal.
-
- The `tokens` semaphore is user to avoid having too many unprocessed
- entries. The workers needs to acquire one token before fetching a task.
- They will be released by the consumer of the produced data.
- """
- tokens.acquire()
- rev = revs_queue.get()
- while rev is not None:
- data = _getsidedata(srcrepo, rev)
- sidedata_queue.put((rev, data))
- tokens.acquire()
- rev = revs_queue.get()
- # processing of `None` is completed, release the token.
- tokens.release()
-
-
-BUFF_PER_WORKER = 50
-
-
-def _get_worker_sidedata_adder(srcrepo, destrepo):
- """The parallel version of the sidedata computation
-
- This code spawn a pool of worker that precompute a buffer of sidedata
- before we actually need them"""
- # avoid circular import copies -> scmutil -> worker -> copies
- from . import worker
-
- nbworkers = worker._numworkers(srcrepo.ui)
-
- tokens = multiprocessing.BoundedSemaphore(nbworkers * BUFF_PER_WORKER)
- revsq = multiprocessing.Queue()
- sidedataq = multiprocessing.Queue()
-
- assert srcrepo.filtername is None
- # queue all tasks beforehand, revision numbers are small and it make
- # synchronisation simpler
- #
- # Since the computation for each node can be quite expensive, the overhead
- # of using a single queue is not revelant. In practice, most computation
- # are fast but some are very expensive and dominate all the other smaller
- # cost.
- for r in srcrepo.changelog.revs():
- revsq.put(r)
- # queue the "no more tasks" markers
- for i in range(nbworkers):
- revsq.put(None)
-
- allworkers = []
- for i in range(nbworkers):
- args = (srcrepo, revsq, sidedataq, tokens)
- w = multiprocessing.Process(target=_sidedata_worker, args=args)
- allworkers.append(w)
- w.start()
-
- # dictionnary to store results for revision higher than we one we are
- # looking for. For example, if we need the sidedatamap for 42, and 43 is
- # received, when shelve 43 for later use.
- staging = {}
-
- def sidedata_companion(revlog, rev):
- sidedata = {}
- if util.safehasattr(revlog, b'filteredrevs'): # this is a changelog
- # Is the data previously shelved ?
- sidedata = staging.pop(rev, None)
- if sidedata is None:
- # look at the queued result until we find the one we are lookig
- # for (shelve the other ones)
- r, sidedata = sidedataq.get()
- while r != rev:
- staging[r] = sidedata
- r, sidedata = sidedataq.get()
- tokens.release()
- return False, (), sidedata
-
- return sidedata_companion
-
-
-def _get_simple_sidedata_adder(srcrepo, destrepo):
- """The simple version of the sidedata computation
-
- It just compute it in the same thread on request"""
-
- def sidedatacompanion(revlog, rev):
- sidedata = {}
- if util.safehasattr(revlog, 'filteredrevs'): # this is a changelog
- sidedata = _getsidedata(srcrepo, rev)
- return False, (), sidedata
-
- return sidedatacompanion
-
-
-def getsidedataremover(srcrepo, destrepo):
- def sidedatacompanion(revlog, rev):
- f = ()
- if util.safehasattr(revlog, 'filteredrevs'): # this is a changelog
- if revlog.flags(rev) & REVIDX_SIDEDATA:
- f = (
- sidedatamod.SD_P1COPIES,
- sidedatamod.SD_P2COPIES,
- sidedatamod.SD_FILESADDED,
- sidedatamod.SD_FILESREMOVED,
- )
- return False, f, {}
-
- return sidedatacompanion
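
The rewritten changeset-centric copy tracing stores a (revision, source) pair per destination; a removal is recorded as (rev, None) so that it still shadows a copy arriving from the other branch, and the None entries are dropped only at the very end. The final filtering step as a toy:

    def finalize(all_copies):
        """Mirror of the final_copies loop: drop tombstoned removals."""
        return {dest: src for dest, (rev, src) in all_copies.items()
                if src is not None}

    copies = {b'renamed.txt': (3, b'orig.txt'),  # copy introduced in rev 3
              b'deleted.txt': (5, None)}         # removal recorded in rev 5
    assert finalize(copies) == {b'renamed.txt': b'orig.txt'}
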
--- a/mercurial/crecord.py Tue Jul 14 10:25:41 2020 +0200
+++ b/mercurial/crecord.py Mon Jul 20 21:56:27 2020 +0530
@@ -20,6 +20,7 @@
open,
)
from . import (
+ diffhelper,
encoding,
error,
patch as patchmod,
@@ -63,15 +64,7 @@
curses.error
except (ImportError, AttributeError):
- # I have no idea if wcurses works with crecord...
- try:
- import wcurses as curses
-
- curses.error
- except (ImportError, AttributeError):
- # wcurses is not shipped on Windows by default, or python is not
- # compiled with curses
- curses = False
+ curses = False
class fallbackerror(error.Abort):
@@ -424,7 +417,7 @@
contextlen = (
len(self.before) + len(self.after) + removedconvertedtocontext
)
- if self.after and self.after[-1] == b'\\ No newline at end of file\n':
+ if self.after and self.after[-1] == diffhelper.MISSING_NEWLINE_MARKER:
contextlen -= 1
fromlen = contextlen + self.removed
tolen = contextlen + self.added
@@ -508,8 +501,12 @@
"""
dels = []
adds = []
+ noeol = False
for line in self.changedlines:
text = line.linetext
+ if line.linetext == diffhelper.MISSING_NEWLINE_MARKER:
+ noeol = True
+ break
if line.applied:
if text.startswith(b'+'):
dels.append(text[1:])
@@ -519,6 +516,9 @@
dels.append(text[1:])
adds.append(text[1:])
hunk = [b'-%s' % l for l in dels] + [b'+%s' % l for l in adds]
+ if noeol and hunk:
+ # Remove the newline from the end of the hunk.
+ hunk[-1] = hunk[-1][:-1]
h = self._hunk
return patchmod.recordhunk(
h.header, h.toline, h.fromline, h.proc, h.before, hunk, h.after
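
A minimal standalone sketch of the no-newline handling added to crecord above,
assuming the same marker semantics (names here are illustrative, not Mercurial
APIs):

    MISSING_NEWLINE_MARKER = b'\\ No newline at end of file\n'

    def apply_noeol(hunklines):
        # Drop the marker itself and trim the trailing newline from the
        # last remaining line, mirroring the crecord hunk above.
        noeol = MISSING_NEWLINE_MARKER in hunklines
        lines = [l for l in hunklines if l != MISSING_NEWLINE_MARKER]
        if noeol and lines:
            lines[-1] = lines[-1][:-1]
        return lines

    assert apply_noeol([b'+foo\n', MISSING_NEWLINE_MARKER]) == [b'+foo']
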
--- a/mercurial/debugcommands.py Tue Jul 14 10:25:41 2020 +0200
+++ b/mercurial/debugcommands.py Mon Jul 20 21:56:27 2020 +0530
@@ -58,7 +58,7 @@
localrepo,
lock as lockmod,
logcmdutil,
- merge as mergemod,
+ mergestate as mergestatemod,
obsolete,
obsutil,
pathutil,
@@ -127,6 +127,23 @@
ui.write(b'%d:%s\n' % (r.rev(a), hex(a)))
+@command(b'debugantivirusrunning', [])
+def debugantivirusrunning(ui, repo):
+ """attempt to trigger an antivirus scanner to see if one is active"""
+    with repo.cachevfs.open(b'eicar-test-file.com', b'wb') as f:
+ f.write(
+ util.b85decode(
+ # This is a base85-armored version of the EICAR test file. See
+ # https://en.wikipedia.org/wiki/EICAR_test_file for details.
+ b'ST#=}P$fV?P+K%yP+C|uG$>GBDK|qyDK~v2MM*<JQY}+dK~6+LQba95P'
+ b'E<)&Nm5l)EmTEQR4qnHOhq9iNGnJx'
+ )
+ )
+ # Give an AV engine time to scan the file.
+ time.sleep(2)
+    util.unlink(repo.cachevfs.join(b'eicar-test-file.com'))
+
+
@command(b'debugapplystreamclonebundle', [], b'FILE')
def debugapplystreamclonebundle(ui, repo, fname):
"""apply a stream clone bundle file"""
@@ -1465,8 +1482,8 @@
fm = ui.formatter(b'debuginstall', opts)
fm.startitem()
- # encoding
- fm.write(b'encoding', _(b"checking encoding (%s)...\n"), encoding.encoding)
+ # encoding might be unknown or wrong. don't translate these messages.
+ fm.write(b'encoding', b"checking encoding (%s)...\n", encoding.encoding)
err = None
try:
codecs.lookup(pycompat.sysstr(encoding.encoding))
@@ -1476,7 +1493,7 @@
fm.condwrite(
err,
b'encodingerror',
- _(b" %s\n (check that your locale is properly set)\n"),
+ b" %s\n (check that your locale is properly set)\n",
err,
)
@@ -1650,13 +1667,6 @@
fm.plain(_(b'checking "re2" regexp engine (%s)\n') % re2)
fm.data(re2=bool(util._re2))
- rust_debug_mod = policy.importrust("debug")
- if rust_debug_mod is not None:
- re2_rust = b'installed' if rust_debug_mod.re2_installed else b'missing'
-
- msg = b'checking "re2" regexp engine Rust bindings (%s)\n'
- fm.plain(_(msg % re2_rust))
-
# templates
p = templater.templatepaths()
fm.write(b'templatedirs', b'checking templates (%s)...\n', b' '.join(p))
@@ -1974,7 +1984,7 @@
was chosen."""
if ui.verbose:
- ms = mergemod.mergestate(repo)
+ ms = mergestatemod.mergestate(repo)
# sort so that reasonable information is on top
v1records = ms._readrecordsv1()
@@ -2008,7 +2018,7 @@
b'"}'
)
- ms = mergemod.mergestate.read(repo)
+ ms = mergestatemod.mergestate.read(repo)
fm = ui.formatter(b'debugmergestate', opts)
fm.startitem()
@@ -2034,8 +2044,8 @@
state = ms._state[f]
fm_files.data(state=state[0])
if state[0] in (
- mergemod.MERGE_RECORD_UNRESOLVED,
- mergemod.MERGE_RECORD_RESOLVED,
+ mergestatemod.MERGE_RECORD_UNRESOLVED,
+ mergestatemod.MERGE_RECORD_RESOLVED,
):
fm_files.data(local_key=state[1])
fm_files.data(local_path=state[2])
@@ -2045,8 +2055,8 @@
fm_files.data(other_node=state[6])
fm_files.data(local_flags=state[7])
elif state[0] in (
- mergemod.MERGE_RECORD_UNRESOLVED_PATH,
- mergemod.MERGE_RECORD_RESOLVED_PATH,
+ mergestatemod.MERGE_RECORD_UNRESOLVED_PATH,
+ mergestatemod.MERGE_RECORD_RESOLVED_PATH,
):
fm_files.data(renamed_path=state[1])
fm_files.data(rename_side=state[2])
@@ -2658,6 +2668,13 @@
ui.write(_(b"%s not renamed\n") % rel)
+@command(b'debugrequires|debugrequirements', [], b'')
+def debugrequirements(ui, repo):
+    """print the current repo requirements"""
+ for r in sorted(repo.requirements):
+ ui.write(b"%s\n" % r)
+
+
@command(
b'debugrevlog',
cmdutil.debugrevlogopts + [(b'd', b'dump', False, _(b'dump index data'))],
--- a/mercurial/diffhelper.py Tue Jul 14 10:25:41 2020 +0200
+++ b/mercurial/diffhelper.py Mon Jul 20 21:56:27 2020 +0530
@@ -14,6 +14,8 @@
pycompat,
)
+MISSING_NEWLINE_MARKER = b'\\ No newline at end of file\n'
+
def addlines(fp, hunk, lena, lenb, a, b):
"""Read lines from fp into the hunk
@@ -32,7 +34,7 @@
s = fp.readline()
if not s:
raise error.ParseError(_(b'incomplete hunk'))
- if s == b"\\ No newline at end of file\n":
+ if s == MISSING_NEWLINE_MARKER:
fixnewline(hunk, a, b)
continue
if s == b'\n' or s == b'\r\n':
--- a/mercurial/dirstate.py Tue Jul 14 10:25:41 2020 +0200
+++ b/mercurial/dirstate.py Mon Jul 20 21:56:27 2020 +0530
@@ -187,7 +187,7 @@
@propertycache
def _checkexec(self):
- return util.checkexec(self._root)
+ return bool(util.checkexec(self._root))
@propertycache
def _checkcase(self):
@@ -1114,6 +1114,7 @@
unknown,
warnings,
bad,
+ traversed,
) = rustmod.status(
self._map._rustmap,
matcher,
@@ -1124,7 +1125,13 @@
bool(list_clean),
bool(list_ignored),
bool(list_unknown),
+ bool(matcher.traversedir),
)
+
+ if matcher.traversedir:
+ for dir in traversed:
+ matcher.traversedir(dir)
+
if self._ui.warn:
for item in warnings:
if isinstance(item, tuple):
@@ -1200,10 +1207,8 @@
use_rust = False
elif sparse.enabled:
use_rust = False
- elif match.traversedir is not None:
- use_rust = False
elif not isinstance(match, allowed_matchers):
- # Matchers have yet to be implemented
+ # Some matchers have yet to be implemented
use_rust = False
if use_rust:
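
A hedged sketch of the traversed-directory replay introduced above; the Rust
status call itself is stubbed out since only the replay pattern matters here:

    def replay_traversed(traversedir, traversed):
        # The Rust status call now collects visited directories when a
        # callback was requested; they are replayed in Python afterwards.
        if traversedir is not None:
            for d in traversed:
                traversedir(d)

    seen = []
    replay_traversed(seen.append, [b'a', b'a/b'])
    assert seen == [b'a', b'a/b']
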
--- a/mercurial/discovery.py Tue Jul 14 10:25:41 2020 +0200
+++ b/mercurial/discovery.py Mon Jul 20 21:56:27 2020 +0530
@@ -41,8 +41,8 @@
any longer.
"heads" is either the supplied heads, or else the remote's heads.
"ancestorsof" if not None, restrict the discovery to a subset defined by
- these nodes. Changeset outside of this set won't be considered (and
- won't appears in "common")
+    these nodes. Changesets outside of this set won't be considered (but may
+ still appear in "common").
If you pass heads and they are all known locally, the response lists just
these heads in "common" and in "heads".
@@ -75,28 +75,35 @@
class outgoing(object):
- '''Represents the set of nodes present in a local repo but not in a
- (possibly) remote one.
+ '''Represents the result of a findcommonoutgoing() call.
Members:
- missing is a list of all nodes present in local but not in remote.
- common is a list of all nodes shared between the two repos.
- excluded is the list of missing changeset that shouldn't be sent remotely.
- missingheads is the list of heads of missing.
+ ancestorsof is a list of the nodes whose ancestors are included in the
+ outgoing operation.
+
+ missing is a list of those ancestors of ancestorsof that are present in
+ local but not in remote.
+
+ common is a set containing revs common between the local and the remote
+ repository (at least all of those that are ancestors of ancestorsof).
+
commonheads is the list of heads of common.
- The sets are computed on demand from the heads, unless provided upfront
+    excluded is the list of missing changesets that shouldn't be sent
+ remotely.
+
+ Some members are computed on demand from the heads, unless provided upfront
by discovery.'''
def __init__(
- self, repo, commonheads=None, missingheads=None, missingroots=None
+ self, repo, commonheads=None, ancestorsof=None, missingroots=None
):
# at least one of them must not be set
assert None in (commonheads, missingroots)
cl = repo.changelog
- if missingheads is None:
- missingheads = cl.heads()
+ if ancestorsof is None:
+ ancestorsof = cl.heads()
if missingroots:
discbases = []
for n in missingroots:
@@ -104,14 +111,14 @@
# TODO remove call to nodesbetween.
# TODO populate attributes on outgoing instance instead of setting
# discbases.
- csets, roots, heads = cl.nodesbetween(missingroots, missingheads)
+ csets, roots, heads = cl.nodesbetween(missingroots, ancestorsof)
included = set(csets)
- missingheads = heads
+ ancestorsof = heads
commonheads = [n for n in discbases if n not in included]
elif not commonheads:
commonheads = [nullid]
self.commonheads = commonheads
- self.missingheads = missingheads
+ self.ancestorsof = ancestorsof
self._revlog = cl
self._common = None
self._missing = None
@@ -119,7 +126,7 @@
def _computecommonmissing(self):
sets = self._revlog.findcommonmissing(
- self.commonheads, self.missingheads
+ self.commonheads, self.ancestorsof
)
self._common, self._missing = sets
@@ -135,6 +142,17 @@
self._computecommonmissing()
return self._missing
+ @property
+ def missingheads(self):
+ util.nouideprecwarn(
+ b'outgoing.missingheads never contained what the name suggests and '
+ b'was renamed to outgoing.ancestorsof. check your code for '
+ b'correctness.',
+ b'5.5',
+ stacklevel=2,
+ )
+ return self.ancestorsof
+
def findcommonoutgoing(
repo, other, onlyheads=None, force=False, commoninc=None, portable=False
@@ -149,7 +167,7 @@
If commoninc is given, it must be the result of a prior call to
findcommonincoming(repo, other, force) to avoid recomputing it here.
- If portable is given, compute more conservative common and missingheads,
+ If portable is given, compute more conservative common and ancestorsof,
to make bundles created from the instance more portable.'''
# declare an empty outgoing object to be filled later
og = outgoing(repo, None, None)
@@ -164,10 +182,10 @@
# compute outgoing
mayexclude = repo._phasecache.phaseroots[phases.secret] or repo.obsstore
if not mayexclude:
- og.missingheads = onlyheads or repo.heads()
+ og.ancestorsof = onlyheads or repo.heads()
elif onlyheads is None:
# use visible heads as it should be cached
- og.missingheads = repo.filtered(b"served").heads()
+ og.ancestorsof = repo.filtered(b"served").heads()
og.excluded = [ctx.node() for ctx in repo.set(b'secret() or extinct()')]
else:
# compute common, missing and exclude secret stuff
@@ -182,12 +200,12 @@
else:
missing.append(node)
if len(missing) == len(allmissing):
- missingheads = onlyheads
+ ancestorsof = onlyheads
else: # update missing heads
- missingheads = phases.newheads(repo, onlyheads, excluded)
- og.missingheads = missingheads
+ ancestorsof = phases.newheads(repo, onlyheads, excluded)
+ og.ancestorsof = ancestorsof
if portable:
- # recompute common and missingheads as if -r<rev> had been given for
+ # recompute common and ancestorsof as if -r<rev> had been given for
# each head of missing, and --base <rev> for each head of the proper
# ancestors of missing
og._computecommonmissing()
@@ -195,7 +213,7 @@
missingrevs = {cl.rev(n) for n in og._missing}
og._common = set(cl.ancestors(missingrevs)) - missingrevs
commonheads = set(og.commonheads)
- og.missingheads = [h for h in og.missingheads if h not in commonheads]
+ og.ancestorsof = [h for h in og.ancestorsof if h not in commonheads]
return og
@@ -268,7 +286,7 @@
# If there are no obsstore, no post processing are needed.
if repo.obsstore:
torev = repo.changelog.rev
- futureheads = {torev(h) for h in outgoing.missingheads}
+ futureheads = {torev(h) for h in outgoing.ancestorsof}
futureheads |= {torev(h) for h in outgoing.commonheads}
allfuturecommon = repo.changelog.ancestors(futureheads, inclusive=True)
for branch, heads in sorted(pycompat.iteritems(headssum)):
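
The missingheads attribute survives as a deprecated alias for ancestorsof. A
generic sketch of that alias pattern, using the stdlib warning machinery in
place of util.nouideprecwarn:

    import warnings

    class Outgoing(object):
        def __init__(self, ancestorsof):
            self.ancestorsof = ancestorsof

        @property
        def missingheads(self):
            # deprecated spelling kept for third-party callers
            warnings.warn(
                'missingheads was renamed to ancestorsof',
                DeprecationWarning,
                stacklevel=2,
            )
            return self.ancestorsof

    og = Outgoing([b'deadbeef'])
    assert og.missingheads == og.ancestorsof
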
--- a/mercurial/dispatch.py Tue Jul 14 10:25:41 2020 +0200
+++ b/mercurial/dispatch.py Mon Jul 20 21:56:27 2020 +0530
@@ -104,41 +104,46 @@
def run():
"""run the command in sys.argv"""
- initstdio()
- with tracing.log('parse args into request'):
- req = request(pycompat.sysargv[1:])
- err = None
try:
- status = dispatch(req)
- except error.StdioError as e:
- err = e
- status = -1
-
- # In all cases we try to flush stdio streams.
- if util.safehasattr(req.ui, b'fout'):
- assert req.ui is not None # help pytype
- assert req.ui.fout is not None # help pytype
+ initstdio()
+ with tracing.log('parse args into request'):
+ req = request(pycompat.sysargv[1:])
+ err = None
try:
- req.ui.fout.flush()
- except IOError as e:
+ status = dispatch(req)
+ except error.StdioError as e:
err = e
status = -1
- if util.safehasattr(req.ui, b'ferr'):
- assert req.ui is not None # help pytype
- assert req.ui.ferr is not None # help pytype
- try:
- if err is not None and err.errno != errno.EPIPE:
- req.ui.ferr.write(
- b'abort: %s\n' % encoding.strtolocal(err.strerror)
- )
- req.ui.ferr.flush()
- # There's not much we can do about an I/O error here. So (possibly)
- # change the status code and move on.
- except IOError:
- status = -1
+ # In all cases we try to flush stdio streams.
+ if util.safehasattr(req.ui, b'fout'):
+ assert req.ui is not None # help pytype
+ assert req.ui.fout is not None # help pytype
+ try:
+ req.ui.fout.flush()
+ except IOError as e:
+ err = e
+ status = -1
- _silencestdio()
+ if util.safehasattr(req.ui, b'ferr'):
+ assert req.ui is not None # help pytype
+ assert req.ui.ferr is not None # help pytype
+ try:
+ if err is not None and err.errno != errno.EPIPE:
+ req.ui.ferr.write(
+ b'abort: %s\n' % encoding.strtolocal(err.strerror)
+ )
+ req.ui.ferr.flush()
+ # There's not much we can do about an I/O error here. So (possibly)
+ # change the status code and move on.
+ except IOError:
+ status = -1
+
+ _silencestdio()
+ except KeyboardInterrupt:
+ # Catch early/late KeyboardInterrupt as last ditch. Here nothing will
+ # be printed to console to avoid another IOError/KeyboardInterrupt.
+ status = -1
sys.exit(status & 255)
--- a/mercurial/error.py Tue Jul 14 10:25:41 2020 +0200
+++ b/mercurial/error.py Mon Jul 20 21:56:27 2020 +0530
@@ -106,6 +106,22 @@
__bytes__ = _tobytes
+class ConflictResolutionRequired(InterventionRequired):
+    """Exception raised when a continuable command requires merge conflict resolution."""
+
+ def __init__(self, opname):
+ from .i18n import _
+
+ self.opname = opname
+ InterventionRequired.__init__(
+ self,
+ _(
+ b"unresolved conflicts (see 'hg resolve', then 'hg %s --continue')"
+ )
+ % opname,
+ )
+
+
class Abort(Hint, Exception):
"""Raised if a command needs to print an error and exit."""
--- a/mercurial/exchange.py Tue Jul 14 10:25:41 2020 +0200
+++ b/mercurial/exchange.py Mon Jul 20 21:56:27 2020 +0530
@@ -503,7 +503,7 @@
@util.propertycache
def futureheads(self):
"""future remote heads if the changeset push succeeds"""
- return self.outgoing.missingheads
+ return self.outgoing.ancestorsof
@util.propertycache
def fallbackheads(self):
@@ -512,20 +512,20 @@
# not target to push, all common are relevant
return self.outgoing.commonheads
unfi = self.repo.unfiltered()
- # I want cheads = heads(::missingheads and ::commonheads)
- # (missingheads is revs with secret changeset filtered out)
+ # I want cheads = heads(::ancestorsof and ::commonheads)
+ # (ancestorsof is revs with secret changeset filtered out)
#
# This can be expressed as:
- # cheads = ( (missingheads and ::commonheads)
- # + (commonheads and ::missingheads))"
+ # cheads = ( (ancestorsof and ::commonheads)
+ # + (commonheads and ::ancestorsof))"
# )
#
# while trying to push we already computed the following:
# common = (::commonheads)
- # missing = ((commonheads::missingheads) - commonheads)
+ # missing = ((commonheads::ancestorsof) - commonheads)
#
# We can pick:
- # * missingheads part of common (::commonheads)
+ # * ancestorsof part of common (::commonheads)
common = self.outgoing.common
rev = self.repo.changelog.index.rev
cheads = [node for node in self.revs if rev(node) in common]
@@ -905,27 +905,32 @@
# if repo.obsstore == False --> no obsolete
# then, save the iteration
if unfi.obsstore:
- # this message are here for 80 char limit reason
- mso = _(b"push includes obsolete changeset: %s!")
- mspd = _(b"push includes phase-divergent changeset: %s!")
- mscd = _(b"push includes content-divergent changeset: %s!")
- mst = {
- b"orphan": _(b"push includes orphan changeset: %s!"),
- b"phase-divergent": mspd,
- b"content-divergent": mscd,
- }
- # If we are to push if there is at least one
- # obsolete or unstable changeset in missing, at
- # least one of the missinghead will be obsolete or
- # unstable. So checking heads only is ok
- for node in outgoing.missingheads:
+ obsoletes = []
+ unstables = []
+ for node in outgoing.missing:
ctx = unfi[node]
if ctx.obsolete():
- raise error.Abort(mso % ctx)
+ obsoletes.append(ctx)
elif ctx.isunstable():
- # TODO print more than one instability in the abort
- # message
- raise error.Abort(mst[ctx.instabilities()[0]] % ctx)
+ unstables.append(ctx)
+ if obsoletes or unstables:
+ msg = b""
+ if obsoletes:
+ msg += _(b"push includes obsolete changesets:\n")
+ msg += b"\n".join(b' %s' % ctx for ctx in obsoletes)
+ if unstables:
+ if msg:
+ msg += b"\n"
+ msg += _(b"push includes unstable changesets:\n")
+ msg += b"\n".join(
+ b' %s (%s)'
+ % (
+ ctx,
+ b", ".join(_(ins) for ins in ctx.instabilities()),
+ )
+ for ctx in unstables
+ )
+ raise error.Abort(msg)
discovery.checkheads(pushop)
return True
@@ -969,7 +974,7 @@
"""
# * 'force' do not check for push race,
# * if we don't push anything, there are nothing to check.
- if not pushop.force and pushop.outgoing.missingheads:
+ if not pushop.force and pushop.outgoing.ancestorsof:
allowunrelated = b'related' in bundler.capabilities.get(
b'checkheads', ()
)
@@ -1024,12 +1029,12 @@
hasphaseheads = b'heads' in b2caps.get(b'phases', ())
if pushop.remotephases is not None and hasphaseheads:
# check that the remote phase has not changed
- checks = [[] for p in phases.allphases]
+ checks = {p: [] for p in phases.allphases}
checks[phases.public].extend(pushop.remotephases.publicheads)
checks[phases.draft].extend(pushop.remotephases.draftroots)
- if any(checks):
- for nodes in checks:
- nodes.sort()
+ if any(pycompat.itervalues(checks)):
+ for phase in checks:
+ checks[phase].sort()
checkdata = phases.binaryencode(checks)
bundler.newpart(b'check:phases', data=checkdata)
@@ -1104,7 +1109,7 @@
"""push phase information through a bundle2 - binary part"""
pushop.stepsdone.add(b'phases')
if pushop.outdatedphases:
- updates = [[] for p in phases.allphases]
+ updates = {p: [] for p in phases.allphases}
updates[0].extend(h.node() for h in pushop.outdatedphases)
phasedata = phases.binaryencode(updates)
bundler.newpart(b'phase-heads', data=phasedata)
@@ -2658,9 +2663,9 @@
headsbyphase[phases.public].add(node(r))
# transform data in a format used by the encoding function
- phasemapping = []
- for phase in phases.allphases:
- phasemapping.append(sorted(headsbyphase[phase]))
+ phasemapping = {
+ phase: sorted(headsbyphase[phase]) for phase in phases.allphases
+ }
# generate the actual part
phasedata = phases.binaryencode(phasemapping)
@@ -3025,6 +3030,23 @@
)
continue
+ if b'REQUIREDRAM' in entry:
+ try:
+ requiredram = util.sizetoint(entry[b'REQUIREDRAM'])
+ except error.ParseError:
+ repo.ui.debug(
+ b'filtering %s due to a bad REQUIREDRAM attribute\n'
+ % entry[b'URL']
+ )
+ continue
+ actualram = repo.ui.estimatememory()
+ if actualram is not None and actualram * 0.66 < requiredram:
+ repo.ui.debug(
+ b'filtering %s as it needs more than 2/3 of system memory\n'
+ % entry[b'URL']
+ )
+ continue
+
newentries.append(entry)
return newentries
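
A sketch of the REQUIREDRAM filtering rule added above, with a simplified
stand-in for util.sizetoint (the real parser accepts more suffixes):

    def sizetoint(s):
        # simplified stand-in for util.sizetoint
        units = {b'kb': 1 << 10, b'mb': 1 << 20, b'gb': 1 << 30}
        s = s.strip().lower()
        for suffix, factor in units.items():
            if s.endswith(suffix):
                return int(float(s[: -len(suffix)]) * factor)
        return int(s)

    def keep_entry(entry, actualram):
        # keep a clone bundle unless it needs more than ~2/3 of RAM
        required = entry.get(b'REQUIREDRAM')
        if required is None or actualram is None:
            return True
        return actualram * 0.66 >= sizetoint(required)

    assert keep_entry({b'REQUIREDRAM': b'1GB'}, 8 << 30)
    assert not keep_entry({b'REQUIREDRAM': b'12GB'}, 8 << 30)
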
--- a/mercurial/exchangev2.py Tue Jul 14 10:25:41 2020 +0200
+++ b/mercurial/exchangev2.py Mon Jul 20 21:56:27 2020 +0530
@@ -82,15 +82,12 @@
phases.registernew(repo, tr, phases.draft, csetres[b'added'])
# And adjust the phase of all changesets accordingly.
- for phase in phases.phasenames:
+ for phasenumber, phase in phases.phasenames.items():
if phase == b'secret' or not csetres[b'nodesbyphase'][phase]:
continue
phases.advanceboundary(
- repo,
- tr,
- phases.phasenames.index(phase),
- csetres[b'nodesbyphase'][phase],
+ repo, tr, phasenumber, csetres[b'nodesbyphase'][phase],
)
# Write bookmark updates.
@@ -361,7 +358,7 @@
# so we can set the linkrev accordingly when manifests are added.
manifestnodes[cl.rev(node)] = revision.manifest
- nodesbyphase = {phase: set() for phase in phases.phasenames}
+ nodesbyphase = {phase: set() for phase in phases.phasenames.values()}
remotebookmarks = {}
# addgroup() expects a 7-tuple describing revisions. This normalizes
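
The changes above treat phases.phasenames as a mapping from phase number to
name, so call sites iterate items()/values() instead of calling index(). A toy
illustration (only the three classic phases are shown; the real mapping has
more entries):

    phasenames = {0: b'public', 1: b'draft', 2: b'secret'}

    nodesbyphase = {name: set() for name in phasenames.values()}
    nodesbyphase[b'draft'].add(b'abc123')

    advanced = []
    for number, name in phasenames.items():
        if name == b'secret' or not nodesbyphase[name]:
            continue
        # the real code calls phases.advanceboundary(repo, tr, number, ...)
        advanced.append((number, name))

    assert advanced == [(1, b'draft')]
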
--- a/mercurial/extensions.py Tue Jul 14 10:25:41 2020 +0200
+++ b/mercurial/extensions.py Mon Jul 20 21:56:27 2020 +0530
@@ -706,12 +706,17 @@
'''find paths of disabled extensions. returns a dict of {name: path}'''
import hgext
- extpath = os.path.dirname(
- os.path.abspath(pycompat.fsencode(hgext.__file__))
- )
- try: # might not be a filesystem path
- files = os.listdir(extpath)
- except OSError:
+    # The hgext package might not have a __file__ attribute (e.g. in
+    # PyOxidizer), and even when it does, the path might not be on a real
+    # filesystem.
+ if util.safehasattr(hgext, '__file__'):
+ extpath = os.path.dirname(
+ os.path.abspath(pycompat.fsencode(hgext.__file__))
+ )
+ try:
+ files = os.listdir(extpath)
+ except OSError:
+ return {}
+ else:
return {}
exts = {}
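
A minimal sketch of the same guard for frozen environments, using plain
hasattr in place of util.safehasattr:

    import os

    def list_package_dir(pkg):
        # Frozen loaders (e.g. PyOxidizer) may not set __file__ at all,
        # and even when set the path may not exist on a real filesystem.
        if not hasattr(pkg, '__file__') or pkg.__file__ is None:
            return {}
        path = os.path.dirname(os.path.abspath(pkg.__file__))
        try:
            return {n: os.path.join(path, n) for n in os.listdir(path)}
        except OSError:
            return {}

    import email  # any on-disk stdlib package works for the demo
    assert list_package_dir(email)
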
--- a/mercurial/filemerge.py Tue Jul 14 10:25:41 2020 +0200
+++ b/mercurial/filemerge.py Mon Jul 20 21:56:27 2020 +0530
@@ -98,6 +98,9 @@
self._ctx = ctx
self._f = f
+ def __bytes__(self):
+ return b'absent file %s@%s' % (self._f, self._ctx)
+
def path(self):
return self._f
--- a/mercurial/fileset.py Tue Jul 14 10:25:41 2020 +0200
+++ b/mercurial/fileset.py Mon Jul 20 21:56:27 2020 +0530
@@ -16,7 +16,7 @@
error,
filesetlang,
match as matchmod,
- merge,
+ mergestate as mergestatemod,
pycompat,
registrar,
scmutil,
@@ -245,7 +245,7 @@
getargs(x, 0, 0, _(b"resolved takes no arguments"))
if mctx.ctx.rev() is not None:
return mctx.never()
- ms = merge.mergestate.read(mctx.ctx.repo())
+ ms = mergestatemod.mergestate.read(mctx.ctx.repo())
return mctx.predicate(
lambda f: f in ms and ms[f] == b'r', predrepr=b'resolved'
)
@@ -259,7 +259,7 @@
getargs(x, 0, 0, _(b"unresolved takes no arguments"))
if mctx.ctx.rev() is not None:
return mctx.never()
- ms = merge.mergestate.read(mctx.ctx.repo())
+ ms = mergestatemod.mergestate.read(mctx.ctx.repo())
return mctx.predicate(
lambda f: f in ms and ms[f] == b'u', predrepr=b'unresolved'
)
--- a/mercurial/help.py Tue Jul 14 10:25:41 2020 +0200
+++ b/mercurial/help.py Mon Jul 20 21:56:27 2020 +0530
@@ -345,6 +345,11 @@
internalstable = sorted(
[
+ (
+ [b'bid-merge'],
+ _(b'Bid Merge Algorithm'),
+ loaddoc(b'bid-merge', subdir=b'internals'),
+ ),
([b'bundle2'], _(b'Bundle2'), loaddoc(b'bundle2', subdir=b'internals')),
([b'bundles'], _(b'Bundles'), loaddoc(b'bundles', subdir=b'internals')),
([b'cbor'], _(b'CBOR'), loaddoc(b'cbor', subdir=b'internals')),
--- a/mercurial/helptext/config.txt Tue Jul 14 10:25:41 2020 +0200
+++ b/mercurial/helptext/config.txt Mon Jul 20 21:56:27 2020 +0530
@@ -408,6 +408,24 @@
If no suitable authentication entry is found, the user is prompted
for credentials as usual if required by the remote.
+``cmdserver``
+-------------
+
+Controls command server settings. (ADVANCED)
+
+``message-encodings``
+ List of encodings for the ``m`` (message) channel. The first encoding
+ supported by the server will be selected and advertised in the hello
+ message. This is useful only when ``ui.message-output`` is set to
+ ``channel``. Supported encodings are ``cbor``.
+
+``shutdown-on-interrupt``
+  If set to false, the server's main loop will continue running after a
+  SIGINT is received. ``runcommand`` requests can still be interrupted by
+ SIGINT. Close the write end of the pipe to shut down the server
+ process gracefully.
+ (default: True)
+
``color``
---------
@@ -1872,6 +1890,15 @@
applicable for `hg amend`, `hg commit --amend` and `hg uncommit` in the
current version.
+``empty-successor``
+
+ Control what happens with empty successors that are the result of rewrite
+ operations. If set to ``skip``, the successor is not created. If set to
+ ``keep``, the empty successor is created and kept.
+
+ Currently, only the rebase and absorb commands consider this configuration.
+ (EXPERIMENTAL)
+
``storage``
-----------
@@ -2371,6 +2398,8 @@
``message-output``
Where to write status and error messages. (default: ``stdio``)
+ ``channel``
+    Use a separate channel for structured output. (Command-server only)
``stderr``
Everything to stderr.
``stdio``
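
An illustrative configuration fragment exercising the new command-server
settings documented above (values are examples, not defaults)::

    [cmdserver]
    message-encodings = cbor
    shutdown-on-interrupt = False

    [ui]
    message-output = channel
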
--- a/mercurial/helptext/flags.txt Tue Jul 14 10:25:41 2020 +0200
+++ b/mercurial/helptext/flags.txt Mon Jul 20 21:56:27 2020 +0530
@@ -10,7 +10,9 @@
Every flag has at least a long name, such as --repository. Some flags may also
have a short one-letter name, such as the equivalent -R. Using the short or long
-name is equivalent and has the same effect.
+name is equivalent and has the same effect. The long name may be abbreviated to
+any unambiguous prefix. For example, :hg:`commit --amend` can be abbreviated
+to :hg:`commit --am`.
Flags that have a short name can also be bundled together - for instance, to
specify both --edit (short -e) and --interactive (short -i), one could use::
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/mercurial/helptext/internals/bid-merge.txt Mon Jul 20 21:56:27 2020 +0530
@@ -0,0 +1,115 @@
+Bid merge is a merge algorithm introduced in Mercurial 3.0 for dealing with
+complicated merges.
+
+Bid merge is controlled by the `merge.preferancestor` configuration option. The
+default is `merge.preferancestor=*`, which enables bid merge. Mercurial will
+perform a bid merge in the cases where a merge otherwise would emit a "note:
+using X as ancestor of X and X" message.
+
+Problem it is solving
+=====================
+
+Mercurial's core merge algorithm is the traditional "three-way merge". This
+algorithm combines all the changes in two changesets relative to a common
+ancestor. But with complex DAGs, it is often possible to have more than one
+"best" common ancestor, with no easy way to distinguish between them.
+
+For example, C and D have two common ancestors in the following graph::
+
+ C D
+ |\ /|
+ | x |
+ |/ \|
+ A B
+ \ /
+ R
+
+Mercurial used to arbitrarily choose the first of these, which can result in
+various issues:
+
+* unexpected hard 3-way merges that would have been completely trivial if
+ another ancestor had been used
+
+* conflicts that have already been resolved may reappear
+
+* changes that have been reversed can silently oscillate
+
+One common problem is a merge that would be trivial to resolve with the "right"
+ancestor because only one side changed. With another ancestor where the same
+lines differ, it instead produces an annoying 3-way merge.
+
+Other systems like Git have attacked some of these problems with a so-called
+"recursive" merge strategy, that internally merges all the possible ancestors
+to produce a single "virtual" ancestor to merge against. This is awkward as the
+internal merge itself may involve conflicts (and possibly even multiple levels
+of recursion), which either requires choosing a conflict disposition (e.g.
+always choose the local version) or exposing the user to extremely confusing
+merge prompts for old revisions. Generating the virtual merge also potentially
+involves invoking filters and extensions.
+
+Concept
+=======
+
+(Bid merge is pretty much the same as Consensus merge.)
+
+Bid merge is a strategy that attempts to sensibly combine the results of the
+multiple possible three-way merges directly without producing a virtual
+ancestor. The basic idea is that for each ancestor, we perform a top-level
+manifest merge and generate a list of proposed actions, which we consider
+"bids". We then make an "auction" among all the bids for each file and pick the
+most favourable. Some files might be trivial to merge with one ancestor, other
+files with another ancestor.
+
+The most obvious advantage of considering multiple ancestors is the case where
+some of the bids for a file are a "real" (interactive) merge but where one or
+more bids just take one of the parent revisions. A bid for just taking an
+existing revision is very simple and low risk and is an obvious winner.
+
+The auction algorithm for merging the bids is so far very simple:
+
+* If there is consensus from all the ancestors, there is no doubt what to do. A
+ clever result will be indistinguishable from just picking a random bid. The
+ consensus case is thus not only trivial, it is also already handled
+ perfectly.
+
+* If a "keep local" or "get from other" action is an option (and there is only
+ one such option), just do it.
+
+* If the auction doesn't have a single clear winner, pick one of the bids
+ "randomly" - just as it would have done if only one ancestor was considered.
+
+This meta merge algorithm has room for future improvements, especially for
+doing better than picking a random bid.
+
+Some observations
+=================
+
+Experience with bid merge shows that many merges that actually have a very
+simple solution (because only one side changed) can only be solved efficiently
+when we start looking at file content in filemerge ... and it thus also
+requires all ancestors to be passed to filemerge. That is because Mercurial includes
+the history in filelog hashes. A file with changes that ends up not changing
+the content (could be change + backout or graft + merge or criss cross merges)
+still shows up as a changed file to manifestmerge. (The git data model has an
+advantage here when it uses hashes of content without history.) One way to
+handle that would be to refactor manifestmerge, mergestate/resolve and
+filemerge so they become more of the same thing.
+
+There are also cases where different conflicting chunks could benefit from using
+multiple ancestors in filemerge - but that will require merge tools with fancy
+support for using multiple ancestors in 3+-way merge. That is left as an
+exercise for another day. That seems to be a case where "recursive merge" has
+an advantage.
+
+The current manifest merge actions are very low-level, imperative, and not
+symmetrical. They not only describe how two manifests should be merged; they
+also describe a strategy for changing a context from a state where it is one of
+the parents to the state where it is the result of the merge with the other
+parent. I can imagine that manifestmerge could be simplified (and made more
+suitable for in memory merges) by separating the abstract merge actions from
+the actual file system operation actions. A more clever wcontext could perhaps
+also take care of some of the branchmerge special cases.
+
+We assume that the definition of Mercurial manifest merge will make sure that
+exactly the same files will be produced, no matter which ancestor is used. That
+assumption might be wrong in very rare cases, but that really is not a problem.
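
A toy sketch of the auction described above: consensus wins outright, a lone
safe "keep local"/"get from other" bid wins next, and otherwise an arbitrary
bid is picked, just as a single-ancestor merge would (the real action types and
data structures differ):

    def auction(bids_per_file):
        # bids_per_file maps filename -> one proposed action per ancestor
        chosen = {}
        for fname, bids in sorted(bids_per_file.items()):
            if len(set(bids)) == 1:
                chosen[fname] = bids[0]  # consensus
                continue
            safe = {b for b in bids if b in ('keep local', 'get from other')}
            if len(safe) == 1:
                chosen[fname] = safe.pop()
                continue
            chosen[fname] = bids[0]  # no clear winner: pick "randomly"
        return chosen

    bids = {'a.txt': ['merge', 'keep local'], 'b.txt': ['merge', 'merge']}
    assert auction(bids) == {'a.txt': 'keep local', 'b.txt': 'merge'}
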
--- a/mercurial/helptext/internals/requirements.txt Tue Jul 14 10:25:41 2020 +0200
+++ b/mercurial/helptext/internals/requirements.txt Mon Jul 20 21:56:27 2020 +0530
@@ -142,3 +142,16 @@
August 2019). The requirement will only be present on repositories
that have opted in to this format (by having
``format.bookmarks-in-store=true`` set when they were created).
+
+persistent-nodemap
+==================
+
+The `nodemap` index (mapping nodeid to local revision number) is persisted on
+disk. This provides a speed benefit (if the associated native code is used). The
+persistent nodemap is only used for two revlogs: the changelog and the
+manifestlog.
+
+Support for this requirement was added in Mercurial 5.5 (released August 2020).
+Note that as of 5.5, only installations compiled with the Rust extension will
+benefit from a speedup. The other installations will do the necessary work to
+keep the index up to date, but will suffer a slowdown.
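
New repositories opt in at creation time through configuration, as wired up in
the localrepo.py changes later in this patch; an illustrative fragment::

    [format]
    use-persistent-nodemap = yes
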
--- a/mercurial/helptext/internals/revlogs.txt Tue Jul 14 10:25:41 2020 +0200
+++ b/mercurial/helptext/internals/revlogs.txt Mon Jul 20 21:56:27 2020 +0530
@@ -161,7 +161,7 @@
(In development. Format not finalized or stable.)
-Version 2 is identical to version 2 with the following differences.
+Version 2 is identical to version 1 with the following differences.
There is no dedicated *generaldelta* revlog format flag. Instead,
the feature is implied enabled by default.
--- a/mercurial/hg.py Tue Jul 14 10:25:41 2020 +0200
+++ b/mercurial/hg.py Mon Jul 20 21:56:27 2020 +0530
@@ -33,6 +33,7 @@
logcmdutil,
logexchange,
merge as mergemod,
+ mergestate as mergestatemod,
narrowspec,
node,
phases,
@@ -355,7 +356,7 @@
repo.requirements.discard(b'shared')
repo.requirements.discard(b'relshared')
- repo._writerequirements()
+ scmutil.writereporequirements(repo)
# Removing share changes some fundamental properties of the repo instance.
# So we instantiate a new repo object and operate on it rather than
@@ -1164,7 +1165,7 @@
def abortmerge(ui, repo):
- ms = mergemod.mergestate.read(repo)
+ ms = mergestatemod.mergestate.read(repo)
if ms.active():
# there were conflicts
node = ms.localctx.hex()
--- a/mercurial/hgweb/server.py Tue Jul 14 10:25:41 2020 +0200
+++ b/mercurial/hgweb/server.py Mon Jul 20 21:56:27 2020 +0530
@@ -313,7 +313,7 @@
try:
from .. import sslutil
- sslutil.modernssl
+ sslutil.wrapserversocket
except ImportError:
raise error.Abort(_(b"SSL support is unavailable"))
--- a/mercurial/hook.py Tue Jul 14 10:25:41 2020 +0200
+++ b/mercurial/hook.py Mon Jul 20 21:56:27 2020 +0530
@@ -158,6 +158,10 @@
env[b'HG_HOOKNAME'] = name
for k, v in pycompat.iteritems(args):
+ # transaction changes can accumulate MBs of data, so skip it
+ # for external hooks
+ if k == b'changes':
+ continue
if callable(v):
v = v()
if isinstance(v, (dict, list)):
--- a/mercurial/interfaces/repository.py Tue Jul 14 10:25:41 2020 +0200
+++ b/mercurial/interfaces/repository.py Mon Jul 20 21:56:27 2020 +0530
@@ -1395,6 +1395,9 @@
Raises ``error.LookupError`` if the node is not known.
"""
+ def update_caches(transaction):
+        """update whatever caches are relevant for the used storage."""
+
class ilocalrepositoryfilestorage(interfaceutil.Interface):
"""Local repository sub-interface providing access to tracked file storage.
--- a/mercurial/localrepo.py Tue Jul 14 10:25:41 2020 +0200
+++ b/mercurial/localrepo.py Mon Jul 20 21:56:27 2020 +0530
@@ -44,8 +44,9 @@
hook,
lock as lockmod,
match as matchmod,
- merge as mergemod,
+ mergestate as mergestatemod,
mergeutil,
+ metadata,
namespaces,
narrowspec,
obsolete,
@@ -411,13 +412,13 @@
def changegroup(self, nodes, source):
outgoing = discovery.outgoing(
- self._repo, missingroots=nodes, missingheads=self._repo.heads()
+ self._repo, missingroots=nodes, ancestorsof=self._repo.heads()
)
return changegroup.makechangegroup(self._repo, outgoing, b'01', source)
def changegroupsubset(self, bases, heads, source):
outgoing = discovery.outgoing(
- self._repo, missingroots=bases, missingheads=heads
+ self._repo, missingroots=bases, ancestorsof=heads
)
return changegroup.makechangegroup(self._repo, outgoing, b'01', source)
@@ -445,6 +446,9 @@
# copies related information in changeset's sidedata.
COPIESSDC_REQUIREMENT = b'exp-copies-sidedata-changeset'
+# The repository use persistent nodemap for the changelog and the manifest.
+NODEMAP_REQUIREMENT = b'persistent-nodemap'
+
# Functions receiving (ui, features) that extensions can register to impact
# the ability to load repositories with custom requirements. Only
# functions defined in loaded extensions are called.
@@ -505,6 +509,11 @@
except OSError as e:
if e.errno != errno.ENOENT:
raise
+ except ValueError as e:
+ # Can be raised on Python 3.8 when path is invalid.
+ raise error.Abort(
+ _(b'invalid path %s: %s') % (path, pycompat.bytestr(e))
+ )
raise error.RepoError(_(b'repository %s not found') % path)
@@ -933,10 +942,12 @@
if ui.configbool(b'experimental', b'rust.index'):
options[b'rust.index'] = True
- if ui.configbool(b'experimental', b'exp-persistent-nodemap'):
- options[b'exp-persistent-nodemap'] = True
- if ui.configbool(b'experimental', b'exp-persistent-nodemap.mmap'):
- options[b'exp-persistent-nodemap.mmap'] = True
+ if NODEMAP_REQUIREMENT in requirements:
+ options[b'persistent-nodemap'] = True
+ if ui.configbool(b'storage', b'revlog.nodemap.mmap'):
+ options[b'persistent-nodemap.mmap'] = True
+ epnm = ui.config(b'storage', b'revlog.nodemap.mode')
+ options[b'persistent-nodemap.mode'] = epnm
if ui.configbool(b'devel', b'persistent-nodemap'):
options[b'devel-force-nodemap'] = True
@@ -1021,6 +1032,7 @@
REVLOGV2_REQUIREMENT,
SIDEDATA_REQUIREMENT,
SPARSEREVLOG_REQUIREMENT,
+ NODEMAP_REQUIREMENT,
bookmarks.BOOKMARKS_IN_STORE_REQUIREMENT,
}
_basesupported = supportedformats | {
@@ -1223,8 +1235,9 @@
if path.startswith(b'cache/'):
msg = b'accessing cache with vfs instead of cachevfs: "%s"'
repo.ui.develwarn(msg % path, stacklevel=3, config=b"cache-vfs")
- if path.startswith(b'journal.') or path.startswith(b'undo.'):
- # journal is covered by 'lock'
+ # path prefixes covered by 'lock'
+ vfs_path_prefixes = (b'journal.', b'undo.', b'strip-backup/')
+ if any(path.startswith(prefix) for prefix in vfs_path_prefixes):
if repo._currentlock(repo._lockref) is None:
repo.ui.develwarn(
b'write with no lock: "%s"' % path,
@@ -1285,9 +1298,6 @@
caps.add(b'bundle2=' + urlreq.quote(capsblob))
return caps
- def _writerequirements(self):
- scmutil.writerequires(self.vfs, self.requirements)
-
# Don't cache auditor/nofsauditor, or you'll end up with reference cycle:
# self -> auditor -> self._checknested -> self
@@ -2239,6 +2249,7 @@
tr.hookargs[b'txnid'] = txnid
tr.hookargs[b'txnname'] = desc
+ tr.hookargs[b'changes'] = tr.changes
# note: writing the fncache only during finalize mean that the file is
# outdated when running hooks. As fncache is used for streaming clone,
# this is not expected to break anything that happen during the hooks.
@@ -2461,7 +2472,7 @@
ui.status(
_(b'working directory now based on revision %d\n') % parents
)
- mergemod.mergestate.clean(self, self[b'.'].node())
+ mergestatemod.mergestate.clean(self, self[b'.'].node())
# TODO: if we know which new heads may result from this rollback, pass
# them to destroy(), which will prevent the branchhead cache from being
@@ -2511,6 +2522,7 @@
unfi = self.unfiltered()
self.changelog.update_caches(transaction=tr)
+ self.manifestlog.update_caches(transaction=tr)
rbc = unfi.revbranchcache()
for r in unfi.changelog:
@@ -2771,6 +2783,22 @@
):
"""
commit an individual file as part of a larger transaction
+
+ input:
+
+ fctx: a file context with the content we are trying to commit
+ manifest1: manifest of changeset first parent
+ manifest2: manifest of changeset second parent
+ linkrev: revision number of the changeset being created
+    tr: current transaction
+    changelist: list of files being changed (modified in place)
+ individual: boolean, set to False to skip storing the copy data
+ (only used by the Google specific feature of using
+ changeset extra as copy source of truth).
+
+ output:
+
+ The resulting filenode
"""
fname = fctx.path()
@@ -2859,16 +2887,16 @@
fparent2 = nullid
elif not fparentancestors:
# TODO: this whole if-else might be simplified much more
- ms = mergemod.mergestate.read(self)
+ ms = mergestatemod.mergestate.read(self)
if (
fname in ms
- and ms[fname] == mergemod.MERGE_RECORD_MERGED_OTHER
+ and ms[fname] == mergestatemod.MERGE_RECORD_MERGED_OTHER
):
fparent1, fparent2 = fparent2, nullid
# is the file changed?
text = fctx.data()
- if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
+ if fparent2 != nullid or meta or flog.cmp(fparent1, text):
changelist.append(fname)
return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
# are just the flags changed during merge?
@@ -2960,18 +2988,13 @@
self, status, text, user, date, extra
)
- ms = mergemod.mergestate.read(self)
+ ms = mergestatemod.mergestate.read(self)
mergeutil.checkunresolved(ms)
# internal config: ui.allowemptycommit
- allowemptycommit = (
- wctx.branch() != wctx.p1().branch()
- or extra.get(b'close')
- or merge
- or cctx.files()
- or self.ui.configbool(b'ui', b'allowemptycommit')
- )
- if not allowemptycommit:
+ if cctx.isempty() and not self.ui.configbool(
+ b'ui', b'allowemptycommit'
+ ):
self.ui.debug(b'nothing to commit, clearing merge state\n')
ms.reset()
return None
@@ -3018,6 +3041,12 @@
self.ui.write(
_(b'note: commit message saved in %s\n') % msgfn
)
+ self.ui.write(
+ _(
+ b"note: use 'hg commit --logfile "
+ b".hg/last-message.txt --edit' to reuse it\n"
+ )
+ )
raise
def commithook(unused_success):
@@ -3131,51 +3160,8 @@
for f in drop:
del m[f]
if p2.rev() != nullrev:
-
- @util.cachefunc
- def mas():
- p1n = p1.node()
- p2n = p2.node()
- cahs = self.changelog.commonancestorsheads(p1n, p2n)
- if not cahs:
- cahs = [nullrev]
- return [self[r].manifest() for r in cahs]
-
- def deletionfromparent(f):
- # When a file is removed relative to p1 in a merge, this
- # function determines whether the absence is due to a
- # deletion from a parent, or whether the merge commit
- # itself deletes the file. We decide this by doing a
- # simplified three way merge of the manifest entry for
- # the file. There are two ways we decide the merge
- # itself didn't delete a file:
- # - neither parent (nor the merge) contain the file
- # - exactly one parent contains the file, and that
- # parent has the same filelog entry as the merge
- # ancestor (or all of them if there two). In other
- # words, that parent left the file unchanged while the
- # other one deleted it.
- # One way to think about this is that deleting a file is
- # similar to emptying it, so the list of changed files
- # should be similar either way. The computation
- # described above is not done directly in _filecommit
- # when creating the list of changed files, however
- # it does something very similar by comparing filelog
- # nodes.
- if f in m1:
- return f not in m2 and all(
- f in ma and ma.find(f) == m1.find(f)
- for ma in mas()
- )
- elif f in m2:
- return all(
- f in ma and ma.find(f) == m2.find(f)
- for ma in mas()
- )
- else:
- return True
-
- removed = [f for f in removed if not deletionfromparent(f)]
+ rf = metadata.get_removal_filter(ctx, (p1, p2, m1, m2))
+ removed = [f for f in removed if not rf(f)]
files = changed + removed
md = None
@@ -3653,6 +3639,9 @@
if ui.configbool(b'format', b'bookmarks-in-store'):
requirements.add(bookmarks.BOOKMARKS_IN_STORE_REQUIREMENT)
+ if ui.configbool(b'format', b'use-persistent-nodemap'):
+ requirements.add(NODEMAP_REQUIREMENT)
+
return requirements
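
A hedged sketch of the requirement-to-options wiring above: the on-disk
requirement, not the configuration, is what switches the revlog options on
(names simplified):

    def nodemap_options(requirements, mmap_config):
        options = {}
        if b'persistent-nodemap' in requirements:
            options[b'persistent-nodemap'] = True
            if mmap_config:
                options[b'persistent-nodemap.mmap'] = True
        return options

    opts = nodemap_options({b'revlogv1', b'persistent-nodemap'}, True)
    assert opts == {
        b'persistent-nodemap': True,
        b'persistent-nodemap.mmap': True,
    }
    assert nodemap_options({b'revlogv1'}, True) == {}
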
--- a/mercurial/logcmdutil.py Tue Jul 14 10:25:41 2020 +0200
+++ b/mercurial/logcmdutil.py Mon Jul 20 21:56:27 2020 +0530
@@ -72,8 +72,8 @@
ui,
repo,
diffopts,
- node1,
- node2,
+ ctx1,
+ ctx2,
match,
changes=None,
stat=False,
@@ -85,8 +85,6 @@
hunksfilterfn=None,
):
'''show diff or diffstat.'''
- ctx1 = repo[node1]
- ctx2 = repo[node2]
if root:
relroot = pathutil.canonpath(repo.root, repo.getcwd(), root)
else:
@@ -173,6 +171,7 @@
for chunk, label in chunks:
ui.write(chunk, label=label)
+ node2 = ctx2.node()
for subpath, sub in scmutil.itersubrepos(ctx1, ctx2):
tempnode2 = node2
try:
@@ -208,15 +207,12 @@
return None
def showdiff(self, ui, ctx, diffopts, graphwidth=0, stat=False):
- repo = ctx.repo()
- node = ctx.node()
- prev = ctx.p1().node()
diffordiffstat(
ui,
- repo,
+ ctx.repo(),
diffopts,
- prev,
- node,
+ ctx.p1(),
+ ctx,
match=self._makefilematcher(ctx),
stat=stat,
graphwidth=graphwidth,
--- a/mercurial/manifest.py Tue Jul 14 10:25:41 2020 +0200
+++ b/mercurial/manifest.py Mon Jul 20 21:56:27 2020 +0530
@@ -58,14 +58,16 @@
prev = l
f, n = l.split(b'\0')
nl = len(n)
- if 64 < nl:
- # modern hash, full width
- yield f, bin(n[:64]), n[64:]
- elif 40 < nl < 45:
- # legacy hash, always sha1
- yield f, bin(n[:40]), n[40:]
+ flags = n[-1:]
+ if flags in _manifestflags:
+ n = n[:-1]
+ nl -= 1
else:
- yield f, bin(n), b''
+ flags = b''
+ if nl not in (40, 64):
+ raise ValueError(b'Invalid manifest line')
+
+ yield f, bin(n), flags
def _text(it):
@@ -121,8 +123,20 @@
self.pos += 1
return data
zeropos = data.find(b'\x00', pos)
- hashval = unhexlify(data, self.lm.extrainfo[self.pos], zeropos + 1, 40)
- flags = self.lm._getflags(data, self.pos, zeropos)
+ nlpos = data.find(b'\n', pos)
+ if zeropos == -1 or nlpos == -1 or nlpos < zeropos:
+ raise error.StorageError(b'Invalid manifest line')
+ flags = data[nlpos - 1 : nlpos]
+ if flags in _manifestflags:
+ hlen = nlpos - zeropos - 2
+ else:
+ hlen = nlpos - zeropos - 1
+ flags = b''
+ if hlen not in (40, 64):
+ raise error.StorageError(b'Invalid manifest line')
+ hashval = unhexlify(
+ data, self.lm.extrainfo[self.pos], zeropos + 1, hlen
+ )
self.pos += 1
return (data[pos:zeropos], hashval, flags)
@@ -140,6 +154,9 @@
return (a > b) - (a < b)
+_manifestflags = {b'', b'l', b't', b'x'}
+
+
class _lazymanifest(object):
"""A pure python manifest backed by a byte string. It is supplimented with
internal lists as it is modified, until it is compacted back to a pure byte
@@ -251,15 +268,6 @@
def __contains__(self, key):
return self.bsearch(key) != -1
- def _getflags(self, data, needle, pos):
- start = pos + 41
- end = data.find(b"\n", start)
- if end == -1:
- end = len(data) - 1
- if start == end:
- return b''
- return self.data[start:end]
-
def __getitem__(self, key):
if not isinstance(key, bytes):
raise TypeError(b"getitem: manifest keys must be a bytes.")
@@ -273,13 +281,17 @@
nlpos = data.find(b'\n', zeropos)
assert 0 <= needle <= len(self.positions)
assert len(self.extrainfo) == len(self.positions)
+ if zeropos == -1 or nlpos == -1 or nlpos < zeropos:
+ raise error.StorageError(b'Invalid manifest line')
hlen = nlpos - zeropos - 1
- # Hashes sometimes have an extra byte tucked on the end, so
- # detect that.
- if hlen % 2:
+ flags = data[nlpos - 1 : nlpos]
+ if flags in _manifestflags:
hlen -= 1
+ else:
+ flags = b''
+ if hlen not in (40, 64):
+ raise error.StorageError(b'Invalid manifest line')
hashval = unhexlify(data, self.extrainfo[needle], zeropos + 1, hlen)
- flags = self._getflags(data, needle, zeropos)
return (hashval, flags)
def __delitem__(self, key):
@@ -408,9 +420,7 @@
def _pack(self, d):
n = d[1]
- if len(n) == 21 or len(n) == 33:
- n = n[:-1]
- assert len(n) == 20 or len(n) == 32
+ assert len(n) in (20, 32)
return d[0] + b'\x00' + hex(n) + d[2] + b'\n'
def text(self):
@@ -609,6 +619,8 @@
return self._lm.diff(m2._lm, clean)
def setflag(self, key, flag):
+ if flag not in _manifestflags:
+ raise TypeError(b"Invalid manifest flag set.")
self._lm[key] = self[key], flag
def get(self, key, default=None):
@@ -1049,11 +1061,10 @@
self._dirs[dir].__setitem__(subpath, n)
else:
# manifest nodes are either 20 bytes or 32 bytes,
- # depending on the hash in use. An extra byte is
- # occasionally used by hg, but won't ever be
- # persisted. Trim to 21 or 33 bytes as appropriate.
- trim = 21 if len(n) < 25 else 33
- self._files[f] = n[:trim] # to match manifestdict's behavior
+ # depending on the hash in use. Assert this as historically
+ # sometimes extra bytes were added.
+ assert len(n) in (20, 32)
+ self._files[f] = n
self._dirty = True
def _load(self):
@@ -1066,6 +1077,8 @@
def setflag(self, f, flags):
"""Set the flags (symlink, executable) for path f."""
+ if flags not in _manifestflags:
+ raise TypeError(b"Invalid manifest flag set.")
self._load()
dir, subpath = _splittopdir(f)
if dir:
@@ -1599,6 +1612,7 @@
checkambig=not bool(tree),
mmaplargeindex=True,
upperboundcomp=MAXCOMPRESSION,
+ persistentnodemap=opener.options.get(b'persistent-nodemap', False),
)
self.index = self._revlog.index
@@ -1664,6 +1678,22 @@
readtree=None,
match=None,
):
+        """add some manifest entries to the manifest log
+
+ input:
+
+ m: the manifest dict we want to store
+ transaction: the open transaction
+ p1: manifest-node of p1
+ p2: manifest-node of p2
+        added: files added/changed compared to the parent
+        removed: files removed compared to the parent
+
+ tree manifest input:
+
+ readtree: a function to read a subtree
+ match: a filematcher for the subpart of the tree manifest
+ """
try:
if p1 not in self.fulltextcache:
raise FastdeltaUnavailable()
@@ -1959,6 +1989,9 @@
def rev(self, node):
return self._rootstore.rev(node)
+ def update_caches(self, transaction):
+ return self._rootstore._revlog.update_caches(transaction=transaction)
+
@interfaceutil.implementer(repository.imanifestrevisionwritable)
class memmanifestctx(object):
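
A standalone sketch of the stricter line format enforced above: the final byte
is a flag only when it is a known flag character, and the remaining hex hash
must be exactly 40 or 64 characters (flag characters are never hex digits, so
there is no ambiguity):

    from binascii import unhexlify

    MANIFEST_FLAGS = {b'l', b't', b'x'}

    def parse_manifest_line(line):
        # line looks like b'path\x00<hex-node>[flag]'
        path, rest = line.split(b'\x00')
        flag = rest[-1:]
        if flag in MANIFEST_FLAGS:
            rest = rest[:-1]
        else:
            flag = b''
        if len(rest) not in (40, 64):
            raise ValueError('invalid manifest line')
        return path, unhexlify(rest), flag

    path, node, flag = parse_manifest_line(b'foo.py\x00' + b'a' * 40 + b'x')
    assert (path, flag, len(node)) == (b'foo.py', b'x', 20)
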
--- a/mercurial/mdiff.py Tue Jul 14 10:25:41 2020 +0200
+++ b/mercurial/mdiff.py Mon Jul 20 21:56:27 2020 +0530
@@ -17,6 +17,7 @@
setattr,
)
from . import (
+ diffhelper,
encoding,
error,
policy,
@@ -25,8 +26,6 @@
)
from .utils import dateutil
-_missing_newline_marker = b"\\ No newline at end of file\n"
-
bdiff = policy.importmod('bdiff')
mpatch = policy.importmod('mpatch')
@@ -309,7 +308,7 @@
hunklines = [b"@@ -0,0 +1,%d @@\n" % size] + [b"+" + e for e in b]
if without_newline:
hunklines[-1] += b'\n'
- hunklines.append(_missing_newline_marker)
+ hunklines.append(diffhelper.MISSING_NEWLINE_MARKER)
hunks = ((hunkrange, hunklines),)
elif not b:
without_newline = not a.endswith(b'\n')
@@ -325,7 +324,7 @@
hunklines = [b"@@ -1,%d +0,0 @@\n" % size] + [b"-" + e for e in a]
if without_newline:
hunklines[-1] += b'\n'
- hunklines.append(_missing_newline_marker)
+ hunklines.append(diffhelper.MISSING_NEWLINE_MARKER)
hunks = ((hunkrange, hunklines),)
else:
hunks = _unidiff(a, b, opts=opts)
@@ -418,13 +417,13 @@
if hunklines[i].startswith(b' '):
skip = True
hunklines[i] += b'\n'
- hunklines.insert(i + 1, _missing_newline_marker)
+ hunklines.insert(i + 1, diffhelper.MISSING_NEWLINE_MARKER)
break
if not skip and not t2.endswith(b'\n') and bstart + blen == len(l2) + 1:
for i in pycompat.xrange(len(hunklines) - 1, -1, -1):
if hunklines[i].startswith(b'+'):
hunklines[i] += b'\n'
- hunklines.insert(i + 1, _missing_newline_marker)
+ hunklines.insert(i + 1, diffhelper.MISSING_NEWLINE_MARKER)
break
yield hunkrange, hunklines
--- a/mercurial/merge.py Tue Jul 14 10:25:41 2020 +0200
+++ b/mercurial/merge.py Mon Jul 20 21:56:27 2020 +0530
@@ -8,21 +8,16 @@
from __future__ import absolute_import
import errno
-import shutil
import stat
import struct
from .i18n import _
from .node import (
addednodeid,
- bin,
- hex,
modifiednodeid,
- nullhex,
nullid,
nullrev,
)
-from .pycompat import delattr
from .thirdparty import attr
from . import (
copies,
@@ -30,6 +25,7 @@
error,
filemerge,
match as matchmod,
+ mergestate as mergestatemod,
obsutil,
pathutil,
pycompat,
@@ -38,741 +34,11 @@
util,
worker,
)
-from .utils import hashutil
_pack = struct.pack
_unpack = struct.unpack
-def _droponode(data):
- # used for compatibility for v1
- bits = data.split(b'\0')
- bits = bits[:-2] + bits[-1:]
- return b'\0'.join(bits)
-
-
-# Merge state record types. See ``mergestate`` docs for more.
-RECORD_LOCAL = b'L'
-RECORD_OTHER = b'O'
-RECORD_MERGED = b'F'
-RECORD_CHANGEDELETE_CONFLICT = b'C'
-RECORD_MERGE_DRIVER_MERGE = b'D'
-RECORD_PATH_CONFLICT = b'P'
-RECORD_MERGE_DRIVER_STATE = b'm'
-RECORD_FILE_VALUES = b'f'
-RECORD_LABELS = b'l'
-RECORD_OVERRIDE = b't'
-RECORD_UNSUPPORTED_MANDATORY = b'X'
-RECORD_UNSUPPORTED_ADVISORY = b'x'
-RECORD_RESOLVED_OTHER = b'R'
-
-MERGE_DRIVER_STATE_UNMARKED = b'u'
-MERGE_DRIVER_STATE_MARKED = b'm'
-MERGE_DRIVER_STATE_SUCCESS = b's'
-
-MERGE_RECORD_UNRESOLVED = b'u'
-MERGE_RECORD_RESOLVED = b'r'
-MERGE_RECORD_UNRESOLVED_PATH = b'pu'
-MERGE_RECORD_RESOLVED_PATH = b'pr'
-MERGE_RECORD_DRIVER_RESOLVED = b'd'
-# represents that the file was automatically merged in favor
-# of other version. This info is used on commit.
-MERGE_RECORD_MERGED_OTHER = b'o'
-
-ACTION_FORGET = b'f'
-ACTION_REMOVE = b'r'
-ACTION_ADD = b'a'
-ACTION_GET = b'g'
-ACTION_PATH_CONFLICT = b'p'
-ACTION_PATH_CONFLICT_RESOLVE = b'pr'
-ACTION_ADD_MODIFIED = b'am'
-ACTION_CREATED = b'c'
-ACTION_DELETED_CHANGED = b'dc'
-ACTION_CHANGED_DELETED = b'cd'
-ACTION_MERGE = b'm'
-ACTION_LOCAL_DIR_RENAME_GET = b'dg'
-ACTION_DIR_RENAME_MOVE_LOCAL = b'dm'
-ACTION_KEEP = b'k'
-ACTION_EXEC = b'e'
-ACTION_CREATED_MERGE = b'cm'
-# GET the other/remote side and store this info in mergestate
-ACTION_GET_OTHER_AND_STORE = b'gs'
-
-
-class mergestate(object):
- '''track 3-way merge state of individual files
-
- The merge state is stored on disk when needed. Two files are used: one with
- an old format (version 1), and one with a new format (version 2). Version 2
- stores a superset of the data in version 1, including new kinds of records
- in the future. For more about the new format, see the documentation for
- `_readrecordsv2`.
-
- Each record can contain arbitrary content, and has an associated type. This
- `type` should be a letter. If `type` is uppercase, the record is mandatory:
- versions of Mercurial that don't support it should abort. If `type` is
- lowercase, the record can be safely ignored.
-
- Currently known records:
-
- L: the node of the "local" part of the merge (hexified version)
- O: the node of the "other" part of the merge (hexified version)
- F: a file to be merged entry
- C: a change/delete or delete/change conflict
- D: a file that the external merge driver will merge internally
- (experimental)
- P: a path conflict (file vs directory)
- m: the external merge driver defined for this merge plus its run state
- (experimental)
- f: a (filename, dictionary) tuple of optional values for a given file
- X: unsupported mandatory record type (used in tests)
- x: unsupported advisory record type (used in tests)
- l: the labels for the parts of the merge.
-
- Merge driver run states (experimental):
- u: driver-resolved files unmarked -- needs to be run next time we're about
- to resolve or commit
- m: driver-resolved files marked -- only needs to be run before commit
- s: success/skipped -- does not need to be run any more
-
- Merge record states (stored in self._state, indexed by filename):
- u: unresolved conflict
- r: resolved conflict
- pu: unresolved path conflict (file conflicts with directory)
- pr: resolved path conflict
- d: driver-resolved conflict
-
- The resolve command transitions between 'u' and 'r' for conflicts and
- 'pu' and 'pr' for path conflicts.
- '''
-
- statepathv1 = b'merge/state'
- statepathv2 = b'merge/state2'
-
- @staticmethod
- def clean(repo, node=None, other=None, labels=None):
- """Initialize a brand new merge state, removing any existing state on
- disk."""
- ms = mergestate(repo)
- ms.reset(node, other, labels)
- return ms
-
- @staticmethod
- def read(repo):
- """Initialize the merge state, reading it from disk."""
- ms = mergestate(repo)
- ms._read()
- return ms
-
- def __init__(self, repo):
- """Initialize the merge state.
-
- Do not use this directly! Instead call read() or clean()."""
- self._repo = repo
- self._dirty = False
- self._labels = None
-
- def reset(self, node=None, other=None, labels=None):
- self._state = {}
- self._stateextras = {}
- self._local = None
- self._other = None
- self._labels = labels
- for var in ('localctx', 'otherctx'):
- if var in vars(self):
- delattr(self, var)
- if node:
- self._local = node
- self._other = other
- self._readmergedriver = None
- if self.mergedriver:
- self._mdstate = MERGE_DRIVER_STATE_SUCCESS
- else:
- self._mdstate = MERGE_DRIVER_STATE_UNMARKED
- shutil.rmtree(self._repo.vfs.join(b'merge'), True)
- self._results = {}
- self._dirty = False
-
- def _read(self):
- """Analyse each record content to restore a serialized state from disk
-
- This function process "record" entry produced by the de-serialization
- of on disk file.
- """
- self._state = {}
- self._stateextras = {}
- self._local = None
- self._other = None
- for var in ('localctx', 'otherctx'):
- if var in vars(self):
- delattr(self, var)
- self._readmergedriver = None
- self._mdstate = MERGE_DRIVER_STATE_SUCCESS
- unsupported = set()
- records = self._readrecords()
- for rtype, record in records:
- if rtype == RECORD_LOCAL:
- self._local = bin(record)
- elif rtype == RECORD_OTHER:
- self._other = bin(record)
- elif rtype == RECORD_MERGE_DRIVER_STATE:
- bits = record.split(b'\0', 1)
- mdstate = bits[1]
- if len(mdstate) != 1 or mdstate not in (
- MERGE_DRIVER_STATE_UNMARKED,
- MERGE_DRIVER_STATE_MARKED,
- MERGE_DRIVER_STATE_SUCCESS,
- ):
- # the merge driver should be idempotent, so just rerun it
- mdstate = MERGE_DRIVER_STATE_UNMARKED
-
- self._readmergedriver = bits[0]
- self._mdstate = mdstate
- elif rtype in (
- RECORD_MERGED,
- RECORD_CHANGEDELETE_CONFLICT,
- RECORD_PATH_CONFLICT,
- RECORD_MERGE_DRIVER_MERGE,
- RECORD_RESOLVED_OTHER,
- ):
- bits = record.split(b'\0')
- self._state[bits[0]] = bits[1:]
- elif rtype == RECORD_FILE_VALUES:
- filename, rawextras = record.split(b'\0', 1)
- extraparts = rawextras.split(b'\0')
- extras = {}
- i = 0
- while i < len(extraparts):
- extras[extraparts[i]] = extraparts[i + 1]
- i += 2
-
- self._stateextras[filename] = extras
- elif rtype == RECORD_LABELS:
- labels = record.split(b'\0', 2)
- self._labels = [l for l in labels if len(l) > 0]
- elif not rtype.islower():
- unsupported.add(rtype)
- self._results = {}
- self._dirty = False
-
- if unsupported:
- raise error.UnsupportedMergeRecords(unsupported)
-
- def _readrecords(self):
- """Read merge state from disk and return a list of record (TYPE, data)
-
- We read data from both v1 and v2 files and decide which one to use.
-
- V1 has been used by version prior to 2.9.1 and contains less data than
- v2. We read both versions and check if no data in v2 contradicts
- v1. If there is not contradiction we can safely assume that both v1
- and v2 were written at the same time and use the extract data in v2. If
- there is contradiction we ignore v2 content as we assume an old version
- of Mercurial has overwritten the mergestate file and left an old v2
- file around.
-
- returns list of record [(TYPE, data), ...]"""
- v1records = self._readrecordsv1()
- v2records = self._readrecordsv2()
- if self._v1v2match(v1records, v2records):
- return v2records
- else:
- # v1 file is newer than v2 file, use it
- # we have to infer the "other" changeset of the merge
- # we cannot do better than that with v1 of the format
- mctx = self._repo[None].parents()[-1]
- v1records.append((RECORD_OTHER, mctx.hex()))
- # add placeholder "other" file node information;
- # nobody is using it yet so we do not need to fetch the data.
- # if mctx was wrong, `mctx[bits[-2]]` may fail.
- for idx, r in enumerate(v1records):
- if r[0] == RECORD_MERGED:
- bits = r[1].split(b'\0')
- bits.insert(-2, b'')
- v1records[idx] = (r[0], b'\0'.join(bits))
- return v1records
-
- def _v1v2match(self, v1records, v2records):
- oldv2 = set() # old format version of v2 record
- for rec in v2records:
- if rec[0] == RECORD_LOCAL:
- oldv2.add(rec)
- elif rec[0] == RECORD_MERGED:
- # drop the onode data (not contained in v1)
- oldv2.add((RECORD_MERGED, _droponode(rec[1])))
- for rec in v1records:
- if rec not in oldv2:
- return False
- else:
- return True
-
- def _readrecordsv1(self):
- """read on disk merge state for version 1 file
-
- returns list of record [(TYPE, data), ...]
-
- Note: the "F" data from this file are one entry short
- (no "other file node" entry)
- """
- records = []
- try:
- f = self._repo.vfs(self.statepathv1)
- for i, l in enumerate(f):
- if i == 0:
- records.append((RECORD_LOCAL, l[:-1]))
- else:
- records.append((RECORD_MERGED, l[:-1]))
- f.close()
- except IOError as err:
- if err.errno != errno.ENOENT:
- raise
- return records
-
- def _readrecordsv2(self):
- """read on disk merge state for version 2 file
-
- This format is a list of arbitrary records of the form:
-
- [type][length][content]
-
- `type` is a single character, `length` is a 4 byte integer, and
- `content` is an arbitrary byte sequence of length `length`.
-
- Mercurial versions prior to 3.7 have a bug where if there are
- unsupported mandatory merge records, attempting to clear out the merge
- state with hg update --clean or similar aborts. The 't' record type
- works around that by writing out what those versions treat as an
- advisory record, but later versions interpret as special: the first
- character is the 'real' record type and everything onwards is the data.
-
- Returns a list of records [(TYPE, data), ...]."""
- records = []
- try:
- f = self._repo.vfs(self.statepathv2)
- data = f.read()
- off = 0
- end = len(data)
- while off < end:
- rtype = data[off : off + 1]
- off += 1
- length = _unpack(b'>I', data[off : (off + 4)])[0]
- off += 4
- record = data[off : (off + length)]
- off += length
- if rtype == RECORD_OVERRIDE:
- rtype, record = record[0:1], record[1:]
- records.append((rtype, record))
- f.close()
- except IOError as err:
- if err.errno != errno.ENOENT:
- raise
- return records
-
- @util.propertycache
- def mergedriver(self):
- # protect against the following:
- # - A configures a malicious merge driver in their hgrc, then
- # pauses the merge
- # - A edits their hgrc to remove references to the merge driver
- # - A gives a copy of their entire repo, including .hg, to B
- # - B inspects .hgrc and finds it to be clean
- # - B then continues the merge and the malicious merge driver
- # gets invoked
- configmergedriver = self._repo.ui.config(
- b'experimental', b'mergedriver'
- )
- if (
- self._readmergedriver is not None
- and self._readmergedriver != configmergedriver
- ):
- raise error.ConfigError(
- _(b"merge driver changed since merge started"),
- hint=_(b"revert merge driver change or abort merge"),
- )
-
- return configmergedriver
-
- @util.propertycache
- def local(self):
- if self._local is None:
- msg = b"local accessed but self._local isn't set"
- raise error.ProgrammingError(msg)
- return self._local
-
- @util.propertycache
- def localctx(self):
- return self._repo[self.local]
-
- @util.propertycache
- def other(self):
- if self._other is None:
- msg = b"other accessed but self._other isn't set"
- raise error.ProgrammingError(msg)
- return self._other
-
- @util.propertycache
- def otherctx(self):
- return self._repo[self.other]
-
- def active(self):
- """Whether mergestate is active.
-
- Returns True if there appears to be mergestate. This is a rough proxy
- for "is a merge in progress."
- """
- return bool(self._local) or bool(self._state)
-
- def commit(self):
- """Write current state on disk (if necessary)"""
- if self._dirty:
- records = self._makerecords()
- self._writerecords(records)
- self._dirty = False
-
- def _makerecords(self):
- records = []
- records.append((RECORD_LOCAL, hex(self._local)))
- records.append((RECORD_OTHER, hex(self._other)))
- if self.mergedriver:
- records.append(
- (
- RECORD_MERGE_DRIVER_STATE,
- b'\0'.join([self.mergedriver, self._mdstate]),
- )
- )
- # Write out state items. In all cases, the value of the state map entry
- # is written as the contents of the record. The record type depends on
- # the type of state that is stored, and capital-letter records are used
- # to prevent older versions of Mercurial that do not support the feature
- # from loading them.
- for filename, v in pycompat.iteritems(self._state):
- if v[0] == MERGE_RECORD_DRIVER_RESOLVED:
- # Driver-resolved merge. These are stored in 'D' records.
- records.append(
- (RECORD_MERGE_DRIVER_MERGE, b'\0'.join([filename] + v))
- )
- elif v[0] in (
- MERGE_RECORD_UNRESOLVED_PATH,
- MERGE_RECORD_RESOLVED_PATH,
- ):
- # Path conflicts. These are stored in 'P' records. The current
- # resolution state ('pu' or 'pr') is stored within the record.
- records.append(
- (RECORD_PATH_CONFLICT, b'\0'.join([filename] + v))
- )
- elif v[0] == MERGE_RECORD_MERGED_OTHER:
- records.append(
- (RECORD_RESOLVED_OTHER, b'\0'.join([filename] + v))
- )
- elif v[1] == nullhex or v[6] == nullhex:
- # Change/Delete or Delete/Change conflicts. These are stored in
- # 'C' records. v[1] is the local file, and is nullhex when the
- # file is deleted locally ('dc'). v[6] is the remote file, and
- # is nullhex when the file is deleted remotely ('cd').
- records.append(
- (RECORD_CHANGEDELETE_CONFLICT, b'\0'.join([filename] + v))
- )
- else:
- # Normal files. These are stored in 'F' records.
- records.append((RECORD_MERGED, b'\0'.join([filename] + v)))
- for filename, extras in sorted(pycompat.iteritems(self._stateextras)):
- rawextras = b'\0'.join(
- b'%s\0%s' % (k, v) for k, v in pycompat.iteritems(extras)
- )
- records.append(
- (RECORD_FILE_VALUES, b'%s\0%s' % (filename, rawextras))
- )
- if self._labels is not None:
- labels = b'\0'.join(self._labels)
- records.append((RECORD_LABELS, labels))
- return records
-
- def _writerecords(self, records):
- """Write current state on disk (both v1 and v2)"""
- self._writerecordsv1(records)
- self._writerecordsv2(records)
-
- def _writerecordsv1(self, records):
- """Write current state on disk in a version 1 file"""
- f = self._repo.vfs(self.statepathv1, b'wb')
- irecords = iter(records)
- lrecords = next(irecords)
- assert lrecords[0] == RECORD_LOCAL
- f.write(hex(self._local) + b'\n')
- for rtype, data in irecords:
- if rtype == RECORD_MERGED:
- f.write(b'%s\n' % _droponode(data))
- f.close()
-
- def _writerecordsv2(self, records):
- """Write current state on disk in a version 2 file
-
- See the docstring for _readrecordsv2 for why we use 't'."""
- # these are the records that all version 2 clients can read
- allowlist = (RECORD_LOCAL, RECORD_OTHER, RECORD_MERGED)
- f = self._repo.vfs(self.statepathv2, b'wb')
- for key, data in records:
- assert len(key) == 1
- if key not in allowlist:
- key, data = RECORD_OVERRIDE, b'%s%s' % (key, data)
- format = b'>sI%is' % len(data)
- f.write(_pack(format, key, len(data), data))
- f.close()
-
- @staticmethod
- def getlocalkey(path):
- """hash the path of a local file context for storage in the .hg/merge
- directory."""
-
- return hex(hashutil.sha1(path).digest())
-
- def add(self, fcl, fco, fca, fd):
- """add a new (potentially?) conflicting file the merge state
- fcl: file context for local,
- fco: file context for remote,
- fca: file context for ancestors,
- fd: file path of the resulting merge.
-
- note: also write the local version to the `.hg/merge` directory.
- """
- if fcl.isabsent():
- localkey = nullhex
- else:
- localkey = mergestate.getlocalkey(fcl.path())
- self._repo.vfs.write(b'merge/' + localkey, fcl.data())
- self._state[fd] = [
- MERGE_RECORD_UNRESOLVED,
- localkey,
- fcl.path(),
- fca.path(),
- hex(fca.filenode()),
- fco.path(),
- hex(fco.filenode()),
- fcl.flags(),
- ]
- self._stateextras[fd] = {b'ancestorlinknode': hex(fca.node())}
- self._dirty = True
-
- def addpath(self, path, frename, forigin):
- """add a new conflicting path to the merge state
- path: the path that conflicts
- frename: the filename the conflicting file was renamed to
- forigin: origin of the file ('l' or 'r' for local/remote)
- """
- self._state[path] = [MERGE_RECORD_UNRESOLVED_PATH, frename, forigin]
- self._dirty = True
-
- def addmergedother(self, path):
- self._state[path] = [MERGE_RECORD_MERGED_OTHER, nullhex, nullhex]
- self._dirty = True
-
- def __contains__(self, dfile):
- return dfile in self._state
-
- def __getitem__(self, dfile):
- return self._state[dfile][0]
-
- def __iter__(self):
- return iter(sorted(self._state))
-
- def files(self):
- return self._state.keys()
-
- def mark(self, dfile, state):
- self._state[dfile][0] = state
- self._dirty = True
-
- def mdstate(self):
- return self._mdstate
-
- def unresolved(self):
- """Obtain the paths of unresolved files."""
-
- for f, entry in pycompat.iteritems(self._state):
- if entry[0] in (
- MERGE_RECORD_UNRESOLVED,
- MERGE_RECORD_UNRESOLVED_PATH,
- ):
- yield f
-
- def driverresolved(self):
- """Obtain the paths of driver-resolved files."""
-
- for f, entry in self._state.items():
- if entry[0] == MERGE_RECORD_DRIVER_RESOLVED:
- yield f
-
- def extras(self, filename):
- return self._stateextras.setdefault(filename, {})
-
- def _resolve(self, preresolve, dfile, wctx):
- """rerun merge process for file path `dfile`"""
- if self[dfile] in (MERGE_RECORD_RESOLVED, MERGE_RECORD_DRIVER_RESOLVED):
- return True, 0
- if self._state[dfile][0] == MERGE_RECORD_MERGED_OTHER:
- return True, 0
- stateentry = self._state[dfile]
- state, localkey, lfile, afile, anode, ofile, onode, flags = stateentry
- octx = self._repo[self._other]
- extras = self.extras(dfile)
- anccommitnode = extras.get(b'ancestorlinknode')
- if anccommitnode:
- actx = self._repo[anccommitnode]
- else:
- actx = None
- fcd = self._filectxorabsent(localkey, wctx, dfile)
- fco = self._filectxorabsent(onode, octx, ofile)
- # TODO: move this to filectxorabsent
- fca = self._repo.filectx(afile, fileid=anode, changectx=actx)
- # "premerge" x flags
- flo = fco.flags()
- fla = fca.flags()
- if b'x' in flags + flo + fla and b'l' not in flags + flo + fla:
- if fca.node() == nullid and flags != flo:
- if preresolve:
- self._repo.ui.warn(
- _(
- b'warning: cannot merge flags for %s '
- b'without common ancestor - keeping local flags\n'
- )
- % afile
- )
- elif flags == fla:
- flags = flo
- if preresolve:
- # restore local
- if localkey != nullhex:
- f = self._repo.vfs(b'merge/' + localkey)
- wctx[dfile].write(f.read(), flags)
- f.close()
- else:
- wctx[dfile].remove(ignoremissing=True)
- complete, r, deleted = filemerge.premerge(
- self._repo,
- wctx,
- self._local,
- lfile,
- fcd,
- fco,
- fca,
- labels=self._labels,
- )
- else:
- complete, r, deleted = filemerge.filemerge(
- self._repo,
- wctx,
- self._local,
- lfile,
- fcd,
- fco,
- fca,
- labels=self._labels,
- )
- if r is None:
- # no real conflict
- del self._state[dfile]
- self._stateextras.pop(dfile, None)
- self._dirty = True
- elif not r:
- self.mark(dfile, MERGE_RECORD_RESOLVED)
-
- if complete:
- action = None
- if deleted:
- if fcd.isabsent():
- # dc: local picked. Need to drop if present, which may
- # happen on re-resolves.
- action = ACTION_FORGET
- else:
- # cd: remote picked (or otherwise deleted)
- action = ACTION_REMOVE
- else:
- if fcd.isabsent(): # dc: remote picked
- action = ACTION_GET
- elif fco.isabsent(): # cd: local picked
- if dfile in self.localctx:
- action = ACTION_ADD_MODIFIED
- else:
- action = ACTION_ADD
- # else: regular merges (no action necessary)
- self._results[dfile] = r, action
-
- return complete, r
-
- def _filectxorabsent(self, hexnode, ctx, f):
- if hexnode == nullhex:
- return filemerge.absentfilectx(ctx, f)
- else:
- return ctx[f]
-
- def preresolve(self, dfile, wctx):
- """run premerge process for dfile
-
- Returns whether the merge is complete, and the exit code."""
- return self._resolve(True, dfile, wctx)
-
- def resolve(self, dfile, wctx):
- """run merge process (assuming premerge was run) for dfile
-
- Returns the exit code of the merge."""
- return self._resolve(False, dfile, wctx)[1]
-
- def counts(self):
- """return counts for updated, merged and removed files in this
- session"""
- updated, merged, removed = 0, 0, 0
- for r, action in pycompat.itervalues(self._results):
- if r is None:
- updated += 1
- elif r == 0:
- if action == ACTION_REMOVE:
- removed += 1
- else:
- merged += 1
- return updated, merged, removed
-
- def unresolvedcount(self):
- """get unresolved count for this merge (persistent)"""
- return len(list(self.unresolved()))
-
- def actions(self):
- """return lists of actions to perform on the dirstate"""
- actions = {
- ACTION_REMOVE: [],
- ACTION_FORGET: [],
- ACTION_ADD: [],
- ACTION_ADD_MODIFIED: [],
- ACTION_GET: [],
- }
- for f, (r, action) in pycompat.iteritems(self._results):
- if action is not None:
- actions[action].append((f, None, b"merge result"))
- return actions
-
- def recordactions(self):
- """record remove/add/get actions in the dirstate"""
- branchmerge = self._repo.dirstate.p2() != nullid
- recordupdates(self._repo, self.actions(), branchmerge, None)
-
- def queueremove(self, f):
- """queues a file to be removed from the dirstate
-
- Meant for use by custom merge drivers."""
- self._results[f] = 0, ACTION_REMOVE
-
- def queueadd(self, f):
- """queues a file to be added to the dirstate
-
- Meant for use by custom merge drivers."""
- self._results[f] = 0, ACTION_ADD
-
- def queueget(self, f):
- """queues a file to be marked modified in the dirstate
-
- Meant for use by custom merge drivers."""
- self._results[f] = 0, ACTION_GET
-
-
def _getcheckunknownconfig(repo, section, name):
config = repo.ui.config(section, name)
valid = [b'abort', b'ignore', b'warn']
@@ -885,14 +151,17 @@
checkunknowndirs = _unknowndirschecker()
for f, (m, args, msg) in pycompat.iteritems(actions):
- if m in (ACTION_CREATED, ACTION_DELETED_CHANGED):
+ if m in (
+ mergestatemod.ACTION_CREATED,
+ mergestatemod.ACTION_DELETED_CHANGED,
+ ):
if _checkunknownfile(repo, wctx, mctx, f):
fileconflicts.add(f)
elif pathconfig and f not in wctx:
path = checkunknowndirs(repo, wctx, f)
if path is not None:
pathconflicts.add(path)
- elif m == ACTION_LOCAL_DIR_RENAME_GET:
+ elif m == mergestatemod.ACTION_LOCAL_DIR_RENAME_GET:
if _checkunknownfile(repo, wctx, mctx, f, args[0]):
fileconflicts.add(f)
@@ -903,7 +172,7 @@
collectconflicts(unknownconflicts, unknownconfig)
else:
for f, (m, args, msg) in pycompat.iteritems(actions):
- if m == ACTION_CREATED_MERGE:
+ if m == mergestatemod.ACTION_CREATED_MERGE:
fl2, anc = args
different = _checkunknownfile(repo, wctx, mctx, f)
if repo.dirstate._ignore(f):
@@ -924,10 +193,14 @@
# don't like an abort happening in the middle of
# merge.update.
if not different:
- actions[f] = (ACTION_GET, (fl2, False), b'remote created')
+ actions[f] = (
+ mergestatemod.ACTION_GET,
+ (fl2, False),
+ b'remote created',
+ )
elif mergeforce or config == b'abort':
actions[f] = (
- ACTION_MERGE,
+ mergestatemod.ACTION_MERGE,
(f, f, None, False, anc),
b'remote differs from untracked local',
)
@@ -936,7 +209,11 @@
else:
if config == b'warn':
warnconflicts.add(f)
- actions[f] = (ACTION_GET, (fl2, True), b'remote created')
+ actions[f] = (
+ mergestatemod.ACTION_GET,
+ (fl2, True),
+ b'remote created',
+ )
for f in sorted(abortconflicts):
warn = repo.ui.warn
@@ -962,14 +239,14 @@
repo.ui.warn(_(b"%s: replacing untracked files in directory\n") % f)
for f, (m, args, msg) in pycompat.iteritems(actions):
- if m == ACTION_CREATED:
+ if m == mergestatemod.ACTION_CREATED:
backup = (
f in fileconflicts
or f in pathconflicts
or any(p in pathconflicts for p in pathutil.finddirs(f))
)
(flags,) = args
- actions[f] = (ACTION_GET, (flags, backup), msg)
+ actions[f] = (mergestatemod.ACTION_GET, (flags, backup), msg)
def _forgetremoved(wctx, mctx, branchmerge):
@@ -988,9 +265,9 @@
"""
actions = {}
- m = ACTION_FORGET
+ m = mergestatemod.ACTION_FORGET
if branchmerge:
- m = ACTION_REMOVE
+ m = mergestatemod.ACTION_REMOVE
for f in wctx.deleted():
if f not in mctx:
actions[f] = m, None, b"forget deleted"
@@ -998,7 +275,11 @@
if not branchmerge:
for f in wctx.removed():
if f not in mctx:
- actions[f] = ACTION_FORGET, None, b"forget removed"
+ actions[f] = (
+ mergestatemod.ACTION_FORGET,
+ None,
+ b"forget removed",
+ )
return actions
@@ -1026,24 +307,24 @@
if actions:
# KEEP and EXEC are no-op
for m in (
- ACTION_ADD,
- ACTION_ADD_MODIFIED,
- ACTION_FORGET,
- ACTION_GET,
- ACTION_CHANGED_DELETED,
- ACTION_DELETED_CHANGED,
+ mergestatemod.ACTION_ADD,
+ mergestatemod.ACTION_ADD_MODIFIED,
+ mergestatemod.ACTION_FORGET,
+ mergestatemod.ACTION_GET,
+ mergestatemod.ACTION_CHANGED_DELETED,
+ mergestatemod.ACTION_DELETED_CHANGED,
):
for f, args, msg in actions[m]:
pmmf.add(f)
- for f, args, msg in actions[ACTION_REMOVE]:
+ for f, args, msg in actions[mergestatemod.ACTION_REMOVE]:
pmmf.discard(f)
- for f, args, msg in actions[ACTION_DIR_RENAME_MOVE_LOCAL]:
+ for f, args, msg in actions[mergestatemod.ACTION_DIR_RENAME_MOVE_LOCAL]:
f2, flags = args
pmmf.discard(f2)
pmmf.add(f)
- for f, args, msg in actions[ACTION_LOCAL_DIR_RENAME_GET]:
+ for f, args, msg in actions[mergestatemod.ACTION_LOCAL_DIR_RENAME_GET]:
pmmf.add(f)
- for f, args, msg in actions[ACTION_MERGE]:
+ for f, args, msg in actions[mergestatemod.ACTION_MERGE]:
f1, f2, fa, move, anc = args
if move:
pmmf.discard(f1)
@@ -1128,10 +409,10 @@
for f, (m, args, msg) in actions.items():
if m in (
- ACTION_CREATED,
- ACTION_DELETED_CHANGED,
- ACTION_MERGE,
- ACTION_CREATED_MERGE,
+ mergestatemod.ACTION_CREATED,
+ mergestatemod.ACTION_DELETED_CHANGED,
+ mergestatemod.ACTION_MERGE,
+ mergestatemod.ACTION_CREATED_MERGE,
):
# This action may create a new local file.
createdfiledirs.update(pathutil.finddirs(f))
@@ -1141,13 +422,13 @@
# will be checked once we know what all the deleted files are.
remoteconflicts.add(f)
# Track the names of all deleted files.
- if m == ACTION_REMOVE:
+ if m == mergestatemod.ACTION_REMOVE:
deletedfiles.add(f)
- if m == ACTION_MERGE:
+ if m == mergestatemod.ACTION_MERGE:
f1, f2, fa, move, anc = args
if move:
deletedfiles.add(f1)
- if m == ACTION_DIR_RENAME_MOVE_LOCAL:
+ if m == mergestatemod.ACTION_DIR_RENAME_MOVE_LOCAL:
f2, flags = args
deletedfiles.add(f2)
@@ -1164,10 +445,10 @@
# We will need to rename the local file.
localconflicts.add(p)
if p in actions and actions[p][0] in (
- ACTION_CREATED,
- ACTION_DELETED_CHANGED,
- ACTION_MERGE,
- ACTION_CREATED_MERGE,
+ mergestatemod.ACTION_CREATED,
+ mergestatemod.ACTION_DELETED_CHANGED,
+ mergestatemod.ACTION_MERGE,
+ mergestatemod.ACTION_CREATED_MERGE,
):
# The file is in a directory which aliases a remote file.
# This is an internal inconsistency within the remote
@@ -1179,12 +460,17 @@
if p not in deletedfiles:
ctxname = bytes(wctx).rstrip(b'+')
pnew = util.safename(p, ctxname, wctx, set(actions.keys()))
+ porig = wctx[p].copysource() or p
actions[pnew] = (
- ACTION_PATH_CONFLICT_RESOLVE,
- (p,),
+ mergestatemod.ACTION_PATH_CONFLICT_RESOLVE,
+ (p, porig),
b'local path conflict',
)
- actions[p] = (ACTION_PATH_CONFLICT, (pnew, b'l'), b'path conflict')
+ actions[p] = (
+ mergestatemod.ACTION_PATH_CONFLICT,
+ (pnew, b'l'),
+ b'path conflict',
+ )
if remoteconflicts:
# Check if all files in the conflicting directories have been removed.
@@ -1193,20 +479,23 @@
if f not in deletedfiles:
m, args, msg = actions[p]
pnew = util.safename(p, ctxname, wctx, set(actions.keys()))
- if m in (ACTION_DELETED_CHANGED, ACTION_MERGE):
+ if m in (
+ mergestatemod.ACTION_DELETED_CHANGED,
+ mergestatemod.ACTION_MERGE,
+ ):
# Action was merge, just update target.
actions[pnew] = (m, args, msg)
else:
# Action was create, change to renamed get action.
fl = args[0]
actions[pnew] = (
- ACTION_LOCAL_DIR_RENAME_GET,
+ mergestatemod.ACTION_LOCAL_DIR_RENAME_GET,
(p, fl),
b'remote path conflict',
)
actions[p] = (
- ACTION_PATH_CONFLICT,
- (pnew, ACTION_REMOVE),
+ mergestatemod.ACTION_PATH_CONFLICT,
+ (pnew, mergestatemod.ACTION_REMOVE),
b'path conflict',
)
remoteconflicts.remove(p)
@@ -1269,6 +558,13 @@
branchmerge and force are as passed in to update
matcher = matcher to filter file lists
acceptremote = accept the incoming changes without prompting
+
+ Returns:
+
+ actions: dict with filenames as keys and action-related info as values
+ diverge: mapping of source name -> list of dest names for divergent renames
+ renamedelete: mapping of source name -> list of destinations for files
+ deleted on one side and renamed on the other.
"""
if matcher is not None and matcher.always():
matcher = None
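For illustration, the two rename-related mappings documented in the new Returns section might look like this (filenames hypothetical):

```python
# Hypothetical example values for the documented return mappings.
diverge = {b'src.txt': [b'copy1.txt', b'copy2.txt']}  # divergent renames
renamedelete = {b'gone.txt': [b'kept.txt']}  # deleted on one side, renamed on the other
```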
@@ -1340,13 +636,13 @@
) or branch_copies2.copy.get(f, None)
if fa is not None:
actions[f] = (
- ACTION_MERGE,
+ mergestatemod.ACTION_MERGE,
(f, f, fa, False, pa.node()),
b'both renamed from %s' % fa,
)
else:
actions[f] = (
- ACTION_MERGE,
+ mergestatemod.ACTION_MERGE,
(f, f, None, False, pa.node()),
b'both created',
)
@@ -1355,35 +651,43 @@
fla = ma.flags(f)
nol = b'l' not in fl1 + fl2 + fla
if n2 == a and fl2 == fla:
- actions[f] = (ACTION_KEEP, (), b'remote unchanged')
+ actions[f] = (
+ mergestatemod.ACTION_KEEP,
+ (),
+ b'remote unchanged',
+ )
elif n1 == a and fl1 == fla: # local unchanged - use remote
if n1 == n2: # optimization: keep local content
actions[f] = (
- ACTION_EXEC,
+ mergestatemod.ACTION_EXEC,
(fl2,),
b'update permissions',
)
else:
actions[f] = (
- ACTION_GET_OTHER_AND_STORE
+ mergestatemod.ACTION_GET_OTHER_AND_STORE
if branchmerge
- else ACTION_GET,
+ else mergestatemod.ACTION_GET,
(fl2, False),
b'remote is newer',
)
elif nol and n2 == a: # remote only changed 'x'
- actions[f] = (ACTION_EXEC, (fl2,), b'update permissions')
+ actions[f] = (
+ mergestatemod.ACTION_EXEC,
+ (fl2,),
+ b'update permissions',
+ )
elif nol and n1 == a: # local only changed 'x'
actions[f] = (
- ACTION_GET_OTHER_AND_STORE
+ mergestatemod.ACTION_GET_OTHER_AND_STORE
if branchmerge
- else ACTION_GET,
+ else mergestatemod.ACTION_GET,
(fl1, False),
b'remote is newer',
)
else: # both changed something
actions[f] = (
- ACTION_MERGE,
+ mergestatemod.ACTION_MERGE,
(f, f, f, False, pa.node()),
b'versions differ',
)
@@ -1396,40 +700,51 @@
f2 = branch_copies1.movewithdir[f]
if f2 in m2:
actions[f2] = (
- ACTION_MERGE,
+ mergestatemod.ACTION_MERGE,
(f, f2, None, True, pa.node()),
b'remote directory rename, both created',
)
else:
actions[f2] = (
- ACTION_DIR_RENAME_MOVE_LOCAL,
+ mergestatemod.ACTION_DIR_RENAME_MOVE_LOCAL,
(f, fl1),
b'remote directory rename - move from %s' % f,
)
elif f in branch_copies1.copy:
f2 = branch_copies1.copy[f]
actions[f] = (
- ACTION_MERGE,
+ mergestatemod.ACTION_MERGE,
(f, f2, f2, False, pa.node()),
b'local copied/moved from %s' % f2,
)
elif f in ma: # clean, a different, no remote
if n1 != ma[f]:
if acceptremote:
- actions[f] = (ACTION_REMOVE, None, b'remote delete')
+ actions[f] = (
+ mergestatemod.ACTION_REMOVE,
+ None,
+ b'remote delete',
+ )
else:
actions[f] = (
- ACTION_CHANGED_DELETED,
+ mergestatemod.ACTION_CHANGED_DELETED,
(f, None, f, False, pa.node()),
b'prompt changed/deleted',
)
elif n1 == addednodeid:
- # This extra 'a' is added by working copy manifest to mark
- # the file as locally added. We should forget it instead of
+ # This file was locally added. We should forget it instead of
# deleting it.
- actions[f] = (ACTION_FORGET, None, b'remote deleted')
+ actions[f] = (
+ mergestatemod.ACTION_FORGET,
+ None,
+ b'remote deleted',
+ )
else:
- actions[f] = (ACTION_REMOVE, None, b'other deleted')
+ actions[f] = (
+ mergestatemod.ACTION_REMOVE,
+ None,
+ b'other deleted',
+ )
elif n2: # file exists only on remote side
if f in copied1:
pass # we'll deal with it on m1 side
@@ -1437,13 +752,13 @@
f2 = branch_copies2.movewithdir[f]
if f2 in m1:
actions[f2] = (
- ACTION_MERGE,
+ mergestatemod.ACTION_MERGE,
(f2, f, None, False, pa.node()),
b'local directory rename, both created',
)
else:
actions[f2] = (
- ACTION_LOCAL_DIR_RENAME_GET,
+ mergestatemod.ACTION_LOCAL_DIR_RENAME_GET,
(f, fl2),
b'local directory rename - get from %s' % f,
)
@@ -1451,13 +766,13 @@
f2 = branch_copies2.copy[f]
if f2 in m2:
actions[f] = (
- ACTION_MERGE,
+ mergestatemod.ACTION_MERGE,
(f2, f, f2, False, pa.node()),
b'remote copied from %s' % f2,
)
else:
actions[f] = (
- ACTION_MERGE,
+ mergestatemod.ACTION_MERGE,
(f2, f, f2, True, pa.node()),
b'remote moved from %s' % f2,
)
@@ -1474,12 +789,20 @@
# Checking whether the files are different is expensive, so we
# don't do that when we can avoid it.
if not force:
- actions[f] = (ACTION_CREATED, (fl2,), b'remote created')
+ actions[f] = (
+ mergestatemod.ACTION_CREATED,
+ (fl2,),
+ b'remote created',
+ )
elif not branchmerge:
- actions[f] = (ACTION_CREATED, (fl2,), b'remote created')
+ actions[f] = (
+ mergestatemod.ACTION_CREATED,
+ (fl2,),
+ b'remote created',
+ )
else:
actions[f] = (
- ACTION_CREATED_MERGE,
+ mergestatemod.ACTION_CREATED_MERGE,
(fl2, pa.node()),
b'remote created, get or merge',
)
@@ -1492,16 +815,20 @@
break
if df is not None and df in m1:
actions[df] = (
- ACTION_MERGE,
+ mergestatemod.ACTION_MERGE,
(df, f, f, False, pa.node()),
b'local directory rename - respect move '
b'from %s' % f,
)
elif acceptremote:
- actions[f] = (ACTION_CREATED, (fl2,), b'remote recreating')
+ actions[f] = (
+ mergestatemod.ACTION_CREATED,
+ (fl2,),
+ b'remote recreating',
+ )
else:
actions[f] = (
- ACTION_DELETED_CHANGED,
+ mergestatemod.ACTION_DELETED_CHANGED,
(None, f, f, False, pa.node()),
b'prompt deleted/changed',
)
@@ -1528,14 +855,14 @@
# actions as we resolve trivial conflicts.
for f, (m, args, msg) in list(actions.items()):
if (
- m == ACTION_CHANGED_DELETED
+ m == mergestatemod.ACTION_CHANGED_DELETED
and f in ancestor
and not wctx[f].cmp(ancestor[f])
):
# local did change but ended up with same content
- actions[f] = ACTION_REMOVE, None, b'prompt same'
+ actions[f] = mergestatemod.ACTION_REMOVE, None, b'prompt same'
elif (
- m == ACTION_DELETED_CHANGED
+ m == mergestatemod.ACTION_DELETED_CHANGED
and f in ancestor
and not mctx[f].cmp(ancestor[f])
):
@@ -1555,7 +882,17 @@
matcher=None,
mergeforce=False,
):
- """Calculate the actions needed to merge mctx into wctx using ancestors"""
+ """
+ Calculate the actions needed to merge mctx into wctx using ancestors
+
+ Uses manifestmerge() to merge the manifests and get the list of actions
+ required to merge them. If there are multiple ancestors, uses bid merge
+ if enabled.
+
+ Also filters out actions which are not required if the repository is sparse.
+
+ Returns the same 3-element tuple as manifestmerge().
+ """
# Avoid cycle.
from . import sparse
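As a usage sketch, a caller consumes the documented 3-element tuple like this; the argument list is an assumption, since only the tail of the signature (matcher=None, mergeforce=False) is visible in this hunk:

```python
# Hypothetical call site for the function documented above
# (calculateupdates() in merge.py upstream).
actions, diverge, renamedelete = calculateupdates(
    repo, wctx, mctx, ancestors, branchmerge, force, acceptremote,
    followcopies, matcher=None, mergeforce=False,
)
```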
@@ -1613,8 +950,8 @@
for f, a in sorted(pycompat.iteritems(actions)):
m, args, msg = a
- if m == ACTION_GET_OTHER_AND_STORE:
- m = ACTION_GET
+ if m == mergestatemod.ACTION_GET_OTHER_AND_STORE:
+ m = mergestatemod.ACTION_GET
repo.ui.debug(b' %s: %s -> %s\n' % (f, msg, m))
if f in fbids:
d = fbids[f]
@@ -1638,14 +975,14 @@
actions[f] = l[0]
continue
# If keep is an option, just do it.
- if ACTION_KEEP in bids:
+ if mergestatemod.ACTION_KEEP in bids:
repo.ui.note(_(b" %s: picking 'keep' action\n") % f)
- actions[f] = bids[ACTION_KEEP][0]
+ actions[f] = bids[mergestatemod.ACTION_KEEP][0]
continue
# If there are gets and they all agree [how could they not?], do it.
- if ACTION_GET in bids:
- ga0 = bids[ACTION_GET][0]
- if all(a == ga0 for a in bids[ACTION_GET][1:]):
+ if mergestatemod.ACTION_GET in bids:
+ ga0 = bids[mergestatemod.ACTION_GET][0]
+ if all(a == ga0 for a in bids[mergestatemod.ACTION_GET][1:]):
repo.ui.note(_(b" %s: picking 'get' action\n") % f)
actions[f] = ga0
continue
@@ -1790,18 +1127,24 @@
oplist = [
actions[a]
for a in (
- ACTION_GET,
- ACTION_DELETED_CHANGED,
- ACTION_LOCAL_DIR_RENAME_GET,
- ACTION_MERGE,
+ mergestatemod.ACTION_GET,
+ mergestatemod.ACTION_DELETED_CHANGED,
+ mergestatemod.ACTION_LOCAL_DIR_RENAME_GET,
+ mergestatemod.ACTION_MERGE,
)
]
prefetch = scmutil.prefetchfiles
matchfiles = scmutil.matchfiles
prefetch(
repo,
- [ctx.rev()],
- matchfiles(repo, [f for sublist in oplist for f, args, msg in sublist]),
+ [
+ (
+ ctx.rev(),
+ matchfiles(
+ repo, [f for sublist in oplist for f, args, msg in sublist]
+ ),
+ )
+ ],
)
@@ -1826,21 +1169,21 @@
return {
m: []
for m in (
- ACTION_ADD,
- ACTION_ADD_MODIFIED,
- ACTION_FORGET,
- ACTION_GET,
- ACTION_CHANGED_DELETED,
- ACTION_DELETED_CHANGED,
- ACTION_REMOVE,
- ACTION_DIR_RENAME_MOVE_LOCAL,
- ACTION_LOCAL_DIR_RENAME_GET,
- ACTION_MERGE,
- ACTION_EXEC,
- ACTION_KEEP,
- ACTION_PATH_CONFLICT,
- ACTION_PATH_CONFLICT_RESOLVE,
- ACTION_GET_OTHER_AND_STORE,
+ mergestatemod.ACTION_ADD,
+ mergestatemod.ACTION_ADD_MODIFIED,
+ mergestatemod.ACTION_FORGET,
+ mergestatemod.ACTION_GET,
+ mergestatemod.ACTION_CHANGED_DELETED,
+ mergestatemod.ACTION_DELETED_CHANGED,
+ mergestatemod.ACTION_REMOVE,
+ mergestatemod.ACTION_DIR_RENAME_MOVE_LOCAL,
+ mergestatemod.ACTION_LOCAL_DIR_RENAME_GET,
+ mergestatemod.ACTION_MERGE,
+ mergestatemod.ACTION_EXEC,
+ mergestatemod.ACTION_KEEP,
+ mergestatemod.ACTION_PATH_CONFLICT,
+ mergestatemod.ACTION_PATH_CONFLICT_RESOLVE,
+ mergestatemod.ACTION_GET_OTHER_AND_STORE,
)
}
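For context, the dict built above is the empty skeleton that later code fills in, one (file, args, message) tuple per action list. A hypothetical use (the helper name emptyactions is assumed from the enclosing function, which this hunk does not show):

```python
# Illustration: start from the empty table and queue one 'get' action.
actions = emptyactions()
actions[mergestatemod.ACTION_GET].append(
    (b'f.txt', (b'', False), b'remote created')
)
```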
@@ -1862,10 +1205,12 @@
_prefetchfiles(repo, mctx, actions)
updated, merged, removed = 0, 0, 0
- ms = mergestate.clean(repo, wctx.p1().node(), mctx.node(), labels)
+ ms = mergestatemod.mergestate.clean(
+ repo, wctx.p1().node(), mctx.node(), labels
+ )
# add ACTION_GET_OTHER_AND_STORE to mergestate
- for e in actions[ACTION_GET_OTHER_AND_STORE]:
+ for e in actions[mergestatemod.ACTION_GET_OTHER_AND_STORE]:
ms.addmergedother(e[0])
moves = []
@@ -1873,9 +1218,9 @@
l.sort()
# 'cd' and 'dc' actions are treated like other merge conflicts
- mergeactions = sorted(actions[ACTION_CHANGED_DELETED])
- mergeactions.extend(sorted(actions[ACTION_DELETED_CHANGED]))
- mergeactions.extend(actions[ACTION_MERGE])
+ mergeactions = sorted(actions[mergestatemod.ACTION_CHANGED_DELETED])
+ mergeactions.extend(sorted(actions[mergestatemod.ACTION_DELETED_CHANGED]))
+ mergeactions.extend(actions[mergestatemod.ACTION_MERGE])
for f, args, msg in mergeactions:
f1, f2, fa, move, anc = args
if f == b'.hgsubstate': # merged internally
@@ -1906,16 +1251,22 @@
wctx[f].audit()
wctx[f].remove()
- numupdates = sum(len(l) for m, l in actions.items() if m != ACTION_KEEP)
+ numupdates = sum(
+ len(l) for m, l in actions.items() if m != mergestatemod.ACTION_KEEP
+ )
progress = repo.ui.makeprogress(
_(b'updating'), unit=_(b'files'), total=numupdates
)
- if [a for a in actions[ACTION_REMOVE] if a[0] == b'.hgsubstate']:
+ if [
+ a
+ for a in actions[mergestatemod.ACTION_REMOVE]
+ if a[0] == b'.hgsubstate'
+ ]:
subrepoutil.submerge(repo, wctx, mctx, wctx, overwrite, labels)
# record path conflicts
- for f, args, msg in actions[ACTION_PATH_CONFLICT]:
+ for f, args, msg in actions[mergestatemod.ACTION_PATH_CONFLICT]:
f1, fo = args
s = repo.ui.status
s(
@@ -1930,7 +1281,7 @@
else:
s(_(b"the remote file has been renamed to %s\n") % f1)
s(_(b"resolve manually then use 'hg resolve --mark %s'\n") % f)
- ms.addpath(f, f1, fo)
+ ms.addpathconflict(f, f1, fo)
progress.increment(item=f)
# When merging in-memory, we can't support worker processes, so set the
@@ -1939,16 +1290,20 @@
# remove in parallel (must come before resolving path conflicts and getting)
prog = worker.worker(
- repo.ui, cost, batchremove, (repo, wctx), actions[ACTION_REMOVE]
+ repo.ui,
+ cost,
+ batchremove,
+ (repo, wctx),
+ actions[mergestatemod.ACTION_REMOVE],
)
for i, item in prog:
progress.increment(step=i, item=item)
- removed = len(actions[ACTION_REMOVE])
+ removed = len(actions[mergestatemod.ACTION_REMOVE])
# resolve path conflicts (must come before getting)
- for f, args, msg in actions[ACTION_PATH_CONFLICT_RESOLVE]:
+ for f, args, msg in actions[mergestatemod.ACTION_PATH_CONFLICT_RESOLVE]:
repo.ui.debug(b" %s: %s -> pr\n" % (f, msg))
- (f0,) = args
+ (f0, origf0) = args
if wctx[f0].lexists():
repo.ui.note(_(b"moving %s to %s\n") % (f0, f))
wctx[f].audit()
@@ -1965,7 +1320,7 @@
cost,
batchget,
(repo, mctx, wctx, wantfiledata),
- actions[ACTION_GET],
+ actions[mergestatemod.ACTION_GET],
threadsafe=threadsafe,
hasretval=True,
)
@@ -1976,33 +1331,33 @@
else:
i, item = res
progress.increment(step=i, item=item)
- updated = len(actions[ACTION_GET])
+ updated = len(actions[mergestatemod.ACTION_GET])
- if [a for a in actions[ACTION_GET] if a[0] == b'.hgsubstate']:
+ if [a for a in actions[mergestatemod.ACTION_GET] if a[0] == b'.hgsubstate']:
subrepoutil.submerge(repo, wctx, mctx, wctx, overwrite, labels)
# forget (manifest only, just log it) (must come first)
- for f, args, msg in actions[ACTION_FORGET]:
+ for f, args, msg in actions[mergestatemod.ACTION_FORGET]:
repo.ui.debug(b" %s: %s -> f\n" % (f, msg))
progress.increment(item=f)
# re-add (manifest only, just log it)
- for f, args, msg in actions[ACTION_ADD]:
+ for f, args, msg in actions[mergestatemod.ACTION_ADD]:
repo.ui.debug(b" %s: %s -> a\n" % (f, msg))
progress.increment(item=f)
# re-add/mark as modified (manifest only, just log it)
- for f, args, msg in actions[ACTION_ADD_MODIFIED]:
+ for f, args, msg in actions[mergestatemod.ACTION_ADD_MODIFIED]:
repo.ui.debug(b" %s: %s -> am\n" % (f, msg))
progress.increment(item=f)
# keep (noop, just log it)
- for f, args, msg in actions[ACTION_KEEP]:
+ for f, args, msg in actions[mergestatemod.ACTION_KEEP]:
repo.ui.debug(b" %s: %s -> k\n" % (f, msg))
# no progress
# directory rename, move local
- for f, args, msg in actions[ACTION_DIR_RENAME_MOVE_LOCAL]:
+ for f, args, msg in actions[mergestatemod.ACTION_DIR_RENAME_MOVE_LOCAL]:
repo.ui.debug(b" %s: %s -> dm\n" % (f, msg))
progress.increment(item=f)
f0, flags = args
@@ -2013,7 +1368,7 @@
updated += 1
# local directory rename, get
- for f, args, msg in actions[ACTION_LOCAL_DIR_RENAME_GET]:
+ for f, args, msg in actions[mergestatemod.ACTION_LOCAL_DIR_RENAME_GET]:
repo.ui.debug(b" %s: %s -> dg\n" % (f, msg))
progress.increment(item=f)
f0, flags = args
@@ -2022,7 +1377,7 @@
updated += 1
# exec
- for f, args, msg in actions[ACTION_EXEC]:
+ for f, args, msg in actions[mergestatemod.ACTION_EXEC]:
repo.ui.debug(b" %s: %s -> e\n" % (f, msg))
progress.increment(item=f)
(flags,) = args
@@ -2087,7 +1442,7 @@
if (
usemergedriver
and not unresolved
- and ms.mdstate() != MERGE_DRIVER_STATE_SUCCESS
+ and ms.mdstate() != mergestatemod.MERGE_DRIVER_STATE_SUCCESS
):
if not driverconclude(repo, ms, wctx, labels=labels):
# XXX setting unresolved to at least 1 is a hack to make sure we
@@ -2103,10 +1458,10 @@
extraactions = ms.actions()
if extraactions:
- mfiles = {a[0] for a in actions[ACTION_MERGE]}
+ mfiles = {a[0] for a in actions[mergestatemod.ACTION_MERGE]}
for k, acts in pycompat.iteritems(extraactions):
actions[k].extend(acts)
- if k == ACTION_GET and wantfiledata:
+ if k == mergestatemod.ACTION_GET and wantfiledata:
# no filedata until mergestate is updated to provide it
for a in acts:
getfiledata[a[0]] = None
@@ -2128,110 +1483,58 @@
# those lists aren't consulted again.
mfiles.difference_update(a[0] for a in acts)
- actions[ACTION_MERGE] = [
- a for a in actions[ACTION_MERGE] if a[0] in mfiles
+ actions[mergestatemod.ACTION_MERGE] = [
+ a for a in actions[mergestatemod.ACTION_MERGE] if a[0] in mfiles
]
progress.complete()
- assert len(getfiledata) == (len(actions[ACTION_GET]) if wantfiledata else 0)
+ assert len(getfiledata) == (
+ len(actions[mergestatemod.ACTION_GET]) if wantfiledata else 0
+ )
return updateresult(updated, merged, removed, unresolved), getfiledata
-def recordupdates(repo, actions, branchmerge, getfiledata):
- """record merge actions to the dirstate"""
- # remove (must come first)
- for f, args, msg in actions.get(ACTION_REMOVE, []):
- if branchmerge:
- repo.dirstate.remove(f)
- else:
- repo.dirstate.drop(f)
-
- # forget (must come first)
- for f, args, msg in actions.get(ACTION_FORGET, []):
- repo.dirstate.drop(f)
-
- # resolve path conflicts
- for f, args, msg in actions.get(ACTION_PATH_CONFLICT_RESOLVE, []):
- (f0,) = args
- origf0 = repo.dirstate.copied(f0) or f0
- repo.dirstate.add(f)
- repo.dirstate.copy(origf0, f)
- if f0 == origf0:
- repo.dirstate.remove(f0)
- else:
- repo.dirstate.drop(f0)
-
- # re-add
- for f, args, msg in actions.get(ACTION_ADD, []):
- repo.dirstate.add(f)
-
- # re-add/mark as modified
- for f, args, msg in actions.get(ACTION_ADD_MODIFIED, []):
- if branchmerge:
- repo.dirstate.normallookup(f)
- else:
- repo.dirstate.add(f)
-
- # exec change
- for f, args, msg in actions.get(ACTION_EXEC, []):
- repo.dirstate.normallookup(f)
-
- # keep
- for f, args, msg in actions.get(ACTION_KEEP, []):
- pass
+def _advertisefsmonitor(repo, num_gets, p1node):
+ # Advertise fsmonitor when its presence could be useful.
+ #
+ # We only advertise when performing an update from an empty working
+ # directory. This typically only occurs during initial clone.
+ #
+ # We give users a mechanism to disable the warning in case it is
+ # annoying.
+ #
+ # We only allow on Linux and MacOS because that's where fsmonitor is
+ # considered stable.
+ fsmonitorwarning = repo.ui.configbool(b'fsmonitor', b'warn_when_unused')
+ fsmonitorthreshold = repo.ui.configint(
+ b'fsmonitor', b'warn_update_file_count'
+ )
+ try:
+ # avoid cycle: extensions -> cmdutil -> merge
+ from . import extensions
- # get
- for f, args, msg in actions.get(ACTION_GET, []):
- if branchmerge:
- repo.dirstate.otherparent(f)
- else:
- parentfiledata = getfiledata[f] if getfiledata else None
- repo.dirstate.normal(f, parentfiledata=parentfiledata)
+ extensions.find(b'fsmonitor')
+ fsmonitorenabled = repo.ui.config(b'fsmonitor', b'mode') != b'off'
+ # We intentionally don't look at whether fsmonitor has disabled
+ # itself because a) fsmonitor may have already printed a warning
+ # b) we only care about the config state here.
+ except KeyError:
+ fsmonitorenabled = False
- # merge
- for f, args, msg in actions.get(ACTION_MERGE, []):
- f1, f2, fa, move, anc = args
- if branchmerge:
- # We've done a branch merge, mark this file as merged
- # so that we properly record the merger later
- repo.dirstate.merge(f)
- if f1 != f2: # copy/rename
- if move:
- repo.dirstate.remove(f1)
- if f1 != f:
- repo.dirstate.copy(f1, f)
- else:
- repo.dirstate.copy(f2, f)
- else:
- # We've update-merged a locally modified file, so
- # we set the dirstate to emulate a normal checkout
- # of that file some time in the past. Thus our
- # merge will appear as a normal local file
- # modification.
- if f2 == f: # file not locally copied/moved
- repo.dirstate.normallookup(f)
- if move:
- repo.dirstate.drop(f1)
-
- # directory rename, move local
- for f, args, msg in actions.get(ACTION_DIR_RENAME_MOVE_LOCAL, []):
- f0, flag = args
- if branchmerge:
- repo.dirstate.add(f)
- repo.dirstate.remove(f0)
- repo.dirstate.copy(f0, f)
- else:
- repo.dirstate.normal(f)
- repo.dirstate.drop(f0)
-
- # directory rename, get
- for f, args, msg in actions.get(ACTION_LOCAL_DIR_RENAME_GET, []):
- f0, flag = args
- if branchmerge:
- repo.dirstate.add(f)
- repo.dirstate.copy(f0, f)
- else:
- repo.dirstate.normal(f)
+ if (
+ fsmonitorwarning
+ and not fsmonitorenabled
+ and p1node == nullid
+ and num_gets >= fsmonitorthreshold
+ and pycompat.sysplatform.startswith((b'linux', b'darwin'))
+ ):
+ repo.ui.warn(
+ _(
+ b'(warning: large working directory being used without '
+ b'fsmonitor enabled; enable fsmonitor to improve performance; '
+ b'see "hg help -e fsmonitor")\n'
+ )
+ )
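A condensed restatement of the gating condition implemented by the extracted helper; the names are illustrative, and the real code reads the flag and threshold from the [fsmonitor] config section as shown above:

```python
import sys

def should_warn(warn_when_unused, fsmonitor_enabled, p1_is_null,
                num_gets, threshold):
    # Warn only for a fresh checkout of a large tree on a platform
    # where fsmonitor is considered stable.
    return (
        warn_when_unused
        and not fsmonitor_enabled
        and p1_is_null                # update from an empty working directory
        and num_gets >= threshold
        and sys.platform.startswith(('linux', 'darwin'))
    )
```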
UPDATECHECK_ABORT = b'abort' # handled at higher layers
@@ -2334,7 +1637,11 @@
),
)
)
- with repo.wlock():
+ if wc is not None and wc.isinmemory():
+ maybe_wlock = util.nullcontextmanager()
+ else:
+ maybe_wlock = repo.wlock()
+ with maybe_wlock:
if wc is None:
wc = repo[None]
pl = wc.parents()
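The wlock hunk above takes the working-copy lock only when the merge will touch the on-disk working directory. A standalone sketch of the same pattern, using the Python 3 stdlib analogue of util.nullcontextmanager():

```python
import contextlib

def update_with_optional_lock(repo, in_memory):
    # In-memory merges never write to the working directory, so the
    # working-copy lock can be skipped for them.
    lock = contextlib.nullcontext() if in_memory else repo.wlock()
    with lock:
        pass  # ... perform the update ...
```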
@@ -2356,7 +1663,7 @@
if not overwrite:
if len(pl) > 1:
raise error.Abort(_(b"outstanding uncommitted merge"))
- ms = mergestate.read(repo)
+ ms = mergestatemod.mergestate.read(repo)
if list(ms.unresolved()):
raise error.Abort(
_(b"outstanding merge conflicts"),
@@ -2443,12 +1750,12 @@
if updatecheck == UPDATECHECK_NO_CONFLICT:
for f, (m, args, msg) in pycompat.iteritems(actionbyfile):
if m not in (
- ACTION_GET,
- ACTION_KEEP,
- ACTION_EXEC,
- ACTION_REMOVE,
- ACTION_PATH_CONFLICT_RESOLVE,
- ACTION_GET_OTHER_AND_STORE,
+ mergestatemod.ACTION_GET,
+ mergestatemod.ACTION_KEEP,
+ mergestatemod.ACTION_EXEC,
+ mergestatemod.ACTION_REMOVE,
+ mergestatemod.ACTION_PATH_CONFLICT_RESOLVE,
+ mergestatemod.ACTION_GET_OTHER_AND_STORE,
):
msg = _(b"conflicting changes")
hint = _(b"commit or update --clean to discard changes")
@@ -2462,7 +1769,7 @@
m, args, msg = actionbyfile[f]
prompts = filemerge.partextras(labels)
prompts[b'f'] = f
- if m == ACTION_CHANGED_DELETED:
+ if m == mergestatemod.ACTION_CHANGED_DELETED:
if repo.ui.promptchoice(
_(
b"local%(l)s changed %(f)s which other%(o)s deleted\n"
@@ -2472,16 +1779,24 @@
% prompts,
0,
):
- actionbyfile[f] = (ACTION_REMOVE, None, b'prompt delete')
+ actionbyfile[f] = (
+ mergestatemod.ACTION_REMOVE,
+ None,
+ b'prompt delete',
+ )
elif f in p1:
actionbyfile[f] = (
- ACTION_ADD_MODIFIED,
+ mergestatemod.ACTION_ADD_MODIFIED,
None,
b'prompt keep',
)
else:
- actionbyfile[f] = (ACTION_ADD, None, b'prompt keep')
- elif m == ACTION_DELETED_CHANGED:
+ actionbyfile[f] = (
+ mergestatemod.ACTION_ADD,
+ None,
+ b'prompt keep',
+ )
+ elif m == mergestatemod.ACTION_DELETED_CHANGED:
f1, f2, fa, move, anc = args
flags = p2[f2].flags()
if (
@@ -2497,7 +1812,7 @@
== 0
):
actionbyfile[f] = (
- ACTION_GET,
+ mergestatemod.ACTION_GET,
(flags, False),
b'prompt recreating',
)
@@ -2511,9 +1826,9 @@
actions[m] = []
actions[m].append((f, args, msg))
- # ACTION_GET_OTHER_AND_STORE is a ACTION_GET + store in mergestate
- for e in actions[ACTION_GET_OTHER_AND_STORE]:
- actions[ACTION_GET].append(e)
+ # ACTION_GET_OTHER_AND_STORE is a mergestatemod.ACTION_GET + store in mergestate
+ for e in actions[mergestatemod.ACTION_GET_OTHER_AND_STORE]:
+ actions[mergestatemod.ACTION_GET].append(e)
if not util.fscasesensitive(repo.path):
# check collision between files only in p2 for clean update
@@ -2560,46 +1875,9 @@
# note that we're in the middle of an update
repo.vfs.write(b'updatestate', p2.hex())
- # Advertise fsmonitor when its presence could be useful.
- #
- # We only advertise when performing an update from an empty working
- # directory. This typically only occurs during initial clone.
- #
- # We give users a mechanism to disable the warning in case it is
- # annoying.
- #
- # We only allow on Linux and MacOS because that's where fsmonitor is
- # considered stable.
- fsmonitorwarning = repo.ui.configbool(b'fsmonitor', b'warn_when_unused')
- fsmonitorthreshold = repo.ui.configint(
- b'fsmonitor', b'warn_update_file_count'
+ _advertisefsmonitor(
+ repo, len(actions[mergestatemod.ACTION_GET]), p1.node()
)
- try:
- # avoid cycle: extensions -> cmdutil -> merge
- from . import extensions
-
- extensions.find(b'fsmonitor')
- fsmonitorenabled = repo.ui.config(b'fsmonitor', b'mode') != b'off'
- # We intentionally don't look at whether fsmonitor has disabled
- # itself because a) fsmonitor may have already printed a warning
- # b) we only care about the config state here.
- except KeyError:
- fsmonitorenabled = False
-
- if (
- fsmonitorwarning
- and not fsmonitorenabled
- and p1.node() == nullid
- and len(actions[ACTION_GET]) >= fsmonitorthreshold
- and pycompat.sysplatform.startswith((b'linux', b'darwin'))
- ):
- repo.ui.warn(
- _(
- b'(warning: large working directory being used without '
- b'fsmonitor enabled; enable fsmonitor to improve performance; '
- b'see "hg help -e fsmonitor")\n'
- )
- )
wantfiledata = updatedirstate and not branchmerge
stats, getfiledata = applyupdates(
@@ -2609,7 +1887,9 @@
if updatedirstate:
with repo.dirstate.parentchange():
repo.setparents(fp1, fp2)
- recordupdates(repo, actions, branchmerge, getfiledata)
+ mergestatemod.recordupdates(
+ repo, actions, branchmerge, getfiledata
+ )
# update completed, clear state
util.unlink(repo.vfs.join(b'updatestate'))
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/mercurial/mergestate.py Mon Jul 20 21:56:27 2020 +0530
@@ -0,0 +1,879 @@
+from __future__ import absolute_import
+
+import errno
+import shutil
+import struct
+
+from .i18n import _
+from .node import (
+ bin,
+ hex,
+ nullhex,
+ nullid,
+)
+from .pycompat import delattr
+from . import (
+ error,
+ filemerge,
+ pycompat,
+ util,
+)
+from .utils import hashutil
+
+_pack = struct.pack
+_unpack = struct.unpack
+
+
+def _droponode(data):
+ # used for compatibility for v1
+ bits = data.split(b'\0')
+ bits = bits[:-2] + bits[-1:]
+ return b'\0'.join(bits)
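To make the v1/v2 comparison below concrete: a v2 'F' record carries the "other" file node as its second-to-last field, which v1 lacks, and _droponode strips exactly that field. With made-up field values:

```python
# Hypothetical v2 'F' payload: filename, state, localkey, lfile, afile,
# anode, ofile, onode, flags -- _droponode() removes 'onode'.
v2 = b'\0'.join([b'f.txt', b'u', b'key', b'f.txt', b'f.txt',
                 b'anode', b'f.txt', b'onode', b'x'])
v1 = b'\0'.join([b'f.txt', b'u', b'key', b'f.txt', b'f.txt',
                 b'anode', b'f.txt', b'x'])
assert _droponode(v2) == v1
```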
+
+
+def _filectxorabsent(hexnode, ctx, f):
+ if hexnode == nullhex:
+ return filemerge.absentfilectx(ctx, f)
+ else:
+ return ctx[f]
+
+
+# Merge state record types. See ``mergestate`` docs for more.
+
+#####
+# merge records which record metadata about the current merge
+# and exist only once in a mergestate
+#####
+RECORD_LOCAL = b'L'
+RECORD_OTHER = b'O'
+# record merge labels
+RECORD_LABELS = b'l'
+# store info about the merge driver used and its state
+RECORD_MERGE_DRIVER_STATE = b'm'
+
+#####
+# record extra information about files, with one entry containing info about one
+# file. Hence, multiple of them can exist
+#####
+RECORD_FILE_VALUES = b'f'
+
+#####
+# merge records which represent the state of individual merges of files/folders.
+# These are top-level records; each entry contains merge-related info about
+# one file, so multiple of them can exist.
+#####
+RECORD_MERGED = b'F'
+RECORD_CHANGEDELETE_CONFLICT = b'C'
+RECORD_MERGE_DRIVER_MERGE = b'D'
+# the path was a directory on one side of the merge and a file on the other
+RECORD_PATH_CONFLICT = b'P'
+
+#####
+# possible states which a merge entry can have. These are stored inside top-level
+# merge records mentioned just above.
+#####
+MERGE_RECORD_UNRESOLVED = b'u'
+MERGE_RECORD_RESOLVED = b'r'
+MERGE_RECORD_UNRESOLVED_PATH = b'pu'
+MERGE_RECORD_RESOLVED_PATH = b'pr'
+MERGE_RECORD_DRIVER_RESOLVED = b'd'
+# represents that the file was automatically merged in favor
+# of the other version. This info is used on commit.
+MERGE_RECORD_MERGED_OTHER = b'o'
+
+#####
+# top-level record which stores other unknown records. Multiple of these can
+# exist
+#####
+RECORD_OVERRIDE = b't'
+
+#####
+# possible states which a merge driver can have. These are stored inside a
+# RECORD_MERGE_DRIVER_STATE entry
+#####
+MERGE_DRIVER_STATE_UNMARKED = b'u'
+MERGE_DRIVER_STATE_MARKED = b'm'
+MERGE_DRIVER_STATE_SUCCESS = b's'
+
+
+ACTION_FORGET = b'f'
+ACTION_REMOVE = b'r'
+ACTION_ADD = b'a'
+ACTION_GET = b'g'
+ACTION_PATH_CONFLICT = b'p'
+ACTION_PATH_CONFLICT_RESOLVE = b'pr'
+ACTION_ADD_MODIFIED = b'am'
+ACTION_CREATED = b'c'
+ACTION_DELETED_CHANGED = b'dc'
+ACTION_CHANGED_DELETED = b'cd'
+ACTION_MERGE = b'm'
+ACTION_LOCAL_DIR_RENAME_GET = b'dg'
+ACTION_DIR_RENAME_MOVE_LOCAL = b'dm'
+ACTION_KEEP = b'k'
+ACTION_EXEC = b'e'
+ACTION_CREATED_MERGE = b'cm'
+# GET the other/remote side and store this info in mergestate
+ACTION_GET_OTHER_AND_STORE = b'gs'
+
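These ACTION_* constants key the actions table that manifestmerge() builds and applyupdates() consumes: one entry per file, holding the action, its action-specific args, and a human-readable rationale. A hypothetical entry:

```python
# Illustration only; the ancestor node is a placeholder for pa.node().
actions = {
    b'new.txt': (ACTION_GET, (b'', False), b'remote created'),
    b'both.txt': (
        ACTION_MERGE,
        (b'both.txt', b'both.txt', None, False, b'<ancestor-node>'),
        b'both created',
    ),
}
```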
+
+class mergestate(object):
+ '''track 3-way merge state of individual files
+
+ The merge state is stored on disk when needed. Two files are used: one with
+ an old format (version 1), and one with a new format (version 2). Version 2
+ stores a superset of the data in version 1, including new kinds of records
+ in the future. For more about the new format, see the documentation for
+ `_readrecordsv2`.
+
+ Each record can contain arbitrary content, and has an associated type. This
+ `type` should be a letter. If `type` is uppercase, the record is mandatory:
+ versions of Mercurial that don't support it should abort. If `type` is
+ lowercase, the record can be safely ignored.
+
+ Currently known records:
+
+ L: the node of the "local" part of the merge (hexified version)
+ O: the node of the "other" part of the merge (hexified version)
+ F: a file-to-be-merged entry
+ C: a change/delete or delete/change conflict
+ D: a file that the external merge driver will merge internally
+ (experimental)
+ P: a path conflict (file vs directory)
+ m: the external merge driver defined for this merge plus its run state
+ (experimental)
+ f: a (filename, dictionary) tuple of optional values for a given file
+ l: the labels for the parts of the merge.
+
+ Merge driver run states (experimental):
+ u: driver-resolved files unmarked -- needs to be run next time we're about
+ to resolve or commit
+ m: driver-resolved files marked -- only needs to be run before commit
+ s: success/skipped -- does not need to be run any more
+
+ Merge record states (stored in self._state, indexed by filename):
+ u: unresolved conflict
+ r: resolved conflict
+ pu: unresolved path conflict (file conflicts with directory)
+ pr: resolved path conflict
+ d: driver-resolved conflict
+
+ The resolve command transitions between 'u' and 'r' for conflicts and
+ 'pu' and 'pr' for path conflicts.
+ '''
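As a usage sketch (repo and node values assumed), the lifecycle mirrors how merge.py drives this class:

```python
# Hypothetical driver code; 'repo', 'p1node' and 'p2node' are assumed.
ms = mergestate.clean(repo, node=p1node, other=p2node)  # fresh merge
ms.addmergedother(b'auto.txt')  # file resolved in favor of the other side
ms.commit()                     # persist to .hg/merge/state and state2

ms = mergestate.read(repo)      # reload later, e.g. from 'hg resolve'
unresolved = list(ms.unresolved())
```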
+
+ statepathv1 = b'merge/state'
+ statepathv2 = b'merge/state2'
+
+ @staticmethod
+ def clean(repo, node=None, other=None, labels=None):
+ """Initialize a brand new merge state, removing any existing state on
+ disk."""
+ ms = mergestate(repo)
+ ms.reset(node, other, labels)
+ return ms
+
+ @staticmethod
+ def read(repo):
+ """Initialize the merge state, reading it from disk."""
+ ms = mergestate(repo)
+ ms._read()
+ return ms
+
+ def __init__(self, repo):
+ """Initialize the merge state.
+
+ Do not use this directly! Instead call read() or clean()."""
+ self._repo = repo
+ self._dirty = False
+ self._labels = None
+
+ def reset(self, node=None, other=None, labels=None):
+ self._state = {}
+ self._stateextras = {}
+ self._local = None
+ self._other = None
+ self._labels = labels
+ for var in ('localctx', 'otherctx'):
+ if var in vars(self):
+ delattr(self, var)
+ if node:
+ self._local = node
+ self._other = other
+ self._readmergedriver = None
+ if self.mergedriver:
+ self._mdstate = MERGE_DRIVER_STATE_SUCCESS
+ else:
+ self._mdstate = MERGE_DRIVER_STATE_UNMARKED
+ shutil.rmtree(self._repo.vfs.join(b'merge'), True)
+ self._results = {}
+ self._dirty = False
+
+ def _read(self):
+ """Analyse each record content to restore a serialized state from disk
+
+ This function process "record" entry produced by the de-serialization
+ of on disk file.
+ """
+ self._state = {}
+ self._stateextras = {}
+ self._local = None
+ self._other = None
+ for var in ('localctx', 'otherctx'):
+ if var in vars(self):
+ delattr(self, var)
+ self._readmergedriver = None
+ self._mdstate = MERGE_DRIVER_STATE_SUCCESS
+ unsupported = set()
+ records = self._readrecords()
+ for rtype, record in records:
+ if rtype == RECORD_LOCAL:
+ self._local = bin(record)
+ elif rtype == RECORD_OTHER:
+ self._other = bin(record)
+ elif rtype == RECORD_MERGE_DRIVER_STATE:
+ bits = record.split(b'\0', 1)
+ mdstate = bits[1]
+ if len(mdstate) != 1 or mdstate not in (
+ MERGE_DRIVER_STATE_UNMARKED,
+ MERGE_DRIVER_STATE_MARKED,
+ MERGE_DRIVER_STATE_SUCCESS,
+ ):
+ # the merge driver should be idempotent, so just rerun it
+ mdstate = MERGE_DRIVER_STATE_UNMARKED
+
+ self._readmergedriver = bits[0]
+ self._mdstate = mdstate
+ elif rtype in (
+ RECORD_MERGED,
+ RECORD_CHANGEDELETE_CONFLICT,
+ RECORD_PATH_CONFLICT,
+ RECORD_MERGE_DRIVER_MERGE,
+ ):
+ bits = record.split(b'\0')
+ self._state[bits[0]] = bits[1:]
+ elif rtype == RECORD_FILE_VALUES:
+ filename, rawextras = record.split(b'\0', 1)
+ extraparts = rawextras.split(b'\0')
+ extras = {}
+ i = 0
+ while i < len(extraparts):
+ extras[extraparts[i]] = extraparts[i + 1]
+ i += 2
+
+ self._stateextras[filename] = extras
+ elif rtype == RECORD_LABELS:
+ labels = record.split(b'\0', 2)
+ self._labels = [l for l in labels if len(l) > 0]
+ elif not rtype.islower():
+ unsupported.add(rtype)
+ # contains a mapping of the form:
+ # {filename : (merge_return_value, action_to_be_performed)}
+ # these are the results of re-running the merge process;
+ # this dict is used to perform actions on the dirstate caused by
+ # re-running the merge
+ self._results = {}
+ self._dirty = False
+
+ if unsupported:
+ raise error.UnsupportedMergeRecords(unsupported)
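The RECORD_FILE_VALUES branch above decodes a filename followed by alternating extra keys and values; an equivalent round-trip with a made-up record:

```python
# Hypothetical 'f' record payload: filename, then NUL-separated
# alternating key/value pairs.
record = b'f.txt\0ancestorlinknode\0' + b'aa' * 20
filename, rawextras = record.split(b'\0', 1)
parts = rawextras.split(b'\0')
extras = dict(zip(parts[::2], parts[1::2]))
assert filename == b'f.txt'
assert extras == {b'ancestorlinknode': b'aa' * 20}
```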
+
+ def _readrecords(self):
+ """Read merge state from disk and return a list of record (TYPE, data)
+
+ We read data from both v1 and v2 files and decide which one to use.
+
+ V1 has been used by version prior to 2.9.1 and contains less data than
+ v2. We read both versions and check if no data in v2 contradicts
+ v1. If there is not contradiction we can safely assume that both v1
+ and v2 were written at the same time and use the extract data in v2. If
+ there is contradiction we ignore v2 content as we assume an old version
+ of Mercurial has overwritten the mergestate file and left an old v2
+ file around.
+
+ returns list of record [(TYPE, data), ...]"""
+ v1records = self._readrecordsv1()
+ v2records = self._readrecordsv2()
+ if self._v1v2match(v1records, v2records):
+ return v2records
+ else:
+ # v1 file is newer than v2 file, use it
+ # we have to infer the "other" changeset of the merge
+ # we cannot do better than that with v1 of the format
+ mctx = self._repo[None].parents()[-1]
+ v1records.append((RECORD_OTHER, mctx.hex()))
+ # add placeholder "other" file node information;
+ # nobody is using it yet so we do not need to fetch the data.
+ # if mctx was wrong, `mctx[bits[-2]]` may fail.
+ for idx, r in enumerate(v1records):
+ if r[0] == RECORD_MERGED:
+ bits = r[1].split(b'\0')
+ bits.insert(-2, b'')
+ v1records[idx] = (r[0], b'\0'.join(bits))
+ return v1records
+
+ def _v1v2match(self, v1records, v2records):
+ oldv2 = set() # old format version of v2 record
+ for rec in v2records:
+ if rec[0] == RECORD_LOCAL:
+ oldv2.add(rec)
+ elif rec[0] == RECORD_MERGED:
+ # drop the onode data (not contained in v1)
+ oldv2.add((RECORD_MERGED, _droponode(rec[1])))
+ for rec in v1records:
+ if rec not in oldv2:
+ return False
+ else:
+ return True
+
+ def _readrecordsv1(self):
+ """read on disk merge state for version 1 file
+
+ returns list of record [(TYPE, data), ...]
+
+ Note: the "F" data from this file are one entry short
+ (no "other file node" entry)
+ """
+ records = []
+ try:
+ f = self._repo.vfs(self.statepathv1)
+ for i, l in enumerate(f):
+ if i == 0:
+ records.append((RECORD_LOCAL, l[:-1]))
+ else:
+ records.append((RECORD_MERGED, l[:-1]))
+ f.close()
+ except IOError as err:
+ if err.errno != errno.ENOENT:
+ raise
+ return records
+
+ def _readrecordsv2(self):
+ """read on disk merge state for version 2 file
+
+ This format is a list of arbitrary records of the form:
+
+ [type][length][content]
+
+ `type` is a single character, `length` is a 4 byte integer, and
+ `content` is an arbitrary byte sequence of length `length`.
+
+ Mercurial versions prior to 3.7 have a bug where if there are
+ unsupported mandatory merge records, attempting to clear out the merge
+ state with hg update --clean or similar aborts. The 't' record type
+ works around that by writing out what those versions treat as an
+ advisory record, but later versions interpret as special: the first
+ character is the 'real' record type and everything onwards is the data.
+
+ Returns a list of records [(TYPE, data), ...]."""
+ records = []
+ try:
+ f = self._repo.vfs(self.statepathv2)
+ data = f.read()
+ off = 0
+ end = len(data)
+ while off < end:
+ rtype = data[off : off + 1]
+ off += 1
+ length = _unpack(b'>I', data[off : (off + 4)])[0]
+ off += 4
+ record = data[off : (off + length)]
+ off += length
+ if rtype == RECORD_OVERRIDE:
+ rtype, record = record[0:1], record[1:]
+ records.append((rtype, record))
+ f.close()
+ except IOError as err:
+ if err.errno != errno.ENOENT:
+ raise
+ return records
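A self-contained sketch of the [type][length][content] framing described above, using the same '>sI%is' struct layout that _writerecordsv2 uses when writing:

```python
import struct

# Frame one record, then parse it back; the payload is arbitrary bytes.
payload = b'0123456789abcdef'
framed = struct.pack(b'>sI%ds' % len(payload), b'L', len(payload), payload)

rtype = framed[0:1]                            # 1-byte record type
(length,) = struct.unpack(b'>I', framed[1:5])  # 4-byte big-endian length
assert (rtype, framed[5:5 + length]) == (b'L', payload)
```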
+
+ @util.propertycache
+ def mergedriver(self):
+ # protect against the following:
+ # - A configures a malicious merge driver in their hgrc, then
+ # pauses the merge
+ # - A edits their hgrc to remove references to the merge driver
+ # - A gives a copy of their entire repo, including .hg, to B
+ # - B inspects .hgrc and finds it to be clean
+ # - B then continues the merge and the malicious merge driver
+ # gets invoked
+ configmergedriver = self._repo.ui.config(
+ b'experimental', b'mergedriver'
+ )
+ if (
+ self._readmergedriver is not None
+ and self._readmergedriver != configmergedriver
+ ):
+ raise error.ConfigError(
+ _(b"merge driver changed since merge started"),
+ hint=_(b"revert merge driver change or abort merge"),
+ )
+
+ return configmergedriver
+
+ @util.propertycache
+ def local(self):
+ if self._local is None:
+ msg = b"local accessed but self._local isn't set"
+ raise error.ProgrammingError(msg)
+ return self._local
+
+ @util.propertycache
+ def localctx(self):
+ return self._repo[self.local]
+
+ @util.propertycache
+ def other(self):
+ if self._other is None:
+ msg = b"other accessed but self._other isn't set"
+ raise error.ProgrammingError(msg)
+ return self._other
+
+ @util.propertycache
+ def otherctx(self):
+ return self._repo[self.other]
+
+ def active(self):
+ """Whether mergestate is active.
+
+ Returns True if there appears to be mergestate. This is a rough proxy
+ for "is a merge in progress."
+ """
+ return bool(self._local) or bool(self._state)
+
+ def commit(self):
+ """Write current state on disk (if necessary)"""
+ if self._dirty:
+ records = self._makerecords()
+ self._writerecords(records)
+ self._dirty = False
+
+ def _makerecords(self):
+ records = []
+ records.append((RECORD_LOCAL, hex(self._local)))
+ records.append((RECORD_OTHER, hex(self._other)))
+ if self.mergedriver:
+ records.append(
+ (
+ RECORD_MERGE_DRIVER_STATE,
+ b'\0'.join([self.mergedriver, self._mdstate]),
+ )
+ )
+ # Write out state items. In all cases, the value of the state map entry
+ # is written as the contents of the record. The record type depends on
+ # the type of state that is stored, and capital-letter records are used
+ # to prevent older versions of Mercurial that do not support the feature
+ # from loading them.
+ for filename, v in pycompat.iteritems(self._state):
+ if v[0] == MERGE_RECORD_DRIVER_RESOLVED:
+ # Driver-resolved merge. These are stored in 'D' records.
+ records.append(
+ (RECORD_MERGE_DRIVER_MERGE, b'\0'.join([filename] + v))
+ )
+ elif v[0] in (
+ MERGE_RECORD_UNRESOLVED_PATH,
+ MERGE_RECORD_RESOLVED_PATH,
+ ):
+ # Path conflicts. These are stored in 'P' records. The current
+ # resolution state ('pu' or 'pr') is stored within the record.
+ records.append(
+ (RECORD_PATH_CONFLICT, b'\0'.join([filename] + v))
+ )
+ elif v[0] == MERGE_RECORD_MERGED_OTHER:
+ records.append((RECORD_MERGED, b'\0'.join([filename] + v)))
+ elif v[1] == nullhex or v[6] == nullhex:
+ # Change/Delete or Delete/Change conflicts. These are stored in
+ # 'C' records. v[1] is the local file, and is nullhex when the
+ # file is deleted locally ('dc'). v[6] is the remote file, and
+ # is nullhex when the file is deleted remotely ('cd').
+ records.append(
+ (RECORD_CHANGEDELETE_CONFLICT, b'\0'.join([filename] + v))
+ )
+ else:
+ # Normal files. These are stored in 'F' records.
+ records.append((RECORD_MERGED, b'\0'.join([filename] + v)))
+ for filename, extras in sorted(pycompat.iteritems(self._stateextras)):
+ rawextras = b'\0'.join(
+ b'%s\0%s' % (k, v) for k, v in pycompat.iteritems(extras)
+ )
+ records.append(
+ (RECORD_FILE_VALUES, b'%s\0%s' % (filename, rawextras))
+ )
+ if self._labels is not None:
+ labels = b'\0'.join(self._labels)
+ records.append((RECORD_LABELS, labels))
+ return records
+
+ def _writerecords(self, records):
+ """Write current state on disk (both v1 and v2)"""
+ self._writerecordsv1(records)
+ self._writerecordsv2(records)
+
+ def _writerecordsv1(self, records):
+ """Write current state on disk in a version 1 file"""
+ f = self._repo.vfs(self.statepathv1, b'wb')
+ irecords = iter(records)
+ lrecords = next(irecords)
+ assert lrecords[0] == RECORD_LOCAL
+ f.write(hex(self._local) + b'\n')
+ for rtype, data in irecords:
+ if rtype == RECORD_MERGED:
+ f.write(b'%s\n' % _droponode(data))
+ f.close()
+
+ def _writerecordsv2(self, records):
+ """Write current state on disk in a version 2 file
+
+ See the docstring for _readrecordsv2 for why we use 't'."""
+ # these are the records that all version 2 clients can read
+ allowlist = (RECORD_LOCAL, RECORD_OTHER, RECORD_MERGED)
+ f = self._repo.vfs(self.statepathv2, b'wb')
+ for key, data in records:
+ assert len(key) == 1
+ if key not in allowlist:
+ key, data = RECORD_OVERRIDE, b'%s%s' % (key, data)
+ format = b'>sI%is' % len(data)
+ f.write(_pack(format, key, len(data), data))
+ f.close()
+
+ @staticmethod
+ def getlocalkey(path):
+ """hash the path of a local file context for storage in the .hg/merge
+ directory."""
+
+ return hex(hashutil.sha1(path).digest())
+
+ def add(self, fcl, fco, fca, fd):
+ """add a new (potentially?) conflicting file the merge state
+ fcl: file context for local,
+ fco: file context for remote,
+ fca: file context for ancestors,
+ fd: file path of the resulting merge.
+
+ note: this also writes the local version to the `.hg/merge` directory.
+ """
+ if fcl.isabsent():
+ localkey = nullhex
+ else:
+ localkey = mergestate.getlocalkey(fcl.path())
+ self._repo.vfs.write(b'merge/' + localkey, fcl.data())
+ self._state[fd] = [
+ MERGE_RECORD_UNRESOLVED,
+ localkey,
+ fcl.path(),
+ fca.path(),
+ hex(fca.filenode()),
+ fco.path(),
+ hex(fco.filenode()),
+ fcl.flags(),
+ ]
+ self._stateextras[fd] = {b'ancestorlinknode': hex(fca.node())}
+ self._dirty = True
+
+ def addpathconflict(self, path, frename, forigin):
+ """add a new conflicting path to the merge state
+ path: the path that conflicts
+ frename: the filename the conflicting file was renamed to
+ forigin: origin of the file ('l' or 'r' for local/remote)
+ """
+ self._state[path] = [MERGE_RECORD_UNRESOLVED_PATH, frename, forigin]
+ self._dirty = True
+
+ def addmergedother(self, path):
+ self._state[path] = [MERGE_RECORD_MERGED_OTHER, nullhex, nullhex]
+ self._dirty = True
+
+ def __contains__(self, dfile):
+ return dfile in self._state
+
+ def __getitem__(self, dfile):
+ return self._state[dfile][0]
+
+ def __iter__(self):
+ return iter(sorted(self._state))
+
+ def files(self):
+ return self._state.keys()
+
+ def mark(self, dfile, state):
+ self._state[dfile][0] = state
+ self._dirty = True
+
+ def mdstate(self):
+ return self._mdstate
+
+ def unresolved(self):
+ """Obtain the paths of unresolved files."""
+
+ for f, entry in pycompat.iteritems(self._state):
+ if entry[0] in (
+ MERGE_RECORD_UNRESOLVED,
+ MERGE_RECORD_UNRESOLVED_PATH,
+ ):
+ yield f
+
+ def driverresolved(self):
+ """Obtain the paths of driver-resolved files."""
+
+ for f, entry in self._state.items():
+ if entry[0] == MERGE_RECORD_DRIVER_RESOLVED:
+ yield f
+
+ def extras(self, filename):
+ return self._stateextras.setdefault(filename, {})
+
+ def _resolve(self, preresolve, dfile, wctx):
+ """rerun merge process for file path `dfile`.
+ Returns whether the merge was completed and the return value of merge
+ obtained from filemerge._filemerge().
+ """
+ if self[dfile] in (MERGE_RECORD_RESOLVED, MERGE_RECORD_DRIVER_RESOLVED):
+ return True, 0
+ if self._state[dfile][0] == MERGE_RECORD_MERGED_OTHER:
+ return True, 0
+ stateentry = self._state[dfile]
+ state, localkey, lfile, afile, anode, ofile, onode, flags = stateentry
+ octx = self._repo[self._other]
+ extras = self.extras(dfile)
+ anccommitnode = extras.get(b'ancestorlinknode')
+ if anccommitnode:
+ actx = self._repo[anccommitnode]
+ else:
+ actx = None
+ fcd = _filectxorabsent(localkey, wctx, dfile)
+ fco = _filectxorabsent(onode, octx, ofile)
+ # TODO: move this to filectxorabsent
+ fca = self._repo.filectx(afile, fileid=anode, changectx=actx)
+ # "premerge" x flags
+ flo = fco.flags()
+ fla = fca.flags()
+ if b'x' in flags + flo + fla and b'l' not in flags + flo + fla:
+ if fca.node() == nullid and flags != flo:
+ if preresolve:
+ self._repo.ui.warn(
+ _(
+ b'warning: cannot merge flags for %s '
+ b'without common ancestor - keeping local flags\n'
+ )
+ % afile
+ )
+ elif flags == fla:
+ flags = flo
+ if preresolve:
+ # restore local
+ if localkey != nullhex:
+ f = self._repo.vfs(b'merge/' + localkey)
+ wctx[dfile].write(f.read(), flags)
+ f.close()
+ else:
+ wctx[dfile].remove(ignoremissing=True)
+ complete, merge_ret, deleted = filemerge.premerge(
+ self._repo,
+ wctx,
+ self._local,
+ lfile,
+ fcd,
+ fco,
+ fca,
+ labels=self._labels,
+ )
+ else:
+ complete, merge_ret, deleted = filemerge.filemerge(
+ self._repo,
+ wctx,
+ self._local,
+ lfile,
+ fcd,
+ fco,
+ fca,
+ labels=self._labels,
+ )
+ if merge_ret is None:
+ # If the return value of merge is None, then there are no real conflicts
+ del self._state[dfile]
+ self._stateextras.pop(dfile, None)
+ self._dirty = True
+ elif not merge_ret:
+ self.mark(dfile, MERGE_RECORD_RESOLVED)
+
+ if complete:
+ action = None
+ if deleted:
+ if fcd.isabsent():
+ # dc: local picked. Need to drop if present, which may
+ # happen on re-resolves.
+ action = ACTION_FORGET
+ else:
+ # cd: remote picked (or otherwise deleted)
+ action = ACTION_REMOVE
+ else:
+ if fcd.isabsent(): # dc: remote picked
+ action = ACTION_GET
+ elif fco.isabsent(): # cd: local picked
+ if dfile in self.localctx:
+ action = ACTION_ADD_MODIFIED
+ else:
+ action = ACTION_ADD
+ # else: regular merges (no action necessary)
+ self._results[dfile] = merge_ret, action
+
+ return complete, merge_ret
+
+ def preresolve(self, dfile, wctx):
+ """run premerge process for dfile
+
+ Returns whether the merge is complete, and the exit code."""
+ return self._resolve(True, dfile, wctx)
+
+ def resolve(self, dfile, wctx):
+ """run merge process (assuming premerge was run) for dfile
+
+ Returns the exit code of the merge."""
+ return self._resolve(False, dfile, wctx)[1]
+
+ def counts(self):
+ """return counts for updated, merged and removed files in this
+ session"""
+ updated, merged, removed = 0, 0, 0
+ for r, action in pycompat.itervalues(self._results):
+ if r is None:
+ updated += 1
+ elif r == 0:
+ if action == ACTION_REMOVE:
+ removed += 1
+ else:
+ merged += 1
+ return updated, merged, removed
+
+ def unresolvedcount(self):
+ """get unresolved count for this merge (persistent)"""
+ return len(list(self.unresolved()))
+
+ def actions(self):
+ """return lists of actions to perform on the dirstate"""
+ actions = {
+ ACTION_REMOVE: [],
+ ACTION_FORGET: [],
+ ACTION_ADD: [],
+ ACTION_ADD_MODIFIED: [],
+ ACTION_GET: [],
+ }
+ for f, (r, action) in pycompat.iteritems(self._results):
+ if action is not None:
+ actions[action].append((f, None, b"merge result"))
+ return actions
+
+ def queueremove(self, f):
+ """queues a file to be removed from the dirstate
+
+ Meant for use by custom merge drivers."""
+ self._results[f] = 0, ACTION_REMOVE
+
+ def queueadd(self, f):
+ """queues a file to be added to the dirstate
+
+ Meant for use by custom merge drivers."""
+ self._results[f] = 0, ACTION_ADD
+
+ def queueget(self, f):
+ """queues a file to be marked modified in the dirstate
+
+ Meant for use by custom merge drivers."""
+ self._results[f] = 0, ACTION_GET
+
+
+def recordupdates(repo, actions, branchmerge, getfiledata):
+ """record merge actions to the dirstate"""
+ # remove (must come first)
+ for f, args, msg in actions.get(ACTION_REMOVE, []):
+ if branchmerge:
+ repo.dirstate.remove(f)
+ else:
+ repo.dirstate.drop(f)
+
+ # forget (must come first)
+ for f, args, msg in actions.get(ACTION_FORGET, []):
+ repo.dirstate.drop(f)
+
+ # resolve path conflicts
+ for f, args, msg in actions.get(ACTION_PATH_CONFLICT_RESOLVE, []):
+ (f0, origf0) = args
+ repo.dirstate.add(f)
+ repo.dirstate.copy(origf0, f)
+ if f0 == origf0:
+ repo.dirstate.remove(f0)
+ else:
+ repo.dirstate.drop(f0)
+
+ # re-add
+ for f, args, msg in actions.get(ACTION_ADD, []):
+ repo.dirstate.add(f)
+
+ # re-add/mark as modified
+ for f, args, msg in actions.get(ACTION_ADD_MODIFIED, []):
+ if branchmerge:
+ repo.dirstate.normallookup(f)
+ else:
+ repo.dirstate.add(f)
+
+ # exec change
+ for f, args, msg in actions.get(ACTION_EXEC, []):
+ repo.dirstate.normallookup(f)
+
+ # keep
+ for f, args, msg in actions.get(ACTION_KEEP, []):
+ pass
+
+ # get
+ for f, args, msg in actions.get(ACTION_GET, []):
+ if branchmerge:
+ repo.dirstate.otherparent(f)
+ else:
+ parentfiledata = getfiledata[f] if getfiledata else None
+ repo.dirstate.normal(f, parentfiledata=parentfiledata)
+
+ # merge
+ for f, args, msg in actions.get(ACTION_MERGE, []):
+ f1, f2, fa, move, anc = args
+ if branchmerge:
+ # We've done a branch merge, mark this file as merged
+ # so that we properly record the merger later
+ repo.dirstate.merge(f)
+ if f1 != f2: # copy/rename
+ if move:
+ repo.dirstate.remove(f1)
+ if f1 != f:
+ repo.dirstate.copy(f1, f)
+ else:
+ repo.dirstate.copy(f2, f)
+ else:
+ # We've update-merged a locally modified file, so
+ # we set the dirstate to emulate a normal checkout
+ # of that file some time in the past. Thus our
+ # merge will appear as a normal local file
+ # modification.
+ if f2 == f: # file not locally copied/moved
+ repo.dirstate.normallookup(f)
+ if move:
+ repo.dirstate.drop(f1)
+
+ # directory rename, move local
+ for f, args, msg in actions.get(ACTION_DIR_RENAME_MOVE_LOCAL, []):
+ f0, flag = args
+ if branchmerge:
+ repo.dirstate.add(f)
+ repo.dirstate.remove(f0)
+ repo.dirstate.copy(f0, f)
+ else:
+ repo.dirstate.normal(f)
+ repo.dirstate.drop(f0)
+
+ # directory rename, get
+ for f, args, msg in actions.get(ACTION_LOCAL_DIR_RENAME_GET, []):
+ f0, flag = args
+ if branchmerge:
+ repo.dirstate.add(f)
+ repo.dirstate.copy(f0, f)
+ else:
+ repo.dirstate.normal(f)
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/mercurial/metadata.py Mon Jul 20 21:56:27 2020 +0530
@@ -0,0 +1,327 @@
+# metadata.py -- code related to various metadata computation and access.
+#
+# Copyright 2019 Google, Inc <martinvonz@google.com>
+# Copyright 2020 Pierre-Yves David <pierre-yves.david@octobus.net>
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2 or any later version.
+from __future__ import absolute_import, print_function
+
+import multiprocessing
+
+from . import (
+ error,
+ node,
+ pycompat,
+ util,
+)
+
+from .revlogutils import (
+ flagutil as sidedataflag,
+ sidedata as sidedatamod,
+)
+
+
+def computechangesetfilesadded(ctx):
+ """return the list of files added in a changeset
+ """
+ added = []
+ for f in ctx.files():
+ if not any(f in p for p in ctx.parents()):
+ added.append(f)
+ return added
+
+
+def get_removal_filter(ctx, x=None):
+ """return a function to detect files "wrongly" detected as `removed`
+
+ When a file is removed relative to p1 in a merge, this
+ function determines whether the absence is due to a
+ deletion from a parent, or whether the merge commit
+ itself deletes the file. We decide this by doing a
+ simplified three way merge of the manifest entry for
+ the file. There are two ways we decide the merge
+ itself didn't delete a file:
+ - neither parent (nor the merge) contain the file
+ - exactly one parent contains the file, and that
+ parent has the same filelog entry as the merge
+ ancestor (or all of them if there are two). In other
+ words, that parent left the file unchanged while the
+ other one deleted it.
+ One way to think about this is that deleting a file is
+ similar to emptying it, so the list of changed files
+ should be similar either way. The computation
+ described above is not done directly in _filecommit
+ when creating the list of changed files, however
+ it does something very similar by comparing filelog
+ nodes.
+ """
+
+ if x is not None:
+ p1, p2, m1, m2 = x
+ else:
+ p1 = ctx.p1()
+ p2 = ctx.p2()
+ m1 = p1.manifest()
+ m2 = p2.manifest()
+
+ @util.cachefunc
+ def mas():
+ p1n = p1.node()
+ p2n = p2.node()
+ cahs = ctx.repo().changelog.commonancestorsheads(p1n, p2n)
+ if not cahs:
+ cahs = [node.nullrev]
+ return [ctx.repo()[r].manifest() for r in cahs]
+
+ def deletionfromparent(f):
+ if f in m1:
+ return f not in m2 and all(
+ f in ma and ma.find(f) == m1.find(f) for ma in mas()
+ )
+ elif f in m2:
+ return all(f in ma and ma.find(f) == m2.find(f) for ma in mas())
+ else:
+ return True
+
+ return deletionfromparent
+
+
+def computechangesetfilesremoved(ctx):
+ """return the list of files removed in a changeset
+ """
+ removed = []
+ for f in ctx.files():
+ if f not in ctx:
+ removed.append(f)
+ if removed:
+ rf = get_removal_filter(ctx)
+ removed = [r for r in removed if not rf(r)]
+ return removed
+
+
+def computechangesetcopies(ctx):
+ """return the copies data for a changeset
+
+ The copies data are returned as a pair of dictionaries (p1copies, p2copies).
+
+ Each dictionary is of the form: `{newname: oldname}`
+ """
+ p1copies = {}
+ p2copies = {}
+ p1 = ctx.p1()
+ p2 = ctx.p2()
+ narrowmatch = ctx._repo.narrowmatch()
+ for dst in ctx.files():
+ if not narrowmatch(dst) or dst not in ctx:
+ continue
+ copied = ctx[dst].renamed()
+ if not copied:
+ continue
+ src, srcnode = copied
+ if src in p1 and p1[src].filenode() == srcnode:
+ p1copies[dst] = src
+ elif src in p2 and p2[src].filenode() == srcnode:
+ p2copies[dst] = src
+ return p1copies, p2copies
+
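# Editor's note: a hedged usage sketch for computechangesetcopies(); the
# `repo` object is assumed to exist in the caller's scope. Shapes follow
# the docstring above.
#
#   p1copies, p2copies = computechangesetcopies(repo[b'tip'])
#   # e.g. p1copies == {b'renamed.txt': b'original.txt'} for a rename
#   # recorded against the first parent; p2copies likewise for a merge's
#   # second parent.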
+
+def encodecopies(files, copies):
+ items = []
+ for i, dst in enumerate(files):
+ if dst in copies:
+ items.append(b'%d\0%s' % (i, copies[dst]))
+ if len(items) != len(copies):
+ raise error.ProgrammingError(
+ b'some copy targets missing from file list'
+ )
+ return b"\n".join(items)
+
+
+def decodecopies(files, data):
+ try:
+ copies = {}
+ if not data:
+ return copies
+ for l in data.split(b'\n'):
+ strindex, src = l.split(b'\0')
+ i = int(strindex)
+ dst = files[i]
+ copies[dst] = src
+ return copies
+ except (ValueError, IndexError):
+ # Perhaps someone had chosen the same key name (e.g. "p1copies") and
+ # used different syntax for the value.
+ return None
+
+
+def encodefileindices(files, subset):
+ subset = set(subset)
+ indices = []
+ for i, f in enumerate(files):
+ if f in subset:
+ indices.append(b'%d' % i)
+ return b'\n'.join(indices)
+
+
+def decodefileindices(files, data):
+ try:
+ subset = []
+ if not data:
+ return subset
+ for strindex in data.split(b'\n'):
+ i = int(strindex)
+ if i < 0 or i >= len(files):
+ return None
+ subset.append(files[i])
+ return subset
+ except (ValueError, IndexError):
+ # Perhaps someone had chosen the same key name (e.g. "added") and
+ # used different syntax for the value.
+ return None
+
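# Editor's note: a minimal round-trip sketch of the two index-based
# encodings above. Both key entries by the position of the file in the
# sorted files list, which keeps the sidedata compact.
from mercurial.metadata import (
    encodecopies, decodecopies, encodefileindices, decodefileindices,
)

files = [b'a.txt', b'b.txt', b'c.txt']
assert encodecopies(files, {b'b.txt': b'a.txt'}) == b'1\x00a.txt'
assert decodecopies(files, b'1\x00a.txt') == {b'b.txt': b'a.txt'}
assert encodefileindices(files, [b'a.txt', b'c.txt']) == b'0\n2'
assert decodefileindices(files, b'0\n2') == [b'a.txt', b'c.txt']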
+
+def _getsidedata(srcrepo, rev):
+ ctx = srcrepo[rev]
+ filescopies = computechangesetcopies(ctx)
+ filesadded = computechangesetfilesadded(ctx)
+ filesremoved = computechangesetfilesremoved(ctx)
+ sidedata = {}
+ if any([filescopies, filesadded, filesremoved]):
+ sortedfiles = sorted(ctx.files())
+ p1copies, p2copies = filescopies
+ p1copies = encodecopies(sortedfiles, p1copies)
+ p2copies = encodecopies(sortedfiles, p2copies)
+ filesadded = encodefileindices(sortedfiles, filesadded)
+ filesremoved = encodefileindices(sortedfiles, filesremoved)
+ if p1copies:
+ sidedata[sidedatamod.SD_P1COPIES] = p1copies
+ if p2copies:
+ sidedata[sidedatamod.SD_P2COPIES] = p2copies
+ if filesadded:
+ sidedata[sidedatamod.SD_FILESADDED] = filesadded
+ if filesremoved:
+ sidedata[sidedatamod.SD_FILESREMOVED] = filesremoved
+ return sidedata
+
+
+def getsidedataadder(srcrepo, destrepo):
+ use_w = srcrepo.ui.configbool(b'experimental', b'worker.repository-upgrade')
+ if pycompat.iswindows or not use_w:
+ return _get_simple_sidedata_adder(srcrepo, destrepo)
+ else:
+ return _get_worker_sidedata_adder(srcrepo, destrepo)
+
+
+def _sidedata_worker(srcrepo, revs_queue, sidedata_queue, tokens):
+ """The function used by worker precomputing sidedata
+
+ It read an input queue containing revision numbers
+ It write in an output queue containing (rev, <sidedata-map>)
+
+ The `None` input value is used as a stop signal.
+
+ The `tokens` semaphore is user to avoid having too many unprocessed
+ entries. The workers needs to acquire one token before fetching a task.
+ They will be released by the consumer of the produced data.
+ """
+ tokens.acquire()
+ rev = revs_queue.get()
+ while rev is not None:
+ data = _getsidedata(srcrepo, rev)
+ sidedata_queue.put((rev, data))
+ tokens.acquire()
+ rev = revs_queue.get()
+ # processing of `None` is completed, release the token.
+ tokens.release()
+
+
+BUFF_PER_WORKER = 50
+
+
+def _get_worker_sidedata_adder(srcrepo, destrepo):
+ """The parallel version of the sidedata computation
+
+ This code spawns a pool of workers that precompute a buffer of sidedata
+ before we actually need it"""
+ # avoid circular import copies -> scmutil -> worker -> copies
+ from . import worker
+
+ nbworkers = worker._numworkers(srcrepo.ui)
+
+ tokens = multiprocessing.BoundedSemaphore(nbworkers * BUFF_PER_WORKER)
+ revsq = multiprocessing.Queue()
+ sidedataq = multiprocessing.Queue()
+
+ assert srcrepo.filtername is None
+ # queue all tasks beforehand; revision numbers are small and it makes
+ # synchronisation simpler
+ #
+ # Since the computation for each node can be quite expensive, the overhead
+ # of using a single queue is not relevant. In practice, most computations
+ # are fast but some are very expensive and dominate all the other smaller
+ # costs.
+ for r in srcrepo.changelog.revs():
+ revsq.put(r)
+ # queue the "no more tasks" markers
+ for i in range(nbworkers):
+ revsq.put(None)
+
+ allworkers = []
+ for i in range(nbworkers):
+ args = (srcrepo, revsq, sidedataq, tokens)
+ w = multiprocessing.Process(target=_sidedata_worker, args=args)
+ allworkers.append(w)
+ w.start()
+
+ # dictionary to store results for revisions higher than the one we are
+ # looking for. For example, if we need the sidedata map for 42 and 43 is
+ # received, we shelve 43 for later use.
+ staging = {}
+
+ def sidedata_companion(revlog, rev):
+ sidedata = {}
+ if util.safehasattr(revlog, 'filteredrevs'): # this is a changelog
+ # Was the data previously shelved?
+ sidedata = staging.pop(rev, None)
+ if sidedata is None:
+ # look at the queued results until we find the one we are looking
+ # for (shelve the other ones)
+ r, sidedata = sidedataq.get()
+ while r != rev:
+ staging[r] = sidedata
+ r, sidedata = sidedataq.get()
+ tokens.release()
+ return False, (), sidedata
+
+ return sidedata_companion
+
+
+def _get_simple_sidedata_adder(srcrepo, destrepo):
+ """The simple version of the sidedata computation
+
+ It just computes it in the same thread on request"""
+
+ def sidedatacompanion(revlog, rev):
+ sidedata = {}
+ if util.safehasattr(revlog, 'filteredrevs'): # this is a changelog
+ sidedata = _getsidedata(srcrepo, rev)
+ return False, (), sidedata
+
+ return sidedatacompanion
+
+
+def getsidedataremover(srcrepo, destrepo):
+ def sidedatacompanion(revlog, rev):
+ f = ()
+ if util.safehasattr(revlog, 'filteredrevs'): # this is a changelog
+ if revlog.flags(rev) & sidedataflag.REVIDX_SIDEDATA:
+ f = (
+ sidedatamod.SD_P1COPIES,
+ sidedatamod.SD_P2COPIES,
+ sidedatamod.SD_FILESADDED,
+ sidedatamod.SD_FILESREMOVED,
+ )
+ return False, f, {}
+
+ return sidedatacompanion
--- a/mercurial/narrowspec.py Tue Jul 14 10:25:41 2020 +0200
+++ b/mercurial/narrowspec.py Mon Jul 20 21:56:27 2020 +0530
@@ -14,6 +14,7 @@
error,
match as matchmod,
merge,
+ mergestate as mergestatemod,
scmutil,
sparse,
util,
@@ -272,7 +273,7 @@
def _writeaddedfiles(repo, pctx, files):
actions = merge.emptyactions()
- addgaction = actions[merge.ACTION_GET].append
+ addgaction = actions[mergestatemod.ACTION_GET].append
mf = repo[b'.'].manifest()
for f in files:
if not repo.wvfs.exists(f):
--- a/mercurial/obsutil.py Tue Jul 14 10:25:41 2020 +0200
+++ b/mercurial/obsutil.py Mon Jul 20 21:56:27 2020 +0530
@@ -13,6 +13,7 @@
from . import (
diffutil,
encoding,
+ error,
node as nodemod,
phases,
pycompat,
@@ -481,14 +482,23 @@
return effects
-def getobsoleted(repo, tr):
- """return the set of pre-existing revisions obsoleted by a transaction"""
+def getobsoleted(repo, tr=None, changes=None):
+ """return the set of pre-existing revisions obsoleted by a transaction
+
+ Either the transaction or the changes item of the transaction (for
+ hooks) must be provided, but not both.
+ """
+ if (tr is None) == (changes is None):
+ e = b"exactly one of tr and changes must be provided"
+ raise error.ProgrammingError(e)
torev = repo.unfiltered().changelog.index.get_rev
phase = repo._phasecache.phase
succsmarkers = repo.obsstore.successors.get
public = phases.public
- addedmarkers = tr.changes[b'obsmarkers']
- origrepolen = tr.changes[b'origrepolen']
+ if changes is None:
+ changes = tr.changes
+ addedmarkers = changes[b'obsmarkers']
+ origrepolen = changes[b'origrepolen']
seenrevs = set()
obsoleted = set()
for mark in addedmarkers:
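# Editor's note: a hedged sketch of the two ways to call the updated
# getobsoleted() -- with a live transaction, or from a hook that only has
# the transaction's `changes` mapping. `repo`, `tr` and `changes` are
# assumed to exist in the caller's scope.
#
#   obsoleted = obsutil.getobsoleted(repo, tr=tr)
#   # ... or, in a hook where only the changes dict survives:
#   obsoleted = obsutil.getobsoleted(repo, changes=changes)
#   # passing both (or neither) raises error.ProgrammingError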
--- a/mercurial/patch.py Tue Jul 14 10:25:41 2020 +0200
+++ b/mercurial/patch.py Mon Jul 20 21:56:27 2020 +0530
@@ -785,7 +785,7 @@
for l in x.hunk:
lines.append(l)
if l[-1:] != b'\n':
- lines.append(b"\n\\ No newline at end of file\n")
+ lines.append(b'\n' + diffhelper.MISSING_NEWLINE_MARKER)
self.backend.writerej(self.fname, len(self.rej), self.hunks, lines)
def apply(self, h):
@@ -1069,7 +1069,7 @@
def write(self, fp):
delta = len(self.before) + len(self.after)
- if self.after and self.after[-1] == b'\\ No newline at end of file\n':
+ if self.after and self.after[-1] == diffhelper.MISSING_NEWLINE_MARKER:
delta -= 1
fromlen = delta + self.removed
tolen = delta + self.added
@@ -2666,7 +2666,11 @@
prefetchmatch = scmutil.matchfiles(
repo, list(modifiedset | addedset | removedset)
)
- scmutil.prefetchfiles(repo, [ctx1.rev(), ctx2.rev()], prefetchmatch)
+ revmatches = [
+ (ctx1.rev(), prefetchmatch),
+ (ctx2.rev(), prefetchmatch),
+ ]
+ scmutil.prefetchfiles(repo, revmatches)
def difffn(opts, losedata):
return trydiff(
@@ -2918,6 +2922,18 @@
yield f1, f2, copyop
+def _gitindex(text):
+ if not text:
+ text = b""
+ l = len(text)
+ s = hashutil.sha1(b'blob %d\0' % l)
+ s.update(text)
+ return hex(s.digest())
+
+
+_gitmode = {b'l': b'120000', b'x': b'100755', b'': b'100644'}
+
+
def trydiff(
repo,
revs,
@@ -2940,14 +2956,6 @@
pathfn is applied to every path in the diff output.
'''
- def gitindex(text):
- if not text:
- text = b""
- l = len(text)
- s = hashutil.sha1(b'blob %d\0' % l)
- s.update(text)
- return hex(s.digest())
-
if opts.noprefix:
aprefix = bprefix = b''
else:
@@ -2964,8 +2972,6 @@
date1 = dateutil.datestr(ctx1.date())
date2 = dateutil.datestr(ctx2.date())
- gitmode = {b'l': b'120000', b'x': b'100755', b'': b'100644'}
-
if not pathfn:
pathfn = lambda f: f
@@ -3019,11 +3025,11 @@
b'diff --git %s%s %s%s' % (aprefix, path1, bprefix, path2)
)
if not f1: # added
- header.append(b'new file mode %s' % gitmode[flag2])
+ header.append(b'new file mode %s' % _gitmode[flag2])
elif not f2: # removed
- header.append(b'deleted file mode %s' % gitmode[flag1])
+ header.append(b'deleted file mode %s' % _gitmode[flag1])
else: # modified/copied/renamed
- mode1, mode2 = gitmode[flag1], gitmode[flag2]
+ mode1, mode2 = _gitmode[flag1], _gitmode[flag2]
if mode1 != mode2:
header.append(b'old mode %s' % mode1)
header.append(b'new mode %s' % mode2)
@@ -3067,39 +3073,66 @@
if fctx2 is not None:
content2 = fctx2.data()
- if binary and opts.git and not opts.nobinary:
- text = mdiff.b85diff(content1, content2)
- if text:
- header.append(
- b'index %s..%s' % (gitindex(content1), gitindex(content2))
+ data1 = (ctx1, fctx1, path1, flag1, content1, date1)
+ data2 = (ctx2, fctx2, path2, flag2, content2, date2)
+ yield diffcontent(data1, data2, header, binary, opts)
+
+
+def diffcontent(data1, data2, header, binary, opts):
+ """ diffs two versions of a file.
+
+ data1 and data2 are tuples containg:
+
+ * ctx: changeset for the file
+ * fctx: file context for that file
+ * path1: name of the file
+ * flag: flags of the file
+ * content: full content of the file (can be null in case of binary)
+ * date: date of the changeset
+
+ header: the patch header
+ binary: whether the any of the version of file is binary or not
+ opts: user passed options
+
+ It exists as a separate function so that extensions like extdiff can wrap
+ it and use the file content directly.
+ """
+
+ ctx1, fctx1, path1, flag1, content1, date1 = data1
+ ctx2, fctx2, path2, flag2, content2, date2 = data2
+ if binary and opts.git and not opts.nobinary:
+ text = mdiff.b85diff(content1, content2)
+ if text:
+ header.append(
+ b'index %s..%s' % (_gitindex(content1), _gitindex(content2))
+ )
+ hunks = ((None, [text]),)
+ else:
+ if opts.git and opts.index > 0:
+ flag = flag1
+ if flag is None:
+ flag = flag2
+ header.append(
+ b'index %s..%s %s'
+ % (
+ _gitindex(content1)[0 : opts.index],
+ _gitindex(content2)[0 : opts.index],
+ _gitmode[flag],
)
- hunks = ((None, [text]),)
- else:
- if opts.git and opts.index > 0:
- flag = flag1
- if flag is None:
- flag = flag2
- header.append(
- b'index %s..%s %s'
- % (
- gitindex(content1)[0 : opts.index],
- gitindex(content2)[0 : opts.index],
- gitmode[flag],
- )
- )
-
- uheaders, hunks = mdiff.unidiff(
- content1,
- date1,
- content2,
- date2,
- path1,
- path2,
- binary=binary,
- opts=opts,
)
- header.extend(uheaders)
- yield fctx1, fctx2, header, hunks
+
+ uheaders, hunks = mdiff.unidiff(
+ content1,
+ date1,
+ content2,
+ date2,
+ path1,
+ path2,
+ binary=binary,
+ opts=opts,
+ )
+ header.extend(uheaders)
+ return fctx1, fctx2, header, hunks
def diffstatsum(stats):
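# Editor's note: a hedged sketch of why diffcontent() was split out -- an
# extension can wrap it to observe or substitute file contents before the
# hunks are produced (the stated motivation is extdiff). The wrapper body
# below is illustrative only.
#
#   from mercurial import extensions, patch
#
#   def _mydiffcontent(orig, data1, data2, header, binary, opts):
#       ctx1, fctx1, path1, flag1, content1, date1 = data1
#       # ... inspect or rewrite the contents here ...
#       return orig(data1, data2, header, binary, opts)
#
#   extensions.wrapfunction(patch, 'diffcontent', _mydiffcontent)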
--- a/mercurial/pathutil.py Tue Jul 14 10:25:41 2020 +0200
+++ b/mercurial/pathutil.py Mon Jul 20 21:56:27 2020 +0530
@@ -1,5 +1,6 @@
from __future__ import absolute_import
+import contextlib
import errno
import os
import posixpath
@@ -148,6 +149,19 @@
except (OSError, error.Abort):
return False
+ @contextlib.contextmanager
+ def cached(self):
+ if self._cached:
+ yield
+ else:
+ try:
+ self._cached = True
+ yield
+ finally:
+ self.audited.clear()
+ self.auditeddir.clear()
+ self._cached = False
+
def canonpath(root, cwd, myname, auditor=None):
'''return the canonical path of myname, given cwd and root
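# Editor's note: a hedged usage sketch for the pathauditor.cached()
# context manager added in the hunk above. While the context is active,
# audit results are memoized; on exit both caches are cleared so stale
# answers cannot leak into later operations. `auditor` and `files` are
# assumed to exist in the caller's scope.
#
#   with auditor.cached():
#       for f in files:
#           auditor(f)  # repeated path prefixes are only audited once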
--- a/mercurial/phases.py Tue Jul 14 10:25:41 2020 +0200
+++ b/mercurial/phases.py Mon Jul 20 21:56:27 2020 +0530
@@ -128,25 +128,28 @@
_fphasesentry = struct.Struct(b'>i20s')
-INTERNAL_FLAG = 64 # Phases for mercurial internal usage only
-HIDEABLE_FLAG = 32 # Phases that are hideable
-
# record phase index
public, draft, secret = range(3)
-internal = INTERNAL_FLAG | HIDEABLE_FLAG
-archived = HIDEABLE_FLAG
-allphases = list(range(internal + 1))
-trackedphases = allphases[1:]
+archived = 32 # non-continuous for compatibility
+internal = 96 # non-continuous for compatibility
+allphases = (public, draft, secret, archived, internal)
+trackedphases = (draft, secret, archived, internal)
# record phase names
cmdphasenames = [b'public', b'draft', b'secret'] # known to `hg phase` command
-phasenames = [None] * len(allphases)
-phasenames[: len(cmdphasenames)] = cmdphasenames
+phasenames = dict(enumerate(cmdphasenames))
phasenames[archived] = b'archived'
phasenames[internal] = b'internal'
+# map phase name to phase number
+phasenumber = {name: phase for phase, name in phasenames.items()}
+# like phasenumber, but also include maps for the numeric and binary
+# phase number to the phase number
+phasenumber2 = phasenumber.copy()
+phasenumber2.update({phase: phase for phase in phasenames})
+phasenumber2.update({b'%i' % phase: phase for phase in phasenames})
# record phase property
-mutablephases = tuple(allphases[1:])
-remotehiddenphases = tuple(allphases[2:])
-localhiddenphases = tuple(p for p in allphases if p & HIDEABLE_FLAG)
+mutablephases = (draft, secret, archived, internal)
+remotehiddenphases = (secret, archived, internal)
+localhiddenphases = (internal, archived)
def supportinternal(repo):
@@ -167,7 +170,7 @@
"""
repo = repo.unfiltered()
dirty = False
- roots = [set() for i in allphases]
+ roots = {i: set() for i in allphases}
try:
f, pending = txnutil.trypending(repo.root, repo.svfs, b'phaseroots')
try:
@@ -189,11 +192,10 @@
def binaryencode(phasemapping):
"""encode a 'phase -> nodes' mapping into a binary stream
- Since phases are integer the mapping is actually a python list:
- [[PUBLIC_HEADS], [DRAFTS_HEADS], [SECRET_HEADS]]
+ The revision lists are encoded as (phase, root) pairs.
"""
binarydata = []
- for phase, nodes in enumerate(phasemapping):
+ for phase, nodes in pycompat.iteritems(phasemapping):
for head in nodes:
binarydata.append(_fphasesentry.pack(phase, head))
return b''.join(binarydata)
@@ -202,8 +204,9 @@
def binarydecode(stream):
"""decode a binary stream into a 'phase -> nodes' mapping
- Since phases are integer the mapping is actually a python list."""
- headsbyphase = [[] for i in allphases]
+ The (phase, root) pairs are turned back into a dictionary with
+ the phase as index and the aggregated roots of that phase as value."""
+ headsbyphase = {i: [] for i in allphases}
entrysize = _fphasesentry.size
while True:
entry = stream.read(entrysize)
@@ -323,6 +326,38 @@
self.filterunknown(repo)
self.opener = repo.svfs
+ def hasnonpublicphases(self, repo):
+ """detect if there are revisions with non-public phase"""
+ repo = repo.unfiltered()
+ cl = repo.changelog
+ if len(cl) >= self._loadedrevslen:
+ self.invalidate()
+ self.loadphaserevs(repo)
+ return any(
+ revs
+ for phase, revs in pycompat.iteritems(self.phaseroots)
+ if phase != public
+ )
+
+ def nonpublicphaseroots(self, repo):
+ """returns the roots of all non-public phases
+
+ The roots are not minimized, so if the secret revisions are
+ descendants of draft revisions, their roots will still be present.
+ """
+ repo = repo.unfiltered()
+ cl = repo.changelog
+ if len(cl) >= self._loadedrevslen:
+ self.invalidate()
+ self.loadphaserevs(repo)
+ return set().union(
+ *[
+ revs
+ for phase, revs in pycompat.iteritems(self.phaseroots)
+ if phase != public
+ ]
+ )
+
def getrevset(self, repo, phases, subset=None):
"""return a smartset for the given phases"""
self.loadphaserevs(repo) # ensure phase's sets are loaded
@@ -380,7 +415,7 @@
# Shallow copy meant to ensure isolation in
# advance/retractboundary(), nothing more.
ph = self.__class__(None, None, _load=False)
- ph.phaseroots = self.phaseroots[:]
+ ph.phaseroots = self.phaseroots.copy()
ph.dirty = self.dirty
ph.opener = self.opener
ph._loadedrevslen = self._loadedrevslen
@@ -400,17 +435,12 @@
def _getphaserevsnative(self, repo):
repo = repo.unfiltered()
- nativeroots = []
- for phase in trackedphases:
- nativeroots.append(
- pycompat.maplist(repo.changelog.rev, self.phaseroots[phase])
- )
- return repo.changelog.computephases(nativeroots)
+ return repo.changelog.computephases(self.phaseroots)
def _computephaserevspure(self, repo):
repo = repo.unfiltered()
cl = repo.changelog
- self._phasesets = [set() for phase in allphases]
+ self._phasesets = {phase: set() for phase in allphases}
lowerroots = set()
for phase in reversed(trackedphases):
roots = pycompat.maplist(cl.rev, self.phaseroots[phase])
@@ -464,7 +494,7 @@
f.close()
def _write(self, fp):
- for phase, roots in enumerate(self.phaseroots):
+ for phase, roots in pycompat.iteritems(self.phaseroots):
for h in sorted(roots):
fp.write(b'%i %s\n' % (phase, hex(h)))
self.dirty = False
@@ -511,7 +541,7 @@
changes = set() # set of revisions to be changed
delroots = [] # set of root deleted by this path
- for phase in pycompat.xrange(targetphase + 1, len(allphases)):
+ for phase in (phase for phase in allphases if phase > targetphase):
# filter nodes that are not in a compatible phase already
nodes = [
n for n in nodes if self.phase(repo, repo[n].rev()) >= phase
@@ -546,7 +576,11 @@
return changes
def retractboundary(self, repo, tr, targetphase, nodes):
- oldroots = self.phaseroots[: targetphase + 1]
+ oldroots = {
+ phase: revs
+ for phase, revs in pycompat.iteritems(self.phaseroots)
+ if phase <= targetphase
+ }
if tr is None:
phasetracking = None
else:
@@ -565,7 +599,7 @@
# find the phase of the affected revision
for phase in pycompat.xrange(targetphase, -1, -1):
if phase:
- roots = oldroots[phase]
+ roots = oldroots.get(phase, [])
revs = set(repo.revs(b'%ln::%ld', roots, affected))
affected -= revs
else: # public phase
@@ -583,30 +617,32 @@
raise error.ProgrammingError(msg)
repo = repo.unfiltered()
- currentroots = self.phaseroots[targetphase]
+ torev = repo.changelog.rev
+ tonode = repo.changelog.node
+ currentroots = {torev(node) for node in self.phaseroots[targetphase]}
finalroots = oldroots = set(currentroots)
+ newroots = [torev(node) for node in nodes]
newroots = [
- n for n in nodes if self.phase(repo, repo[n].rev()) < targetphase
+ rev for rev in newroots if self.phase(repo, rev) < targetphase
]
+
if newroots:
-
- if nullid in newroots:
+ if nullrev in newroots:
raise error.Abort(_(b'cannot change null revision phase'))
- currentroots = currentroots.copy()
currentroots.update(newroots)
# Only compute new roots for revs above the roots that are being
# retracted.
- minnewroot = min(repo[n].rev() for n in newroots)
- aboveroots = [
- n for n in currentroots if repo[n].rev() >= minnewroot
- ]
- updatedroots = repo.set(b'roots(%ln::)', aboveroots)
+ minnewroot = min(newroots)
+ aboveroots = [rev for rev in currentroots if rev >= minnewroot]
+ updatedroots = repo.revs(b'roots(%ld::)', aboveroots)
- finalroots = {n for n in currentroots if repo[n].rev() < minnewroot}
- finalroots.update(ctx.node() for ctx in updatedroots)
+ finalroots = {rev for rev in currentroots if rev < minnewroot}
+ finalroots.update(updatedroots)
if finalroots != oldroots:
- self._updateroots(targetphase, finalroots, tr)
+ self._updateroots(
+ targetphase, {tonode(rev) for rev in finalroots}, tr
+ )
return True
return False
@@ -617,7 +653,7 @@
"""
filtered = False
has_node = repo.changelog.index.has_node # to filter unknown nodes
- for phase, nodes in enumerate(self.phaseroots):
+ for phase, nodes in pycompat.iteritems(self.phaseroots):
missing = sorted(node for node in nodes if not has_node(node))
if missing:
for mnode in missing:
@@ -742,7 +778,7 @@
"""
cl = repo.changelog
- headsbyphase = [[] for i in allphases]
+ headsbyphase = {i: [] for i in allphases}
# No need to keep track of secret phase; any heads in the subset that
# are not mentioned are implicitly secret.
for phase in allphases[:secret]:
@@ -753,12 +789,12 @@
def updatephases(repo, trgetter, headsbyphase):
"""Updates the repo with the given phase heads"""
- # Now advance phase boundaries of all but secret phase
+ # Now advance phase boundaries of all phases
#
# run the update (and fetch transaction) only if there are actually things
# to update. This avoid creating empty transaction during no-op operation.
- for phase in allphases[:-1]:
+ for phase in allphases:
revset = b'%ln - _phase(%s)'
heads = [c.node() for c in repo.set(revset, headsbyphase[phase], phase)]
if heads:
@@ -873,18 +909,16 @@
"""
v = ui.config(b'phases', b'new-commit')
try:
- return phasenames.index(v)
- except ValueError:
- try:
- return int(v)
- except ValueError:
- msg = _(b"phases.new-commit: not a valid phase name ('%s')")
- raise error.ConfigError(msg % v)
+ return phasenumber2[v]
+ except KeyError:
+ raise error.ConfigError(
+ _(b"phases.new-commit: not a valid phase name ('%s')") % v
+ )
def hassecret(repo):
"""utility function that check if a repo have any secret changeset."""
- return bool(repo._phasecache.phaseroots[2])
+ return bool(repo._phasecache.phaseroots[secret])
def preparehookargs(node, old, new):
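# Editor's note: a short worked example of the new non-contiguous phase
# numbering and lookup tables defined above (the dicts are rebuilt here,
# mirroring the patch, so the snippet is self-contained).
phasenames = {0: b'public', 1: b'draft', 2: b'secret',
              32: b'archived', 96: b'internal'}
phasenumber = {name: phase for phase, name in phasenames.items()}
phasenumber2 = phasenumber.copy()
phasenumber2.update({phase: phase for phase in phasenames})
phasenumber2.update({b'%i' % phase: phase for phase in phasenames})
assert phasenumber[b'secret'] == 2
# phasenumber2 also accepts the number and its byte-string form:
assert phasenumber2[b'draft'] == phasenumber2[1] == phasenumber2[b'1'] == 1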
--- a/mercurial/policy.py Tue Jul 14 10:25:41 2020 +0200
+++ b/mercurial/policy.py Mon Jul 20 21:56:27 2020 +0530
@@ -80,7 +80,7 @@
('cext', 'bdiff'): 3,
('cext', 'mpatch'): 1,
('cext', 'osutil'): 4,
- ('cext', 'parsers'): 16,
+ ('cext', 'parsers'): 17,
}
# map import request to other package or module
--- a/mercurial/posix.py Tue Jul 14 10:25:41 2020 +0200
+++ b/mercurial/posix.py Mon Jul 20 21:56:27 2020 +0530
@@ -538,10 +538,6 @@
return pycompat.shlexsplit(s, posix=True)
-def quotecommand(cmd):
- return cmd
-
-
def testpid(pid):
'''return False if pid dead, True if running or not sure'''
if pycompat.sysplatform == b'OpenVMS':
--- a/mercurial/pycompat.py Tue Jul 14 10:25:41 2020 +0200
+++ b/mercurial/pycompat.py Mon Jul 20 21:56:27 2020 +0530
@@ -98,7 +98,6 @@
import codecs
import functools
import io
- import locale
import struct
if os.name == r'nt' and sys.version_info >= (3, 6):
@@ -143,29 +142,12 @@
long = int
- # Warning: sys.stdout.buffer and sys.stderr.buffer do not necessarily have
- # the same buffering behavior as sys.stdout and sys.stderr. The interpreter
- # initializes them with block-buffered streams or unbuffered streams (when
- # the -u option or the PYTHONUNBUFFERED environment variable is set), never
- # with a line-buffered stream.
- # TODO: .buffer might not exist if std streams were replaced; we'll need
- # a silly wrapper to make a bytes stream backed by a unicode one.
- stdin = sys.stdin.buffer
- stdout = sys.stdout.buffer
- stderr = sys.stderr.buffer
-
if getattr(sys, 'argv', None) is not None:
# On POSIX, the char** argv array is converted to Python str using
- # Py_DecodeLocale(). The inverse of this is Py_EncodeLocale(), which isn't
- # directly callable from Python code. So, we need to emulate it.
- # Py_DecodeLocale() calls mbstowcs() and falls back to mbrtowc() with
- # surrogateescape error handling on failure. These functions take the
- # current system locale into account. So, the inverse operation is to
- # .encode() using the system locale's encoding and using the
- # surrogateescape error handler. The only tricky part here is getting
- # the system encoding correct, since `locale.getlocale()` can return
- # None. We fall back to the filesystem encoding if lookups via `locale`
- # fail, as this seems like a reasonable thing to do.
+ # Py_DecodeLocale(). The inverse of this is Py_EncodeLocale(), which
+ # isn't directly callable from Python code. In practice, os.fsencode()
+ # can be used instead (this is recommended by Python's documentation
+ # for sys.argv).
#
# On Windows, the wchar_t **argv is passed into the interpreter as-is.
# Like POSIX, we need to emulate what Py_EncodeLocale() would do. But
@@ -178,19 +160,7 @@
if os.name == r'nt':
sysargv = [a.encode("mbcs", "ignore") for a in sys.argv]
else:
-
- def getdefaultlocale_if_known():
- try:
- return locale.getdefaultlocale()
- except ValueError:
- return None, None
-
- encoding = (
- locale.getlocale()[1]
- or getdefaultlocale_if_known()[1]
- or sys.getfilesystemencoding()
- )
- sysargv = [a.encode(encoding, "surrogateescape") for a in sys.argv]
+ sysargv = [fsencode(a) for a in sys.argv]
bytechr = struct.Struct('>B').pack
byterepr = b'%r'.__mod__
@@ -495,9 +465,6 @@
osaltsep = os.altsep
osdevnull = os.devnull
long = long
- stdin = sys.stdin
- stdout = sys.stdout
- stderr = sys.stderr
if getattr(sys, 'argv', None) is not None:
sysargv = sys.argv
sysplatform = sys.platform
--- a/mercurial/repair.py Tue Jul 14 10:25:41 2020 +0200
+++ b/mercurial/repair.py Mon Jul 20 21:56:27 2020 +0530
@@ -66,7 +66,7 @@
else:
bundletype = b"HG10UN"
- outgoing = discovery.outgoing(repo, missingroots=bases, missingheads=heads)
+ outgoing = discovery.outgoing(repo, missingroots=bases, ancestorsof=heads)
contentopts = {
b'cg.version': cgversion,
b'obsolescence': obsolescence,
--- a/mercurial/repoview.py Tue Jul 14 10:25:41 2020 +0200
+++ b/mercurial/repoview.py Mon Jul 20 21:56:27 2020 +0530
@@ -129,10 +129,8 @@
def computemutable(repo, visibilityexceptions=None):
assert not repo.changelog.filteredrevs
# fast check to avoid revset call on huge repo
- if any(repo._phasecache.phaseroots[1:]):
- getphase = repo._phasecache.phase
- maymutable = filterrevs(repo, b'base')
- return frozenset(r for r in maymutable if getphase(repo, r))
+ if repo._phasecache.hasnonpublicphases(repo):
+ return frozenset(repo._phasecache.getrevset(repo, phases.mutablephases))
return frozenset()
@@ -154,9 +152,9 @@
assert not repo.changelog.filteredrevs
cl = repo.changelog
firstmutable = len(cl)
- for roots in repo._phasecache.phaseroots[1:]:
- if roots:
- firstmutable = min(firstmutable, min(cl.rev(r) for r in roots))
+ roots = repo._phasecache.nonpublicphaseroots(repo)
+ if roots:
+ firstmutable = min(firstmutable, min(cl.rev(r) for r in roots))
# protect from nullrev root
firstmutable = max(0, firstmutable)
return frozenset(pycompat.xrange(firstmutable, len(cl)))
--- a/mercurial/revlog.py Tue Jul 14 10:25:41 2020 +0200
+++ b/mercurial/revlog.py Mon Jul 20 21:56:27 2020 +0530
@@ -1523,7 +1523,7 @@
def disambiguate(hexnode, minlength):
"""Disambiguate against wdirid."""
- for length in range(minlength, 41):
+ for length in range(minlength, len(hexnode) + 1):
prefix = hexnode[:length]
if not maybewdir(prefix):
return prefix
@@ -1540,12 +1540,12 @@
pass
if node == wdirid:
- for length in range(minlength, 41):
+ for length in range(minlength, len(hexnode) + 1):
prefix = hexnode[:length]
if isvalid(prefix):
return prefix
- for length in range(minlength, 41):
+ for length in range(minlength, len(hexnode) + 1):
prefix = hexnode[:length]
if isvalid(prefix):
return disambiguate(hexnode, length)
--- a/mercurial/revlogutils/nodemap.py Tue Jul 14 10:25:41 2020 +0200
+++ b/mercurial/revlogutils/nodemap.py Mon Jul 20 21:56:27 2020 +0530
@@ -13,6 +13,8 @@
import re
import struct
+from ..i18n import _
+
from .. import (
error,
node as nodemod,
@@ -48,7 +50,7 @@
docket.data_unused = data_unused
filename = _rawdata_filepath(revlog, docket)
- use_mmap = revlog.opener.options.get(b"exp-persistent-nodemap.mmap")
+ use_mmap = revlog.opener.options.get(b"persistent-nodemap.mmap")
try:
with revlog.opener(filename) as fd:
if use_mmap:
@@ -105,6 +107,9 @@
def addabort(self, *args, **kwargs):
pass
+ def _report(self, *args):
+ pass
+
def update_persistent_nodemap(revlog):
"""update the persistent nodemap right now
@@ -137,7 +142,14 @@
can_incremental = util.safehasattr(revlog.index, "nodemap_data_incremental")
ondisk_docket = revlog._nodemap_docket
feed_data = util.safehasattr(revlog.index, "update_nodemap_data")
- use_mmap = revlog.opener.options.get(b"exp-persistent-nodemap.mmap")
+ use_mmap = revlog.opener.options.get(b"persistent-nodemap.mmap")
+ mode = revlog.opener.options.get(b"persistent-nodemap.mode")
+ if not can_incremental:
+ msg = _(b"persistent nodemap in strict mode without efficient method")
+ if mode == b'warn':
+ tr._report(b"%s\n" % msg)
+ elif mode == b'strict':
+ raise error.Abort(msg)
data = None
# first attempt an incremental update of the data
@@ -255,8 +267,7 @@
# data. Its content is currently very light, but it will expand as the on disk
# nodemap gains the necessary features to be used in production.
-# version 0 is experimental, no BC garantee, do no use outside of tests.
-ONDISK_VERSION = 0
+ONDISK_VERSION = 1
S_VERSION = struct.Struct(">B")
S_HEADER = struct.Struct(">BQQQQ")
--- a/mercurial/revset.py Tue Jul 14 10:25:41 2020 +0200
+++ b/mercurial/revset.py Mon Jul 20 21:56:27 2020 +0530
@@ -789,9 +789,9 @@
"merge" here includes merge conflicts from e.g. 'hg rebase' or 'hg graft'.
"""
getargs(x, 0, 0, _(b"conflictlocal takes no arguments"))
- from . import merge
-
- mergestate = merge.mergestate.read(repo)
+ from . import mergestate as mergestatemod
+
+ mergestate = mergestatemod.mergestate.read(repo)
if mergestate.active() and repo.changelog.hasnode(mergestate.local):
return subset & {repo.changelog.rev(mergestate.local)}
@@ -805,9 +805,9 @@
"merge" here includes merge conflicts from e.g. 'hg rebase' or 'hg graft'.
"""
getargs(x, 0, 0, _(b"conflictother takes no arguments"))
- from . import merge
-
- mergestate = merge.mergestate.read(repo)
+ from . import mergestate as mergestatemod
+
+ mergestate = mergestatemod.mergestate.read(repo)
if mergestate.active() and repo.changelog.hasnode(mergestate.other):
return subset & {repo.changelog.rev(mergestate.other)}
--- a/mercurial/rewriteutil.py Tue Jul 14 10:25:41 2020 +0200
+++ b/mercurial/rewriteutil.py Mon Jul 20 21:56:27 2020 +0530
@@ -53,3 +53,20 @@
if allowunstable:
return revset.baseset()
return repo.revs(b"(%ld::) - %ld", revs, revs)
+
+
+def skip_empty_successor(ui, command):
+ empty_successor = ui.config(b'rewrite', b'empty-successor')
+ if empty_successor == b'skip':
+ return True
+ elif empty_successor == b'keep':
+ return False
+ else:
+ raise error.ConfigError(
+ _(
+ b"%s doesn't know how to handle config "
+ b"rewrite.empty-successor=%s (only 'skip' and 'keep' are "
+ b"supported)"
+ )
+ % (command, empty_successor)
+ )
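# Editor's note: a hedged sketch of how skip_empty_successor() is driven
# by configuration. The command name passed in is only used in the error
# message; 'rebase' here is illustrative.
#
#   [rewrite]
#   empty-successor = skip    # or 'keep'; anything else is a ConfigError
#
#   if rewriteutil.skip_empty_successor(ui, b'rebase'):
#       ...  # drop changesets that would become empty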
--- a/mercurial/scmutil.py Tue Jul 14 10:25:41 2020 +0200
+++ b/mercurial/scmutil.py Mon Jul 20 21:56:27 2020 +0530
@@ -456,9 +456,7 @@
def resolvehexnodeidprefix(repo, prefix):
- if prefix.startswith(b'x') and repo.ui.configbool(
- b'experimental', b'revisions.prefixhexnode'
- ):
+ if prefix.startswith(b'x'):
prefix = prefix[1:]
try:
# Uses unfiltered repo because it's faster when prefix is ambiguous/
@@ -805,9 +803,12 @@
if relative:
cwd = repo.getcwd()
- pathto = repo.pathto
- return lambda f: pathto(f, cwd)
- elif repo.ui.configbool(b'ui', b'slash'):
+ if cwd != b'':
+ # this branch would work even if cwd == b'' (ie cwd = repo
+ # root), but its generality makes the returned function slower
+ pathto = repo.pathto
+ return lambda f: pathto(f, cwd)
+ if repo.ui.configbool(b'ui', b'slash'):
return lambda f: f
else:
return util.localpath
@@ -1469,6 +1470,13 @@
repo._quick_access_changeid_invalidate()
+def writereporequirements(repo, requirements=None):
+ """ writes requirements for the repo to .hg/requires """
+ if requirements:
+ repo.requirements = requirements
+ writerequires(repo.vfs, repo.requirements)
+
+
def writerequires(opener, requirements):
with opener(b'requires', b'w', atomictemp=True) as fp:
for r in sorted(requirements):
@@ -1879,18 +1887,29 @@
]
-def prefetchfiles(repo, revs, match):
+def prefetchfiles(repo, revmatches):
"""Invokes the registered file prefetch functions, allowing extensions to
ensure the corresponding files are available locally, before the command
- uses them."""
- if match:
- # The command itself will complain about files that don't exist, so
- # don't duplicate the message.
- match = matchmod.badmatch(match, lambda fn, msg: None)
- else:
- match = matchall(repo)
+ uses them.
+
+ Args:
+ revmatches: a list of (revision, match) tuples to indicate the files to
+ fetch at each revision. If any of the match elements is None, it matches
+ all files.
+ """
- fileprefetchhooks(repo, revs, match)
+ def _matcher(m):
+ if m:
+ assert isinstance(m, matchmod.basematcher)
+ # The command itself will complain about files that don't exist, so
+ # don't duplicate the message.
+ return matchmod.badmatch(m, lambda fn, msg: None)
+ else:
+ return matchall(repo)
+
+ revbadmatches = [(rev, _matcher(match)) for (rev, match) in revmatches]
+
+ fileprefetchhooks(repo, revbadmatches)
# a list of (repo, revs, match) prefetch functions
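# Editor's note: a hedged sketch of the migrated prefetchfiles() calling
# convention -- per-revision matchers instead of one matcher for all revs.
# `repo`, `rev1`, `rev2` and `match` are assumed to exist in the caller's
# scope.
#
#   # old: scmutil.prefetchfiles(repo, [rev1, rev2], match)
#   scmutil.prefetchfiles(repo, [(rev1, match), (rev2, match)])
#   # a None matcher entry matches all files at that revision:
#   scmutil.prefetchfiles(repo, [(rev1, None)])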
--- a/mercurial/shelve.py Tue Jul 14 10:25:41 2020 +0200
+++ b/mercurial/shelve.py Mon Jul 20 21:56:27 2020 +0530
@@ -42,6 +42,7 @@
lock as lockmod,
mdiff,
merge,
+ mergestate as mergestatemod,
node as nodemod,
patch,
phases,
@@ -161,7 +162,7 @@
repo = self.repo.unfiltered()
outgoing = discovery.outgoing(
- repo, missingroots=bases, missingheads=[node]
+ repo, missingroots=bases, ancestorsof=[node]
)
cg = changegroup.makechangegroup(repo, outgoing, cgversion, b'shelve')
@@ -801,7 +802,7 @@
basename = state.name
with repo.lock():
checkparents(repo, state)
- ms = merge.mergestate.read(repo)
+ ms = mergestatemod.mergestate.read(repo)
if list(ms.unresolved()):
raise error.Abort(
_(b"unresolved conflicts, can't continue"),
@@ -1013,12 +1014,7 @@
activebookmark,
interactive,
)
- raise error.InterventionRequired(
- _(
- b"unresolved conflicts (see 'hg resolve', then "
- b"'hg unshelve --continue')"
- )
- )
+ raise error.ConflictResolutionRequired(b'unshelve')
with repo.dirstate.parentchange():
repo.setparents(tmpwctx.node(), nodemod.nullid)
--- a/mercurial/simplemerge.py Tue Jul 14 10:25:41 2020 +0200
+++ b/mercurial/simplemerge.py Mon Jul 20 21:56:27 2020 +0530
@@ -451,12 +451,7 @@
return result
-def _bytes_to_set(b):
- """turns a multiple bytes (usually flags) into a set of individual byte"""
- return set(b[x : x + 1] for x in range(len(b)))
-
-
-def is_null(ctx):
+def is_not_null(ctx):
if not util.safehasattr(ctx, "node"):
return False
return ctx.node() != nodemod.nullid
@@ -518,15 +513,13 @@
# merge flags if necessary
flags = localctx.flags()
- localflags = _bytes_to_set(flags)
- otherflags = _bytes_to_set(otherctx.flags())
- if is_null(basectx) and localflags != otherflags:
- baseflags = _bytes_to_set(basectx.flags())
- flags = localflags & otherflags
- for f in localflags.symmetric_difference(otherflags):
- if f not in baseflags:
- flags.add(f)
- flags = b''.join(sorted(flags))
+ localflags = set(pycompat.iterbytestr(flags))
+ otherflags = set(pycompat.iterbytestr(otherctx.flags()))
+ if is_not_null(basectx) and localflags != otherflags:
+ baseflags = set(pycompat.iterbytestr(basectx.flags()))
+ commonflags = localflags & otherflags
+ addedflags = (localflags ^ otherflags) - baseflags
+ flags = b''.join(sorted(commonflags | addedflags))
if not opts.get(b'print'):
localctx.write(mergedtext, flags)
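# Editor's note: a worked example of the rewritten flag-merge logic above:
# a flag added on exactly one side relative to the base is kept, and flags
# present on both sides are always kept.
localflags = {b'x'}     # e.g. local made the file executable
otherflags = set()      # the other side left flags untouched
baseflags = set()       # the base had no flags
commonflags = localflags & otherflags
addedflags = (localflags ^ otherflags) - baseflags
assert b''.join(sorted(commonflags | addedflags)) == b'x'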
--- a/mercurial/sparse.py Tue Jul 14 10:25:41 2020 +0200
+++ b/mercurial/sparse.py Mon Jul 20 21:56:27 2020 +0530
@@ -18,6 +18,7 @@
error,
match as matchmod,
merge as mergemod,
+ mergestate as mergestatemod,
pathutil,
pycompat,
scmutil,
@@ -406,7 +407,7 @@
elif file in wctx:
prunedactions[file] = (b'r', args, msg)
- if branchmerge and type == mergemod.ACTION_MERGE:
+ if branchmerge and type == mergestatemod.ACTION_MERGE:
f1, f2, fa, move, anc = args
if not sparsematch(f1):
temporaryfiles.append(f1)
@@ -600,10 +601,10 @@
if b'exp-sparse' in oldrequires and removing:
repo.requirements.discard(b'exp-sparse')
- scmutil.writerequires(repo.vfs, repo.requirements)
+ scmutil.writereporequirements(repo)
elif b'exp-sparse' not in oldrequires:
repo.requirements.add(b'exp-sparse')
- scmutil.writerequires(repo.vfs, repo.requirements)
+ scmutil.writereporequirements(repo)
try:
writeconfig(repo, includes, excludes, profiles)
@@ -612,7 +613,7 @@
if repo.requirements != oldrequires:
repo.requirements.clear()
repo.requirements |= oldrequires
- scmutil.writerequires(repo.vfs, repo.requirements)
+ scmutil.writereporequirements(repo)
writeconfig(repo, oldincludes, oldexcludes, oldprofiles)
raise
--- a/mercurial/sshpeer.py Tue Jul 14 10:25:41 2020 +0200
+++ b/mercurial/sshpeer.py Mon Jul 20 21:56:27 2020 +0530
@@ -36,15 +36,16 @@
return b"'%s'" % s.replace(b"'", b"'\\''")
-def _forwardoutput(ui, pipe):
+def _forwardoutput(ui, pipe, warn=False):
"""display all data currently available on pipe as remote output.
This is non blocking."""
if pipe:
s = procutil.readpipe(pipe)
if s:
+ display = ui.warn if warn else ui.status
for l in s.splitlines():
- ui.status(_(b"remote: "), l, b'\n')
+ display(_(b"remote: "), l, b'\n')
class doublepipe(object):
@@ -178,7 +179,6 @@
)
ui.debug(b'running %s\n' % cmd)
- cmd = procutil.quotecommand(cmd)
# no buffer allow the use of 'select'
# feel free to remove buffering and select usage when we ultimately
@@ -204,8 +204,12 @@
def _performhandshake(ui, stdin, stdout, stderr):
def badresponse():
- # Flush any output on stderr.
- _forwardoutput(ui, stderr)
+ # Flush any output on stderr. In general, the stderr contains errors
+ # from the remote (ssh errors, some hg errors), and status indications
+ # (like "adding changes"), with no current way to tell them apart.
+ # Here we failed so early that it's almost certainly only errors, so
+ # use warn=True so -q doesn't hide them.
+ _forwardoutput(ui, stderr, warn=True)
msg = _(b'no suitable response from remote hg')
hint = ui.config(b'ui', b'ssherrorhint')
@@ -307,7 +311,7 @@
while lines[-1] and max_noise:
try:
l = stdout.readline()
- _forwardoutput(ui, stderr)
+ _forwardoutput(ui, stderr, warn=True)
# Look for reply to protocol upgrade request. It has a token
# in it, so there should be no false positives.
@@ -374,7 +378,7 @@
badresponse()
# Flush any output on stderr before proceeding.
- _forwardoutput(ui, stderr)
+ _forwardoutput(ui, stderr, warn=True)
return protoname, caps
--- a/mercurial/sslutil.py Tue Jul 14 10:25:41 2020 +0200
+++ b/mercurial/sslutil.py Mon Jul 20 21:56:27 2020 +0530
@@ -33,9 +33,8 @@
# support for TLS 1.1, TLS 1.2, SNI, system CA stores, etc. These features are
# all exposed via the "ssl" module.
#
-# Depending on the version of Python being used, SSL/TLS support is either
-# modern/secure or legacy/insecure. Many operations in this module have
-# separate code paths depending on support in Python.
+# We require in setup.py the presence of ssl.SSLContext, which indicates modern
+# SSL/TLS support.
configprotocols = {
b'tls1.0',
@@ -45,76 +44,19 @@
hassni = getattr(ssl, 'HAS_SNI', False)
-# TLS 1.1 and 1.2 may not be supported if the OpenSSL Python is compiled
-# against doesn't support them.
-supportedprotocols = {b'tls1.0'}
-if util.safehasattr(ssl, b'PROTOCOL_TLSv1_1'):
+# ssl.HAS_TLSv1* are preferred to check support but they were added in Python
+# 3.7. Prior to CPython commit 6e8cda91d92da72800d891b2fc2073ecbc134d98
+# (backported to the 3.7 branch), ssl.PROTOCOL_TLSv1_1 / ssl.PROTOCOL_TLSv1_2
+# were defined only if compiled against an OpenSSL version with TLS 1.1 / 1.2
+# support. At the mentioned commit, they were unconditionally defined.
+supportedprotocols = set()
+if getattr(ssl, 'HAS_TLSv1', util.safehasattr(ssl, 'PROTOCOL_TLSv1')):
+ supportedprotocols.add(b'tls1.0')
+if getattr(ssl, 'HAS_TLSv1_1', util.safehasattr(ssl, 'PROTOCOL_TLSv1_1')):
supportedprotocols.add(b'tls1.1')
-if util.safehasattr(ssl, b'PROTOCOL_TLSv1_2'):
+if getattr(ssl, 'HAS_TLSv1_2', util.safehasattr(ssl, 'PROTOCOL_TLSv1_2')):
supportedprotocols.add(b'tls1.2')
-try:
- # ssl.SSLContext was added in 2.7.9 and presence indicates modern
- # SSL/TLS features are available.
- SSLContext = ssl.SSLContext
- modernssl = True
- _canloaddefaultcerts = util.safehasattr(SSLContext, b'load_default_certs')
-except AttributeError:
- modernssl = False
- _canloaddefaultcerts = False
-
- # We implement SSLContext using the interface from the standard library.
- class SSLContext(object):
- def __init__(self, protocol):
- # From the public interface of SSLContext
- self.protocol = protocol
- self.check_hostname = False
- self.options = 0
- self.verify_mode = ssl.CERT_NONE
-
- # Used by our implementation.
- self._certfile = None
- self._keyfile = None
- self._certpassword = None
- self._cacerts = None
- self._ciphers = None
-
- def load_cert_chain(self, certfile, keyfile=None, password=None):
- self._certfile = certfile
- self._keyfile = keyfile
- self._certpassword = password
-
- def load_default_certs(self, purpose=None):
- pass
-
- def load_verify_locations(self, cafile=None, capath=None, cadata=None):
- if capath:
- raise error.Abort(_(b'capath not supported'))
- if cadata:
- raise error.Abort(_(b'cadata not supported'))
-
- self._cacerts = cafile
-
- def set_ciphers(self, ciphers):
- self._ciphers = ciphers
-
- def wrap_socket(self, socket, server_hostname=None, server_side=False):
- # server_hostname is unique to SSLContext.wrap_socket and is used
- # for SNI in that context. So there's nothing for us to do with it
- # in this legacy code since we don't support SNI.
-
- args = {
- 'keyfile': self._keyfile,
- 'certfile': self._certfile,
- 'server_side': server_side,
- 'cert_reqs': self.verify_mode,
- 'ssl_version': self.protocol,
- 'ca_certs': self._cacerts,
- 'ciphers': self._ciphers,
- }
-
- return ssl.wrap_socket(socket, **args)
-
def _hostsettings(ui, hostname):
"""Obtain security settings for a hostname.
@@ -135,15 +77,11 @@
b'disablecertverification': False,
# Whether the legacy [hostfingerprints] section has data for this host.
b'legacyfingerprint': False,
- # PROTOCOL_* constant to use for SSLContext.__init__.
- b'protocol': None,
# String representation of minimum protocol to be used for UI
# presentation.
- b'protocolui': None,
+ b'minimumprotocol': None,
# ssl.CERT_* constant used by SSLContext.verify_mode.
b'verifymode': None,
- # Defines extra ssl.OP* bitwise options to set.
- b'ctxoptions': None,
# OpenSSL Cipher List to use (instead of default).
b'ciphers': None,
}
@@ -158,45 +96,30 @@
% b' '.join(sorted(configprotocols)),
)
- # We default to TLS 1.1+ where we can because TLS 1.0 has known
- # vulnerabilities (like BEAST and POODLE). We allow users to downgrade to
- # TLS 1.0+ via config options in case a legacy server is encountered.
- if b'tls1.1' in supportedprotocols:
- defaultprotocol = b'tls1.1'
- else:
- # Let people know they are borderline secure.
- # We don't document this config option because we want people to see
- # the bold warnings on the web site.
- # internal config: hostsecurity.disabletls10warning
- if not ui.configbool(b'hostsecurity', b'disabletls10warning'):
- ui.warn(
- _(
- b'warning: connecting to %s using legacy security '
- b'technology (TLS 1.0); see '
- b'https://mercurial-scm.org/wiki/SecureConnections for '
- b'more info\n'
- )
- % bhostname
- )
- defaultprotocol = b'tls1.0'
+ # We default to TLS 1.1+ because TLS 1.0 has known vulnerabilities (like
+ # BEAST and POODLE). We allow users to downgrade to TLS 1.0+ via config
+ # options in case a legacy server is encountered.
+
+ # setup.py checks that TLS 1.1 or TLS 1.2 is present, so the following
+ # assert should not fail.
+ assert supportedprotocols - {b'tls1.0'}
+ defaultminimumprotocol = b'tls1.1'
key = b'minimumprotocol'
- protocol = ui.config(b'hostsecurity', key, defaultprotocol)
- validateprotocol(protocol, key)
+ minimumprotocol = ui.config(b'hostsecurity', key, defaultminimumprotocol)
+ validateprotocol(minimumprotocol, key)
key = b'%s:minimumprotocol' % bhostname
- protocol = ui.config(b'hostsecurity', key, protocol)
- validateprotocol(protocol, key)
+ minimumprotocol = ui.config(b'hostsecurity', key, minimumprotocol)
+ validateprotocol(minimumprotocol, key)
# If --insecure is used, we allow the use of TLS 1.0 despite config options.
# We always print a "connection security to %s is disabled..." message when
# --insecure is used. So no need to print anything more here.
if ui.insecureconnections:
- protocol = b'tls1.0'
+ minimumprotocol = b'tls1.0'
- s[b'protocol'], s[b'ctxoptions'], s[b'protocolui'] = protocolsettings(
- protocol
- )
+ s[b'minimumprotocol'] = minimumprotocol
ciphers = ui.config(b'hostsecurity', b'ciphers')
ciphers = ui.config(b'hostsecurity', b'%s:ciphers' % bhostname, ciphers)
@@ -288,7 +211,7 @@
# Require certificate validation if CA certs are being loaded and
# verification hasn't been disabled above.
- if cafile or (_canloaddefaultcerts and s[b'allowloaddefaultcerts']):
+ if cafile or s[b'allowloaddefaultcerts']:
s[b'verifymode'] = ssl.CERT_REQUIRED
else:
# At this point we don't have a fingerprint, aren't being
@@ -298,59 +221,26 @@
# user).
s[b'verifymode'] = ssl.CERT_NONE
- assert s[b'protocol'] is not None
- assert s[b'ctxoptions'] is not None
assert s[b'verifymode'] is not None
return s
-def protocolsettings(protocol):
- """Resolve the protocol for a config value.
-
- Returns a 3-tuple of (protocol, options, ui value) where the first
- 2 items are values used by SSLContext and the last is a string value
- of the ``minimumprotocol`` config option equivalent.
+def commonssloptions(minimumprotocol):
+ """Return SSLContext options common to servers and clients.
"""
- if protocol not in configprotocols:
- raise ValueError(b'protocol value not supported: %s' % protocol)
-
- # Despite its name, PROTOCOL_SSLv23 selects the highest protocol
- # that both ends support, including TLS protocols. On legacy stacks,
- # the highest it likely goes is TLS 1.0. On modern stacks, it can
- # support TLS 1.2.
- #
- # The PROTOCOL_TLSv* constants select a specific TLS version
- # only (as opposed to multiple versions). So the method for
- # supporting multiple TLS versions is to use PROTOCOL_SSLv23 and
- # disable protocols via SSLContext.options and OP_NO_* constants.
- # However, SSLContext.options doesn't work unless we have the
- # full/real SSLContext available to us.
- if supportedprotocols == {b'tls1.0'}:
- if protocol != b'tls1.0':
- raise error.Abort(
- _(b'current Python does not support protocol setting %s')
- % protocol,
- hint=_(
- b'upgrade Python or disable setting since '
- b'only TLS 1.0 is supported'
- ),
- )
-
- return ssl.PROTOCOL_TLSv1, 0, b'tls1.0'
-
- # WARNING: returned options don't work unless the modern ssl module
- # is available. Be careful when adding options here.
+ if minimumprotocol not in configprotocols:
+ raise ValueError(b'protocol value not supported: %s' % minimumprotocol)
# SSLv2 and SSLv3 are broken. We ban them outright.
options = ssl.OP_NO_SSLv2 | ssl.OP_NO_SSLv3
- if protocol == b'tls1.0':
+ if minimumprotocol == b'tls1.0':
# Defaults above are to use TLS 1.0+
pass
- elif protocol == b'tls1.1':
+ elif minimumprotocol == b'tls1.1':
options |= ssl.OP_NO_TLSv1
- elif protocol == b'tls1.2':
+ elif minimumprotocol == b'tls1.2':
options |= ssl.OP_NO_TLSv1 | ssl.OP_NO_TLSv1_1
else:
raise error.Abort(_(b'this should not happen'))
@@ -359,7 +249,7 @@
# There is no guarantee this attribute is defined on the module.
options |= getattr(ssl, 'OP_NO_COMPRESSION', 0)
- return ssl.PROTOCOL_SSLv23, options, protocol
+ return options
def wrapsocket(sock, keyfile, certfile, ui, serverhostname=None):
@@ -414,12 +304,12 @@
# bundle with a specific CA cert removed. If the system/default CA bundle
# is loaded and contains that removed CA, you've just undone the user's
# choice.
- sslcontext = SSLContext(settings[b'protocol'])
-
- # This is a no-op unless using modern ssl.
- sslcontext.options |= settings[b'ctxoptions']
-
- # This still works on our fake SSLContext.
+ #
+ # Despite its name, PROTOCOL_SSLv23 selects the highest protocol that both
+ # ends support, including TLS protocols. commonssloptions() restricts the
+ # set of allowed protocols.
+ sslcontext = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
+ sslcontext.options |= commonssloptions(settings[b'minimumprotocol'])
sslcontext.verify_mode = settings[b'verifymode']
if settings[b'ciphers']:
@@ -468,8 +358,6 @@
# If we're doing certificate verification and no CA certs are loaded,
# that is almost certainly the reason why verification failed. Provide
# a hint to the user.
- # Only modern ssl module exposes SSLContext.get_ca_certs() so we can
- # only show this warning if modern ssl is available.
# The exception handler is here to handle bugs around cert attributes:
# https://bugs.python.org/issue20916#msg213479. (See issues5313.)
# When the main 20916 bug occurs, 'sslcontext.get_ca_certs()' is a
@@ -478,7 +366,6 @@
if (
caloaded
and settings[b'verifymode'] == ssl.CERT_REQUIRED
- and modernssl
and not sslcontext.get_ca_certs()
):
ui.warn(
@@ -502,7 +389,7 @@
# reason, try to emit an actionable warning.
if e.reason == 'UNSUPPORTED_PROTOCOL':
# We attempted TLS 1.0+.
- if settings[b'protocolui'] == b'tls1.0':
+ if settings[b'minimumprotocol'] == b'tls1.0':
# We support more than just TLS 1.0+. If this happens,
# the likely scenario is either the client or the server
# is really old. (e.g. server doesn't support TLS 1.0+ or
@@ -547,7 +434,7 @@
b'to be more secure than the server can support)\n'
)
% (
- settings[b'protocolui'],
+ settings[b'minimumprotocol'],
pycompat.bytesurl(serverhostname),
)
)
@@ -618,12 +505,18 @@
_(b'referenced certificate file (%s) does not exist') % f
)
- protocol, options, _protocolui = protocolsettings(b'tls1.0')
+ # Despite its name, PROTOCOL_SSLv23 selects the highest protocol that both
+ # ends support, including TLS protocols. commonssloptions() restricts the
+ # set of allowed protocols.
+ protocol = ssl.PROTOCOL_SSLv23
+ options = commonssloptions(b'tls1.0')
# This config option is intended for use in tests only. It is a giant
# footgun to kill security. Don't define it.
exactprotocol = ui.config(b'devel', b'serverexactprotocol')
if exactprotocol == b'tls1.0':
+ if b'tls1.0' not in supportedprotocols:
+ raise error.Abort(_(b'TLS 1.0 not supported by this Python'))
protocol = ssl.PROTOCOL_TLSv1
elif exactprotocol == b'tls1.1':
if b'tls1.1' not in supportedprotocols:
@@ -638,23 +531,20 @@
_(b'invalid value for serverexactprotocol: %s') % exactprotocol
)
- if modernssl:
- # We /could/ use create_default_context() here since it doesn't load
- # CAs when configured for client auth. However, it is hard-coded to
- # use ssl.PROTOCOL_SSLv23 which may not be appropriate here.
- sslcontext = SSLContext(protocol)
- sslcontext.options |= options
+ # We /could/ use create_default_context() here since it doesn't load
+ # CAs when configured for client auth. However, it is hard-coded to
+ # use ssl.PROTOCOL_SSLv23 which may not be appropriate here.
+ sslcontext = ssl.SSLContext(protocol)
+ sslcontext.options |= options
- # Improve forward secrecy.
- sslcontext.options |= getattr(ssl, 'OP_SINGLE_DH_USE', 0)
- sslcontext.options |= getattr(ssl, 'OP_SINGLE_ECDH_USE', 0)
+ # Improve forward secrecy.
+ sslcontext.options |= getattr(ssl, 'OP_SINGLE_DH_USE', 0)
+ sslcontext.options |= getattr(ssl, 'OP_SINGLE_ECDH_USE', 0)
- # Use the list of more secure ciphers if found in the ssl module.
- if util.safehasattr(ssl, b'_RESTRICTED_SERVER_CIPHERS'):
- sslcontext.options |= getattr(ssl, 'OP_CIPHER_SERVER_PREFERENCE', 0)
- sslcontext.set_ciphers(ssl._RESTRICTED_SERVER_CIPHERS)
- else:
- sslcontext = SSLContext(ssl.PROTOCOL_TLSv1)
+ # Use the list of more secure ciphers if found in the ssl module.
+ if util.safehasattr(ssl, b'_RESTRICTED_SERVER_CIPHERS'):
+ sslcontext.options |= getattr(ssl, 'OP_CIPHER_SERVER_PREFERENCE', 0)
+ sslcontext.set_ciphers(ssl._RESTRICTED_SERVER_CIPHERS)
if requireclientcert:
sslcontext.verify_mode = ssl.CERT_REQUIRED
@@ -797,14 +687,6 @@
)
-_systemcacertpaths = [
- # RHEL, CentOS, and Fedora
- b'/etc/pki/tls/certs/ca-bundle.trust.crt',
- # Debian, Ubuntu, Gentoo
- b'/etc/ssl/certs/ca-certificates.crt',
-]
-
-
def _defaultcacerts(ui):
"""return path to default CA certificates or None.
@@ -827,23 +709,6 @@
except (ImportError, AttributeError):
pass
- # On Windows, only the modern ssl module is capable of loading the system
- # CA certificates. If we're not capable of doing that, emit a warning
- # because we'll get a certificate verification error later and the lack
- # of loaded CA certificates will be the reason why.
- # Assertion: this code is only called if certificates are being verified.
- if pycompat.iswindows:
- if not _canloaddefaultcerts:
- ui.warn(
- _(
- b'(unable to load Windows CA certificates; see '
- b'https://mercurial-scm.org/wiki/SecureConnections for '
- b'how to configure Mercurial to avoid this message)\n'
- )
- )
-
- return None
-
# Apple's OpenSSL has patches that allow a specially constructed certificate
# to load the system CA store. If we're running on Apple Python, use this
# trick.
@@ -854,58 +719,6 @@
if os.path.exists(dummycert):
return dummycert
- # The Apple OpenSSL trick isn't available to us. If Python isn't able to
- # load system certs, we're out of luck.
- if pycompat.isdarwin:
- # FUTURE Consider looking for Homebrew or MacPorts installed certs
- # files. Also consider exporting the keychain certs to a file during
- # Mercurial install.
- if not _canloaddefaultcerts:
- ui.warn(
- _(
- b'(unable to load CA certificates; see '
- b'https://mercurial-scm.org/wiki/SecureConnections for '
- b'how to configure Mercurial to avoid this message)\n'
- )
- )
- return None
-
- # / is writable on Windows. Out of an abundance of caution make sure
- # we're not on Windows because paths from _systemcacerts could be installed
- # by non-admin users.
- assert not pycompat.iswindows
-
- # Try to find CA certificates in well-known locations. We print a warning
- # when using a found file because we don't want too much silent magic
- # for security settings. The expectation is that proper Mercurial
- # installs will have the CA certs path defined at install time and the
- # installer/packager will make an appropriate decision on the user's
- # behalf. We only get here and perform this setting as a feature of
- # last resort.
- if not _canloaddefaultcerts:
- for path in _systemcacertpaths:
- if os.path.isfile(path):
- ui.warn(
- _(
- b'(using CA certificates from %s; if you see this '
- b'message, your Mercurial install is not properly '
- b'configured; see '
- b'https://mercurial-scm.org/wiki/SecureConnections '
- b'for how to configure Mercurial to avoid this '
- b'message)\n'
- )
- % path
- )
- return path
-
- ui.warn(
- _(
- b'(unable to load CA certificates; see '
- b'https://mercurial-scm.org/wiki/SecureConnections for '
- b'how to configure Mercurial to avoid this message)\n'
- )
- )
-
return None
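
Putting the sslutil pieces together: client contexts are now always built from
the real ssl.SSLContext. A condensed sketch under the names defined above
(_clientcontext is invented; cert loading and error handling are omitted):

    import ssl

    def _clientcontext(minimumprotocol=b'tls1.1'):
        # PROTOCOL_SSLv23 negotiates the highest version both ends support;
        # commonssloptions() then masks out everything below the minimum.
        ctx = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
        ctx.options |= commonssloptions(minimumprotocol)
        ctx.verify_mode = ssl.CERT_REQUIRED
        ctx.load_default_certs()
        return ctx
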
--- a/mercurial/state.py Tue Jul 14 10:25:41 2020 +0200
+++ b/mercurial/state.py Mon Jul 20 21:56:27 2020 +0530
@@ -19,6 +19,8 @@
from __future__ import absolute_import
+import contextlib
+
from .i18n import _
from . import (
@@ -119,6 +121,7 @@
reportonly,
continueflag,
stopflag,
+ childopnames,
cmdmsg,
cmdhint,
statushint,
@@ -132,6 +135,8 @@
self._reportonly = reportonly
self._continueflag = continueflag
self._stopflag = stopflag
+ self._childopnames = childopnames
+ self._delegating = False
self._cmdmsg = cmdmsg
self._cmdhint = cmdhint
self._statushint = statushint
@@ -181,12 +186,15 @@
"""
if self._opname == b'merge':
return len(repo[None].parents()) > 1
+ elif self._delegating:
+ return False
else:
return repo.vfs.exists(self._fname)
# A list of statecheck objects for multistep operations like graft.
_unfinishedstates = []
+_unfinishedstatesbyname = {}
def addunfinished(
@@ -197,6 +205,7 @@
reportonly=False,
continueflag=False,
stopflag=False,
+ childopnames=None,
cmdmsg=b"",
cmdhint=b"",
statushint=b"",
@@ -218,6 +227,8 @@
`--continue` option or not.
stopflag is a boolean that determines whether or not a command supports
--stop flag
+ childopnames is a list of other opnames this op uses as sub-steps of its
+ own execution. They must already be added.
cmdmsg is used to pass a different status message in case standard
message of the format "abort: cmdname in progress" is not desired.
cmdhint is used to pass a different hint message in case standard
@@ -230,6 +241,7 @@
continuefunc stores the function required to finish an interrupted
operation.
"""
+ childopnames = childopnames or []
statecheckobj = _statecheck(
opname,
fname,
@@ -238,17 +250,98 @@
reportonly,
continueflag,
stopflag,
+ childopnames,
cmdmsg,
cmdhint,
statushint,
abortfunc,
continuefunc,
)
+
if opname == b'merge':
_unfinishedstates.append(statecheckobj)
else:
+ # This check enforces that for any op 'foo' which depends on op 'bar',
+ # 'foo' comes before 'bar' in _unfinishedstates. This ensures that
+ # getrepostate() always returns the most specific applicable answer.
+ for childopname in childopnames:
+ if childopname not in _unfinishedstatesbyname:
+ raise error.ProgrammingError(
+ _(b'op %s depends on unknown op %s') % (opname, childopname)
+ )
+
_unfinishedstates.insert(0, statecheckobj)
+ if opname in _unfinishedstatesbyname:
+ raise error.ProgrammingError(_(b'op %s registered twice') % opname)
+ _unfinishedstatesbyname[opname] = statecheckobj
+
+
+def _getparentandchild(opname, childopname):
+ p = _unfinishedstatesbyname.get(opname, None)
+ if not p:
+ raise error.ProgrammingError(_(b'unknown op %s') % opname)
+ if childopname not in p._childopnames:
+ raise error.ProgrammingError(
+ _(b'op %s does not delegate to %s') % (opname, childopname)
+ )
+ c = _unfinishedstatesbyname[childopname]
+ return p, c
+
+
+@contextlib.contextmanager
+def delegating(repo, opname, childopname):
+ """context wrapper for delegations from opname to childopname.
+
+ requires that childopname was specified when opname was registered.
+
+ Usage:
+ def my_command_foo_that_uses_rebase(...):
+ ...
+ with state.delegating(repo, 'foo', 'rebase'):
+ _run_rebase(...)
+ ...
+ """
+
+ p, c = _getparentandchild(opname, childopname)
+ if p._delegating:
+ raise error.ProgrammingError(
+ _(b'cannot delegate from op %s recursively') % opname
+ )
+ p._delegating = True
+ try:
+ yield
+ except error.ConflictResolutionRequired as e:
+ # Rewrite conflict resolution advice for the parent opname.
+ if e.opname == childopname:
+ raise error.ConflictResolutionRequired(opname)
+ raise e
+ finally:
+ p._delegating = False
+
+
+def ischildunfinished(repo, opname, childopname):
+ """Returns true if both opname and childopname are unfinished."""
+
+ p, c = _getparentandchild(opname, childopname)
+ return (p._delegating or p.isunfinished(repo)) and c.isunfinished(repo)
+
+
+def continuechild(ui, repo, opname, childopname):
+ """Checks that childopname is in progress, and continues it."""
+
+ p, c = _getparentandchild(opname, childopname)
+ if not ischildunfinished(repo, opname, childopname):
+ raise error.ProgrammingError(
+ _(b'child op %s of parent %s is not unfinished')
+ % (childopname, opname)
+ )
+ if not c.continuefunc:
+ raise error.ProgrammingError(
+ _(b'op %s has no continue function') % childopname
+ )
+ return c.continuefunc(ui, repo)
+
addunfinished(
b'update',
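
A hypothetical registration showing how the new pieces of mercurial/state.py
fit together; the opnames here are purely illustrative, not what this patch
wires up:

    # 'transplant' delegating to 'rebase' is an invented example.
    addunfinished(
        b'transplant',
        fname=b'transplant-state',
        continueflag=True,
        childopnames=[b'rebase'],  # 'rebase' must already be registered
    )

    def _runwithdelegation(ui, repo):
        with delegating(repo, b'transplant', b'rebase'):
            if ischildunfinished(repo, b'transplant', b'rebase'):
                return continuechild(ui, repo, b'transplant', b'rebase')
            # ... otherwise start the child step from scratch
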
--- a/mercurial/streamclone.py Tue Jul 14 10:25:41 2020 +0200
+++ b/mercurial/streamclone.py Mon Jul 20 21:56:27 2020 +0530
@@ -20,6 +20,7 @@
narrowspec,
phases,
pycompat,
+ scmutil,
store,
util,
)
@@ -187,7 +188,7 @@
repo.svfs.options = localrepo.resolvestorevfsoptions(
repo.ui, repo.requirements, repo.features
)
- repo._writerequirements()
+ scmutil.writereporequirements(repo)
if rbranchmap:
repo._branchcaches.replace(repo, rbranchmap)
@@ -730,4 +731,4 @@
repo.svfs.options = localrepo.resolvestorevfsoptions(
repo.ui, repo.requirements, repo.features
)
- repo._writerequirements()
+ scmutil.writereporequirements(repo)
--- a/mercurial/subrepo.py Tue Jul 14 10:25:41 2020 +0200
+++ b/mercurial/subrepo.py Mon Jul 20 21:56:27 2020 +0530
@@ -617,8 +617,8 @@
ui,
self._repo,
diffopts,
- node1,
- node2,
+ self._repo[node1],
+ self._repo[node2],
match,
prefix=prefix,
listsubrepos=True,
@@ -639,7 +639,7 @@
rev = self._state[1]
ctx = self._repo[rev]
scmutil.prefetchfiles(
- self._repo, [ctx.rev()], scmutil.matchfiles(self._repo, files)
+ self._repo, [(ctx.rev(), scmutil.matchfiles(self._repo, files))]
)
total = abstractsubrepo.archive(self, archiver, prefix, match)
for subpath in ctx.substate:
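
The prefetchfiles change above generalizes the old (repo, revs, match)
signature to a list of (rev, matcher) pairs, so a single call can prefetch a
different file set per revision. Hypothetical use, with invented file names:

    scmutil.prefetchfiles(
        repo,
        [
            (ctx1.rev(), scmutil.matchfiles(repo, [b'a.txt'])),
            (ctx2.rev(), scmutil.matchfiles(repo, [b'b.txt', b'c.txt'])),
        ],
    )
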
--- a/mercurial/templatekw.py Tue Jul 14 10:25:41 2020 +0200
+++ b/mercurial/templatekw.py Mon Jul 20 21:56:27 2020 +0530
@@ -419,9 +419,9 @@
else:
merge_nodes = cache.get(b'merge_nodes')
if merge_nodes is None:
- from . import merge
+ from . import mergestate as mergestatemod
- mergestate = merge.mergestate.read(repo)
+ mergestate = mergestatemod.mergestate.read(repo)
if mergestate.active():
merge_nodes = (mergestate.local, mergestate.other)
else:
--- a/mercurial/ui.py Tue Jul 14 10:25:41 2020 +0200
+++ b/mercurial/ui.py Mon Jul 20 21:56:27 2020 +0530
@@ -9,6 +9,7 @@
import collections
import contextlib
+import datetime
import errno
import getpass
import inspect
@@ -242,6 +243,7 @@
self._terminfoparams = {}
self._styles = {}
self._uninterruptible = False
+ self.showtimestamp = False
if src:
self._fout = src._fout
@@ -561,6 +563,7 @@
self._reportuntrusted = self.debugflag or self.configbool(
b"ui", b"report_untrusted"
)
+ self.showtimestamp = self.configbool(b'ui', b'timestamp-output')
self.tracebackflag = self.configbool(b'ui', b'traceback')
self.logblockedtimes = self.configbool(b'ui', b'logblockedtimes')
@@ -1200,7 +1203,7 @@
dest.write(msg)
# stderr may be buffered under win32 when redirected to files,
# including stdout.
- if dest is self._ferr and not getattr(self._ferr, 'closed', False):
+ if dest is self._ferr and not getattr(dest, 'closed', False):
dest.flush()
except IOError as err:
if dest is self._ferr and err.errno in (
@@ -1217,7 +1220,21 @@
) * 1000
def _writemsg(self, dest, *args, **opts):
+ timestamp = self.showtimestamp and opts.get('type') in {
+ b'debug',
+ b'error',
+ b'note',
+ b'status',
+ b'warning',
+ }
+ if timestamp:
+ args = (
+ b'[%s] '
+ % pycompat.bytestr(datetime.datetime.now().isoformat()),
+ ) + args
_writemsgwith(self._write, dest, *args, **opts)
+ if timestamp:
+ dest.flush()
def _writemsgnobuf(self, dest, *args, **opts):
_writemsgwith(self._writenobuf, dest, *args, **opts)
@@ -2102,6 +2119,22 @@
if (b'ui', b'quiet') in overrides:
self.fixconfig(section=b'ui')
+ def estimatememory(self):
+ """Provide an estimate for the available system memory in Bytes.
+
+ This can be overridden via ui.available-memory. It returns None if
+ no estimate can be computed.
+ """
+ value = self.config(b'ui', b'available-memory')
+ if value is not None:
+ try:
+ return util.sizetoint(value)
+ except error.ParseError:
+ raise error.ConfigError(
+ _(b"ui.available-memory value is invalid ('%s')") % value
+ )
+ return util._estimatememory()
+
class paths(dict):
"""Represents a collection of paths and their configs.
--- a/mercurial/upgrade.py Tue Jul 14 10:25:41 2020 +0200
+++ b/mercurial/upgrade.py Mon Jul 20 21:56:27 2020 +0530
@@ -13,12 +13,12 @@
from .pycompat import getattr
from . import (
changelog,
- copies,
error,
filelog,
hg,
localrepo,
manifest,
+ metadata,
pycompat,
revlog,
scmutil,
@@ -78,6 +78,7 @@
localrepo.SPARSEREVLOG_REQUIREMENT,
localrepo.SIDEDATA_REQUIREMENT,
localrepo.COPIESSDC_REQUIREMENT,
+ localrepo.NODEMAP_REQUIREMENT,
}
for name in compression.compengines:
engine = compression.compengines[name]
@@ -105,6 +106,7 @@
localrepo.SPARSEREVLOG_REQUIREMENT,
localrepo.SIDEDATA_REQUIREMENT,
localrepo.COPIESSDC_REQUIREMENT,
+ localrepo.NODEMAP_REQUIREMENT,
}
for name in compression.compengines:
engine = compression.compengines[name]
@@ -132,6 +134,7 @@
localrepo.SPARSEREVLOG_REQUIREMENT,
localrepo.SIDEDATA_REQUIREMENT,
localrepo.COPIESSDC_REQUIREMENT,
+ localrepo.NODEMAP_REQUIREMENT,
}
for name in compression.compengines:
engine = compression.compengines[name]
@@ -374,6 +377,21 @@
@registerformatvariant
+class persistentnodemap(requirementformatvariant):
+ name = b'persistent-nodemap'
+
+ _requirement = localrepo.NODEMAP_REQUIREMENT
+
+ default = False
+
+ description = _(
+ b'persist the node -> rev mapping on disk to speed up lookups'
+ )
+
+ upgrademessage = _(b'Speed up revision lookup by node id.')
+
+
+@registerformatvariant
class copiessdc(requirementformatvariant):
name = b'copies-sdc'
@@ -716,9 +734,9 @@
return False, (), {}
elif localrepo.COPIESSDC_REQUIREMENT in addedreqs:
- sidedatacompanion = copies.getsidedataadder(srcrepo, dstrepo)
+ sidedatacompanion = metadata.getsidedataadder(srcrepo, dstrepo)
elif localrepo.COPIESSDC_REQUIREMENT in removedreqs:
- sidedatacompanion = copies.getsidedataremover(srcrepo, dstrepo)
+ sidedatacompanion = metadata.getsidedataremover(srcrepo, dstrepo)
return sidedatacompanion
@@ -807,14 +825,14 @@
if not revcount:
return
- ui.write(
+ ui.status(
_(
b'migrating %d total revisions (%d in filelogs, %d in manifests, '
b'%d in changelog)\n'
)
% (revcount, frevcount, mrevcount, crevcount)
)
- ui.write(
+ ui.status(
_(b'migrating %s in store; %s tracked data\n')
% ((util.bytecount(srcsize), util.bytecount(srcrawsize)))
)
@@ -837,7 +855,7 @@
oldrl = _revlogfrompath(srcrepo, unencoded)
if isinstance(oldrl, changelog.changelog) and b'c' not in seen:
- ui.write(
+ ui.status(
_(
b'finished migrating %d manifest revisions across %d '
b'manifests; change in size: %s\n'
@@ -845,7 +863,7 @@
% (mrevcount, mcount, util.bytecount(mdstsize - msrcsize))
)
- ui.write(
+ ui.status(
_(
b'migrating changelog containing %d revisions '
b'(%s in store; %s tracked data)\n'
@@ -861,7 +879,7 @@
_(b'changelog revisions'), total=crevcount
)
elif isinstance(oldrl, manifest.manifestrevlog) and b'm' not in seen:
- ui.write(
+ ui.status(
_(
b'finished migrating %d filelog revisions across %d '
b'filelogs; change in size: %s\n'
@@ -869,7 +887,7 @@
% (frevcount, fcount, util.bytecount(fdstsize - fsrcsize))
)
- ui.write(
+ ui.status(
_(
b'migrating %d manifests containing %d revisions '
b'(%s in store; %s tracked data)\n'
@@ -888,7 +906,7 @@
_(b'manifest revisions'), total=mrevcount
)
elif b'f' not in seen:
- ui.write(
+ ui.status(
_(
b'migrating %d filelogs containing %d revisions '
b'(%s in store; %s tracked data)\n'
@@ -941,7 +959,7 @@
progress.complete()
- ui.write(
+ ui.status(
_(
b'finished migrating %d changelog revisions; change in size: '
b'%s\n'
@@ -949,7 +967,7 @@
% (crevcount, util.bytecount(cdstsize - csrcsize))
)
- ui.write(
+ ui.status(
_(
b'finished migrating %d total revisions; total change in store '
b'size: %s\n'
@@ -975,7 +993,7 @@
Function should return ``True`` if the file is to be copied.
"""
# Skip revlogs.
- if path.endswith((b'.i', b'.d')):
+ if path.endswith((b'.i', b'.d', b'.n', b'.nd')):
return False
# Skip transaction related files.
if path.startswith(b'undo'):
@@ -1013,7 +1031,7 @@
assert srcrepo.currentwlock()
assert dstrepo.currentwlock()
- ui.write(
+ ui.status(
_(
b'(it is safe to interrupt this process any time before '
b'data migration completes)\n'
@@ -1048,14 +1066,14 @@
if not _filterstorefile(srcrepo, dstrepo, requirements, p, kind, st):
continue
- srcrepo.ui.write(_(b'copying %s\n') % p)
+ srcrepo.ui.status(_(b'copying %s\n') % p)
src = srcrepo.store.rawvfs.join(p)
dst = dstrepo.store.rawvfs.join(p)
util.copyfile(src, dst, copystat=True)
_finishdatamigration(ui, srcrepo, dstrepo, requirements)
- ui.write(_(b'data fully migrated to temporary repository\n'))
+ ui.status(_(b'data fully migrated to temporary repository\n'))
backuppath = pycompat.mkdtemp(prefix=b'upgradebackup.', dir=srcrepo.path)
backupvfs = vfsmod.vfs(backuppath)
@@ -1067,28 +1085,28 @@
# as a mechanism to lock out new clients during the data swap. This is
# better than allowing a client to continue while the repository is in
# an inconsistent state.
- ui.write(
+ ui.status(
_(
b'marking source repository as being upgraded; clients will be '
b'unable to read from repository\n'
)
)
- scmutil.writerequires(
- srcrepo.vfs, srcrepo.requirements | {b'upgradeinprogress'}
+ scmutil.writereporequirements(
+ srcrepo, srcrepo.requirements | {b'upgradeinprogress'}
)
- ui.write(_(b'starting in-place swap of repository data\n'))
- ui.write(_(b'replaced files will be backed up at %s\n') % backuppath)
+ ui.status(_(b'starting in-place swap of repository data\n'))
+ ui.status(_(b'replaced files will be backed up at %s\n') % backuppath)
# Now swap in the new store directory. Doing it as a rename should make
# the operation nearly instantaneous and atomic (at least in well-behaved
# environments).
- ui.write(_(b'replacing store...\n'))
+ ui.status(_(b'replacing store...\n'))
tstart = util.timer()
util.rename(srcrepo.spath, backupvfs.join(b'store'))
util.rename(dstrepo.spath, srcrepo.spath)
elapsed = util.timer() - tstart
- ui.write(
+ ui.status(
_(
b'store replacement complete; repository was inconsistent for '
b'%0.1fs\n'
@@ -1098,13 +1116,13 @@
# We first write the requirements file. Any new requirements will lock
# out legacy clients.
- ui.write(
+ ui.status(
_(
b'finalizing requirements file and making repository readable '
b'again\n'
)
)
- scmutil.writerequires(srcrepo.vfs, requirements)
+ scmutil.writereporequirements(srcrepo, requirements)
# The lock file from the old store won't be removed because nothing has a
# reference to its new location. So clean it up manually. Alternatively, we
@@ -1274,9 +1292,20 @@
ui.write((b'\n'))
ui.write(b'\n')
+ def printoptimisations():
+ optimisations = [a for a in actions if a.type == optimisation]
+ optimisations.sort(key=lambda a: a.name)
+ if optimisations:
+ ui.write(_(b'optimisations: '))
+ write_labeled(
+ [a.name for a in optimisations],
+ "upgrade-repo.optimisation.performed",
+ )
+ ui.write(b'\n\n')
+
def printupgradeactions():
for a in actions:
- ui.write(b'%s\n %s\n\n' % (a.name, a.upgrademessage))
+ ui.status(b'%s\n %s\n\n' % (a.name, a.upgrademessage))
if not run:
fromconfig = []
@@ -1291,35 +1320,35 @@
if fromconfig or onlydefault:
if fromconfig:
- ui.write(
+ ui.status(
_(
b'repository lacks features recommended by '
b'current config options:\n\n'
)
)
for i in fromconfig:
- ui.write(b'%s\n %s\n\n' % (i.name, i.description))
+ ui.status(b'%s\n %s\n\n' % (i.name, i.description))
if onlydefault:
- ui.write(
+ ui.status(
_(
b'repository lacks features used by the default '
b'config options:\n\n'
)
)
for i in onlydefault:
- ui.write(b'%s\n %s\n\n' % (i.name, i.description))
+ ui.status(b'%s\n %s\n\n' % (i.name, i.description))
- ui.write(b'\n')
+ ui.status(b'\n')
else:
- ui.write(
+ ui.status(
_(
b'(no feature deficiencies found in existing '
b'repository)\n'
)
)
- ui.write(
+ ui.status(
_(
b'performing an upgrade with "--run" will make the following '
b'changes:\n\n'
@@ -1327,31 +1356,33 @@
)
printrequirements()
+ printoptimisations()
printupgradeactions()
unusedoptimize = [i for i in alloptimizations if i not in actions]
if unusedoptimize:
- ui.write(
+ ui.status(
_(
b'additional optimizations are available by specifying '
b'"--optimize <name>":\n\n'
)
)
for i in unusedoptimize:
- ui.write(_(b'%s\n %s\n\n') % (i.name, i.description))
+ ui.status(_(b'%s\n %s\n\n') % (i.name, i.description))
return
# Else we're in the run=true case.
ui.write(_(b'upgrade will perform the following actions:\n\n'))
printrequirements()
+ printoptimisations()
printupgradeactions()
upgradeactions = [a.name for a in actions]
- ui.write(_(b'beginning upgrade...\n'))
+ ui.status(_(b'beginning upgrade...\n'))
with repo.wlock(), repo.lock():
- ui.write(_(b'repository locked and read-only\n'))
+ ui.status(_(b'repository locked and read-only\n'))
# Our strategy for upgrading the repository is to create a new,
# temporary repository, write data to it, then do a swap of the
# data. There are less heavyweight ways to do this, but it is easier
@@ -1360,7 +1391,7 @@
tmppath = pycompat.mkdtemp(prefix=b'upgrade.', dir=repo.path)
backuppath = None
try:
- ui.write(
+ ui.status(
_(
b'creating temporary repository to stage migrated '
b'data: %s\n'
@@ -1377,15 +1408,17 @@
ui, repo, dstrepo, newreqs, upgradeactions, revlogs=revlogs
)
if not (backup or backuppath is None):
- ui.write(_(b'removing old repository content%s\n') % backuppath)
+ ui.status(
+ _(b'removing old repository content%s\n') % backuppath
+ )
repo.vfs.rmtree(backuppath, forcibly=True)
backuppath = None
finally:
- ui.write(_(b'removing temporary repository %s\n') % tmppath)
+ ui.status(_(b'removing temporary repository %s\n') % tmppath)
repo.vfs.rmtree(tmppath, forcibly=True)
- if backuppath:
+ if backuppath and not ui.quiet:
ui.warn(
_(b'copy of old repository backed up at %s\n') % backuppath
)
--- a/mercurial/util.py Tue Jul 14 10:25:41 2020 +0200
+++ b/mercurial/util.py Mon Jul 20 21:56:27 2020 +0530
@@ -205,6 +205,8 @@
b" update your code.)"
) % version
warnings.warn(pycompat.sysstr(msg), DeprecationWarning, stacklevel + 1)
+ # on python 3 with chg, we will need to explicitly flush the output
+ sys.stderr.flush()
DIGESTS = {
@@ -1379,8 +1381,8 @@
@contextlib.contextmanager
-def nullcontextmanager():
- yield
+def nullcontextmanager(enter_result=None):
+ yield enter_result
class _lrucachenode(object):
@@ -2845,7 +2847,7 @@
# [1]: fixed by changeset 67dc99a989cd in the cpython hg repo.
#
# Here we workaround the EINTR issue for fileobj.__iter__. Other methods
- # like "read*" are ignored for now, as Python < 2.7.4 is a minority.
+ # like "read*" work fine, as we do not support Python < 2.7.4.
#
# Although we can workaround the EINTR issue for fp.__iter__, it is slower:
# "for x in fp" is 4x faster than "for x in iter(fp.readline, '')" in
@@ -2857,39 +2859,6 @@
# affects things like pipes, sockets, ttys etc. We treat "normal" (S_ISREG)
# files approximately as "fast" files and use the fast (unsafe) code path,
# to minimize the performance impact.
- if sys.version_info >= (2, 7, 4):
- # fp.readline deals with EINTR correctly, use it as a workaround.
- def _safeiterfile(fp):
- return iter(fp.readline, b'')
-
- else:
- # fp.read* are broken too, manually deal with EINTR in a stupid way.
- # note: this may block longer than necessary because of bufsize.
- def _safeiterfile(fp, bufsize=4096):
- fd = fp.fileno()
- line = b''
- while True:
- try:
- buf = os.read(fd, bufsize)
- except OSError as ex:
- # os.read only raises EINTR before any data is read
- if ex.errno == errno.EINTR:
- continue
- else:
- raise
- line += buf
- if b'\n' in buf:
- splitted = line.splitlines(True)
- line = b''
- for l in splitted:
- if l[-1] == b'\n':
- yield l
- else:
- line = l
- if not buf:
- break
- if line:
- yield line
def iterfile(fp):
fastpath = True
@@ -2898,7 +2867,8 @@
if fastpath:
return fp
else:
- return _safeiterfile(fp)
+ # fp.readline deals with EINTR correctly, use it as a workaround.
+ return iter(fp.readline, b'')
else:
@@ -3656,3 +3626,44 @@
locale.setlocale(locale.LC_CTYPE, oldloc)
else:
yield
+
+
+def _estimatememory():
+ """Provide an estimate for the available system memory in Bytes.
+
+ If no estimate can be provided on the platform, returns None.
+ """
+ if pycompat.sysplatform.startswith(b'win'):
+ # On Windows, use the GlobalMemoryStatusEx kernel function directly.
+ from ctypes import c_long as DWORD, c_ulonglong as DWORDLONG
+ from ctypes.wintypes import Structure, byref, sizeof, windll
+
+ class MEMORYSTATUSEX(Structure):
+ _fields_ = [
+ ('dwLength', DWORD),
+ ('dwMemoryLoad', DWORD),
+ ('ullTotalPhys', DWORDLONG),
+ ('ullAvailPhys', DWORDLONG),
+ ('ullTotalPageFile', DWORDLONG),
+ ('ullAvailPageFile', DWORDLONG),
+ ('ullTotalVirtual', DWORDLONG),
+ ('ullAvailVirtual', DWORDLONG),
+ ('ullExtendedVirtual', DWORDLONG),
+ ]
+
+ x = MEMORYSTATUSEX()
+ x.dwLength = sizeof(x)
+ windll.kernel32.GlobalMemoryStatusEx(byref(x))
+ return x.ullAvailPhys
+
+ # On newer Unix-like systems and macOS, the sysconf interface
+ # can be used. _SC_PAGE_SIZE is part of POSIX; _SC_PHYS_PAGES
+ # seems to be implemented on most systems.
+ try:
+ pagesize = os.sysconf(os.sysconf_names['SC_PAGE_SIZE'])
+ pages = os.sysconf(os.sysconf_names['SC_PHYS_PAGES'])
+ return pagesize * pages
+ except OSError: # sysconf can fail
+ pass
+ except KeyError: # unknown parameter
+ pass
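
The new enter_result parameter mirrors contextlib.nullcontext(enter_result)
from Python 3.7: call sites can uniformly write "with x() as y" whether or not
a real resource is acquired. A minimal sketch (maybe_lock is invented):

    def maybe_lock(repo, really):
        if really:
            return repo.lock()
        return nullcontextmanager(enter_result=None)

    with maybe_lock(repo, really=False) as lock:
        pass  # lock is None here; the code path is identical either way
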
--- a/mercurial/utils/procutil.py Tue Jul 14 10:25:41 2020 +0200
+++ b/mercurial/utils/procutil.py Mon Jul 20 21:56:27 2020 +0530
@@ -37,9 +37,10 @@
osutil = policy.importmod('osutil')
-stderr = pycompat.stderr
-stdin = pycompat.stdin
-stdout = pycompat.stdout
+if pycompat.iswindows:
+ from .. import windows as platform
+else:
+ from .. import posix as platform
def isatty(fp):
@@ -49,33 +50,108 @@
return False
-# Python 2 uses the C library's standard I/O streams. Glibc determines
-# buffering on first write to stdout - if we replace a TTY destined stdout with
-# a pipe destined stdout (e.g. pager), we want line buffering (or unbuffered,
-# on Windows).
-# Python 3 rolls its own standard I/O streams.
-if isatty(stdout):
+class LineBufferedWrapper(object):
+ def __init__(self, orig):
+ self.orig = orig
+
+ def __getattr__(self, attr):
+ return getattr(self.orig, attr)
+
+ def write(self, s):
+ orig = self.orig
+ res = orig.write(s)
+ if s.endswith(b'\n'):
+ orig.flush()
+ return res
+
+
+io.BufferedIOBase.register(LineBufferedWrapper)
+
+
+def make_line_buffered(stream):
+ if pycompat.ispy3 and not isinstance(stream, io.BufferedIOBase):
+ # On Python 3, buffered streams can be expected to subclass
+ # BufferedIOBase. This is definitely the case for the streams
+ # initialized by the interpreter. For unbuffered streams, we don't need
+ # to emulate line buffering.
+ return stream
+ if isinstance(stream, LineBufferedWrapper):
+ return stream
+ return LineBufferedWrapper(stream)
+
+
+class WriteAllWrapper(object):
+ def __init__(self, orig):
+ self.orig = orig
+
+ def __getattr__(self, attr):
+ return getattr(self.orig, attr)
+
+ def write(self, s):
+ write1 = self.orig.write
+ m = memoryview(s)
+ total_to_write = len(s)
+ total_written = 0
+ while total_written < total_to_write:
+ total_written += write1(m[total_written:])
+ return total_written
+
+
+io.IOBase.register(WriteAllWrapper)
+
+
+def _make_write_all(stream):
+ assert pycompat.ispy3
+ if isinstance(stream, WriteAllWrapper):
+ return stream
+ if isinstance(stream, io.BufferedIOBase):
+ # The io.BufferedIOBase.write() contract guarantees that all data is
+ # written.
+ return stream
+ # In general, the write() method of streams is free to write only part of
+ # the data.
+ return WriteAllWrapper(stream)
+
+
+if pycompat.ispy3:
+ # Python 3 implements its own I/O streams.
+ # TODO: .buffer might not exist if std streams were replaced; we'll need
+ # a silly wrapper to make a bytes stream backed by a unicode one.
+ stdin = sys.stdin.buffer
+ stdout = _make_write_all(sys.stdout.buffer)
+ stderr = _make_write_all(sys.stderr.buffer)
if pycompat.iswindows:
- # Windows doesn't support line buffering
- stdout = os.fdopen(stdout.fileno(), 'wb', 0)
- elif not pycompat.ispy3:
- # on Python 3, stdout (sys.stdout.buffer) is already line buffered and
- # buffering=1 is not handled in binary mode
- stdout = os.fdopen(stdout.fileno(), 'wb', 1)
+ # Work around Windows bugs.
+ stdout = platform.winstdout(stdout)
+ stderr = platform.winstdout(stderr)
+ if isatty(stdout):
+ # The standard library doesn't offer line-buffered binary streams.
+ stdout = make_line_buffered(stdout)
+else:
+ # Python 2 uses the I/O streams provided by the C library.
+ stdin = sys.stdin
+ stdout = sys.stdout
+ stderr = sys.stderr
+ if pycompat.iswindows:
+ # Work around Windows bugs.
+ stdout = platform.winstdout(stdout)
+ stderr = platform.winstdout(stderr)
+ if isatty(stdout):
+ if pycompat.iswindows:
+ # The Windows C runtime library doesn't support line buffering.
+ stdout = make_line_buffered(stdout)
+ else:
+ # glibc determines buffering on first write to stdout - if we
+ # replace a TTY destined stdout with a pipe destined stdout (e.g.
+ # pager), we want line buffering.
+ stdout = os.fdopen(stdout.fileno(), 'wb', 1)
-if pycompat.iswindows:
- from .. import windows as platform
-
- stdout = platform.winstdout(stdout)
-else:
- from .. import posix as platform
findexe = platform.findexe
_gethgcmd = platform.gethgcmd
getuser = platform.getuser
getpid = os.getpid
hidewindow = platform.hidewindow
-quotecommand = platform.quotecommand
readpipe = platform.readpipe
setbinary = platform.setbinary
setsignalhandler = platform.setsignalhandler
@@ -140,7 +216,7 @@
def _popenreader(cmd, bufsize):
p = subprocess.Popen(
- tonativestr(quotecommand(cmd)),
+ tonativestr(cmd),
shell=True,
bufsize=bufsize,
close_fds=closefds,
@@ -151,7 +227,7 @@
def _popenwriter(cmd, bufsize):
p = subprocess.Popen(
- tonativestr(quotecommand(cmd)),
+ tonativestr(cmd),
shell=True,
bufsize=bufsize,
close_fds=closefds,
@@ -397,7 +473,6 @@
stdout.flush()
except Exception:
pass
- cmd = quotecommand(cmd)
env = shellenviron(environ)
if out is None or isstdout(out):
rc = subprocess.call(
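
On Python 3 the module-level streams are assembled from sys.std*.buffer as in
the hunk above; a condensed sketch of the composition (mirrors this patch,
trimmed of the Windows branch):

    import sys

    out = _make_write_all(sys.stdout.buffer)
    # BufferedWriter.write() already writes everything, so the wrapper is a
    # no-op there; raw or unknown streams get wrapped in WriteAllWrapper.
    if out.isatty():
        out = make_line_buffered(out)  # flush whenever a write ends in b'\n'
    out.write(b'ready\n')
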
--- a/mercurial/windows.py Tue Jul 14 10:25:41 2020 +0200
+++ b/mercurial/windows.py Mon Jul 20 21:56:27 2020 +0530
@@ -186,11 +186,26 @@
listdir = osutil.listdir
+# copied from .utils.procutil; remove after Python 2 support is dropped
+def _isatty(fp):
+ try:
+ return fp.isatty()
+ except AttributeError:
+ return False
+
+
class winstdout(object):
- '''stdout on windows misbehaves if sent through a pipe'''
+ '''Some files on Windows misbehave.
+
+ When writing to a broken pipe, EINVAL instead of EPIPE may be raised.
+
+ When writing too many bytes to a console at the same time, a "Not enough space"
+ error may happen. Python 3 already works around that.
+ '''
def __init__(self, fp):
self.fp = fp
+ self.throttle = not pycompat.ispy3 and _isatty(fp)
def __getattr__(self, key):
return getattr(self.fp, key)
@@ -203,12 +218,13 @@
def write(self, s):
try:
+ if not self.throttle:
+ return self.fp.write(s)
# This is workaround for "Not enough space" error on
# writing large size of data to console.
limit = 16000
l = len(s)
start = 0
- self.softspace = 0
while start < l:
end = start + limit
self.fp.write(s[start:end])
@@ -474,14 +490,6 @@
return pycompat.maplist(_unquote, pycompat.shlexsplit(s, posix=False))
-def quotecommand(cmd):
- """Build a command string suitable for os.popen* calls."""
- if sys.version_info < (2, 7, 1):
- # Python versions since 2.7.1 do this extra quoting themselves
- return b'"' + cmd + b'"'
- return cmd
-
-
# if you change this stub into a real check, please try to implement the
# username and groupname functions above, too.
def isowner(st):
--- a/mercurial/wireprotov1server.py Tue Jul 14 10:25:41 2020 +0200
+++ b/mercurial/wireprotov1server.py Mon Jul 20 21:56:27 2020 +0530
@@ -339,7 +339,7 @@
def changegroup(repo, proto, roots):
nodes = wireprototypes.decodelist(roots)
outgoing = discovery.outgoing(
- repo, missingroots=nodes, missingheads=repo.heads()
+ repo, missingroots=nodes, ancestorsof=repo.heads()
)
cg = changegroupmod.makechangegroup(repo, outgoing, b'01', b'serve')
gen = iter(lambda: cg.read(32768), b'')
@@ -350,7 +350,7 @@
def changegroupsubset(repo, proto, bases, heads):
bases = wireprototypes.decodelist(bases)
heads = wireprototypes.decodelist(heads)
- outgoing = discovery.outgoing(repo, missingroots=bases, missingheads=heads)
+ outgoing = discovery.outgoing(repo, missingroots=bases, ancestorsof=heads)
cg = changegroupmod.makechangegroup(repo, outgoing, b'01', b'serve')
gen = iter(lambda: cg.read(32768), b'')
return wireprototypes.streamres(gen=gen)
--- a/relnotes/next Tue Jul 14 10:25:41 2020 +0200
+++ b/relnotes/next Mon Jul 20 21:56:27 2020 +0530
@@ -1,11 +1,45 @@
== New Features ==
+ * clonebundles can be annotated with the expected memory requirements
+ using the `REQUIREDRAM` option. This allows clients to skip
+ bundles created with large zstd windows and fall back to larger but
+ less demanding bundles.
+
+ * The `phabricator` extension now provides more of the arcanist CLI's
+ functionality, such as changing the status of a differential.
+
+ * Phases processing is much faster, especially for repositories with
+ old non-public changesets.
== New Experimental Features ==
+ * The core of some hg operations has been (and is being)
+ implemented in Rust, for speed. `hg status` on a repository with
+ 300k tracked files goes from 1.8s to 0.6s, for instance.
+ This has currently been tested only on Linux, and does not build on
+ Windows. See rust/README.rst in the mercurial repository for
+ instructions on opting into this.
== Backwards Compatibility Changes ==
+* Mercurial now requires at least Python 2.7.9 or a Python version that
+ backported modern SSL/TLS features (as defined in PEP 466), and that Python
+ was compiled against an OpenSSL version supporting TLS 1.1 or TLS 1.2
+ (likely this requires the OpenSSL version to be at least 1.0.1).
+
+* The `hg perfwrite` command from contrib/perf.py was made more flexible and
+ changed its default behavior. To get the previous behavior, run `hg perfwrite
+ --nlines=100000 --nitems=1 --item='Testing write performance' --batch-line`.
+
== Internal API Changes ==
+ * logcmdutil.diffordiffstat() now takes contexts instead of nodes.
+
+ * The `mergestate` class along with some related methods and constants have
+ moved from `mercurial.merge` to a new `mercurial.mergestate` module.
+
+ * The `phasecache` class now uses sparse dictionaries for the phase data.
+ New accessors are provided to detect if any non-public changeset exists
+ (`hasnonpublicphases`) and get the corresponding root set
+ (`nonpublicphaseroots`).
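
A hedged sketch of how the new phase accessors mentioned above might be
consulted; the exact location and signatures live in mercurial/phases.py and
are not shown in this patch:

    # assumption: accessors exposed at module level, taking a repo
    if phases.hasnonpublicphases(repo):
        roots = phases.nonpublicphaseroots(repo)
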
--- a/rust/Cargo.lock Tue Jul 14 10:25:41 2020 +0200
+++ b/rust/Cargo.lock Mon Jul 20 21:56:27 2020 +0530
@@ -42,11 +42,6 @@
source = "registry+https://github.com/rust-lang/crates.io-index"
[[package]]
-name = "cc"
-version = "1.0.50"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-
-[[package]]
name = "cfg-if"
version = "0.1.10"
source = "registry+https://github.com/rust-lang/crates.io-index"
@@ -63,7 +58,7 @@
[[package]]
name = "clap"
-version = "2.33.0"
+version = "2.33.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
"ansi_term 0.11.0 (registry+https://github.com/rust-lang/crates.io-index)",
@@ -208,22 +203,20 @@
version = "0.1.0"
dependencies = [
"byteorder 1.3.4 (registry+https://github.com/rust-lang/crates.io-index)",
- "cc 1.0.50 (registry+https://github.com/rust-lang/crates.io-index)",
- "clap 2.33.0 (registry+https://github.com/rust-lang/crates.io-index)",
+ "clap 2.33.1 (registry+https://github.com/rust-lang/crates.io-index)",
"crossbeam 0.7.3 (registry+https://github.com/rust-lang/crates.io-index)",
"hex 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)",
"lazy_static 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)",
- "libc 0.2.67 (registry+https://github.com/rust-lang/crates.io-index)",
"log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)",
"memchr 2.3.3 (registry+https://github.com/rust-lang/crates.io-index)",
"memmap 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)",
- "micro-timer 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)",
+ "micro-timer 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)",
"pretty_assertions 0.6.1 (registry+https://github.com/rust-lang/crates.io-index)",
"rand 0.7.3 (registry+https://github.com/rust-lang/crates.io-index)",
"rand_distr 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)",
"rand_pcg 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)",
"rayon 1.3.0 (registry+https://github.com/rust-lang/crates.io-index)",
- "regex 1.3.6 (registry+https://github.com/rust-lang/crates.io-index)",
+ "regex 1.3.9 (registry+https://github.com/rust-lang/crates.io-index)",
"same-file 1.0.6 (registry+https://github.com/rust-lang/crates.io-index)",
"tempfile 3.1.0 (registry+https://github.com/rust-lang/crates.io-index)",
"twox-hash 1.5.0 (registry+https://github.com/rust-lang/crates.io-index)",
@@ -287,16 +280,16 @@
[[package]]
name = "micro-timer"
-version = "0.2.1"
+version = "0.3.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
- "micro-timer-macros 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)",
+ "micro-timer-macros 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)",
"scopeguard 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
name = "micro-timer-macros"
-version = "0.2.0"
+version = "0.3.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
"proc-macro2 1.0.9 (registry+https://github.com/rust-lang/crates.io-index)",
@@ -369,7 +362,7 @@
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
"libc 0.2.67 (registry+https://github.com/rust-lang/crates.io-index)",
- "regex 1.3.6 (registry+https://github.com/rust-lang/crates.io-index)",
+ "regex 1.3.9 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
@@ -378,7 +371,7 @@
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
"libc 0.2.67 (registry+https://github.com/rust-lang/crates.io-index)",
- "regex 1.3.6 (registry+https://github.com/rust-lang/crates.io-index)",
+ "regex 1.3.9 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
@@ -471,18 +464,18 @@
[[package]]
name = "regex"
-version = "1.3.6"
+version = "1.3.9"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
"aho-corasick 0.7.10 (registry+https://github.com/rust-lang/crates.io-index)",
"memchr 2.3.3 (registry+https://github.com/rust-lang/crates.io-index)",
- "regex-syntax 0.6.17 (registry+https://github.com/rust-lang/crates.io-index)",
+ "regex-syntax 0.6.18 (registry+https://github.com/rust-lang/crates.io-index)",
"thread_local 1.0.1 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
name = "regex-syntax"
-version = "0.6.17"
+version = "0.6.18"
source = "registry+https://github.com/rust-lang/crates.io-index"
[[package]]
@@ -494,6 +487,14 @@
]
[[package]]
+name = "rhg"
+version = "0.1.0"
+dependencies = [
+ "clap 2.33.1 (registry+https://github.com/rust-lang/crates.io-index)",
+ "hg-core 0.1.0",
+]
+
+[[package]]
name = "rustc_version"
version = "0.2.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
@@ -655,10 +656,9 @@
"checksum autocfg 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)" = "f8aac770f1885fd7e387acedd76065302551364496e46b3dd00860b2f8359b9d"
"checksum bitflags 1.2.1 (registry+https://github.com/rust-lang/crates.io-index)" = "cf1de2fe8c75bc145a2f577add951f8134889b4795d47466a54a5c846d691693"
"checksum byteorder 1.3.4 (registry+https://github.com/rust-lang/crates.io-index)" = "08c48aae112d48ed9f069b33538ea9e3e90aa263cfa3d1c24309612b1f7472de"
-"checksum cc 1.0.50 (registry+https://github.com/rust-lang/crates.io-index)" = "95e28fa049fda1c330bcf9d723be7663a899c4679724b34c81e9f5a326aab8cd"
"checksum cfg-if 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)" = "4785bdd1c96b2a846b2bd7cc02e86b6b3dbf14e7e53446c4f54c92a361040822"
"checksum chrono 0.4.11 (registry+https://github.com/rust-lang/crates.io-index)" = "80094f509cf8b5ae86a4966a39b3ff66cd7e2a3e594accec3743ff3fabeab5b2"
-"checksum clap 2.33.0 (registry+https://github.com/rust-lang/crates.io-index)" = "5067f5bb2d80ef5d68b4c87db81601f0b75bca627bc2ef76b141d7b846a3c6d9"
+"checksum clap 2.33.1 (registry+https://github.com/rust-lang/crates.io-index)" = "bdfa80d47f954d53a35a64987ca1422f495b8d6483c0fe9f7117b36c2a792129"
"checksum colored 1.9.3 (registry+https://github.com/rust-lang/crates.io-index)" = "f4ffc801dacf156c5854b9df4f425a626539c3a6ef7893cc0c5084a23f0b6c59"
"checksum cpython 0.4.1 (registry+https://github.com/rust-lang/crates.io-index)" = "bfaf3847ab963e40c4f6dd8d6be279bdf74007ae2413786a0dcbb28c52139a95"
"checksum crossbeam 0.7.3 (registry+https://github.com/rust-lang/crates.io-index)" = "69323bff1fb41c635347b8ead484a5ca6c3f11914d784170b158d8449ab07f8e"
@@ -680,8 +680,8 @@
"checksum memchr 2.3.3 (registry+https://github.com/rust-lang/crates.io-index)" = "3728d817d99e5ac407411fa471ff9800a778d88a24685968b36824eaf4bee400"
"checksum memmap 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)" = "6585fd95e7bb50d6cc31e20d4cf9afb4e2ba16c5846fc76793f11218da9c475b"
"checksum memoffset 0.5.3 (registry+https://github.com/rust-lang/crates.io-index)" = "75189eb85871ea5c2e2c15abbdd541185f63b408415e5051f5cac122d8c774b9"
-"checksum micro-timer 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)" = "987429cd6162a80ed5ff44fc790f5090b1c6d617ac73a2e272965ed91201d79b"
-"checksum micro-timer-macros 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "43cec5c0b38783eb33ef7bccf4b250b7a085703e11f5f2238fa31969e629388a"
+"checksum micro-timer 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)" = "25b31d6cb9112984323d05d7a353f272ae5d7a307074f9ab9b25c00121b8c947"
+"checksum micro-timer-macros 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)" = "5694085dd384bb9e824207facc040c248d9df653f55e28c3ad0686958b448504"
"checksum num-integer 0.1.42 (registry+https://github.com/rust-lang/crates.io-index)" = "3f6ea62e9d81a77cd3ee9a2a5b9b609447857f3d358704331e4ef39eb247fcba"
"checksum num-traits 0.2.11 (registry+https://github.com/rust-lang/crates.io-index)" = "c62be47e61d1842b9170f0fdeec8eba98e60e90e5446449a0545e5152acd7096"
"checksum num_cpus 1.12.0 (registry+https://github.com/rust-lang/crates.io-index)" = "46203554f085ff89c235cd12f7075f3233af9b11ed7c9e16dfe2560d03313ce6"
@@ -701,8 +701,8 @@
"checksum rayon 1.3.0 (registry+https://github.com/rust-lang/crates.io-index)" = "db6ce3297f9c85e16621bb8cca38a06779ffc31bb8184e1be4bed2be4678a098"
"checksum rayon-core 1.7.0 (registry+https://github.com/rust-lang/crates.io-index)" = "08a89b46efaf957e52b18062fb2f4660f8b8a4dde1807ca002690868ef2c85a9"
"checksum redox_syscall 0.1.56 (registry+https://github.com/rust-lang/crates.io-index)" = "2439c63f3f6139d1b57529d16bc3b8bb855230c8efcc5d3a896c8bea7c3b1e84"
-"checksum regex 1.3.6 (registry+https://github.com/rust-lang/crates.io-index)" = "7f6946991529684867e47d86474e3a6d0c0ab9b82d5821e314b1ede31fa3a4b3"
-"checksum regex-syntax 0.6.17 (registry+https://github.com/rust-lang/crates.io-index)" = "7fe5bd57d1d7414c6b5ed48563a2c855d995ff777729dcd91c369ec7fea395ae"
+"checksum regex 1.3.9 (registry+https://github.com/rust-lang/crates.io-index)" = "9c3780fcf44b193bc4d09f36d2a3c87b251da4a046c87795a0d35f4f927ad8e6"
+"checksum regex-syntax 0.6.18 (registry+https://github.com/rust-lang/crates.io-index)" = "26412eb97c6b088a6997e05f69403a802a92d520de2f8e63c2b65f9e0f47c4e8"
"checksum remove_dir_all 0.5.2 (registry+https://github.com/rust-lang/crates.io-index)" = "4a83fa3702a688b9359eccba92d153ac33fd2e8462f9e0e3fdf155239ea7792e"
"checksum rustc_version 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)" = "138e3e0acb6c9fb258b19b67cb8abd63c00679d2851805ea151465464fe9030a"
"checksum same-file 1.0.6 (registry+https://github.com/rust-lang/crates.io-index)" = "93fc1dc3aaa9bfed95e02e6eadabb4baf7e3078b0bd1b4d7b6b0b68378900502"
--- a/rust/Cargo.toml Tue Jul 14 10:25:41 2020 +0200
+++ b/rust/Cargo.toml Mon Jul 20 21:56:27 2020 +0530
@@ -1,3 +1,3 @@
[workspace]
-members = ["hg-core", "hg-cpython"]
+members = ["hg-core", "hg-cpython", "rhg"]
exclude = ["chg", "hgcli"]
--- a/rust/README.rst Tue Jul 14 10:25:41 2020 +0200
+++ b/rust/README.rst Mon Jul 20 21:56:27 2020 +0530
@@ -8,9 +8,9 @@
There are currently three independent rust projects:
- chg. An implementation of chg, in rust instead of C.
-- hgcli. A experiment for starting hg in rust rather than in python,
- by linking with the python runtime. Probably meant to be replaced by
- PyOxidizer at some point.
+- hgcli. A project that provides a (mostly) self-contained "hg" binary,
+ for ease of deployment and a bit of speed, using PyOxidizer. See
+ hgcli/README.md.
- hg-core (and hg-cpython): implementation of some
functionality of mercurial in rust, e.g. ancestry computations in
revision graphs, status or pull discovery. The top-level ``Cargo.toml`` file
@@ -27,8 +27,6 @@
$ ./hg debuginstall | grep -i rust # to validate rust is in use
checking Rust extensions (installed)
checking module policy (rust+c-allow)
- checking "re2" regexp engine Rust bindings (installed)
-
If the environment variable ``HGWITHRUSTEXT=cpython`` is set, the Rust
extension will be used by default unless ``--no-rust``.
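
For example (an illustrative invocation; adapt it to your build setup)::

  $ HGWITHRUSTEXT=cpython make local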
@@ -36,35 +34,20 @@
One day we may use this environment variable to switch to new experimental
binding crates like a hypothetical ``HGWITHRUSTEXT=hpy``.
-Using the fastest ``hg status``
--------------------------------
-
-The code for ``hg status`` needs to conform to ``.hgignore`` rules, which are
-all translated into regex.
-
-In the first version, for compatibility and ease of development reasons, the
-Re2 regex engine was chosen until we figured out if the ``regex`` crate had
-similar enough behavior.
-
-Now that that work has been done, the default behavior is to use the ``regex``
-crate, that provides a significant performance boost compared to the standard
-Python + C path in many commands such as ``status``, ``diff`` and ``commit``,
+Profiling
+=========
-However, the ``Re2`` path remains slightly faster for our use cases and remains
-a better option for getting the most speed out of your Mercurial.
+Setting the environment variable ``RUST_LOG=trace`` will make hg print
+a few high-level rust-related performance numbers. It can also
+indicate why the rust code cannot be used (for instance, when the
+``.hgignore`` patterns contain lookarounds, which the ``regex`` crate
+does not support).
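+
+For example, to see these timings while running a command (a minimal
+illustration; the exact output depends on your build)::
+
+  $ RUST_LOG=trace ./hg status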
-If you want to use ``Re2``, you need to install ``Re2`` following Google's
-guidelines: https://github.com/google/re2/wiki/Install.
-Then, use ``HG_RUST_FEATURES=with-re2`` and
-``HG_RE2_PATH=system|<path to your re2 install>`` when building ``hg`` to
-signal the use of Re2. Using the local path instead of the "system" RE2 links
-it statically.
-
-For example::
-
- $ HG_RUST_FEATURES=with-re2 HG_RE2_PATH=system make PURE=--rust
- $ # OR
- $ HG_RUST_FEATURES=with-re2 HG_RE2_PATH=/path/to/re2 make PURE=--rust
+``py-spy`` (https://github.com/benfred/py-spy) can be used to build a
+single profile that covers both rust and python functions. This is
+unlike ``hg --profile``, which attributes time spent in rust to some
+unlucky python code running shortly after the rust code, and unlike
+native-code tools such as ``perf``, which attribute time to the
+python interpreter rather than to individual python functions.
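+
+For example (hypothetical flags shown; check the py-spy documentation
+for the options supported by your version)::
+
+  $ py-spy record --native -o chg-profile.svg -- ./hg status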
Developing Rust
===============
@@ -114,14 +97,3 @@
$ cargo +nightly fmt
This requires you to have the nightly toolchain installed.
-
-Additional features
--------------------
-
-As mentioned in the section about ``hg status``, code paths using ``re2`` are
-opt-in.
-
-For example::
-
- $ cargo check --features with-re2
-
--- a/rust/chg/Cargo.lock Tue Jul 14 10:25:41 2020 +0200
+++ b/rust/chg/Cargo.lock Mon Jul 20 21:56:27 2020 +0530
@@ -6,9 +6,14 @@
source = "registry+https://github.com/rust-lang/crates.io-index"
[[package]]
-name = "autocfg"
-version = "1.0.0"
+name = "async-trait"
+version = "0.1.30"
source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "proc-macro2 1.0.10 (registry+https://github.com/rust-lang/crates.io-index)",
+ "quote 1.0.3 (registry+https://github.com/rust-lang/crates.io-index)",
+ "syn 1.0.17 (registry+https://github.com/rust-lang/crates.io-index)",
+]
[[package]]
name = "bitflags"
@@ -16,20 +21,11 @@
source = "registry+https://github.com/rust-lang/crates.io-index"
[[package]]
-name = "byteorder"
-version = "1.3.4"
+name = "bytes"
+version = "0.5.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
[[package]]
-name = "bytes"
-version = "0.4.12"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-dependencies = [
- "byteorder 1.3.4 (registry+https://github.com/rust-lang/crates.io-index)",
- "iovec 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)",
-]
-
-[[package]]
name = "cc"
version = "1.0.50"
source = "registry+https://github.com/rust-lang/crates.io-index"
@@ -43,91 +39,17 @@
name = "chg"
version = "0.1.0"
dependencies = [
- "bytes 0.4.12 (registry+https://github.com/rust-lang/crates.io-index)",
+ "async-trait 0.1.30 (registry+https://github.com/rust-lang/crates.io-index)",
+ "bytes 0.5.4 (registry+https://github.com/rust-lang/crates.io-index)",
"cc 1.0.50 (registry+https://github.com/rust-lang/crates.io-index)",
- "futures 0.1.29 (registry+https://github.com/rust-lang/crates.io-index)",
- "libc 0.2.68 (registry+https://github.com/rust-lang/crates.io-index)",
+ "futures 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)",
+ "libc 0.2.69 (registry+https://github.com/rust-lang/crates.io-index)",
"log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)",
- "tokio 0.1.22 (registry+https://github.com/rust-lang/crates.io-index)",
- "tokio-hglib 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)",
- "tokio-process 0.2.5 (registry+https://github.com/rust-lang/crates.io-index)",
- "tokio-timer 0.2.13 (registry+https://github.com/rust-lang/crates.io-index)",
-]
-
-[[package]]
-name = "cloudabi"
-version = "0.0.3"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-dependencies = [
- "bitflags 1.2.1 (registry+https://github.com/rust-lang/crates.io-index)",
-]
-
-[[package]]
-name = "crossbeam-deque"
-version = "0.7.3"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-dependencies = [
- "crossbeam-epoch 0.8.2 (registry+https://github.com/rust-lang/crates.io-index)",
- "crossbeam-utils 0.7.2 (registry+https://github.com/rust-lang/crates.io-index)",
- "maybe-uninit 2.0.0 (registry+https://github.com/rust-lang/crates.io-index)",
-]
-
-[[package]]
-name = "crossbeam-epoch"
-version = "0.8.2"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-dependencies = [
- "autocfg 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)",
- "cfg-if 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)",
- "crossbeam-utils 0.7.2 (registry+https://github.com/rust-lang/crates.io-index)",
- "lazy_static 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)",
- "maybe-uninit 2.0.0 (registry+https://github.com/rust-lang/crates.io-index)",
- "memoffset 0.5.4 (registry+https://github.com/rust-lang/crates.io-index)",
- "scopeguard 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)",
+ "tokio 0.2.18 (registry+https://github.com/rust-lang/crates.io-index)",
+ "tokio-hglib 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
-name = "crossbeam-queue"
-version = "0.1.2"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-dependencies = [
- "crossbeam-utils 0.6.6 (registry+https://github.com/rust-lang/crates.io-index)",
-]
-
-[[package]]
-name = "crossbeam-queue"
-version = "0.2.1"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-dependencies = [
- "cfg-if 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)",
- "crossbeam-utils 0.7.2 (registry+https://github.com/rust-lang/crates.io-index)",
-]
-
-[[package]]
-name = "crossbeam-utils"
-version = "0.6.6"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-dependencies = [
- "cfg-if 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)",
- "lazy_static 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)",
-]
-
-[[package]]
-name = "crossbeam-utils"
-version = "0.7.2"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-dependencies = [
- "autocfg 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)",
- "cfg-if 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)",
- "lazy_static 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)",
-]
-
-[[package]]
-name = "fnv"
-version = "1.0.6"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-
-[[package]]
name = "fuchsia-zircon"
version = "0.3.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
@@ -143,15 +65,84 @@
[[package]]
name = "futures"
-version = "0.1.29"
+version = "0.3.4"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "futures-channel 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)",
+ "futures-core 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)",
+ "futures-executor 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)",
+ "futures-io 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)",
+ "futures-sink 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)",
+ "futures-task 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)",
+ "futures-util 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
+[[package]]
+name = "futures-channel"
+version = "0.3.4"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "futures-core 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)",
+ "futures-sink 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
+[[package]]
+name = "futures-core"
+version = "0.3.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
[[package]]
-name = "hermit-abi"
-version = "0.1.10"
+name = "futures-executor"
+version = "0.3.4"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "futures-core 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)",
+ "futures-task 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)",
+ "futures-util 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
+[[package]]
+name = "futures-io"
+version = "0.3.4"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+
+[[package]]
+name = "futures-macro"
+version = "0.3.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
- "libc 0.2.68 (registry+https://github.com/rust-lang/crates.io-index)",
+ "proc-macro-hack 0.5.15 (registry+https://github.com/rust-lang/crates.io-index)",
+ "proc-macro2 1.0.10 (registry+https://github.com/rust-lang/crates.io-index)",
+ "quote 1.0.3 (registry+https://github.com/rust-lang/crates.io-index)",
+ "syn 1.0.17 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
+[[package]]
+name = "futures-sink"
+version = "0.3.4"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+
+[[package]]
+name = "futures-task"
+version = "0.3.4"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+
+[[package]]
+name = "futures-util"
+version = "0.3.4"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "futures-channel 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)",
+ "futures-core 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)",
+ "futures-io 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)",
+ "futures-macro 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)",
+ "futures-sink 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)",
+ "futures-task 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)",
+ "memchr 2.3.3 (registry+https://github.com/rust-lang/crates.io-index)",
+ "pin-utils 0.1.0-alpha.4 (registry+https://github.com/rust-lang/crates.io-index)",
+ "proc-macro-hack 0.5.15 (registry+https://github.com/rust-lang/crates.io-index)",
+ "proc-macro-nested 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)",
+ "slab 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
@@ -159,7 +150,7 @@
version = "0.1.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
- "libc 0.2.68 (registry+https://github.com/rust-lang/crates.io-index)",
+ "libc 0.2.69 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
@@ -178,18 +169,10 @@
[[package]]
name = "libc"
-version = "0.2.68"
+version = "0.2.69"
source = "registry+https://github.com/rust-lang/crates.io-index"
[[package]]
-name = "lock_api"
-version = "0.3.3"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-dependencies = [
- "scopeguard 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)",
-]
-
-[[package]]
name = "log"
version = "0.4.8"
source = "registry+https://github.com/rust-lang/crates.io-index"
@@ -198,19 +181,11 @@
]
[[package]]
-name = "maybe-uninit"
-version = "2.0.0"
+name = "memchr"
+version = "2.3.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
[[package]]
-name = "memoffset"
-version = "0.5.4"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-dependencies = [
- "autocfg 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)",
-]
-
-[[package]]
name = "mio"
version = "0.6.21"
source = "registry+https://github.com/rust-lang/crates.io-index"
@@ -220,7 +195,7 @@
"fuchsia-zircon-sys 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)",
"iovec 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)",
"kernel32-sys 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)",
- "libc 0.2.68 (registry+https://github.com/rust-lang/crates.io-index)",
+ "libc 0.2.69 (registry+https://github.com/rust-lang/crates.io-index)",
"log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)",
"miow 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)",
"net2 0.2.33 (registry+https://github.com/rust-lang/crates.io-index)",
@@ -245,7 +220,7 @@
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
"iovec 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)",
- "libc 0.2.68 (registry+https://github.com/rust-lang/crates.io-index)",
+ "libc 0.2.69 (registry+https://github.com/rust-lang/crates.io-index)",
"mio 0.6.21 (registry+https://github.com/rust-lang/crates.io-index)",
]
@@ -265,7 +240,7 @@
version = "0.3.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
- "socket2 0.3.11 (registry+https://github.com/rust-lang/crates.io-index)",
+ "socket2 0.3.12 (registry+https://github.com/rust-lang/crates.io-index)",
"winapi 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)",
]
@@ -275,41 +250,44 @@
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
"cfg-if 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)",
- "libc 0.2.68 (registry+https://github.com/rust-lang/crates.io-index)",
+ "libc 0.2.69 (registry+https://github.com/rust-lang/crates.io-index)",
"winapi 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
-name = "num_cpus"
-version = "1.12.0"
+name = "pin-project-lite"
+version = "0.1.4"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+
+[[package]]
+name = "pin-utils"
+version = "0.1.0-alpha.4"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+
+[[package]]
+name = "proc-macro-hack"
+version = "0.5.15"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+
+[[package]]
+name = "proc-macro-nested"
+version = "0.1.4"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+
+[[package]]
+name = "proc-macro2"
+version = "1.0.10"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
- "hermit-abi 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)",
- "libc 0.2.68 (registry+https://github.com/rust-lang/crates.io-index)",
+ "unicode-xid 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
-name = "parking_lot"
-version = "0.9.0"
+name = "quote"
+version = "1.0.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
- "lock_api 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)",
- "parking_lot_core 0.6.2 (registry+https://github.com/rust-lang/crates.io-index)",
- "rustc_version 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)",
-]
-
-[[package]]
-name = "parking_lot_core"
-version = "0.6.2"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-dependencies = [
- "cfg-if 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)",
- "cloudabi 0.0.3 (registry+https://github.com/rust-lang/crates.io-index)",
- "libc 0.2.68 (registry+https://github.com/rust-lang/crates.io-index)",
- "redox_syscall 0.1.56 (registry+https://github.com/rust-lang/crates.io-index)",
- "rustc_version 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)",
- "smallvec 0.6.13 (registry+https://github.com/rust-lang/crates.io-index)",
- "winapi 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)",
+ "proc-macro2 1.0.10 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
@@ -318,38 +296,12 @@
source = "registry+https://github.com/rust-lang/crates.io-index"
[[package]]
-name = "rustc_version"
-version = "0.2.3"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-dependencies = [
- "semver 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)",
-]
-
-[[package]]
-name = "scopeguard"
-version = "1.1.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-
-[[package]]
-name = "semver"
-version = "0.9.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-dependencies = [
- "semver-parser 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)",
-]
-
-[[package]]
-name = "semver-parser"
-version = "0.7.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-
-[[package]]
name = "signal-hook-registry"
version = "1.2.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
"arc-swap 0.4.5 (registry+https://github.com/rust-lang/crates.io-index)",
- "libc 0.2.68 (registry+https://github.com/rust-lang/crates.io-index)",
+ "libc 0.2.69 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
@@ -358,240 +310,85 @@
source = "registry+https://github.com/rust-lang/crates.io-index"
[[package]]
-name = "smallvec"
-version = "0.6.13"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-dependencies = [
- "maybe-uninit 2.0.0 (registry+https://github.com/rust-lang/crates.io-index)",
-]
-
-[[package]]
name = "socket2"
-version = "0.3.11"
+version = "0.3.12"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
"cfg-if 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)",
- "libc 0.2.68 (registry+https://github.com/rust-lang/crates.io-index)",
+ "libc 0.2.69 (registry+https://github.com/rust-lang/crates.io-index)",
"redox_syscall 0.1.56 (registry+https://github.com/rust-lang/crates.io-index)",
"winapi 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
-name = "tokio"
-version = "0.1.22"
+name = "syn"
+version = "1.0.17"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
- "bytes 0.4.12 (registry+https://github.com/rust-lang/crates.io-index)",
- "futures 0.1.29 (registry+https://github.com/rust-lang/crates.io-index)",
- "mio 0.6.21 (registry+https://github.com/rust-lang/crates.io-index)",
- "num_cpus 1.12.0 (registry+https://github.com/rust-lang/crates.io-index)",
- "tokio-codec 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)",
- "tokio-current-thread 0.1.7 (registry+https://github.com/rust-lang/crates.io-index)",
- "tokio-executor 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)",
- "tokio-fs 0.1.7 (registry+https://github.com/rust-lang/crates.io-index)",
- "tokio-io 0.1.13 (registry+https://github.com/rust-lang/crates.io-index)",
- "tokio-reactor 0.1.12 (registry+https://github.com/rust-lang/crates.io-index)",
- "tokio-sync 0.1.8 (registry+https://github.com/rust-lang/crates.io-index)",
- "tokio-tcp 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)",
- "tokio-threadpool 0.1.18 (registry+https://github.com/rust-lang/crates.io-index)",
- "tokio-timer 0.2.13 (registry+https://github.com/rust-lang/crates.io-index)",
- "tokio-udp 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)",
- "tokio-uds 0.2.6 (registry+https://github.com/rust-lang/crates.io-index)",
-]
-
-[[package]]
-name = "tokio-codec"
-version = "0.1.2"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-dependencies = [
- "bytes 0.4.12 (registry+https://github.com/rust-lang/crates.io-index)",
- "futures 0.1.29 (registry+https://github.com/rust-lang/crates.io-index)",
- "tokio-io 0.1.13 (registry+https://github.com/rust-lang/crates.io-index)",
-]
-
-[[package]]
-name = "tokio-current-thread"
-version = "0.1.7"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-dependencies = [
- "futures 0.1.29 (registry+https://github.com/rust-lang/crates.io-index)",
- "tokio-executor 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)",
+ "proc-macro2 1.0.10 (registry+https://github.com/rust-lang/crates.io-index)",
+ "quote 1.0.3 (registry+https://github.com/rust-lang/crates.io-index)",
+ "unicode-xid 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
-name = "tokio-executor"
-version = "0.1.10"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-dependencies = [
- "crossbeam-utils 0.7.2 (registry+https://github.com/rust-lang/crates.io-index)",
- "futures 0.1.29 (registry+https://github.com/rust-lang/crates.io-index)",
-]
-
-[[package]]
-name = "tokio-fs"
-version = "0.1.7"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-dependencies = [
- "futures 0.1.29 (registry+https://github.com/rust-lang/crates.io-index)",
- "tokio-io 0.1.13 (registry+https://github.com/rust-lang/crates.io-index)",
- "tokio-threadpool 0.1.18 (registry+https://github.com/rust-lang/crates.io-index)",
-]
-
-[[package]]
-name = "tokio-hglib"
-version = "0.2.0"
+name = "tokio"
+version = "0.2.18"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
- "bytes 0.4.12 (registry+https://github.com/rust-lang/crates.io-index)",
- "futures 0.1.29 (registry+https://github.com/rust-lang/crates.io-index)",
- "tokio 0.1.22 (registry+https://github.com/rust-lang/crates.io-index)",
- "tokio-codec 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)",
- "tokio-io 0.1.13 (registry+https://github.com/rust-lang/crates.io-index)",
- "tokio-process 0.2.5 (registry+https://github.com/rust-lang/crates.io-index)",
- "tokio-uds 0.2.6 (registry+https://github.com/rust-lang/crates.io-index)",
-]
-
-[[package]]
-name = "tokio-io"
-version = "0.1.13"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-dependencies = [
- "bytes 0.4.12 (registry+https://github.com/rust-lang/crates.io-index)",
- "futures 0.1.29 (registry+https://github.com/rust-lang/crates.io-index)",
- "log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)",
-]
-
-[[package]]
-name = "tokio-process"
-version = "0.2.5"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-dependencies = [
- "crossbeam-queue 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)",
- "futures 0.1.29 (registry+https://github.com/rust-lang/crates.io-index)",
+ "bytes 0.5.4 (registry+https://github.com/rust-lang/crates.io-index)",
+ "futures-core 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)",
"lazy_static 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)",
- "libc 0.2.68 (registry+https://github.com/rust-lang/crates.io-index)",
- "log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)",
+ "libc 0.2.69 (registry+https://github.com/rust-lang/crates.io-index)",
+ "memchr 2.3.3 (registry+https://github.com/rust-lang/crates.io-index)",
"mio 0.6.21 (registry+https://github.com/rust-lang/crates.io-index)",
"mio-named-pipes 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)",
- "tokio-io 0.1.13 (registry+https://github.com/rust-lang/crates.io-index)",
- "tokio-reactor 0.1.12 (registry+https://github.com/rust-lang/crates.io-index)",
- "tokio-signal 0.2.9 (registry+https://github.com/rust-lang/crates.io-index)",
+ "mio-uds 0.6.7 (registry+https://github.com/rust-lang/crates.io-index)",
+ "pin-project-lite 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)",
+ "signal-hook-registry 1.2.0 (registry+https://github.com/rust-lang/crates.io-index)",
+ "slab 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)",
+ "tokio-macros 0.2.5 (registry+https://github.com/rust-lang/crates.io-index)",
"winapi 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
-name = "tokio-reactor"
-version = "0.1.12"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-dependencies = [
- "crossbeam-utils 0.7.2 (registry+https://github.com/rust-lang/crates.io-index)",
- "futures 0.1.29 (registry+https://github.com/rust-lang/crates.io-index)",
- "lazy_static 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)",
- "log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)",
- "mio 0.6.21 (registry+https://github.com/rust-lang/crates.io-index)",
- "num_cpus 1.12.0 (registry+https://github.com/rust-lang/crates.io-index)",
- "parking_lot 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)",
- "slab 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)",
- "tokio-executor 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)",
- "tokio-io 0.1.13 (registry+https://github.com/rust-lang/crates.io-index)",
- "tokio-sync 0.1.8 (registry+https://github.com/rust-lang/crates.io-index)",
-]
-
-[[package]]
-name = "tokio-signal"
-version = "0.2.9"
+name = "tokio-hglib"
+version = "0.3.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
- "futures 0.1.29 (registry+https://github.com/rust-lang/crates.io-index)",
- "libc 0.2.68 (registry+https://github.com/rust-lang/crates.io-index)",
- "mio 0.6.21 (registry+https://github.com/rust-lang/crates.io-index)",
- "mio-uds 0.6.7 (registry+https://github.com/rust-lang/crates.io-index)",
- "signal-hook-registry 1.2.0 (registry+https://github.com/rust-lang/crates.io-index)",
- "tokio-executor 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)",
- "tokio-io 0.1.13 (registry+https://github.com/rust-lang/crates.io-index)",
- "tokio-reactor 0.1.12 (registry+https://github.com/rust-lang/crates.io-index)",
- "winapi 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)",
-]
-
-[[package]]
-name = "tokio-sync"
-version = "0.1.8"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-dependencies = [
- "fnv 1.0.6 (registry+https://github.com/rust-lang/crates.io-index)",
- "futures 0.1.29 (registry+https://github.com/rust-lang/crates.io-index)",
-]
-
-[[package]]
-name = "tokio-tcp"
-version = "0.1.4"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-dependencies = [
- "bytes 0.4.12 (registry+https://github.com/rust-lang/crates.io-index)",
- "futures 0.1.29 (registry+https://github.com/rust-lang/crates.io-index)",
- "iovec 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)",
- "mio 0.6.21 (registry+https://github.com/rust-lang/crates.io-index)",
- "tokio-io 0.1.13 (registry+https://github.com/rust-lang/crates.io-index)",
- "tokio-reactor 0.1.12 (registry+https://github.com/rust-lang/crates.io-index)",
+ "async-trait 0.1.30 (registry+https://github.com/rust-lang/crates.io-index)",
+ "bytes 0.5.4 (registry+https://github.com/rust-lang/crates.io-index)",
+ "futures 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)",
+ "tokio 0.2.18 (registry+https://github.com/rust-lang/crates.io-index)",
+ "tokio-util 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
-name = "tokio-threadpool"
-version = "0.1.18"
+name = "tokio-macros"
+version = "0.2.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
- "crossbeam-deque 0.7.3 (registry+https://github.com/rust-lang/crates.io-index)",
- "crossbeam-queue 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)",
- "crossbeam-utils 0.7.2 (registry+https://github.com/rust-lang/crates.io-index)",
- "futures 0.1.29 (registry+https://github.com/rust-lang/crates.io-index)",
- "lazy_static 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)",
- "log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)",
- "num_cpus 1.12.0 (registry+https://github.com/rust-lang/crates.io-index)",
- "slab 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)",
- "tokio-executor 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)",
-]
-
-[[package]]
-name = "tokio-timer"
-version = "0.2.13"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-dependencies = [
- "crossbeam-utils 0.7.2 (registry+https://github.com/rust-lang/crates.io-index)",
- "futures 0.1.29 (registry+https://github.com/rust-lang/crates.io-index)",
- "slab 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)",
- "tokio-executor 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)",
+ "proc-macro2 1.0.10 (registry+https://github.com/rust-lang/crates.io-index)",
+ "quote 1.0.3 (registry+https://github.com/rust-lang/crates.io-index)",
+ "syn 1.0.17 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
-name = "tokio-udp"
-version = "0.1.6"
+name = "tokio-util"
+version = "0.3.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
- "bytes 0.4.12 (registry+https://github.com/rust-lang/crates.io-index)",
- "futures 0.1.29 (registry+https://github.com/rust-lang/crates.io-index)",
+ "bytes 0.5.4 (registry+https://github.com/rust-lang/crates.io-index)",
+ "futures-core 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)",
+ "futures-sink 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)",
"log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)",
- "mio 0.6.21 (registry+https://github.com/rust-lang/crates.io-index)",
- "tokio-codec 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)",
- "tokio-io 0.1.13 (registry+https://github.com/rust-lang/crates.io-index)",
- "tokio-reactor 0.1.12 (registry+https://github.com/rust-lang/crates.io-index)",
+ "pin-project-lite 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)",
+ "tokio 0.2.18 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
-name = "tokio-uds"
-version = "0.2.6"
+name = "unicode-xid"
+version = "0.2.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-dependencies = [
- "bytes 0.4.12 (registry+https://github.com/rust-lang/crates.io-index)",
- "futures 0.1.29 (registry+https://github.com/rust-lang/crates.io-index)",
- "iovec 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)",
- "libc 0.2.68 (registry+https://github.com/rust-lang/crates.io-index)",
- "log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)",
- "mio 0.6.21 (registry+https://github.com/rust-lang/crates.io-index)",
- "mio-uds 0.6.7 (registry+https://github.com/rust-lang/crates.io-index)",
- "tokio-codec 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)",
- "tokio-io 0.1.13 (registry+https://github.com/rust-lang/crates.io-index)",
- "tokio-reactor 0.1.12 (registry+https://github.com/rust-lang/crates.io-index)",
-]
[[package]]
name = "winapi"
@@ -633,66 +430,50 @@
[metadata]
"checksum arc-swap 0.4.5 (registry+https://github.com/rust-lang/crates.io-index)" = "d663a8e9a99154b5fb793032533f6328da35e23aac63d5c152279aa8ba356825"
-"checksum autocfg 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)" = "f8aac770f1885fd7e387acedd76065302551364496e46b3dd00860b2f8359b9d"
+"checksum async-trait 0.1.30 (registry+https://github.com/rust-lang/crates.io-index)" = "da71fef07bc806586090247e971229289f64c210a278ee5ae419314eb386b31d"
"checksum bitflags 1.2.1 (registry+https://github.com/rust-lang/crates.io-index)" = "cf1de2fe8c75bc145a2f577add951f8134889b4795d47466a54a5c846d691693"
-"checksum byteorder 1.3.4 (registry+https://github.com/rust-lang/crates.io-index)" = "08c48aae112d48ed9f069b33538ea9e3e90aa263cfa3d1c24309612b1f7472de"
-"checksum bytes 0.4.12 (registry+https://github.com/rust-lang/crates.io-index)" = "206fdffcfa2df7cbe15601ef46c813fce0965eb3286db6b56c583b814b51c81c"
+"checksum bytes 0.5.4 (registry+https://github.com/rust-lang/crates.io-index)" = "130aac562c0dd69c56b3b1cc8ffd2e17be31d0b6c25b61c96b76231aa23e39e1"
"checksum cc 1.0.50 (registry+https://github.com/rust-lang/crates.io-index)" = "95e28fa049fda1c330bcf9d723be7663a899c4679724b34c81e9f5a326aab8cd"
"checksum cfg-if 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)" = "4785bdd1c96b2a846b2bd7cc02e86b6b3dbf14e7e53446c4f54c92a361040822"
-"checksum cloudabi 0.0.3 (registry+https://github.com/rust-lang/crates.io-index)" = "ddfc5b9aa5d4507acaf872de71051dfd0e309860e88966e1051e462a077aac4f"
-"checksum crossbeam-deque 0.7.3 (registry+https://github.com/rust-lang/crates.io-index)" = "9f02af974daeee82218205558e51ec8768b48cf524bd01d550abe5573a608285"
-"checksum crossbeam-epoch 0.8.2 (registry+https://github.com/rust-lang/crates.io-index)" = "058ed274caafc1f60c4997b5fc07bf7dc7cca454af7c6e81edffe5f33f70dace"
-"checksum crossbeam-queue 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)" = "7c979cd6cfe72335896575c6b5688da489e420d36a27a0b9eb0c73db574b4a4b"
-"checksum crossbeam-queue 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)" = "c695eeca1e7173472a32221542ae469b3e9aac3a4fc81f7696bcad82029493db"
-"checksum crossbeam-utils 0.6.6 (registry+https://github.com/rust-lang/crates.io-index)" = "04973fa96e96579258a5091af6003abde64af786b860f18622b82e026cca60e6"
-"checksum crossbeam-utils 0.7.2 (registry+https://github.com/rust-lang/crates.io-index)" = "c3c7c73a2d1e9fc0886a08b93e98eb643461230d5f1925e4036204d5f2e261a8"
-"checksum fnv 1.0.6 (registry+https://github.com/rust-lang/crates.io-index)" = "2fad85553e09a6f881f739c29f0b00b0f01357c743266d478b68951ce23285f3"
"checksum fuchsia-zircon 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)" = "2e9763c69ebaae630ba35f74888db465e49e259ba1bc0eda7d06f4a067615d82"
"checksum fuchsia-zircon-sys 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)" = "3dcaa9ae7725d12cdb85b3ad99a434db70b468c09ded17e012d86b5c1010f7a7"
-"checksum futures 0.1.29 (registry+https://github.com/rust-lang/crates.io-index)" = "1b980f2816d6ee8673b6517b52cb0e808a180efc92e5c19d02cdda79066703ef"
-"checksum hermit-abi 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)" = "725cf19794cf90aa94e65050cb4191ff5d8fa87a498383774c47b332e3af952e"
+"checksum futures 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)" = "5c329ae8753502fb44ae4fc2b622fa2a94652c41e795143765ba0927f92ab780"
+"checksum futures-channel 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)" = "f0c77d04ce8edd9cb903932b608268b3fffec4163dc053b3b402bf47eac1f1a8"
+"checksum futures-core 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)" = "f25592f769825e89b92358db00d26f965761e094951ac44d3663ef25b7ac464a"
+"checksum futures-executor 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)" = "f674f3e1bcb15b37284a90cedf55afdba482ab061c407a9c0ebbd0f3109741ba"
+"checksum futures-io 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)" = "a638959aa96152c7a4cddf50fcb1e3fede0583b27157c26e67d6f99904090dc6"
+"checksum futures-macro 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)" = "9a5081aa3de1f7542a794a397cde100ed903b0630152d0973479018fd85423a7"
+"checksum futures-sink 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)" = "3466821b4bc114d95b087b850a724c6f83115e929bc88f1fa98a3304a944c8a6"
+"checksum futures-task 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)" = "7b0a34e53cf6cdcd0178aa573aed466b646eb3db769570841fda0c7ede375a27"
+"checksum futures-util 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)" = "22766cf25d64306bedf0384da004d05c9974ab104fcc4528f1236181c18004c5"
"checksum iovec 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)" = "b2b3ea6ff95e175473f8ffe6a7eb7c00d054240321b84c57051175fe3c1e075e"
"checksum kernel32-sys 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)" = "7507624b29483431c0ba2d82aece8ca6cdba9382bff4ddd0f7490560c056098d"
"checksum lazy_static 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)" = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646"
-"checksum libc 0.2.68 (registry+https://github.com/rust-lang/crates.io-index)" = "dea0c0405123bba743ee3f91f49b1c7cfb684eef0da0a50110f758ccf24cdff0"
-"checksum lock_api 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)" = "79b2de95ecb4691949fea4716ca53cdbcfccb2c612e19644a8bad05edcf9f47b"
+"checksum libc 0.2.69 (registry+https://github.com/rust-lang/crates.io-index)" = "99e85c08494b21a9054e7fe1374a732aeadaff3980b6990b94bfd3a70f690005"
"checksum log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)" = "14b6052be84e6b71ab17edffc2eeabf5c2c3ae1fdb464aae35ac50c67a44e1f7"
-"checksum maybe-uninit 2.0.0 (registry+https://github.com/rust-lang/crates.io-index)" = "60302e4db3a61da70c0cb7991976248362f30319e88850c487b9b95bbf059e00"
-"checksum memoffset 0.5.4 (registry+https://github.com/rust-lang/crates.io-index)" = "b4fc2c02a7e374099d4ee95a193111f72d2110197fe200272371758f6c3643d8"
+"checksum memchr 2.3.3 (registry+https://github.com/rust-lang/crates.io-index)" = "3728d817d99e5ac407411fa471ff9800a778d88a24685968b36824eaf4bee400"
"checksum mio 0.6.21 (registry+https://github.com/rust-lang/crates.io-index)" = "302dec22bcf6bae6dfb69c647187f4b4d0fb6f535521f7bc022430ce8e12008f"
"checksum mio-named-pipes 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)" = "f5e374eff525ce1c5b7687c4cef63943e7686524a387933ad27ca7ec43779cb3"
"checksum mio-uds 0.6.7 (registry+https://github.com/rust-lang/crates.io-index)" = "966257a94e196b11bb43aca423754d87429960a768de9414f3691d6957abf125"
"checksum miow 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)" = "8c1f2f3b1cf331de6896aabf6e9d55dca90356cc9960cca7eaaf408a355ae919"
"checksum miow 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)" = "396aa0f2003d7df8395cb93e09871561ccc3e785f0acb369170e8cc74ddf9226"
"checksum net2 0.2.33 (registry+https://github.com/rust-lang/crates.io-index)" = "42550d9fb7b6684a6d404d9fa7250c2eb2646df731d1c06afc06dcee9e1bcf88"
-"checksum num_cpus 1.12.0 (registry+https://github.com/rust-lang/crates.io-index)" = "46203554f085ff89c235cd12f7075f3233af9b11ed7c9e16dfe2560d03313ce6"
-"checksum parking_lot 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)" = "f842b1982eb6c2fe34036a4fbfb06dd185a3f5c8edfaacdf7d1ea10b07de6252"
-"checksum parking_lot_core 0.6.2 (registry+https://github.com/rust-lang/crates.io-index)" = "b876b1b9e7ac6e1a74a6da34d25c42e17e8862aa409cbbbdcfc8d86c6f3bc62b"
+"checksum pin-project-lite 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)" = "237844750cfbb86f67afe27eee600dfbbcb6188d734139b534cbfbf4f96792ae"
+"checksum pin-utils 0.1.0-alpha.4 (registry+https://github.com/rust-lang/crates.io-index)" = "5894c618ce612a3fa23881b152b608bafb8c56cfc22f434a3ba3120b40f7b587"
+"checksum proc-macro-hack 0.5.15 (registry+https://github.com/rust-lang/crates.io-index)" = "0d659fe7c6d27f25e9d80a1a094c223f5246f6a6596453e09d7229bf42750b63"
+"checksum proc-macro-nested 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)" = "8e946095f9d3ed29ec38de908c22f95d9ac008e424c7bcae54c75a79c527c694"
+"checksum proc-macro2 1.0.10 (registry+https://github.com/rust-lang/crates.io-index)" = "df246d292ff63439fea9bc8c0a270bed0e390d5ebd4db4ba15aba81111b5abe3"
+"checksum quote 1.0.3 (registry+https://github.com/rust-lang/crates.io-index)" = "2bdc6c187c65bca4260c9011c9e3132efe4909da44726bad24cf7572ae338d7f"
"checksum redox_syscall 0.1.56 (registry+https://github.com/rust-lang/crates.io-index)" = "2439c63f3f6139d1b57529d16bc3b8bb855230c8efcc5d3a896c8bea7c3b1e84"
-"checksum rustc_version 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)" = "138e3e0acb6c9fb258b19b67cb8abd63c00679d2851805ea151465464fe9030a"
-"checksum scopeguard 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "d29ab0c6d3fc0ee92fe66e2d99f700eab17a8d57d1c1d3b748380fb20baa78cd"
-"checksum semver 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)" = "1d7eb9ef2c18661902cc47e535f9bc51b78acd254da71d375c2f6720d9a40403"
-"checksum semver-parser 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)" = "388a1df253eca08550bef6c72392cfe7c30914bf41df5269b68cbd6ff8f570a3"
"checksum signal-hook-registry 1.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "94f478ede9f64724c5d173d7bb56099ec3e2d9fc2774aac65d34b8b890405f41"
"checksum slab 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)" = "c111b5bd5695e56cffe5129854aa230b39c93a305372fdbb2668ca2394eea9f8"
-"checksum smallvec 0.6.13 (registry+https://github.com/rust-lang/crates.io-index)" = "f7b0758c52e15a8b5e3691eae6cc559f08eee9406e548a4477ba4e67770a82b6"
-"checksum socket2 0.3.11 (registry+https://github.com/rust-lang/crates.io-index)" = "e8b74de517221a2cb01a53349cf54182acdc31a074727d3079068448c0676d85"
-"checksum tokio 0.1.22 (registry+https://github.com/rust-lang/crates.io-index)" = "5a09c0b5bb588872ab2f09afa13ee6e9dac11e10a0ec9e8e3ba39a5a5d530af6"
-"checksum tokio-codec 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)" = "25b2998660ba0e70d18684de5d06b70b70a3a747469af9dea7618cc59e75976b"
-"checksum tokio-current-thread 0.1.7 (registry+https://github.com/rust-lang/crates.io-index)" = "b1de0e32a83f131e002238d7ccde18211c0a5397f60cbfffcb112868c2e0e20e"
-"checksum tokio-executor 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)" = "fb2d1b8f4548dbf5e1f7818512e9c406860678f29c300cdf0ebac72d1a3a1671"
-"checksum tokio-fs 0.1.7 (registry+https://github.com/rust-lang/crates.io-index)" = "297a1206e0ca6302a0eed35b700d292b275256f596e2f3fea7729d5e629b6ff4"
-"checksum tokio-hglib 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "8a138c3cb866c8a95ceddae44634bb159eefeebcdba45aec2158f8ad6c201e6d"
-"checksum tokio-io 0.1.13 (registry+https://github.com/rust-lang/crates.io-index)" = "57fc868aae093479e3131e3d165c93b1c7474109d13c90ec0dda2a1bbfff0674"
-"checksum tokio-process 0.2.5 (registry+https://github.com/rust-lang/crates.io-index)" = "382d90f43fa31caebe5d3bc6cfd854963394fff3b8cb59d5146607aaae7e7e43"
-"checksum tokio-reactor 0.1.12 (registry+https://github.com/rust-lang/crates.io-index)" = "09bc590ec4ba8ba87652da2068d150dcada2cfa2e07faae270a5e0409aa51351"
-"checksum tokio-signal 0.2.9 (registry+https://github.com/rust-lang/crates.io-index)" = "d0c34c6e548f101053321cba3da7cbb87a610b85555884c41b07da2eb91aff12"
-"checksum tokio-sync 0.1.8 (registry+https://github.com/rust-lang/crates.io-index)" = "edfe50152bc8164fcc456dab7891fa9bf8beaf01c5ee7e1dd43a397c3cf87dee"
-"checksum tokio-tcp 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)" = "98df18ed66e3b72e742f185882a9e201892407957e45fbff8da17ae7a7c51f72"
-"checksum tokio-threadpool 0.1.18 (registry+https://github.com/rust-lang/crates.io-index)" = "df720b6581784c118f0eb4310796b12b1d242a7eb95f716a8367855325c25f89"
-"checksum tokio-timer 0.2.13 (registry+https://github.com/rust-lang/crates.io-index)" = "93044f2d313c95ff1cb7809ce9a7a05735b012288a888b62d4434fd58c94f296"
-"checksum tokio-udp 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)" = "e2a0b10e610b39c38b031a2fcab08e4b82f16ece36504988dcbd81dbba650d82"
-"checksum tokio-uds 0.2.6 (registry+https://github.com/rust-lang/crates.io-index)" = "5076db410d6fdc6523df7595447629099a1fdc47b3d9f896220780fa48faf798"
+"checksum socket2 0.3.12 (registry+https://github.com/rust-lang/crates.io-index)" = "03088793f677dce356f3ccc2edb1b314ad191ab702a5de3faf49304f7e104918"
+"checksum syn 1.0.17 (registry+https://github.com/rust-lang/crates.io-index)" = "0df0eb663f387145cab623dea85b09c2c5b4b0aef44e945d928e682fce71bb03"
+"checksum tokio 0.2.18 (registry+https://github.com/rust-lang/crates.io-index)" = "34ef16d072d2b6dc8b4a56c70f5c5ced1a37752116f8e7c1e80c659aa7cb6713"
+"checksum tokio-hglib 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)" = "8d7e2b5d44911ebf67a1044423604f5f69206c5cbbd7e911b4966e6831514bca"
+"checksum tokio-macros 0.2.5 (registry+https://github.com/rust-lang/crates.io-index)" = "f0c3acc6aa564495a0f2e1d59fab677cd7f81a19994cfc7f3ad0e64301560389"
+"checksum tokio-util 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)" = "be8242891f2b6cbef26a2d7e8605133c2c554cd35b3e4948ea892d6d68436499"
+"checksum unicode-xid 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "826e7639553986605ec5979c7dd957c7895e93eabed50ab2ffa7f6128a75097c"
"checksum winapi 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)" = "167dc9d6949a9b857f3451275e911c3f44255842c1f7a76f33c55103a909087a"
"checksum winapi 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)" = "8093091eeb260906a183e6ae1abdba2ef5ef2257a21801128899c3fc699229c6"
"checksum winapi-build 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "2d315eee3b34aca4797b2da6b13ed88266e6d612562a0c46390af8299fc699bc"
--- a/rust/chg/Cargo.toml Tue Jul 14 10:25:41 2020 +0200
+++ b/rust/chg/Cargo.toml Mon Jul 20 21:56:27 2020 +0530
@@ -7,14 +7,16 @@
edition = "2018"
[dependencies]
-bytes = "0.4"
-futures = "0.1"
+async-trait = "0.1"
+bytes = "0.5"
+futures = "0.3"
libc = "0.2"
log = { version = "0.4", features = ["std"] }
-tokio = "0.1"
-tokio-hglib = "0.2"
-tokio-process = "0.2.3"
-tokio-timer = "0.2"
+tokio-hglib = "0.3"
+
+[dependencies.tokio]
+version = "0.2"
+features = ["rt-core", "io-util", "time", "process", "macros"]
[build-dependencies]
cc = "1.0"
--- a/rust/chg/src/attachio.rs Tue Jul 14 10:25:41 2020 +0200
+++ b/rust/chg/src/attachio.rs Mon Jul 20 21:56:27 2020 +0530
@@ -5,17 +5,15 @@
//! Functions to send client-side fds over the command server channel.
-use futures::{try_ready, Async, Future, Poll};
use std::io;
use std::os::unix::io::AsRawFd;
use tokio_hglib::codec::ChannelMessage;
-use tokio_hglib::protocol::MessageLoop;
-use tokio_hglib::{Client, Connection};
+use tokio_hglib::{Connection, Protocol};
use crate::message;
use crate::procutil;
-/// Future to send client-side fds over the command server channel.
+/// Sends client-side fds over the command server channel.
///
/// This works as follows:
/// 1. Client sends "attachio" request.
@@ -23,92 +21,48 @@
/// 3. Client sends fds with 1-byte dummy payload in response.
/// 4. Server returns the number of the fds received.
///
-/// If the stderr is omitted, it will be redirected to the stdout. This
-/// allows us to attach the pager stdin to both stdout and stderr, and
-/// dispose of the client-side handle once attached.
-#[must_use = "futures do nothing unless polled"]
-pub struct AttachIo<C, I, O, E>
-where
- C: Connection,
-{
- msg_loop: MessageLoop<C>,
- stdin: I,
- stdout: O,
- stderr: Option<E>,
-}
-
-impl<C, I, O, E> AttachIo<C, I, O, E>
-where
- C: Connection + AsRawFd,
- I: AsRawFd,
- O: AsRawFd,
- E: AsRawFd,
-{
- pub fn with_client(
- client: Client<C>,
- stdin: I,
- stdout: O,
- stderr: Option<E>,
- ) -> AttachIo<C, I, O, E> {
- let msg_loop = MessageLoop::start(client, b"attachio");
- AttachIo {
- msg_loop,
- stdin,
- stdout,
- stderr,
- }
- }
-}
-
-impl<C, I, O, E> Future for AttachIo<C, I, O, E>
-where
- C: Connection + AsRawFd,
- I: AsRawFd,
- O: AsRawFd,
- E: AsRawFd,
-{
- type Item = Client<C>;
- type Error = io::Error;
-
- fn poll(&mut self) -> Poll<Self::Item, Self::Error> {
- loop {
- let (client, msg) = try_ready!(self.msg_loop.poll());
- match msg {
- ChannelMessage::Data(b'r', data) => {
- let fd_cnt = message::parse_result_code(data)?;
- if fd_cnt == 3 {
- return Ok(Async::Ready(client));
- } else {
- return Err(io::Error::new(
- io::ErrorKind::InvalidData,
- "unexpected attachio result",
- ));
- }
- }
- ChannelMessage::Data(..) => {
- // just ignore data sent to uninteresting (optional) channel
- self.msg_loop = MessageLoop::resume(client);
- }
- ChannelMessage::InputRequest(1) => {
- // this may fail with EWOULDBLOCK in theory, but the
- // payload is quite small, and the send buffer should
- // be empty so the operation will complete immediately
- let sock_fd = client.as_raw_fd();
- let ifd = self.stdin.as_raw_fd();
- let ofd = self.stdout.as_raw_fd();
- let efd = self.stderr.as_ref().map_or(ofd, |f| f.as_raw_fd());
- procutil::send_raw_fds(sock_fd, &[ifd, ofd, efd])?;
- self.msg_loop = MessageLoop::resume(client);
- }
- ChannelMessage::InputRequest(..)
- | ChannelMessage::LineRequest(..)
- | ChannelMessage::SystemRequest(..) => {
+/// The client-side fds may be dropped once duplicated to the server.
+pub async fn attach_io(
+ proto: &mut Protocol<impl Connection + AsRawFd>,
+ stdin: &impl AsRawFd,
+ stdout: &impl AsRawFd,
+ stderr: &impl AsRawFd,
+) -> io::Result<()> {
+ proto.send_command("attachio").await?;
+ loop {
+ match proto.fetch_response().await? {
+ ChannelMessage::Data(b'r', data) => {
+ let fd_cnt = message::parse_result_code(data)?;
+ if fd_cnt == 3 {
+ return Ok(());
+ } else {
return Err(io::Error::new(
io::ErrorKind::InvalidData,
- "unsupported request while attaching io",
+ "unexpected attachio result",
));
}
}
+ ChannelMessage::Data(..) => {
+ // just ignore data sent to uninteresting (optional) channel
+ }
+ ChannelMessage::InputRequest(1) => {
+ // this may fail with EWOULDBLOCK in theory, but the
+ // payload is quite small, and the send buffer should
+ // be empty so the operation will complete immediately
+ let sock_fd = proto.as_raw_fd();
+ let ifd = stdin.as_raw_fd();
+ let ofd = stdout.as_raw_fd();
+ let efd = stderr.as_raw_fd();
+ procutil::send_raw_fds(sock_fd, &[ifd, ofd, efd])?;
+ }
+ ChannelMessage::InputRequest(..)
+ | ChannelMessage::LineRequest(..)
+ | ChannelMessage::SystemRequest(..) => {
+ return Err(io::Error::new(
+ io::ErrorKind::InvalidData,
+ "unsupported request while attaching io",
+ ));
+ }
}
}
}
--- a/rust/chg/src/clientext.rs Tue Jul 14 10:25:41 2020 +0200
+++ b/rust/chg/src/clientext.rs Mon Jul 20 21:56:27 2020 +0530
@@ -5,55 +5,99 @@
//! cHg extensions to command server client.
-use bytes::{BufMut, Bytes, BytesMut};
+use bytes::{BufMut, BytesMut};
use std::ffi::OsStr;
use std::io;
use std::mem;
use std::os::unix::ffi::OsStrExt;
use std::os::unix::io::AsRawFd;
use std::path::Path;
-use tokio_hglib::protocol::{OneShotQuery, OneShotRequest};
-use tokio_hglib::{Client, Connection};
+use tokio_hglib::UnixClient;
-use crate::attachio::AttachIo;
-use crate::message::{self, Instruction};
-use crate::runcommand::ChgRunCommand;
+use crate::attachio;
+use crate::message::{self, Instruction, ServerSpec};
+use crate::runcommand;
use crate::uihandler::SystemHandler;
-pub trait ChgClientExt<C>
-where
- C: Connection + AsRawFd,
-{
+/// Command-server client that also supports cHg extensions.
+pub struct ChgClient {
+ client: UnixClient,
+}
+
+impl ChgClient {
+ /// Connects to a command server listening at the specified socket path.
+ pub async fn connect(path: impl AsRef<Path>) -> io::Result<Self> {
+ let client = UnixClient::connect(path).await?;
+ Ok(ChgClient { client })
+ }
+
+ /// Server capabilities, encoding, etc.
+ pub fn server_spec(&self) -> &ServerSpec {
+ self.client.server_spec()
+ }
+
/// Attaches the client file descriptors to the server.
- fn attach_io<I, O, E>(self, stdin: I, stdout: O, stderr: E) -> AttachIo<C, I, O, E>
- where
- I: AsRawFd,
- O: AsRawFd,
- E: AsRawFd;
+ pub async fn attach_io(
+ &mut self,
+ stdin: &impl AsRawFd,
+ stdout: &impl AsRawFd,
+ stderr: &impl AsRawFd,
+ ) -> io::Result<()> {
+ attachio::attach_io(self.client.borrow_protocol_mut(), stdin, stdout, stderr).await
+ }
/// Changes the working directory of the server.
- fn set_current_dir(self, dir: impl AsRef<Path>) -> OneShotRequest<C>;
+ pub async fn set_current_dir(&mut self, dir: impl AsRef<Path>) -> io::Result<()> {
+ let dir_bytes = dir.as_ref().as_os_str().as_bytes().to_owned();
+ self.client
+ .borrow_protocol_mut()
+ .send_command_with_args("chdir", dir_bytes)
+ .await
+ }
/// Updates the environment variables of the server.
- fn set_env_vars_os(
- self,
+ pub async fn set_env_vars_os(
+ &mut self,
vars: impl IntoIterator<Item = (impl AsRef<OsStr>, impl AsRef<OsStr>)>,
- ) -> OneShotRequest<C>;
+ ) -> io::Result<()> {
+ self.client
+ .borrow_protocol_mut()
+ .send_command_with_args("setenv", message::pack_env_vars_os(vars))
+ .await
+ }
/// Changes the process title of the server.
- fn set_process_name(self, name: impl AsRef<OsStr>) -> OneShotRequest<C>;
+ pub async fn set_process_name(&mut self, name: impl AsRef<OsStr>) -> io::Result<()> {
+ let name_bytes = name.as_ref().as_bytes().to_owned();
+ self.client
+ .borrow_protocol_mut()
+ .send_command_with_args("setprocname", name_bytes)
+ .await
+ }
/// Changes the umask of the server process.
- fn set_umask(self, mask: u32) -> OneShotRequest<C>;
+ pub async fn set_umask(&mut self, mask: u32) -> io::Result<()> {
+ let mut mask_bytes = BytesMut::with_capacity(mem::size_of_val(&mask));
+ mask_bytes.put_u32(mask);
+ self.client
+ .borrow_protocol_mut()
+ .send_command_with_args("setumask2", mask_bytes)
+ .await
+ }
/// Runs the specified Mercurial command with cHg extension.
- fn run_command_chg<H>(
- self,
- handler: H,
+ pub async fn run_command_chg(
+ &mut self,
+ handler: &mut impl SystemHandler,
args: impl IntoIterator<Item = impl AsRef<OsStr>>,
- ) -> ChgRunCommand<C, H>
- where
- H: SystemHandler;
+ ) -> io::Result<i32> {
+ runcommand::run_command(
+ self.client.borrow_protocol_mut(),
+ handler,
+ message::pack_args_os(args),
+ )
+ .await
+ }
/// Validates if the server can run Mercurial commands with the expected
/// configuration.
@@ -63,66 +107,15 @@
///
/// Client-side environment must be sent prior to this request, by
/// `set_current_dir()` and `set_env_vars_os()`.
- fn validate(
- self,
+ pub async fn validate(
+ &mut self,
args: impl IntoIterator<Item = impl AsRef<OsStr>>,
- ) -> OneShotQuery<C, fn(Bytes) -> io::Result<Vec<Instruction>>>;
-}
-
-impl<C> ChgClientExt<C> for Client<C>
-where
- C: Connection + AsRawFd,
-{
- fn attach_io<I, O, E>(self, stdin: I, stdout: O, stderr: E) -> AttachIo<C, I, O, E>
- where
- I: AsRawFd,
- O: AsRawFd,
- E: AsRawFd,
- {
- AttachIo::with_client(self, stdin, stdout, Some(stderr))
- }
-
- fn set_current_dir(self, dir: impl AsRef<Path>) -> OneShotRequest<C> {
- OneShotRequest::start_with_args(self, b"chdir", dir.as_ref().as_os_str().as_bytes())
- }
-
- fn set_env_vars_os(
- self,
- vars: impl IntoIterator<Item = (impl AsRef<OsStr>, impl AsRef<OsStr>)>,
- ) -> OneShotRequest<C> {
- OneShotRequest::start_with_args(self, b"setenv", message::pack_env_vars_os(vars))
- }
-
- fn set_process_name(self, name: impl AsRef<OsStr>) -> OneShotRequest<C> {
- OneShotRequest::start_with_args(self, b"setprocname", name.as_ref().as_bytes())
- }
-
- fn set_umask(self, mask: u32) -> OneShotRequest<C> {
- let mut args = BytesMut::with_capacity(mem::size_of_val(&mask));
- args.put_u32_be(mask);
- OneShotRequest::start_with_args(self, b"setumask2", args)
- }
-
- fn run_command_chg<H>(
- self,
- handler: H,
- args: impl IntoIterator<Item = impl AsRef<OsStr>>,
- ) -> ChgRunCommand<C, H>
- where
- H: SystemHandler,
- {
- ChgRunCommand::with_client(self, handler, message::pack_args_os(args))
- }
-
- fn validate(
- self,
- args: impl IntoIterator<Item = impl AsRef<OsStr>>,
- ) -> OneShotQuery<C, fn(Bytes) -> io::Result<Vec<Instruction>>> {
- OneShotQuery::start_with_args(
- self,
- b"validate",
- message::pack_args_os(args),
- message::parse_instructions,
- )
+ ) -> io::Result<Vec<Instruction>> {
+ let data = self
+ .client
+ .borrow_protocol_mut()
+ .query_with_args("validate", message::pack_args_os(args))
+ .await?;
+ message::parse_instructions(data)
}
}
--- a/rust/chg/src/lib.rs Tue Jul 14 10:25:41 2020 +0200
+++ b/rust/chg/src/lib.rs Mon Jul 20 21:56:27 2020 +0530
@@ -11,5 +11,5 @@
mod runcommand;
mod uihandler;
-pub use clientext::ChgClientExt;
+pub use clientext::ChgClient;
pub use uihandler::{ChgUiHandler, SystemHandler};
--- a/rust/chg/src/locator.rs Tue Jul 14 10:25:41 2020 +0200
+++ b/rust/chg/src/locator.rs Mon Jul 20 21:56:27 2020 +0530
@@ -5,7 +5,6 @@
//! Utility for locating command-server process.
-use futures::future::{self, Either, Loop};
use log::debug;
use std::env;
use std::ffi::{OsStr, OsString};
@@ -14,14 +13,11 @@
use std::os::unix::ffi::{OsStrExt, OsStringExt};
use std::os::unix::fs::{DirBuilderExt, MetadataExt};
use std::path::{Path, PathBuf};
-use std::process::{self, Command};
-use std::time::Duration;
-use tokio::prelude::*;
-use tokio_hglib::UnixClient;
-use tokio_process::{Child, CommandExt};
-use tokio_timer;
+use std::process::{self, Child, Command};
+use std::time::{Duration, Instant};
+use tokio::time;
-use crate::clientext::ChgClientExt;
+use crate::clientext::ChgClient;
use crate::message::{Instruction, ServerSpec};
use crate::procutil;
@@ -82,43 +78,33 @@
/// Connects to the server.
///
/// The server process will be spawned if not running.
- pub fn connect(self) -> impl Future<Item = (Self, UnixClient), Error = io::Error> {
- future::loop_fn((self, 0), |(loc, cnt)| {
- if cnt < 10 {
- let fut = loc
- .try_connect()
- .and_then(|(loc, client)| {
- client
- .validate(&loc.hg_early_args)
- .map(|(client, instructions)| (loc, client, instructions))
- })
- .and_then(move |(loc, client, instructions)| {
- loc.run_instructions(client, instructions, cnt)
- });
- Either::A(fut)
- } else {
- let msg = format!(
- concat!(
- "too many redirections.\n",
- "Please make sure {:?} is not a wrapper which ",
- "changes sensitive environment variables ",
- "before executing hg. If you have to use a ",
- "wrapper, wrap chg instead of hg.",
- ),
- loc.hg_command
- );
- Either::B(future::err(io::Error::new(io::ErrorKind::Other, msg)))
+ pub async fn connect(&mut self) -> io::Result<ChgClient> {
+ for _cnt in 0..10 {
+ let mut client = self.try_connect().await?;
+ let instructions = client.validate(&self.hg_early_args).await?;
+ let reconnect = self.run_instructions(&instructions)?;
+ if !reconnect {
+ return Ok(client);
}
- })
+ }
+
+ let msg = format!(
+ concat!(
+ "too many redirections.\n",
+ "Please make sure {:?} is not a wrapper which ",
+ "changes sensitive environment variables ",
+ "before executing hg. If you have to use a ",
+ "wrapper, wrap chg instead of hg.",
+ ),
+ self.hg_command
+ );
+ Err(io::Error::new(io::ErrorKind::Other, msg))
}
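
The loop above is the whole redirect policy: at most ten attempts, early return on success, a single error path after the loop. The same control flow reduced to a sketch, where `try_once` is a hypothetical closure standing in for connect-validate-run_instructions and returns `Ok(true)` when a reconnect was requested:

    use std::io;

    fn connect_with_redirects(
        max_redirects: usize,
        mut try_once: impl FnMut() -> io::Result<bool>, // Ok(true) = reconnect
    ) -> io::Result<()> {
        for _ in 0..max_redirects {
            let reconnect = try_once()?;
            if !reconnect {
                return Ok(());
            }
        }
        Err(io::Error::new(io::ErrorKind::Other, "too many redirections"))
    }
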
/// Runs instructions received from the server.
- fn run_instructions(
- mut self,
- client: UnixClient,
- instructions: Vec<Instruction>,
- cnt: usize,
- ) -> io::Result<Loop<(Self, UnixClient), (Self, usize)>> {
+ ///
+ /// Returns true if the client should try connecting to the other server.
+ fn run_instructions(&mut self, instructions: &[Instruction]) -> io::Result<bool> {
let mut reconnect = false;
for inst in instructions {
debug!("instruction: {:?}", inst);
@@ -126,7 +112,7 @@
Instruction::Exit(_) => {
// Just returns the current connection to run the
// unparsable command and report the error
- return Ok(Loop::Break((self, client)));
+ return Ok(false);
}
Instruction::Reconnect => {
reconnect = true;
@@ -139,7 +125,7 @@
);
return Err(io::Error::new(io::ErrorKind::InvalidData, msg));
}
- self.redirect_sock_path = Some(path);
+ self.redirect_sock_path = Some(path.to_owned());
reconnect = true;
}
Instruction::Unlink(path) => {
@@ -155,64 +141,44 @@
}
}
- if reconnect {
- Ok(Loop::Continue((self, cnt + 1)))
- } else {
- Ok(Loop::Break((self, client)))
- }
+ Ok(reconnect)
}
/// Tries to connect to the existing server, or spawns new if not running.
- fn try_connect(self) -> impl Future<Item = (Self, UnixClient), Error = io::Error> {
+ async fn try_connect(&mut self) -> io::Result<ChgClient> {
let sock_path = self
.redirect_sock_path
.as_ref()
.unwrap_or(&self.base_sock_path)
.clone();
debug!("try connect to {}", sock_path.display());
- UnixClient::connect(sock_path)
- .then(|res| {
- match res {
- Ok(client) => Either::A(future::ok((self, client))),
- Err(_) => {
- // Prevent us from being re-connected to the outdated
- // master server: We were told by the server to redirect
- // to redirect_sock_path, which didn't work. We do not
- // want to connect to the same master server again
- // because it would probably tell us the same thing.
- if self.redirect_sock_path.is_some() {
- fs::remove_file(&self.base_sock_path).unwrap_or(());
- // may race
- }
- Either::B(self.spawn_connect())
- }
+ let mut client = match ChgClient::connect(sock_path).await {
+ Ok(client) => client,
+ Err(_) => {
+ // Prevent us from being re-connected to the outdated
+ // master server: We were told by the server to redirect
+ // to redirect_sock_path, which didn't work. We do not
+ // want to connect to the same master server again
+ // because it would probably tell us the same thing.
+ if self.redirect_sock_path.is_some() {
+ fs::remove_file(&self.base_sock_path).unwrap_or(());
+ // may race
}
- })
- .and_then(|(loc, client)| {
- check_server_capabilities(client.server_spec())?;
- Ok((loc, client))
- })
- .and_then(|(loc, client)| {
- // It's purely optional, and the server might not support this command.
- if client.server_spec().capabilities.contains("setprocname") {
- let fut = client
- .set_process_name(format!("chg[worker/{}]", loc.process_id))
- .map(|client| (loc, client));
- Either::A(fut)
- } else {
- Either::B(future::ok((loc, client)))
- }
- })
- .and_then(|(loc, client)| {
- client
- .set_current_dir(&loc.current_dir)
- .map(|client| (loc, client))
- })
- .and_then(|(loc, client)| {
- client
- .set_env_vars_os(loc.env_vars.iter().cloned())
- .map(|client| (loc, client))
- })
+ self.spawn_connect().await?
+ }
+ };
+ check_server_capabilities(client.server_spec())?;
+ // It's purely optional, and the server might not support this command.
+ if client.server_spec().capabilities.contains("setprocname") {
+ client
+ .set_process_name(format!("chg[worker/{}]", self.process_id))
+ .await?;
+ }
+ client.set_current_dir(&self.current_dir).await?;
+ client
+ .set_env_vars_os(self.env_vars.iter().cloned())
+ .await?;
+ Ok(client)
}
/// Spawns new server process and connects to it.
@@ -220,10 +186,10 @@
/// The server will be spawned at the current working directory, then
/// chdir to "/", so that the server will load configs from the target
/// repository.
- fn spawn_connect(self) -> impl Future<Item = (Self, UnixClient), Error = io::Error> {
+ async fn spawn_connect(&mut self) -> io::Result<ChgClient> {
let sock_path = self.temp_sock_path();
debug!("start cmdserver at {}", sock_path.display());
- Command::new(&self.hg_command)
+ let server = Command::new(&self.hg_command)
.arg("serve")
.arg("--cmdserver")
.arg("chgunix")
@@ -236,68 +202,49 @@
.env_clear()
.envs(self.env_vars.iter().cloned())
.env("CHGINTERNALMARK", "")
- .spawn_async()
- .into_future()
- .and_then(|server| self.connect_spawned(server, sock_path))
- .and_then(|(loc, client, sock_path)| {
- debug!(
- "rename {} to {}",
- sock_path.display(),
- loc.base_sock_path.display()
- );
- fs::rename(&sock_path, &loc.base_sock_path)?;
- Ok((loc, client))
- })
+ .spawn()?;
+ let client = self.connect_spawned(server, &sock_path).await?;
+ debug!(
+ "rename {} to {}",
+ sock_path.display(),
+ self.base_sock_path.display()
+ );
+ fs::rename(&sock_path, &self.base_sock_path)?;
+ Ok(client)
}
/// Tries to connect to the just spawned server repeatedly until timeout
/// exceeded.
- fn connect_spawned(
- self,
- server: Child,
- sock_path: PathBuf,
- ) -> impl Future<Item = (Self, UnixClient, PathBuf), Error = io::Error> {
+ async fn connect_spawned(
+ &mut self,
+ mut server: Child,
+ sock_path: &Path,
+ ) -> io::Result<ChgClient> {
debug!("try connect to {} repeatedly", sock_path.display());
- let connect = future::loop_fn(sock_path, |sock_path| {
- UnixClient::connect(sock_path.clone()).then(|res| {
- match res {
- Ok(client) => Either::A(future::ok(Loop::Break((client, sock_path)))),
- Err(_) => {
- // try again with slight delay
- let fut = tokio_timer::sleep(Duration::from_millis(10))
- .map(|()| Loop::Continue(sock_path))
- .map_err(|err| io::Error::new(io::ErrorKind::Other, err));
- Either::B(fut)
- }
- }
- })
- });
-
// waits for either connection established or server failed to start
- connect
- .select2(server)
- .map_err(|res| res.split().0)
- .timeout(self.timeout)
- .map_err(|err| {
- err.into_inner().unwrap_or_else(|| {
- io::Error::new(
- io::ErrorKind::TimedOut,
- "timed out while connecting to server",
- )
- })
- })
- .and_then(|res| {
- match res {
- Either::A(((client, sock_path), server)) => {
- server.forget(); // continue to run in background
- Ok((self, client, sock_path))
- }
- Either::B((st, _)) => Err(io::Error::new(
- io::ErrorKind::Other,
- format!("server exited too early: {}", st),
- )),
- }
- })
+ let start_time = Instant::now();
+ while start_time.elapsed() < self.timeout {
+ if let Ok(client) = ChgClient::connect(&sock_path).await {
+ // the server handle is dropped here, but the detached process
+ // will continue running in the background
+ return Ok(client);
+ }
+
+ if let Some(st) = server.try_wait()? {
+ return Err(io::Error::new(
+ io::ErrorKind::Other,
+ format!("server exited too early: {}", st),
+ ));
+ }
+
+ // try again with a slight delay
+ time::delay_for(Duration::from_millis(10)).await;
+ }
+
+ Err(io::Error::new(
+ io::ErrorKind::TimedOut,
+ "timed out while connecting to server",
+ ))
}
}
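
`connect_spawned()` above replaces the old `select2` + timer combinator stack with an explicit deadline loop: attempt a connect, check `Child::try_wait()` for early server death, sleep, repeat. A synchronous std-only sketch of that loop (the patch itself awaits `tokio::time::delay_for` instead of blocking); `try_connect` is a hypothetical closure standing in for `ChgClient::connect`:

    use std::io;
    use std::process::Child;
    use std::thread;
    use std::time::{Duration, Instant};

    fn wait_for_server(
        mut server: Child,
        timeout: Duration,
        mut try_connect: impl FnMut() -> io::Result<()>,
    ) -> io::Result<Child> {
        let start_time = Instant::now();
        while start_time.elapsed() < timeout {
            if try_connect().is_ok() {
                // keep the handle; the server keeps running detached
                return Ok(server);
            }
            if let Some(status) = server.try_wait()? {
                return Err(io::Error::new(
                    io::ErrorKind::Other,
                    format!("server exited too early: {}", status),
                ));
            }
            thread::sleep(Duration::from_millis(10));
        }
        Err(io::Error::new(
            io::ErrorKind::TimedOut,
            "timed out while connecting to server",
        ))
    }
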
--- a/rust/chg/src/main.rs Tue Jul 14 10:25:41 2020 +0200
+++ b/rust/chg/src/main.rs Mon Jul 20 21:56:27 2020 +0530
@@ -5,13 +5,12 @@
use chg::locator::{self, Locator};
use chg::procutil;
-use chg::{ChgClientExt, ChgUiHandler};
-use futures::sync::oneshot;
+use chg::ChgUiHandler;
use std::env;
use std::io;
+use std::io::Write;
use std::process;
use std::time::Instant;
-use tokio::prelude::*;
struct DebugLogger {
start: Instant,
@@ -67,31 +66,23 @@
process::exit(code);
}
-fn run(umask: u32) -> io::Result<i32> {
+#[tokio::main]
+async fn run(umask: u32) -> io::Result<i32> {
let mut loc = Locator::prepare_from_env()?;
loc.set_early_args(locator::collect_early_args(env::args_os().skip(1)));
- let handler = ChgUiHandler::new();
- let (result_tx, result_rx) = oneshot::channel();
- let fut = loc
- .connect()
- .and_then(|(_, client)| client.attach_io(io::stdin(), io::stdout(), io::stderr()))
- .and_then(move |client| client.set_umask(umask))
- .and_then(|client| {
- let pid = client.server_spec().process_id.unwrap();
- let pgid = client.server_spec().process_group_id;
- procutil::setup_signal_handler_once(pid, pgid)?;
- Ok(client)
- })
- .and_then(|client| client.run_command_chg(handler, env::args_os().skip(1)))
- .map(|(_client, _handler, code)| {
- procutil::restore_signal_handler_once()?;
- Ok(code)
- })
- .or_else(|err| Ok(Err(err))) // pass back error to caller
- .map(|res| result_tx.send(res).unwrap());
- tokio::run(fut);
- result_rx.wait().unwrap_or(Err(io::Error::new(
- io::ErrorKind::Other,
- "no exit code set",
- )))
+ let mut handler = ChgUiHandler::new();
+ let mut client = loc.connect().await?;
+ client
+ .attach_io(&io::stdin(), &io::stdout(), &io::stderr())
+ .await?;
+ client.set_umask(umask).await?;
+ let pid = client.server_spec().process_id.unwrap();
+ let pgid = client.server_spec().process_group_id;
+ procutil::setup_signal_handler_once(pid, pgid)?;
+ let code = client
+ .run_command_chg(&mut handler, env::args_os().skip(1))
+ .await?;
+ procutil::restore_signal_handler_once()?;
+ handler.wait_pager().await?;
+ Ok(code)
}
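
Note that `#[tokio::main]` sits on `run()` rather than `main()`: the attribute works on any async fn, rewriting it into a synchronous function that builds a runtime and blocks on the body, so `run()` keeps its old signature for the caller. A minimal sketch, assuming tokio 0.2 with its macros feature enabled:

    // Cargo.toml (assumed): tokio = { version = "0.2", features = ["full"] }

    #[tokio::main]
    async fn run() -> std::io::Result<i32> {
        // connect, attach stdio, run the command, clean up -- all as
        // straight-line awaits in the real code.
        Ok(0)
    }

    fn main() {
        let code = run().expect("chg failed");
        std::process::exit(code);
    }
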
--- a/rust/chg/src/runcommand.rs Tue Jul 14 10:25:41 2020 +0200
+++ b/rust/chg/src/runcommand.rs Mon Jul 20 21:56:27 2020 +0530
@@ -6,164 +6,56 @@
//! Functions to run a Mercurial command in a cHg-aware command server.
use bytes::Bytes;
-use futures::future::IntoFuture;
-use futures::{Async, Future, Poll};
use std::io;
-use std::mem;
use std::os::unix::io::AsRawFd;
use tokio_hglib::codec::ChannelMessage;
-use tokio_hglib::protocol::MessageLoop;
-use tokio_hglib::{Client, Connection};
+use tokio_hglib::{Connection, Protocol};
-use crate::attachio::AttachIo;
+use crate::attachio;
use crate::message::{self, CommandType};
use crate::uihandler::SystemHandler;
-enum AsyncS<R, S> {
- Ready(R),
- NotReady(S),
- PollAgain(S),
-}
-
-enum CommandState<C, H>
-where
- C: Connection,
- H: SystemHandler,
-{
- Running(MessageLoop<C>, H),
- SpawningPager(Client<C>, <H::SpawnPagerResult as IntoFuture>::Future),
- AttachingPager(AttachIo<C, io::Stdin, H::PagerStdin, H::PagerStdin>, H),
- WaitingSystem(Client<C>, <H::RunSystemResult as IntoFuture>::Future),
- Finished,
-}
-
-type CommandPoll<C, H> = io::Result<AsyncS<(Client<C>, H, i32), CommandState<C, H>>>;
-
-/// Future resolves to `(exit_code, client)`.
-#[must_use = "futures do nothing unless polled"]
-pub struct ChgRunCommand<C, H>
-where
- C: Connection,
- H: SystemHandler,
-{
- state: CommandState<C, H>,
-}
-
-impl<C, H> ChgRunCommand<C, H>
-where
- C: Connection + AsRawFd,
- H: SystemHandler,
-{
- pub fn with_client(client: Client<C>, handler: H, packed_args: Bytes) -> ChgRunCommand<C, H> {
- let msg_loop = MessageLoop::start_with_args(client, b"runcommand", packed_args);
- ChgRunCommand {
- state: CommandState::Running(msg_loop, handler),
- }
- }
-}
-
-impl<C, H> Future for ChgRunCommand<C, H>
-where
- C: Connection + AsRawFd,
- H: SystemHandler,
-{
- type Item = (Client<C>, H, i32);
- type Error = io::Error;
-
- fn poll(&mut self) -> Poll<Self::Item, Self::Error> {
- loop {
- let state = mem::replace(&mut self.state, CommandState::Finished);
- match state.poll()? {
- AsyncS::Ready((client, handler, code)) => {
- return Ok(Async::Ready((client, handler, code)));
- }
- AsyncS::NotReady(newstate) => {
- self.state = newstate;
- return Ok(Async::NotReady);
- }
- AsyncS::PollAgain(newstate) => {
- self.state = newstate;
- }
- }
- }
- }
-}
-
-impl<C, H> CommandState<C, H>
-where
- C: Connection + AsRawFd,
- H: SystemHandler,
-{
- fn poll(self) -> CommandPoll<C, H> {
- match self {
- CommandState::Running(mut msg_loop, handler) => {
- if let Async::Ready((client, msg)) = msg_loop.poll()? {
- process_message(client, handler, msg)
- } else {
- Ok(AsyncS::NotReady(CommandState::Running(msg_loop, handler)))
- }
- }
- CommandState::SpawningPager(client, mut fut) => {
- if let Async::Ready((handler, pin)) = fut.poll()? {
- let fut = AttachIo::with_client(client, io::stdin(), pin, None);
- Ok(AsyncS::PollAgain(CommandState::AttachingPager(
- fut, handler,
- )))
- } else {
- Ok(AsyncS::NotReady(CommandState::SpawningPager(client, fut)))
- }
- }
- CommandState::AttachingPager(mut fut, handler) => {
- if let Async::Ready(client) = fut.poll()? {
- let msg_loop = MessageLoop::start(client, b""); // terminator
- Ok(AsyncS::PollAgain(CommandState::Running(msg_loop, handler)))
- } else {
- Ok(AsyncS::NotReady(CommandState::AttachingPager(fut, handler)))
- }
- }
- CommandState::WaitingSystem(client, mut fut) => {
- if let Async::Ready((handler, code)) = fut.poll()? {
- let data = message::pack_result_code(code);
- let msg_loop = MessageLoop::resume_with_data(client, data);
- Ok(AsyncS::PollAgain(CommandState::Running(msg_loop, handler)))
- } else {
- Ok(AsyncS::NotReady(CommandState::WaitingSystem(client, fut)))
- }
- }
- CommandState::Finished => panic!("poll ChgRunCommand after it's done"),
- }
- }
-}
-
-fn process_message<C, H>(client: Client<C>, handler: H, msg: ChannelMessage) -> CommandPoll<C, H>
-where
- C: Connection,
- H: SystemHandler,
-{
- {
- match msg {
+/// Runs the given Mercurial command in the cHg-aware command server, and
+/// fetches the result code.
+///
+/// This is a subset of tokio-hglib's `run_command()` with additional
+/// `SystemRequest` support.
+pub async fn run_command(
+ proto: &mut Protocol<impl Connection + AsRawFd>,
+ handler: &mut impl SystemHandler,
+ packed_args: impl Into<Bytes>,
+) -> io::Result<i32> {
+ proto
+ .send_command_with_args("runcommand", packed_args)
+ .await?;
+ loop {
+ match proto.fetch_response().await? {
ChannelMessage::Data(b'r', data) => {
- let code = message::parse_result_code(data)?;
- Ok(AsyncS::Ready((client, handler, code)))
+ return message::parse_result_code(data);
}
ChannelMessage::Data(..) => {
// just ignores data sent to optional channel
- let msg_loop = MessageLoop::resume(client);
- Ok(AsyncS::PollAgain(CommandState::Running(msg_loop, handler)))
}
- ChannelMessage::InputRequest(..) | ChannelMessage::LineRequest(..) => Err(
- io::Error::new(io::ErrorKind::InvalidData, "unsupported request"),
- ),
+ ChannelMessage::InputRequest(..) | ChannelMessage::LineRequest(..) => {
+ return Err(io::Error::new(
+ io::ErrorKind::InvalidData,
+ "unsupported request",
+ ));
+ }
ChannelMessage::SystemRequest(data) => {
let (cmd_type, cmd_spec) = message::parse_command_spec(data)?;
match cmd_type {
CommandType::Pager => {
- let fut = handler.spawn_pager(cmd_spec).into_future();
- Ok(AsyncS::PollAgain(CommandState::SpawningPager(client, fut)))
+ // the server spins a new command loop while the pager request is
+ // in progress; it can be terminated by the "" (empty) command.
+ let pin = handler.spawn_pager(&cmd_spec).await?;
+ attachio::attach_io(proto, &io::stdin(), &pin, &pin).await?;
+ proto.send_command("").await?; // terminator
}
CommandType::System => {
- let fut = handler.run_system(cmd_spec).into_future();
- Ok(AsyncS::PollAgain(CommandState::WaitingSystem(client, fut)))
+ let code = handler.run_system(&cmd_spec).await?;
+ let data = message::pack_result_code(code);
+ proto.send_data(data).await?;
}
}
}
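
The hand-rolled `Future` state machine (`CommandState`, `AsyncS`) collapses into a single `loop { match ... }` over protocol messages. Its distilled shape, with a simplified `Message` enum standing in for `tokio_hglib::codec::ChannelMessage` and a synchronous fetch closure in place of `fetch_response().await`:

    use std::io;

    enum Message {
        Result(i32),            // 'r' channel: final exit code
        Data(Vec<u8>),          // optional channels
        SystemRequest(Vec<u8>), // pager/system command spec
    }

    fn run_command(mut fetch: impl FnMut() -> io::Result<Message>) -> io::Result<i32> {
        loop {
            match fetch()? {
                Message::Result(code) => return Ok(code),
                Message::Data(_data) => {} // just ignore and keep looping
                Message::SystemRequest(_spec) => {
                    // dispatch to the pager/system handler, reply, loop again
                }
            }
        }
    }
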
--- a/rust/chg/src/uihandler.rs Tue Jul 14 10:25:41 2020 +0200
+++ b/rust/chg/src/uihandler.rs Mon Jul 20 21:56:27 2020 +0530
@@ -3,76 +3,75 @@
// This software may be used and distributed according to the terms of the
// GNU General Public License version 2 or any later version.
-use futures::future::IntoFuture;
-use futures::Future;
+use async_trait::async_trait;
use std::io;
use std::os::unix::io::AsRawFd;
use std::os::unix::process::ExitStatusExt;
-use std::process::{Command, Stdio};
+use std::process::Stdio;
use tokio;
-use tokio_process::{ChildStdin, CommandExt};
+use tokio::process::{Child, ChildStdin, Command};
use crate::message::CommandSpec;
use crate::procutil;
/// Callback to process shell command requests received from server.
-pub trait SystemHandler: Sized {
+#[async_trait]
+pub trait SystemHandler {
type PagerStdin: AsRawFd;
- type SpawnPagerResult: IntoFuture<Item = (Self, Self::PagerStdin), Error = io::Error>;
- type RunSystemResult: IntoFuture<Item = (Self, i32), Error = io::Error>;
/// Handles pager command request.
///
/// Returns the pipe to be attached to the server if the pager is spawned.
- fn spawn_pager(self, spec: CommandSpec) -> Self::SpawnPagerResult;
+ async fn spawn_pager(&mut self, spec: &CommandSpec) -> io::Result<Self::PagerStdin>;
/// Handles system command request.
///
/// Returns command exit code (positive) or signal number (negative).
- fn run_system(self, spec: CommandSpec) -> Self::RunSystemResult;
+ async fn run_system(&mut self, spec: &CommandSpec) -> io::Result<i32>;
}
/// Default cHg implementation to process requests received from server.
-pub struct ChgUiHandler {}
+pub struct ChgUiHandler {
+ pager: Option<Child>,
+}
impl ChgUiHandler {
pub fn new() -> ChgUiHandler {
- ChgUiHandler {}
+ ChgUiHandler { pager: None }
+ }
+
+ /// Waits until the pager process exits.
+ pub async fn wait_pager(&mut self) -> io::Result<()> {
+ if let Some(p) = self.pager.take() {
+ p.await?;
+ }
+ Ok(())
}
}
+#[async_trait]
impl SystemHandler for ChgUiHandler {
type PagerStdin = ChildStdin;
- type SpawnPagerResult = io::Result<(Self, Self::PagerStdin)>;
- type RunSystemResult = Box<dyn Future<Item = (Self, i32), Error = io::Error> + Send>;
- fn spawn_pager(self, spec: CommandSpec) -> Self::SpawnPagerResult {
- let mut pager = new_shell_command(&spec)
- .stdin(Stdio::piped())
- .spawn_async()?;
- let pin = pager.stdin().take().unwrap();
+ async fn spawn_pager(&mut self, spec: &CommandSpec) -> io::Result<Self::PagerStdin> {
+ let mut pager = new_shell_command(&spec).stdin(Stdio::piped()).spawn()?;
+ let pin = pager.stdin.take().unwrap();
procutil::set_blocking_fd(pin.as_raw_fd())?;
// TODO: if pager exits, notify the server with SIGPIPE immediately.
// otherwise the server won't get SIGPIPE if it does not write
// anything. (issue5278)
// kill(peerpid, SIGPIPE);
- tokio::spawn(pager.map(|_| ()).map_err(|_| ())); // just ignore errors
- Ok((self, pin))
+ self.pager = Some(pager);
+ Ok(pin)
}
- fn run_system(self, spec: CommandSpec) -> Self::RunSystemResult {
- let fut = new_shell_command(&spec)
- .spawn_async()
- .into_future()
- .flatten()
- .map(|status| {
- let code = status
- .code()
- .or_else(|| status.signal().map(|n| -n))
- .expect("either exit code or signal should be set");
- (self, code)
- });
- Box::new(fut)
+ async fn run_system(&mut self, spec: &CommandSpec) -> io::Result<i32> {
+ let status = new_shell_command(&spec).spawn()?.await?;
+ let code = status
+ .code()
+ .or_else(|| status.signal().map(|n| -n))
+ .expect("either exit code or signal should be set");
+ Ok(code)
}
}
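
`SystemHandler` drops its associated future types by going through the `async-trait` crate, whose attribute rewrites each `async fn` into one returning a boxed future. A minimal sketch of the trait-plus-impl pattern, assuming `async-trait` as a dependency (matching the `use async_trait::async_trait` added above):

    use async_trait::async_trait;
    use std::io;

    #[async_trait]
    trait SystemHandler {
        // `&mut self` replaces the old by-value `self`, so one handler
        // can serve many requests.
        async fn run_system(&mut self, spec: &str) -> io::Result<i32>;
    }

    struct EchoHandler;

    #[async_trait]
    impl SystemHandler for EchoHandler {
        async fn run_system(&mut self, spec: &str) -> io::Result<i32> {
            println!("would run: {}", spec);
            Ok(0)
        }
    }
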
--- a/rust/hg-core/Cargo.toml Tue Jul 14 10:25:41 2020 +0200
+++ b/rust/hg-core/Cargo.toml Mon Jul 20 21:56:27 2020 +0530
@@ -4,7 +4,6 @@
authors = ["Georges Racinet <gracinet@anybox.fr>"]
description = "Mercurial pure Rust core library, with no assumption on Python bindings (FFI)"
edition = "2018"
-build = "build.rs"
[lib]
name = "hg"
@@ -13,17 +12,16 @@
byteorder = "1.3.4"
hex = "0.4.2"
lazy_static = "1.4.0"
-libc = { version = "0.2.66", optional = true }
memchr = "2.3.3"
rand = "0.7.3"
rand_pcg = "0.2.1"
rand_distr = "0.2.2"
rayon = "1.3.0"
-regex = "1.3.6"
+regex = "1.3.9"
twox-hash = "1.5.0"
same-file = "1.0.6"
crossbeam = "0.7.3"
-micro-timer = "0.2.1"
+micro-timer = "0.3.0"
log = "0.4.8"
[dev-dependencies]
@@ -31,10 +29,3 @@
memmap = "0.7.0"
pretty_assertions = "0.6.1"
tempfile = "3.1.0"
-
-[build-dependencies]
-cc = { version = "1.0.48", optional = true }
-
-[features]
-default = []
-with-re2 = ["cc", "libc"]
--- a/rust/hg-core/build.rs Tue Jul 14 10:25:41 2020 +0200
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,61 +0,0 @@
-// build.rs
-//
-// Copyright 2020 Raphaël Gomès <rgomes@octobus.net>
-//
-// This software may be used and distributed according to the terms of the
-// GNU General Public License version 2 or any later version.
-
-#[cfg(feature = "with-re2")]
-use cc;
-
-/// Uses either the system Re2 install as a dynamic library or the provided
-/// build as a static library
-#[cfg(feature = "with-re2")]
-fn compile_re2() {
- use cc;
- use std::path::Path;
- use std::process::exit;
-
- let msg = r"HG_RE2_PATH must be one of `system|<path to build source clone of Re2>`";
- let re2 = match std::env::var_os("HG_RE2_PATH") {
- None => {
- eprintln!("{}", msg);
- exit(1)
- }
- Some(v) => {
- if v == "system" {
- None
- } else {
- Some(v)
- }
- }
- };
-
- let mut options = cc::Build::new();
- options
- .cpp(true)
- .flag("-std=c++11")
- .file("src/re2/rust_re2.cpp");
-
- if let Some(ref source) = re2 {
- options.include(Path::new(source));
- };
-
- options.compile("librustre.a");
-
- if let Some(ref source) = &re2 {
- // Link the local source statically
- println!(
- "cargo:rustc-link-search=native={}",
- Path::new(source).join(Path::new("obj")).display()
- );
- println!("cargo:rustc-link-lib=static=re2");
- } else {
- println!("cargo:rustc-link-lib=re2");
- }
-}
-
-fn main() {
- #[cfg(feature = "with-re2")]
- compile_re2();
-}
--- a/rust/hg-core/src/ancestors.rs Tue Jul 14 10:25:41 2020 +0200
+++ b/rust/hg-core/src/ancestors.rs Mon Jul 20 21:56:27 2020 +0530
@@ -55,19 +55,19 @@
let filtered_initrevs = initrevs.into_iter().filter(|&r| r >= stoprev);
if inclusive {
let visit: BinaryHeap<Revision> = filtered_initrevs.collect();
- let seen = visit.iter().map(|&x| x).collect();
+ let seen = visit.iter().cloned().collect();
return Ok(AncestorsIterator {
- visit: visit,
- seen: seen,
- stoprev: stoprev,
- graph: graph,
+ visit,
+ seen,
+ stoprev,
+ graph,
});
}
let mut this = AncestorsIterator {
visit: BinaryHeap::new(),
seen: HashSet::new(),
- stoprev: stoprev,
- graph: graph,
+ stoprev,
+ graph,
};
this.seen.insert(NULL_REVISION);
for rev in filtered_initrevs {
@@ -107,7 +107,7 @@
}
pub fn peek(&self) -> Option<Revision> {
- self.visit.peek().map(|&r| r)
+ self.visit.peek().cloned()
}
/// Tell if the iterator is about an empty set
@@ -182,8 +182,8 @@
inclusive,
)?,
initrevs: v,
- stoprev: stoprev,
- inclusive: inclusive,
+ stoprev,
+ inclusive,
})
}
@@ -211,7 +211,7 @@
impl<G: Graph> MissingAncestors<G> {
pub fn new(graph: G, bases: impl IntoIterator<Item = Revision>) -> Self {
let mut created = MissingAncestors {
- graph: graph,
+ graph,
bases: HashSet::new(),
max_base: NULL_REVISION,
};
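
The ancestors.rs hunks are mechanical clippy-style cleanups: field-init shorthand, and `.cloned()` in place of a manual `.map(|&x| x)` when copying `Copy` values out of an `Option` or iterator of references. For example:

    use std::collections::BinaryHeap;

    fn peek_max(heap: &BinaryHeap<i32>) -> Option<i32> {
        heap.peek().cloned() // equivalent to .map(|&r| r)
    }
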
--- a/rust/hg-core/src/dagops.rs Tue Jul 14 10:25:41 2020 +0200
+++ b/rust/hg-core/src/dagops.rs Mon Jul 20 21:56:27 2020 +0530
@@ -16,10 +16,10 @@
use crate::ancestors::AncestorsIterator;
use std::collections::{BTreeSet, HashSet};
-fn remove_parents(
+fn remove_parents<S: std::hash::BuildHasher>(
graph: &impl Graph,
rev: Revision,
- set: &mut HashSet<Revision>,
+ set: &mut HashSet<Revision, S>,
) -> Result<(), GraphError> {
for parent in graph.parents(rev)?.iter() {
if *parent != NULL_REVISION {
@@ -65,9 +65,9 @@
///
/// # Performance notes
/// Internally, this function will store a full copy of `revs` in a `Vec`.
-pub fn retain_heads(
+pub fn retain_heads<S: std::hash::BuildHasher>(
graph: &impl Graph,
- revs: &mut HashSet<Revision>,
+ revs: &mut HashSet<Revision, S>,
) -> Result<(), GraphError> {
revs.remove(&NULL_REVISION);
// we need to construct an iterable copy of revs to avoid itering while
@@ -84,9 +84,9 @@
/// Roots of `revs`, passed as a `HashSet`
///
/// They are returned in arbitrary order
-pub fn roots<G: Graph>(
+pub fn roots<G: Graph, S: std::hash::BuildHasher>(
graph: &G,
- revs: &HashSet<Revision>,
+ revs: &HashSet<Revision, S>,
) -> Result<Vec<Revision>, GraphError> {
let mut roots: Vec<Revision> = Vec::new();
for rev in revs {
@@ -229,7 +229,8 @@
graph: &impl Graph,
revs: &[Revision],
) -> Result<Vec<Revision>, GraphError> {
- let mut as_vec = roots(graph, &revs.iter().cloned().collect())?;
+ let set: HashSet<_> = revs.iter().cloned().collect();
+ let mut as_vec = roots(graph, &set)?;
as_vec.sort();
Ok(as_vec)
}
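
Making these helpers generic over `S: BuildHasher` lets callers pass a `HashSet` built with a non-default hasher (a faster, non-DoS-resistant one, say) without converting first. The same generalization applied to a toy function:

    use std::collections::HashSet;
    use std::hash::BuildHasher;

    // Works for HashSet<i32> and for HashSet<i32, AnyOtherHasher> alike.
    fn contains_even<S: BuildHasher>(revs: &HashSet<i32, S>) -> bool {
        revs.iter().any(|r| r % 2 == 0)
    }
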
--- a/rust/hg-core/src/dirstate/dirs_multiset.rs Tue Jul 14 10:25:41 2020 +0200
+++ b/rust/hg-core/src/dirstate/dirs_multiset.rs Mon Jul 20 21:56:27 2020 +0530
@@ -108,7 +108,7 @@
for subpath in files::find_dirs(path.as_ref()) {
match self.inner.entry(subpath.to_owned()) {
Entry::Occupied(mut entry) => {
- let val = entry.get().clone();
+ let val = *entry.get();
if val > 1 {
entry.insert(val - 1);
break;
@@ -137,6 +137,10 @@
pub fn len(&self) -> usize {
self.inner.len()
}
+
+ pub fn is_empty(&self) -> bool {
+ self.len() == 0
+ }
}
/// This is basically a reimplementation of `DirsMultiset` that stores the
@@ -156,7 +160,7 @@
let mut new = Self {
inner: HashMap::default(),
only_include: only_include
- .map(|s| s.iter().map(|p| p.as_ref()).collect()),
+ .map(|s| s.iter().map(AsRef::as_ref).collect()),
};
for path in paths {
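
Two small things in this file: `*entry.get()` copies the counter directly since the value type is `Copy` (the `.clone()` was noise), and the new `is_empty()` pairs with `len()` as clippy expects. The multiset's decrement-or-remove pattern, shown in isolation on a plain `HashMap`:

    use std::collections::hash_map::Entry;
    use std::collections::HashMap;

    fn decrement(counts: &mut HashMap<String, u32>, key: &str) {
        if let Entry::Occupied(mut entry) = counts.entry(key.to_owned()) {
            let val = *entry.get(); // plain copy, no .clone() needed
            if val > 1 {
                entry.insert(val - 1);
            } else {
                entry.remove();
            }
        }
    }
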
--- a/rust/hg-core/src/dirstate/dirstate_map.rs Tue Jul 14 10:25:41 2020 +0200
+++ b/rust/hg-core/src/dirstate/dirstate_map.rs Mon Jul 20 21:56:27 2020 +0530
@@ -223,7 +223,7 @@
self.get_non_normal_other_parent_entries()
.0
.union(&other)
- .map(|e| e.to_owned())
+ .map(ToOwned::to_owned)
.collect()
}
--- a/rust/hg-core/src/dirstate/parsers.rs Tue Jul 14 10:25:41 2020 +0200
+++ b/rust/hg-core/src/dirstate/parsers.rs Mon Jul 20 21:56:27 2020 +0530
@@ -135,7 +135,7 @@
}
let mut new_filename = new_filename.into_vec();
if let Some(copy) = copy_map.get(filename) {
- new_filename.push('\0' as u8);
+ new_filename.push(b'\0');
new_filename.extend(copy.bytes());
}
--- a/rust/hg-core/src/dirstate/status.rs Tue Jul 14 10:25:41 2020 +0200
+++ b/rust/hg-core/src/dirstate/status.rs Mon Jul 20 21:56:27 2020 +0530
@@ -13,6 +13,7 @@
dirstate::SIZE_FROM_OTHER_PARENT,
filepatterns::PatternFileWarning,
matchers::{get_ignore_function, Matcher, VisitChildrenSet},
+ operations::Operation,
utils::{
files::{find_dirs, HgMetadata},
hg_path::{
@@ -69,11 +70,11 @@
BadType(BadType),
}
-/// Marker enum used to dispatch new status entries into the right collections.
+/// Enum used to dispatch new status entries into the right collections.
/// It is similar to `crate::EntryState`, but represents the transient state of
/// entries during the lifetime of a command.
#[derive(Debug, Copy, Clone)]
-enum Dispatch {
+pub enum Dispatch {
Unsure,
Modified,
Added,
@@ -94,10 +95,18 @@
}
type IoResult<T> = std::io::Result<T>;
+
/// `Box<dyn Trait>` is syntactic sugar for `Box<dyn Trait + 'static>`, so add
/// an explicit lifetime here to not fight `'static` bounds "out of nowhere".
type IgnoreFnType<'a> = Box<dyn for<'r> Fn(&'r HgPath) -> bool + Sync + 'a>;
+/// We have a good mix of owned (from directory traversal) and borrowed (from
+/// the dirstate/explicit) paths; this combination comes up a lot.
+pub type HgPathCow<'a> = Cow<'a, HgPath>;
+
+/// A path with its computed `Dispatch` information
+type DispatchedPath<'a> = (HgPathCow<'a>, Dispatch);
+
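
A quick illustration of what the `HgPathCow` alias buys: borrowed and owned paths flow through the same collections, and only the owned ones carry an allocation. Sketch on `str` rather than `HgPath`:

    use std::borrow::Cow;

    fn describe(path: Cow<'_, str>) -> String {
        match path {
            Cow::Borrowed(p) => format!("borrowed: {}", p), // from the caller
            Cow::Owned(p) => format!("owned: {}", p),       // from traversal
        }
    }
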
/// Dates and times that are outside the 31-bit signed range are compared
/// modulo 2^31. This should prevent hg from behaving badly with very large
/// files or corrupt dates while still having a high probability of detecting
@@ -127,7 +136,7 @@
if skip_dot_hg && filename.as_bytes() == b".hg" && file_type.is_dir() {
return Ok(vec![]);
} else {
- results.push((HgPathBuf::from(filename), entry))
+ results.push((filename, entry))
}
}
@@ -164,14 +173,15 @@
(mode ^ st_mode as i32) & 0o100 != 0o000 && options.check_exec;
let metadata_changed = size >= 0 && (size_changed || mode_changed);
let other_parent = size == SIZE_FROM_OTHER_PARENT;
+
if metadata_changed
|| other_parent
|| copy_map.contains_key(filename.as_ref())
{
Dispatch::Modified
- } else if mod_compare(mtime, st_mtime as i32) {
- Dispatch::Unsure
- } else if st_mtime == options.last_normal_time {
+ } else if mod_compare(mtime, st_mtime as i32)
+ || st_mtime == options.last_normal_time
+ {
// the file may have just been marked as normal and
// it may have changed in the same second without
// changing its size. This can happen if we quickly
@@ -213,84 +223,6 @@
};
}
-/// Get stat data about the files explicitly specified by match.
-/// TODO subrepos
-#[timed]
-fn walk_explicit<'a>(
- files: Option<&'a HashSet<&HgPath>>,
- dmap: &'a DirstateMap,
- root_dir: impl AsRef<Path> + Sync + Send + 'a,
- options: StatusOptions,
-) -> impl ParallelIterator<Item = IoResult<(&'a HgPath, Dispatch)>> {
- files
- .unwrap_or(&DEFAULT_WORK)
- .par_iter()
- .map(move |filename| {
- // TODO normalization
- let normalized = filename.as_ref();
-
- let buf = match hg_path_to_path_buf(normalized) {
- Ok(x) => x,
- Err(e) => return Some(Err(e.into())),
- };
- let target = root_dir.as_ref().join(buf);
- let st = target.symlink_metadata();
- let in_dmap = dmap.get(normalized);
- match st {
- Ok(meta) => {
- let file_type = meta.file_type();
- return if file_type.is_file() || file_type.is_symlink() {
- if let Some(entry) = in_dmap {
- return Some(Ok((
- normalized,
- dispatch_found(
- &normalized,
- *entry,
- HgMetadata::from_metadata(meta),
- &dmap.copy_map,
- options,
- ),
- )));
- }
- Some(Ok((normalized, Dispatch::Unknown)))
- } else {
- if file_type.is_dir() {
- Some(Ok((
- normalized,
- Dispatch::Directory {
- was_file: in_dmap.is_some(),
- },
- )))
- } else {
- Some(Ok((
- normalized,
- Dispatch::Bad(BadMatch::BadType(
- // TODO do more than unknown
- // Support for all `BadType` variant
- // varies greatly between platforms.
- // So far, no tests check the type and
- // this should be good enough for most
- // users.
- BadType::Unknown,
- )),
- )))
- }
- };
- }
- Err(_) => {
- if let Some(entry) = in_dmap {
- return Some(Ok((
- normalized,
- dispatch_missing(entry.state),
- )));
- }
- }
- };
- None
- })
- .flatten()
-}
-
#[derive(Debug, Copy, Clone)]
pub struct StatusOptions {
/// Remember the most recent modification timeslot for status, to make
@@ -302,348 +234,614 @@
pub list_clean: bool,
pub list_unknown: bool,
pub list_ignored: bool,
+ /// Whether to collect traversed dirs for applying a callback later.
+ /// Used by `hg purge` for example.
+ pub collect_traversed_dirs: bool,
+}
+
+#[derive(Debug)]
+pub struct DirstateStatus<'a> {
+ pub modified: Vec<HgPathCow<'a>>,
+ pub added: Vec<HgPathCow<'a>>,
+ pub removed: Vec<HgPathCow<'a>>,
+ pub deleted: Vec<HgPathCow<'a>>,
+ pub clean: Vec<HgPathCow<'a>>,
+ pub ignored: Vec<HgPathCow<'a>>,
+ pub unknown: Vec<HgPathCow<'a>>,
+ pub bad: Vec<(HgPathCow<'a>, BadMatch)>,
+ /// Only filled if `collect_traversed_dirs` is `true`
+ pub traversed: Vec<HgPathBuf>,
+}
+
+#[derive(Debug)]
+pub enum StatusError {
+ /// Generic IO error
+ IO(std::io::Error),
+ /// An invalid path that cannot be represented in Mercurial was found
+ Path(HgPathError),
+ /// An invalid "ignore" pattern was found
+ Pattern(PatternError),
+}
+
+pub type StatusResult<T> = Result<T, StatusError>;
+
+impl From<PatternError> for StatusError {
+ fn from(e: PatternError) -> Self {
+ StatusError::Pattern(e)
+ }
+}
+impl From<HgPathError> for StatusError {
+ fn from(e: HgPathError) -> Self {
+ StatusError::Path(e)
+ }
+}
+impl From<std::io::Error> for StatusError {
+ fn from(e: std::io::Error) -> Self {
+ StatusError::IO(e)
+ }
+}
+
+impl ToString for StatusError {
+ fn to_string(&self) -> String {
+ match self {
+ StatusError::IO(e) => e.to_string(),
+ StatusError::Path(e) => e.to_string(),
+ StatusError::Pattern(e) => e.to_string(),
+ }
+ }
+}
+
+/// Gives information about which files are changed in the working directory
+/// and how, compared to the revision we're based on.
+pub struct Status<'a, M: Matcher + Sync> {
+ dmap: &'a DirstateMap,
+ pub(crate) matcher: &'a M,
+ root_dir: PathBuf,
+ pub(crate) options: StatusOptions,
+ ignore_fn: IgnoreFnType<'a>,
}
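
This struct is the heart of the status.rs refactor: free functions that each threaded six-plus parameters (`dmap`, `matcher`, `root_dir`, `options`, two ignore closures) become methods reading shared context from `self`. The shape of that change, reduced to a toy:

    struct Walker {
        root_dir: std::path::PathBuf,
        list_ignored: bool,
    }

    impl Walker {
        // What used to be `fn is_listed(root_dir, options, ignored)`
        // now pulls its context from `self`.
        fn is_listed(&self, ignored: bool) -> bool {
            !ignored || self.list_ignored
        }
    }
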
-/// Dispatch a single entry (file, folder, symlink...) found during `traverse`.
-/// If the entry is a folder that needs to be traversed, it will be handled
-/// in a separate thread.
-fn handle_traversed_entry<'a>(
- scope: &rayon::Scope<'a>,
- files_sender: &'a crossbeam::Sender<IoResult<(HgPathBuf, Dispatch)>>,
- matcher: &'a (impl Matcher + Sync),
- root_dir: impl AsRef<Path> + Sync + Send + Copy + 'a,
- dmap: &'a DirstateMap,
- old_results: &'a FastHashMap<Cow<HgPath>, Dispatch>,
- ignore_fn: &'a IgnoreFnType,
- dir_ignore_fn: &'a IgnoreFnType,
- options: StatusOptions,
- filename: HgPathBuf,
- dir_entry: DirEntry,
-) -> IoResult<()> {
- let file_type = dir_entry.file_type()?;
- let entry_option = dmap.get(&filename);
+impl<'a, M> Status<'a, M>
+where
+ M: Matcher + Sync,
+{
+ pub fn new(
+ dmap: &'a DirstateMap,
+ matcher: &'a M,
+ root_dir: PathBuf,
+ ignore_files: Vec<PathBuf>,
+ options: StatusOptions,
+ ) -> StatusResult<(Self, Vec<PatternFileWarning>)> {
+ // `ignore_fn` is captured by the returned `Status`, so it must
+ // live at least as long.
+
+ let (ignore_fn, warnings): (IgnoreFnType, _) =
+ if options.list_ignored || options.list_unknown {
+ get_ignore_function(ignore_files, &root_dir)?
+ } else {
+ (Box::new(|&_| true), vec![])
+ };
- if filename.as_bytes() == b".hg" {
- // Could be a directory or a symlink
- return Ok(());
+ Ok((
+ Self {
+ dmap,
+ matcher,
+ root_dir,
+ options,
+ ignore_fn,
+ },
+ warnings,
+ ))
+ }
+
+ /// Is the path ignored?
+ pub fn is_ignored(&self, path: impl AsRef<HgPath>) -> bool {
+ (self.ignore_fn)(path.as_ref())
+ }
+
+ /// Is the path or one of its ancestors ignored?
+ pub fn dir_ignore(&self, dir: impl AsRef<HgPath>) -> bool {
+ // Only involve ignore mechanism if we're listing unknowns or ignored.
+ if self.options.list_ignored || self.options.list_unknown {
+ if self.is_ignored(&dir) {
+ true
+ } else {
+ for p in find_dirs(dir.as_ref()) {
+ if self.is_ignored(p) {
+ return true;
+ }
+ }
+ false
+ }
+ } else {
+ true
+ }
}
- if file_type.is_dir() {
- handle_traversed_dir(
- scope,
- files_sender,
- matcher,
- root_dir,
- dmap,
- old_results,
- ignore_fn,
- dir_ignore_fn,
- options,
- entry_option,
- filename,
- );
- } else if file_type.is_file() || file_type.is_symlink() {
- if let Some(entry) = entry_option {
- if matcher.matches_everything() || matcher.matches(&filename) {
- let metadata = dir_entry.metadata()?;
+ /// Get stat data about the files explicitly specified by the matcher.
+ /// Returns a tuple of the directories that need to be traversed and the
+ /// files with their corresponding `Dispatch`.
+ /// TODO subrepos
+ #[timed]
+ pub fn walk_explicit(
+ &self,
+ traversed_sender: crossbeam::Sender<HgPathBuf>,
+ ) -> (Vec<DispatchedPath<'a>>, Vec<DispatchedPath<'a>>) {
+ self.matcher
+ .file_set()
+ .unwrap_or(&DEFAULT_WORK)
+ .par_iter()
+ .map(|&filename| -> Option<IoResult<_>> {
+ // TODO normalization
+ let normalized = filename;
+
+ let buf = match hg_path_to_path_buf(normalized) {
+ Ok(x) => x,
+ Err(e) => return Some(Err(e.into())),
+ };
+ let target = self.root_dir.join(buf);
+ let st = target.symlink_metadata();
+ let in_dmap = self.dmap.get(normalized);
+ match st {
+ Ok(meta) => {
+ let file_type = meta.file_type();
+ return if file_type.is_file() || file_type.is_symlink()
+ {
+ if let Some(entry) = in_dmap {
+ return Some(Ok((
+ Cow::Borrowed(normalized),
+ dispatch_found(
+ &normalized,
+ *entry,
+ HgMetadata::from_metadata(meta),
+ &self.dmap.copy_map,
+ self.options,
+ ),
+ )));
+ }
+ Some(Ok((
+ Cow::Borrowed(normalized),
+ Dispatch::Unknown,
+ )))
+ } else if file_type.is_dir() {
+ if self.options.collect_traversed_dirs {
+ traversed_sender
+ .send(normalized.to_owned())
+ .expect("receiver should outlive sender");
+ }
+ Some(Ok((
+ Cow::Borrowed(normalized),
+ Dispatch::Directory {
+ was_file: in_dmap.is_some(),
+ },
+ )))
+ } else {
+ Some(Ok((
+ Cow::Borrowed(normalized),
+ Dispatch::Bad(BadMatch::BadType(
+ // TODO do more than unknown
+ // Support for all `BadType` variant
+ // varies greatly between platforms.
+ // So far, no tests check the type and
+ // this should be good enough for most
+ // users.
+ BadType::Unknown,
+ )),
+ )))
+ };
+ }
+ Err(_) => {
+ if let Some(entry) = in_dmap {
+ return Some(Ok((
+ Cow::Borrowed(normalized),
+ dispatch_missing(entry.state),
+ )));
+ }
+ }
+ };
+ None
+ })
+ .flatten()
+ .filter_map(Result::ok)
+ .partition(|(_, dispatch)| match dispatch {
+ Dispatch::Directory { .. } => true,
+ _ => false,
+ })
+ }
+
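
`walk_explicit` now splits its output in a single pass with `partition`: directories that still need traversal on one side, already-dispatched files on the other (the real code does this on a rayon parallel iterator). The same split on plain data:

    fn split_dirs(
        paths: Vec<(String, bool)>,
    ) -> (Vec<(String, bool)>, Vec<(String, bool)>) {
        // true = directory entry that still needs traversal
        paths.into_iter().partition(|&(_, is_dir)| is_dir)
    }
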
+ /// Walk the working directory recursively to look for changes compared to
+ /// the current `DirstateMap`.
+ ///
+ /// This takes a mutable reference to the results to account for the
+ /// `extend` in timings
+ #[timed]
+ pub fn traverse(
+ &self,
+ path: impl AsRef<HgPath>,
+ old_results: &FastHashMap<HgPathCow<'a>, Dispatch>,
+ results: &mut Vec<DispatchedPath<'a>>,
+ traversed_sender: crossbeam::Sender<HgPathBuf>,
+ ) -> IoResult<()> {
+ // The traversal is done in parallel, so use a channel to gather
+ // entries. `crossbeam::Sender` is `Sync`, while `mpsc::Sender`
+ // is not.
+ let (files_transmitter, files_receiver) =
+ crossbeam::channel::unbounded();
+
+ self.traverse_dir(
+ &files_transmitter,
+ path,
+ &old_results,
+ traversed_sender,
+ )?;
+
+ // Disconnect the channel so the receiver stops waiting
+ drop(files_transmitter);
+
+ // TODO don't collect. Find a way of replicating the behavior of
+ // `itertools::process_results`, but for `rayon::ParallelIterator`
+ let new_results: IoResult<Vec<(Cow<HgPath>, Dispatch)>> =
+ files_receiver
+ .into_iter()
+ .map(|item| {
+ let (f, d) = item?;
+ Ok((Cow::Owned(f), d))
+ })
+ .collect();
+
+ results.par_extend(new_results?);
+
+ Ok(())
+ }
+
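
The parallel traversal funnels results through a channel because `crossbeam`'s `Sender` is `Sync` where `std::sync::mpsc::Sender` is not, and dropping the sender disconnects the channel so the collecting side terminates. A sketch assuming crossbeam 0.7, which is already among hg-core's dependencies:

    fn gather() -> Vec<u32> {
        let (tx, rx) = crossbeam::channel::unbounded();
        for n in 0..4u32 {
            tx.send(n).expect("receiver should outlive sender");
        }
        drop(tx); // disconnect so `rx` stops waiting
        rx.into_iter().collect()
    }
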
+ /// Dispatch a single entry (file, folder, symlink...) found during
+ /// `traverse`. If the entry is a folder that needs to be traversed, it
+ /// will be handled in a separate thread.
+ fn handle_traversed_entry<'b>(
+ &'a self,
+ scope: &rayon::Scope<'b>,
+ files_sender: &'b crossbeam::Sender<IoResult<(HgPathBuf, Dispatch)>>,
+ old_results: &'a FastHashMap<Cow<HgPath>, Dispatch>,
+ filename: HgPathBuf,
+ dir_entry: DirEntry,
+ traversed_sender: crossbeam::Sender<HgPathBuf>,
+ ) -> IoResult<()>
+ where
+ 'a: 'b,
+ {
+ let file_type = dir_entry.file_type()?;
+ let entry_option = self.dmap.get(&filename);
+
+ if filename.as_bytes() == b".hg" {
+ // Could be a directory or a symlink
+ return Ok(());
+ }
+
+ if file_type.is_dir() {
+ self.handle_traversed_dir(
+ scope,
+ files_sender,
+ old_results,
+ entry_option,
+ filename,
+ traversed_sender,
+ );
+ } else if file_type.is_file() || file_type.is_symlink() {
+ if let Some(entry) = entry_option {
+ if self.matcher.matches_everything()
+ || self.matcher.matches(&filename)
+ {
+ let metadata = dir_entry.metadata()?;
+ files_sender
+ .send(Ok((
+ filename.to_owned(),
+ dispatch_found(
+ &filename,
+ *entry,
+ HgMetadata::from_metadata(metadata),
+ &self.dmap.copy_map,
+ self.options,
+ ),
+ )))
+ .unwrap();
+ }
+ } else if (self.matcher.matches_everything()
+ || self.matcher.matches(&filename))
+ && !self.is_ignored(&filename)
+ {
+ if (self.options.list_ignored
+ || self.matcher.exact_match(&filename))
+ && self.dir_ignore(&filename)
+ {
+ if self.options.list_ignored {
+ files_sender
+ .send(Ok((filename.to_owned(), Dispatch::Ignored)))
+ .unwrap();
+ }
+ } else if self.options.list_unknown {
+ files_sender
+ .send(Ok((filename.to_owned(), Dispatch::Unknown)))
+ .unwrap();
+ }
+ } else if self.is_ignored(&filename) && self.options.list_ignored {
+ files_sender
+ .send(Ok((filename.to_owned(), Dispatch::Ignored)))
+ .unwrap();
+ }
+ } else if let Some(entry) = entry_option {
+ // Used to be a file or a folder, now something else.
+ if self.matcher.matches_everything()
+ || self.matcher.matches(&filename)
+ {
files_sender
.send(Ok((
filename.to_owned(),
- dispatch_found(
- &filename,
- *entry,
- HgMetadata::from_metadata(metadata),
- &dmap.copy_map,
- options,
- ),
- )))
- .unwrap();
- }
- } else if (matcher.matches_everything() || matcher.matches(&filename))
- && !ignore_fn(&filename)
- {
- if (options.list_ignored || matcher.exact_match(&filename))
- && dir_ignore_fn(&filename)
- {
- if options.list_ignored {
- files_sender
- .send(Ok((filename.to_owned(), Dispatch::Ignored)))
- .unwrap();
- }
- } else {
- files_sender
- .send(Ok((filename.to_owned(), Dispatch::Unknown)))
- .unwrap();
- }
- } else if ignore_fn(&filename) && options.list_ignored {
- files_sender
- .send(Ok((filename.to_owned(), Dispatch::Ignored)))
- .unwrap();
- }
- } else if let Some(entry) = entry_option {
- // Used to be a file or a folder, now something else.
- if matcher.matches_everything() || matcher.matches(&filename) {
- files_sender
- .send(Ok((filename.to_owned(), dispatch_missing(entry.state))))
- .unwrap();
- }
- }
-
- Ok(())
-}
-
-/// A directory was found in the filesystem and needs to be traversed
-fn handle_traversed_dir<'a>(
- scope: &rayon::Scope<'a>,
- files_sender: &'a crossbeam::Sender<IoResult<(HgPathBuf, Dispatch)>>,
- matcher: &'a (impl Matcher + Sync),
- root_dir: impl AsRef<Path> + Sync + Send + Copy + 'a,
- dmap: &'a DirstateMap,
- old_results: &'a FastHashMap<Cow<HgPath>, Dispatch>,
- ignore_fn: &'a IgnoreFnType,
- dir_ignore_fn: &'a IgnoreFnType,
- options: StatusOptions,
- entry_option: Option<&'a DirstateEntry>,
- directory: HgPathBuf,
-) {
- scope.spawn(move |_| {
- // Nested `if` until `rust-lang/rust#53668` is stable
- if let Some(entry) = entry_option {
- // Used to be a file, is now a folder
- if matcher.matches_everything() || matcher.matches(&directory) {
- files_sender
- .send(Ok((
- directory.to_owned(),
dispatch_missing(entry.state),
)))
.unwrap();
}
}
- // Do we need to traverse it?
- if !ignore_fn(&directory) || options.list_ignored {
- traverse_dir(
- files_sender,
- matcher,
- root_dir,
- dmap,
- directory,
- &old_results,
- ignore_fn,
- dir_ignore_fn,
- options,
- )
- .unwrap_or_else(|e| files_sender.send(Err(e)).unwrap())
- }
- });
-}
-/// Decides whether the directory needs to be listed, and if so handles the
-/// entries in a separate thread.
-fn traverse_dir<'a>(
- files_sender: &crossbeam::Sender<IoResult<(HgPathBuf, Dispatch)>>,
- matcher: &'a (impl Matcher + Sync),
- root_dir: impl AsRef<Path> + Sync + Send + Copy,
- dmap: &'a DirstateMap,
- directory: impl AsRef<HgPath>,
- old_results: &FastHashMap<Cow<'a, HgPath>, Dispatch>,
- ignore_fn: &IgnoreFnType,
- dir_ignore_fn: &IgnoreFnType,
- options: StatusOptions,
-) -> IoResult<()> {
- let directory = directory.as_ref();
+ Ok(())
+ }
- let visit_entries = match matcher.visit_children_set(directory) {
- VisitChildrenSet::Empty => return Ok(()),
- VisitChildrenSet::This | VisitChildrenSet::Recursive => None,
- VisitChildrenSet::Set(set) => Some(set),
- };
- let buf = hg_path_to_path_buf(directory)?;
- let dir_path = root_dir.as_ref().join(buf);
-
- let skip_dot_hg = !directory.as_bytes().is_empty();
- let entries = match list_directory(dir_path, skip_dot_hg) {
- Err(e) => match e.kind() {
- ErrorKind::NotFound | ErrorKind::PermissionDenied => {
- files_sender
- .send(Ok((
- directory.to_owned(),
- Dispatch::Bad(BadMatch::OsError(
- // Unwrapping here is OK because the error always
- // is a real os error
- e.raw_os_error().unwrap(),
- )),
- )))
- .unwrap();
- return Ok(());
- }
- _ => return Err(e),
- },
- Ok(entries) => entries,
- };
-
- rayon::scope(|scope| -> IoResult<()> {
- for (filename, dir_entry) in entries {
- if let Some(ref set) = visit_entries {
- if !set.contains(filename.deref()) {
- continue;
+ /// A directory was found in the filesystem and needs to be traversed
+ fn handle_traversed_dir<'b>(
+ &'a self,
+ scope: &rayon::Scope<'b>,
+ files_sender: &'b crossbeam::Sender<IoResult<(HgPathBuf, Dispatch)>>,
+ old_results: &'a FastHashMap<Cow<HgPath>, Dispatch>,
+ entry_option: Option<&'a DirstateEntry>,
+ directory: HgPathBuf,
+ traversed_sender: crossbeam::Sender<HgPathBuf>,
+ ) where
+ 'a: 'b,
+ {
+ scope.spawn(move |_| {
+ // Nested `if` until `rust-lang/rust#53668` is stable
+ if let Some(entry) = entry_option {
+ // Used to be a file, is now a folder
+ if self.matcher.matches_everything()
+ || self.matcher.matches(&directory)
+ {
+ files_sender
+ .send(Ok((
+ directory.to_owned(),
+ dispatch_missing(entry.state),
+ )))
+ .unwrap();
}
}
- // TODO normalize
- let filename = if directory.is_empty() {
- filename.to_owned()
+ // Do we need to traverse it?
+ if !self.is_ignored(&directory) || self.options.list_ignored {
+ self.traverse_dir(
+ files_sender,
+ directory,
+ &old_results,
+ traversed_sender,
+ )
+ .unwrap_or_else(|e| files_sender.send(Err(e)).unwrap())
+ }
+ });
+ }
+
+ /// Decides whether the directory needs to be listed, and if so handles the
+ /// entries in a separate thread.
+ fn traverse_dir(
+ &self,
+ files_sender: &crossbeam::Sender<IoResult<(HgPathBuf, Dispatch)>>,
+ directory: impl AsRef<HgPath>,
+ old_results: &FastHashMap<Cow<HgPath>, Dispatch>,
+ traversed_sender: crossbeam::Sender<HgPathBuf>,
+ ) -> IoResult<()> {
+ let directory = directory.as_ref();
+
+ if self.options.collect_traversed_dirs {
+ traversed_sender
+ .send(directory.to_owned())
+ .expect("receiver should outlive sender");
+ }
+
+ let visit_entries = match self.matcher.visit_children_set(directory) {
+ VisitChildrenSet::Empty => return Ok(()),
+ VisitChildrenSet::This | VisitChildrenSet::Recursive => None,
+ VisitChildrenSet::Set(set) => Some(set),
+ };
+ let buf = hg_path_to_path_buf(directory)?;
+ let dir_path = self.root_dir.join(buf);
+
+ let skip_dot_hg = !directory.as_bytes().is_empty();
+ let entries = match list_directory(dir_path, skip_dot_hg) {
+ Err(e) => {
+ return match e.kind() {
+ ErrorKind::NotFound | ErrorKind::PermissionDenied => {
+ files_sender
+ .send(Ok((
+ directory.to_owned(),
+ Dispatch::Bad(BadMatch::OsError(
+ // Unwrapping here is OK because the
+ // error is always a real os error
+ e.raw_os_error().unwrap(),
+ )),
+ )))
+ .expect("receiver should outlive sender");
+ Ok(())
+ }
+ _ => Err(e),
+ };
+ }
+ Ok(entries) => entries,
+ };
+
+ rayon::scope(|scope| -> IoResult<()> {
+ for (filename, dir_entry) in entries {
+ if let Some(ref set) = visit_entries {
+ if !set.contains(filename.deref()) {
+ continue;
+ }
+ }
+ // TODO normalize
+ let filename = if directory.is_empty() {
+ filename.to_owned()
+ } else {
+ directory.join(&filename)
+ };
+
+ if !old_results.contains_key(filename.deref()) {
+ self.handle_traversed_entry(
+ scope,
+ files_sender,
+ old_results,
+ filename,
+ dir_entry,
+ traversed_sender.clone(),
+ )?;
+ }
+ }
+ Ok(())
+ })
+ }
+
+ /// Checks all files that are in the dirstate but were not found during the
+ /// working directory traversal. This means that the rest must
+ /// be either ignored, under a symlink, or under a new nested repo.
+ ///
+ /// This takes a mutable reference to the results to account for the
+ /// `extend` in timings
+ #[timed]
+ pub fn handle_unknowns(
+ &self,
+ results: &mut Vec<DispatchedPath<'a>>,
+ ) -> IoResult<()> {
+ let to_visit: Vec<(&HgPath, &DirstateEntry)> =
+ if results.is_empty() && self.matcher.matches_everything() {
+ self.dmap.iter().map(|(f, e)| (f.deref(), e)).collect()
} else {
- directory.join(&filename)
+ // Only convert to a hashmap if needed.
+ let old_results: FastHashMap<_, _> =
+ results.iter().cloned().collect();
+ self.dmap
+ .iter()
+ .filter_map(move |(f, e)| {
+ if !old_results.contains_key(f.deref())
+ && self.matcher.matches(f)
+ {
+ Some((f.deref(), e))
+ } else {
+ None
+ }
+ })
+ .collect()
};
- if !old_results.contains_key(filename.deref()) {
- handle_traversed_entry(
- scope,
- files_sender,
- matcher,
- root_dir,
- dmap,
- old_results,
- ignore_fn,
- dir_ignore_fn,
- options,
- filename,
- dir_entry,
- )?;
- }
- }
- Ok(())
- })
-}
+ let path_auditor = PathAuditor::new(&self.root_dir);
-/// Walk the working directory recursively to look for changes compared to the
-/// current `DirstateMap`.
-///
-/// This takes a mutable reference to the results to account for the `extend`
-/// in timings
-#[timed]
-fn traverse<'a>(
- matcher: &'a (impl Matcher + Sync),
- root_dir: impl AsRef<Path> + Sync + Send + Copy,
- dmap: &'a DirstateMap,
- path: impl AsRef<HgPath>,
- old_results: &FastHashMap<Cow<'a, HgPath>, Dispatch>,
- ignore_fn: &IgnoreFnType,
- dir_ignore_fn: &IgnoreFnType,
- options: StatusOptions,
- results: &mut Vec<(Cow<'a, HgPath>, Dispatch)>,
-) -> IoResult<()> {
- let root_dir = root_dir.as_ref();
-
- // The traversal is done in parallel, so use a channel to gather entries.
- // `crossbeam::Sender` is `Send`, while `mpsc::Sender` is not.
- let (files_transmitter, files_receiver) = crossbeam::channel::unbounded();
-
- traverse_dir(
- &files_transmitter,
- matcher,
- root_dir,
- &dmap,
- path,
- &old_results,
- &ignore_fn,
- &dir_ignore_fn,
- options,
- )?;
-
- // Disconnect the channel so the receiver stops waiting
- drop(files_transmitter);
-
- // TODO don't collect. Find a way of replicating the behavior of
- // `itertools::process_results`, but for `rayon::ParallelIterator`
- let new_results: IoResult<Vec<(Cow<'a, HgPath>, Dispatch)>> =
- files_receiver
- .into_iter()
- .map(|item| {
- let (f, d) = item?;
- Ok((Cow::Owned(f), d))
+ // TODO don't collect. Find a way of replicating the behavior of
+ // `itertools::process_results`, but for `rayon::ParallelIterator`
+ let new_results: IoResult<Vec<_>> = to_visit
+ .into_par_iter()
+ .filter_map(|(filename, entry)| -> Option<IoResult<_>> {
+ // Report ignored items in the dmap as long as they are not
+ // under a symlink directory.
+ if path_auditor.check(filename) {
+ // TODO normalize for case-insensitive filesystems
+ let buf = match hg_path_to_path_buf(filename) {
+ Ok(x) => x,
+ Err(e) => return Some(Err(e.into())),
+ };
+ Some(Ok((
+ Cow::Borrowed(filename),
+ match self.root_dir.join(&buf).symlink_metadata() {
+ // File was just ignored, no links, and exists
+ Ok(meta) => {
+ let metadata = HgMetadata::from_metadata(meta);
+ dispatch_found(
+ filename,
+ *entry,
+ metadata,
+ &self.dmap.copy_map,
+ self.options,
+ )
+ }
+ // File doesn't exist
+ Err(_) => dispatch_missing(entry.state),
+ },
+ )))
+ } else {
+ // It's either missing or under a symlink directory,
+ // which we report as missing in this case.
+ Some(Ok((
+ Cow::Borrowed(filename),
+ dispatch_missing(entry.state),
+ )))
+ }
})
.collect();
- results.par_extend(new_results?);
+ results.par_extend(new_results?);
- Ok(())
-}
+ Ok(())
+ }
-/// Stat all entries in the `DirstateMap` and mark them for dispatch.
-fn stat_dmap_entries(
- dmap: &DirstateMap,
- root_dir: impl AsRef<Path> + Sync + Send,
- options: StatusOptions,
-) -> impl ParallelIterator<Item = IoResult<(&HgPath, Dispatch)>> {
- dmap.par_iter().map(move |(filename, entry)| {
- let filename: &HgPath = filename;
- let filename_as_path = hg_path_to_path_buf(filename)?;
- let meta = root_dir.as_ref().join(filename_as_path).symlink_metadata();
+ /// Add the files in the dirstate to the results.
+ ///
+ /// This takes a mutable reference to the results to account for the
+ /// `extend` in timings
+ #[timed]
+ pub fn extend_from_dmap(&self, results: &mut Vec<DispatchedPath<'a>>) {
+ results.par_extend(self.dmap.par_iter().flat_map(
+ move |(filename, entry)| {
+ let filename: &HgPath = filename;
+ let filename_as_path = hg_path_to_path_buf(filename)?;
+ let meta =
+ self.root_dir.join(filename_as_path).symlink_metadata();
- match meta {
- Ok(ref m)
- if !(m.file_type().is_file()
- || m.file_type().is_symlink()) =>
- {
- Ok((filename, dispatch_missing(entry.state)))
- }
- Ok(m) => Ok((
- filename,
- dispatch_found(
- filename,
- *entry,
- HgMetadata::from_metadata(m),
- &dmap.copy_map,
- options,
- ),
- )),
- Err(ref e)
- if e.kind() == ErrorKind::NotFound
- || e.raw_os_error() == Some(20) =>
- {
- // Rust does not yet have an `ErrorKind` for
- // `NotADirectory` (errno 20)
- // It happens if the dirstate contains `foo/bar` and
- // foo is not a directory
- Ok((filename, dispatch_missing(entry.state)))
- }
- Err(e) => Err(e),
- }
- })
-}
-
-/// This takes a mutable reference to the results to account for the `extend`
-/// in timings
-#[timed]
-fn extend_from_dmap<'a>(
- dmap: &'a DirstateMap,
- root_dir: impl AsRef<Path> + Sync + Send,
- options: StatusOptions,
- results: &mut Vec<(Cow<'a, HgPath>, Dispatch)>,
-) {
- results.par_extend(
- stat_dmap_entries(dmap, root_dir, options)
- .flatten()
- .map(|(filename, dispatch)| (Cow::Borrowed(filename), dispatch)),
- );
-}
-
-#[derive(Debug)]
-pub struct DirstateStatus<'a> {
- pub modified: Vec<Cow<'a, HgPath>>,
- pub added: Vec<Cow<'a, HgPath>>,
- pub removed: Vec<Cow<'a, HgPath>>,
- pub deleted: Vec<Cow<'a, HgPath>>,
- pub clean: Vec<Cow<'a, HgPath>>,
- pub ignored: Vec<Cow<'a, HgPath>>,
- pub unknown: Vec<Cow<'a, HgPath>>,
- pub bad: Vec<(Cow<'a, HgPath>, BadMatch)>,
+ match meta {
+ Ok(ref m)
+ if !(m.file_type().is_file()
+ || m.file_type().is_symlink()) =>
+ {
+ Ok((
+ Cow::Borrowed(filename),
+ dispatch_missing(entry.state),
+ ))
+ }
+ Ok(m) => Ok((
+ Cow::Borrowed(filename),
+ dispatch_found(
+ filename,
+ *entry,
+ HgMetadata::from_metadata(m),
+ &self.dmap.copy_map,
+ self.options,
+ ),
+ )),
+ Err(ref e)
+ if e.kind() == ErrorKind::NotFound
+ || e.raw_os_error() == Some(20) =>
+ {
+ // Rust does not yet have an `ErrorKind` for
+ // `NotADirectory` (errno 20)
+ // It happens if the dirstate contains `foo/bar`
+ // and foo is not a directory
+ Ok((
+ Cow::Borrowed(filename),
+ dispatch_missing(entry.state),
+ ))
+ }
+ Err(e) => Err(e),
+ }
+ },
+ ));
+ }
}
#[timed]
-fn build_response<'a>(
- results: impl IntoIterator<Item = (Cow<'a, HgPath>, Dispatch)>,
-) -> (Vec<Cow<'a, HgPath>>, DirstateStatus<'a>) {
+pub fn build_response<'a>(
+ results: impl IntoIterator<Item = DispatchedPath<'a>>,
+ traversed: Vec<HgPathBuf>,
+) -> (Vec<HgPathCow<'a>>, DirstateStatus<'a>) {
let mut lookup = vec![];
let mut modified = vec![];
let mut added = vec![];
@@ -681,233 +879,29 @@
ignored,
unknown,
bad,
+ traversed,
},
)
}
-#[derive(Debug)]
-pub enum StatusError {
- IO(std::io::Error),
- Path(HgPathError),
- Pattern(PatternError),
-}
-
-pub type StatusResult<T> = Result<T, StatusError>;
-
-impl From<PatternError> for StatusError {
- fn from(e: PatternError) -> Self {
- StatusError::Pattern(e)
- }
-}
-impl From<HgPathError> for StatusError {
- fn from(e: HgPathError) -> Self {
- StatusError::Path(e)
- }
-}
-impl From<std::io::Error> for StatusError {
- fn from(e: std::io::Error) -> Self {
- StatusError::IO(e)
- }
-}
-
-impl ToString for StatusError {
- fn to_string(&self) -> String {
- match self {
- StatusError::IO(e) => e.to_string(),
- StatusError::Path(e) => e.to_string(),
- StatusError::Pattern(e) => e.to_string(),
- }
- }
-}
-
-/// This takes a mutable reference to the results to account for the `extend`
-/// in timings
-#[timed]
-fn handle_unknowns<'a>(
- dmap: &'a DirstateMap,
- matcher: &(impl Matcher + Sync),
- root_dir: impl AsRef<Path> + Sync + Send + Copy,
- options: StatusOptions,
- results: &mut Vec<(Cow<'a, HgPath>, Dispatch)>,
-) -> IoResult<()> {
- let to_visit: Vec<(&HgPath, &DirstateEntry)> = if results.is_empty()
- && matcher.matches_everything()
- {
- dmap.iter().map(|(f, e)| (f.deref(), e)).collect()
- } else {
- // Only convert to a hashmap if needed.
- let old_results: FastHashMap<_, _> = results.iter().cloned().collect();
- dmap.iter()
- .filter_map(move |(f, e)| {
- if !old_results.contains_key(f.deref()) && matcher.matches(f) {
- Some((f.deref(), e))
- } else {
- None
- }
- })
- .collect()
- };
-
- // We walked all dirs under the roots that weren't ignored, and
- // everything that matched was stat'ed and is already in results.
- // The rest must thus be ignored or under a symlink.
- let path_auditor = PathAuditor::new(root_dir);
-
- // TODO don't collect. Find a way of replicating the behavior of
- // `itertools::process_results`, but for `rayon::ParallelIterator`
- let new_results: IoResult<Vec<_>> = to_visit
- .into_par_iter()
- .filter_map(|(filename, entry)| -> Option<IoResult<_>> {
- // Report ignored items in the dmap as long as they are not
- // under a symlink directory.
- if path_auditor.check(filename) {
- // TODO normalize for case-insensitive filesystems
- let buf = match hg_path_to_path_buf(filename) {
- Ok(x) => x,
- Err(e) => return Some(Err(e.into())),
- };
- Some(Ok((
- Cow::Borrowed(filename),
- match root_dir.as_ref().join(&buf).symlink_metadata() {
- // File was just ignored, no links, and exists
- Ok(meta) => {
- let metadata = HgMetadata::from_metadata(meta);
- dispatch_found(
- filename,
- *entry,
- metadata,
- &dmap.copy_map,
- options,
- )
- }
- // File doesn't exist
- Err(_) => dispatch_missing(entry.state),
- },
- )))
- } else {
- // It's either missing or under a symlink directory which
- // we, in this case, report as missing.
- Some(Ok((
- Cow::Borrowed(filename),
- dispatch_missing(entry.state),
- )))
- }
- })
- .collect();
-
- results.par_extend(new_results?);
-
- Ok(())
-}
-
/// Get the status of files in the working directory.
///
/// This is the current entry-point for `hg-core` and is realistically unusable
/// outside of a Python context because its arguments need to provide a lot of
/// information that will not be necessary in the future.
#[timed]
-pub fn status<'a: 'c, 'b: 'c, 'c>(
+pub fn status<'a>(
dmap: &'a DirstateMap,
- matcher: &'b (impl Matcher + Sync),
- root_dir: impl AsRef<Path> + Sync + Send + Copy + 'c,
+ matcher: &'a (impl Matcher + Sync),
+ root_dir: PathBuf,
ignore_files: Vec<PathBuf>,
options: StatusOptions,
) -> StatusResult<(
- (Vec<Cow<'c, HgPath>>, DirstateStatus<'c>),
+ (Vec<HgPathCow<'a>>, DirstateStatus<'a>),
Vec<PatternFileWarning>,
)> {
- // Needs to outlive `dir_ignore_fn` since it's captured.
- let mut ignore_fn: IgnoreFnType;
-
- // Only involve real ignore mechanism if we're listing unknowns or ignored.
- let (dir_ignore_fn, warnings): (IgnoreFnType, _) = if options.list_ignored
- || options.list_unknown
- {
- let (ignore, warnings) = get_ignore_function(ignore_files, root_dir)?;
-
- ignore_fn = ignore;
- let dir_ignore_fn = Box::new(|dir: &_| {
- // Is the path or one of its ancestors ignored?
- if ignore_fn(dir) {
- true
- } else {
- for p in find_dirs(dir) {
- if ignore_fn(p) {
- return true;
- }
- }
- false
- }
- });
- (dir_ignore_fn, warnings)
- } else {
- ignore_fn = Box::new(|&_| true);
- (Box::new(|&_| true), vec![])
- };
-
- let files = matcher.file_set();
-
- // Step 1: check the files explicitly mentioned by the user
- let explicit = walk_explicit(files, &dmap, root_dir, options);
-
- // Collect results into a `Vec` because we do very few lookups in most
- // cases.
- let (work, mut results): (Vec<_>, Vec<_>) = explicit
- .filter_map(Result::ok)
- .map(|(filename, dispatch)| (Cow::Borrowed(filename), dispatch))
- .partition(|(_, dispatch)| match dispatch {
- Dispatch::Directory { .. } => true,
- _ => false,
- });
+ let (status, warnings) =
+ Status::new(dmap, matcher, root_dir, ignore_files, options)?;
- if !work.is_empty() {
- // Hashmaps are quite a bit slower to build than vecs, so only build it
- // if needed.
- let old_results = results.iter().cloned().collect();
-
- // Step 2: recursively check the working directory for changes if
- // needed
- for (dir, dispatch) in work {
- match dispatch {
- Dispatch::Directory { was_file } => {
- if was_file {
- results.push((dir.to_owned(), Dispatch::Removed));
- }
- if options.list_ignored
- || options.list_unknown && !dir_ignore_fn(&dir)
- {
- traverse(
- matcher,
- root_dir,
- &dmap,
- &dir,
- &old_results,
- &ignore_fn,
- &dir_ignore_fn,
- options,
- &mut results,
- )?;
- }
- }
- _ => unreachable!("There can only be directories in `work`"),
- }
- }
- }
-
- if !matcher.is_exact() {
- // Step 3: Check the remaining files from the dmap.
- // If a dmap file is not in results yet, it was either
- // a) not matched b) ignored, c) missing, or d) under a
- // symlink directory.
-
- if options.list_unknown {
- handle_unknowns(dmap, matcher, root_dir, options, &mut results)?;
- } else {
- // We may not have walked the full directory tree above, so stat
- // and check everything we missed.
- extend_from_dmap(&dmap, root_dir, options, &mut results);
- }
- }
-
- Ok((build_response(results), warnings))
+ Ok((status.run()?, warnings))
}
--- a/rust/hg-core/src/discovery.rs Tue Jul 14 10:25:41 2020 +0200
+++ b/rust/hg-core/src/discovery.rs Mon Jul 20 21:56:27 2020 +0530
@@ -181,8 +181,8 @@
common: MissingAncestors::new(graph, vec![]),
missing: HashSet::new(),
rng: Rng::from_seed(seed),
- respect_size: respect_size,
- randomize: randomize,
+ respect_size,
+ randomize,
}
}
@@ -284,7 +284,7 @@
/// Did we acquire full knowledge of our Revisions that the peer has?
pub fn is_complete(&self) -> bool {
- self.undecided.as_ref().map_or(false, |s| s.is_empty())
+ self.undecided.as_ref().map_or(false, HashSet::is_empty)
}
/// Return the heads of the currently known common set of revisions.
@@ -332,7 +332,7 @@
FastHashMap::default();
for &rev in self.undecided.as_ref().unwrap() {
for p in ParentsIterator::graph_parents(&self.graph, rev)? {
- children.entry(p).or_insert_with(|| Vec::new()).push(rev);
+ children.entry(p).or_insert_with(Vec::new).push(rev);
}
}
self.children_cache = Some(children);
@@ -342,7 +342,7 @@
/// Provide statistics about the current state of the discovery process
pub fn stats(&self) -> DiscoveryStats {
DiscoveryStats {
- undecided: self.undecided.as_ref().map(|s| s.len()),
+ undecided: self.undecided.as_ref().map(HashSet::len),
}
}
--- a/rust/hg-core/src/filepatterns.rs Tue Jul 14 10:25:41 2020 +0200
+++ b/rust/hg-core/src/filepatterns.rs Mon Jul 20 21:56:27 2020 +0530
@@ -176,14 +176,15 @@
return vec![];
}
match syntax {
- // The `regex` crate adds `.*` to the start and end of expressions
- // if there are no anchors, so add them.
- PatternSyntax::Regexp => [b"^", &pattern[..], b"$"].concat(),
+ PatternSyntax::Regexp => pattern.to_owned(),
PatternSyntax::RelRegexp => {
// The `regex` crate accepts `**` while `re2` and Python's `re`
            // do not. Checking for `*` correctly triggers the same error in
            // all engines.
- if pattern[0] == b'^' || pattern[0] == b'*' {
+ if pattern[0] == b'^'
+ || pattern[0] == b'*'
+ || pattern.starts_with(b".*")
+ {
return pattern.to_owned();
}
[&b".*"[..], pattern].concat()
@@ -196,15 +197,14 @@
}
PatternSyntax::RootFiles => {
let mut res = if pattern == b"." {
- vec![b'^']
+ vec![]
} else {
// Pattern is a directory name.
- [b"^", escape_pattern(pattern).as_slice(), b"/"].concat()
+ [escape_pattern(pattern).as_slice(), b"/"].concat()
};
// Anything after the pattern must be a non-directory.
res.extend(b"[^/]+$");
- res.push(b'$');
res
}
PatternSyntax::RelGlob => {
@@ -216,7 +216,7 @@
}
}
PatternSyntax::Glob | PatternSyntax::RootGlob => {
- [b"^", glob_to_re(pattern).as_slice(), GLOB_SUFFIX].concat()
+ [glob_to_re(pattern).as_slice(), GLOB_SUFFIX].concat()
}
PatternSyntax::Include | PatternSyntax::SubInclude => unreachable!(),
}
@@ -271,7 +271,7 @@
/// that don't need to be transformed into a regex.
pub fn build_single_regex(
entry: &IgnorePattern,
-) -> Result<Vec<u8>, PatternError> {
+) -> Result<Option<Vec<u8>>, PatternError> {
let IgnorePattern {
pattern, syntax, ..
} = entry;
@@ -288,16 +288,11 @@
if *syntax == PatternSyntax::RootGlob
&& !pattern.iter().any(|b| GLOB_SPECIAL_CHARACTERS.contains(b))
{
- // The `regex` crate adds `.*` to the start and end of expressions
- // if there are no anchors, so add the start anchor.
- let mut escaped = vec![b'^'];
- escaped.extend(escape_pattern(&pattern));
- escaped.extend(GLOB_SUFFIX);
- Ok(escaped)
+ Ok(None)
} else {
let mut entry = entry.clone();
entry.pattern = pattern;
- Ok(_build_single_regex(&entry))
+ Ok(Some(_build_single_regex(&entry)))
}
}
@@ -329,6 +324,8 @@
warn: bool,
) -> Result<(Vec<IgnorePattern>, Vec<PatternFileWarning>), PatternError> {
let comment_regex = Regex::new(r"((?:^|[^\\])(?:\\\\)*)#.*").unwrap();
+
+ #[allow(clippy::trivial_regex)]
let comment_escape_regex = Regex::new(r"\\#").unwrap();
let mut inputs: Vec<IgnorePattern> = vec![];
let mut warnings: Vec<PatternFileWarning> = vec![];
@@ -463,9 +460,7 @@
.into_iter()
.flat_map(|entry| -> PatternResult<_> {
let IgnorePattern {
- syntax,
- pattern,
- source: _,
+ syntax, pattern, ..
} = &entry;
Ok(match syntax {
PatternSyntax::Include => {
@@ -509,10 +504,11 @@
normalize_path_bytes(&get_bytes_from_path(source));
let source_root = get_path_from_bytes(&normalized_source);
- let source_root = source_root.parent().unwrap_or(source_root.deref());
+ let source_root =
+ source_root.parent().unwrap_or_else(|| source_root.deref());
let path = source_root.join(get_path_from_bytes(pattern));
- let new_root = path.parent().unwrap_or(path.deref());
+ let new_root = path.parent().unwrap_or_else(|| path.deref());
let prefix = canonical_path(&root_dir, &root_dir, new_root)?;
@@ -628,7 +624,16 @@
Path::new("")
))
.unwrap(),
- br"(?:.*/)?rust/target(?:/|$)".to_vec(),
+ Some(br"(?:.*/)?rust/target(?:/|$)".to_vec()),
+ );
+ assert_eq!(
+ build_single_regex(&IgnorePattern::new(
+ PatternSyntax::Regexp,
+ br"rust/target/\d+",
+ Path::new("")
+ ))
+ .unwrap(),
+ Some(br"rust/target/\d+".to_vec()),
);
}
@@ -641,7 +646,7 @@
Path::new("")
))
.unwrap(),
- br"^\.(?:/|$)".to_vec(),
+ None,
);
assert_eq!(
build_single_regex(&IgnorePattern::new(
@@ -650,7 +655,7 @@
Path::new("")
))
.unwrap(),
- br"^whatever(?:/|$)".to_vec(),
+ None,
);
assert_eq!(
build_single_regex(&IgnorePattern::new(
@@ -659,7 +664,7 @@
Path::new("")
))
.unwrap(),
- br"^[^/]*\.o(?:/|$)".to_vec(),
+ Some(br"[^/]*\.o(?:/|$)".to_vec()),
);
}
}
--- a/rust/hg-core/src/lib.rs Tue Jul 14 10:25:41 2020 +0200
+++ b/rust/hg-core/src/lib.rs Mon Jul 20 21:56:27 2020 +0530
@@ -23,8 +23,7 @@
pub mod matchers;
pub mod revlog;
pub use revlog::*;
-#[cfg(feature = "with-re2")]
-pub mod re2;
+pub mod operations;
pub mod utils;
// Remove this to see (potential) non-artificial compile failures. MacOS
@@ -141,9 +140,6 @@
/// Needed a pattern that can be turned into a regex but got one that
/// can't. This should only happen through programmer error.
NonRegexPattern(IgnorePattern),
- /// This is temporary, see `re2/mod.rs`.
- /// This will cause a fallback to Python.
- Re2NotInstalled,
}
impl ToString for PatternError {
@@ -166,10 +162,6 @@
PatternError::NonRegexPattern(pattern) => {
format!("'{:?}' cannot be turned into a regex", pattern)
}
- PatternError::Re2NotInstalled => {
- "Re2 is not installed, cannot use regex functionality."
- .to_string()
- }
}
}
}
--- a/rust/hg-core/src/matchers.rs Tue Jul 14 10:25:41 2020 +0200
+++ b/rust/hg-core/src/matchers.rs Mon Jul 20 21:56:27 2020 +0530
@@ -7,8 +7,6 @@
//! Structs and types for matching files and directories.
-#[cfg(feature = "with-re2")]
-use crate::re2::Re2;
use crate::{
dirstate::dirs_multiset::DirsChildrenMultiset,
filepatterns::{
@@ -24,6 +22,7 @@
PatternSyntax,
};
+use crate::filepatterns::normalize_path_bytes;
use std::borrow::ToOwned;
use std::collections::HashSet;
use std::fmt::{Display, Error, Formatter};
@@ -31,6 +30,8 @@
use std::ops::Deref;
use std::path::{Path, PathBuf};
+use micro_timer::timed;
+
#[derive(Debug, PartialEq)]
pub enum VisitChildrenSet<'a> {
/// Don't visit anything
@@ -163,7 +164,7 @@
files: &'a [impl AsRef<HgPath>],
) -> Result<Self, DirstateMapError> {
Ok(Self {
- files: HashSet::from_iter(files.iter().map(|f| f.as_ref())),
+ files: HashSet::from_iter(files.iter().map(AsRef::as_ref)),
dirs: DirsMultiset::from_manifest(files)?,
})
}
@@ -189,10 +190,10 @@
if self.files.is_empty() || !self.dirs.contains(&directory) {
return VisitChildrenSet::Empty;
}
- let dirs_as_set = self.dirs.iter().map(|k| k.deref()).collect();
+ let dirs_as_set = self.dirs.iter().map(Deref::deref).collect();
let mut candidates: HashSet<&HgPath> =
- self.files.union(&dirs_as_set).map(|k| *k).collect();
+ self.files.union(&dirs_as_set).cloned().collect();
candidates.remove(HgPath::new(b""));
if !directory.as_ref().is_empty() {
@@ -236,29 +237,24 @@
}
/// Matches files that are included in the ignore rules.
-#[cfg_attr(
- feature = "with-re2",
- doc = r##"
-```
-use hg::{
- matchers::{IncludeMatcher, Matcher},
- IgnorePattern,
- PatternSyntax,
- utils::hg_path::HgPath
-};
-use std::path::Path;
-///
-let ignore_patterns =
-vec![IgnorePattern::new(PatternSyntax::RootGlob, b"this*", Path::new(""))];
-let (matcher, _) = IncludeMatcher::new(ignore_patterns, "").unwrap();
-///
-assert_eq!(matcher.matches(HgPath::new(b"testing")), false);
-assert_eq!(matcher.matches(HgPath::new(b"this should work")), true);
-assert_eq!(matcher.matches(HgPath::new(b"this also")), true);
-assert_eq!(matcher.matches(HgPath::new(b"but not this")), false);
-```
-"##
-)]
+/// ```
+/// use hg::{
+/// matchers::{IncludeMatcher, Matcher},
+/// IgnorePattern,
+/// PatternSyntax,
+/// utils::hg_path::HgPath
+/// };
+/// use std::path::Path;
+/// ///
+/// let ignore_patterns =
+/// vec![IgnorePattern::new(PatternSyntax::RootGlob, b"this*", Path::new(""))];
+/// let (matcher, _) = IncludeMatcher::new(ignore_patterns, "").unwrap();
+/// ///
+/// assert_eq!(matcher.matches(HgPath::new(b"testing")), false);
+/// assert_eq!(matcher.matches(HgPath::new(b"this should work")), true);
+/// assert_eq!(matcher.matches(HgPath::new(b"this also")), true);
+/// assert_eq!(matcher.matches(HgPath::new(b"but not this")), false);
+/// ```
pub struct IncludeMatcher<'a> {
patterns: Vec<u8>,
match_fn: Box<dyn for<'r> Fn(&'r HgPath) -> bool + 'a + Sync>,
@@ -316,33 +312,21 @@
}
}
-#[cfg(feature = "with-re2")]
-/// Returns a function that matches an `HgPath` against the given regex
-/// pattern.
-///
-/// This can fail when the pattern is invalid or not supported by the
-/// underlying engine `Re2`, for instance anything with back-references.
-fn re_matcher(
- pattern: &[u8],
-) -> PatternResult<impl Fn(&HgPath) -> bool + Sync> {
- let regex = Re2::new(pattern);
- let regex = regex.map_err(|e| PatternError::UnsupportedSyntax(e))?;
- Ok(move |path: &HgPath| regex.is_match(path.as_bytes()))
-}
-
-#[cfg(not(feature = "with-re2"))]
/// Returns a function that matches an `HgPath` against the given regex
/// pattern.
///
/// This can fail when the pattern is invalid or not supported by the
/// underlying engine (the `regex` crate), for instance anything with
/// back-references.
+#[timed]
fn re_matcher(
pattern: &[u8],
) -> PatternResult<impl Fn(&HgPath) -> bool + Sync> {
use std::io::Write;
- let mut escaped_bytes = vec![];
+    // The `regex` crate matches an unanchored pattern anywhere in the
+    // haystack (as if it were surrounded by `.*`), so add an explicit
+    // start anchor.
+ let mut escaped_bytes = vec![b'^', b'(', b'?', b':'];
for byte in pattern {
if *byte > 127 {
write!(escaped_bytes, "\\x{:x}", *byte).unwrap();
@@ -350,6 +334,7 @@
escaped_bytes.push(*byte);
}
}
+ escaped_bytes.push(b')');
// Avoid the cost of UTF8 checking
//
@@ -373,15 +358,32 @@
fn build_regex_match<'a>(
ignore_patterns: &'a [&'a IgnorePattern],
) -> PatternResult<(Vec<u8>, Box<dyn Fn(&HgPath) -> bool + Sync>)> {
- let regexps: Result<Vec<_>, PatternError> = ignore_patterns
- .into_iter()
- .map(|k| build_single_regex(*k))
- .collect();
- let regexps = regexps?;
+ let mut regexps = vec![];
+ let mut exact_set = HashSet::new();
+
+ for pattern in ignore_patterns {
+ if let Some(re) = build_single_regex(pattern)? {
+ regexps.push(re);
+ } else {
+ let exact = normalize_path_bytes(&pattern.pattern);
+ exact_set.insert(HgPathBuf::from_bytes(&exact));
+ }
+ }
+
let full_regex = regexps.join(&b'|');
- let matcher = re_matcher(&full_regex)?;
- let func = Box::new(move |filename: &HgPath| matcher(filename));
+ // An empty pattern would cause the regex engine to incorrectly match the
+ // (empty) root directory
+    let func = if !regexps.is_empty() {
+ let matcher = re_matcher(&full_regex)?;
+ let func = move |filename: &HgPath| {
+ exact_set.contains(filename) || matcher(filename)
+ };
+ Box::new(func) as Box<dyn Fn(&HgPath) -> bool + Sync>
+ } else {
+ let func = move |filename: &HgPath| exact_set.contains(filename);
+ Box::new(func) as Box<dyn Fn(&HgPath) -> bool + Sync>
+ };
Ok((full_regex, func))
}
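
The guard on `regexps.is_empty()` above exists because joining zero patterns
with `|` yields the empty pattern, and an empty regex matches every haystack,
including the empty (root) path. A standalone sketch of the failure mode
being avoided:

    use regex::bytes::Regex;

    fn main() {
        // The empty pattern matches at offset 0 of any input, so without
        // the guard the matcher would report every path, including the
        // empty root path, as included.
        let empty = Regex::new("").unwrap();
        assert!(empty.is_match(b""));
        assert!(empty.is_match(b"any/path"));
    }
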
@@ -468,7 +470,7 @@
_ => unreachable!(),
})?
.iter()
- .map(|k| k.to_owned()),
+ .map(ToOwned::to_owned),
);
parents.extend(
DirsMultiset::from_manifest(&roots)
@@ -477,7 +479,7 @@
_ => unreachable!(),
})?
.iter()
- .map(|k| k.to_owned()),
+ .map(ToOwned::to_owned),
);
Ok(RootsDirsAndParents {
@@ -521,7 +523,7 @@
let match_subinclude = move |filename: &HgPath| {
for prefix in prefixes.iter() {
if let Some(rel) = filename.relative_to(prefix) {
- if (submatchers.get(prefix).unwrap())(rel) {
+ if (submatchers[prefix])(rel) {
return true;
}
}
@@ -652,6 +654,12 @@
impl<'a> Display for IncludeMatcher<'a> {
fn fmt(&self, f: &mut Formatter<'_>) -> Result<(), Error> {
+ // XXX What about exact matches?
+ // I'm not sure it's worth it to clone the HashSet and keep it
+ // around just in case someone wants to display the matcher, plus
+ // it's going to be unreadable after a few entries, but we need to
+ // inform in this display that exact matches are being used and are
+ // (on purpose) missing from the `includes`.
write!(
f,
"IncludeMatcher(includes='{}')",
@@ -813,7 +821,6 @@
);
}
- #[cfg(feature = "with-re2")]
#[test]
fn test_includematcher() {
// VisitchildrensetPrefix
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/rust/hg-core/src/operations/dirstate_status.rs Mon Jul 20 21:56:27 2020 +0530
@@ -0,0 +1,76 @@
+// dirstate_status.rs
+//
+// Copyright 2019, Raphaël Gomès <rgomes@octobus.net>
+//
+// This software may be used and distributed according to the terms of the
+// GNU General Public License version 2 or any later version.
+
+use crate::dirstate::status::{build_response, Dispatch, HgPathCow, Status};
+use crate::matchers::Matcher;
+use crate::operations::Operation;
+use crate::{DirstateStatus, StatusError};
+
+/// A tuple of the paths that need to be checked in the filelog because it's
+/// ambiguous whether they've changed, and the rest of the already dispatched
+/// files.
+pub type LookupAndStatus<'a> = (Vec<HgPathCow<'a>>, DirstateStatus<'a>);
+
+impl<'a, M: Matcher + Sync> Operation<LookupAndStatus<'a>> for Status<'a, M> {
+ type Error = StatusError;
+
+ fn run(&self) -> Result<LookupAndStatus<'a>, Self::Error> {
+ let (traversed_sender, traversed_receiver) =
+ crossbeam::channel::unbounded();
+
+ // Step 1: check the files explicitly mentioned by the user
+ let (work, mut results) = self.walk_explicit(traversed_sender.clone());
+
+ if !work.is_empty() {
+ // Hashmaps are quite a bit slower to build than vecs, so only
+ // build it if needed.
+ let old_results = results.iter().cloned().collect();
+
+ // Step 2: recursively check the working directory for changes if
+ // needed
+ for (dir, dispatch) in work {
+ match dispatch {
+ Dispatch::Directory { was_file } => {
+ if was_file {
+ results.push((dir.to_owned(), Dispatch::Removed));
+ }
+ if self.options.list_ignored
+ || self.options.list_unknown
+ && !self.dir_ignore(&dir)
+ {
+ self.traverse(
+ &dir,
+ &old_results,
+ &mut results,
+ traversed_sender.clone(),
+ )?;
+ }
+ }
+ _ => {
+ unreachable!("There can only be directories in `work`")
+ }
+ }
+ }
+ }
+
+ if !self.matcher.is_exact() {
+ if self.options.list_unknown {
+ self.handle_unknowns(&mut results)?;
+ } else {
+ // TODO this is incorrect, see issue6335
+ // This requires a fix in both Python and Rust that can happen
+ // with other pending changes to `status`.
+ self.extend_from_dmap(&mut results);
+ }
+ }
+
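+        // Drop the last sender so the receiver's iterator terminates once
+        // the buffered messages have been drained.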
+ drop(traversed_sender);
+ let traversed = traversed_receiver.into_iter().collect();
+
+ Ok(build_response(results, traversed))
+ }
+}
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/rust/hg-core/src/operations/find_root.rs Mon Jul 20 21:56:27 2020 +0530
@@ -0,0 +1,124 @@
+use super::Operation;
+use std::fmt;
+use std::path::{Path, PathBuf};
+
+/// Kind of error encountered by FindRoot
+#[derive(Debug)]
+pub enum FindRootErrorKind {
+ /// Root of the repository has not been found
+ /// Contains the current directory used by FindRoot
+ RootNotFound(PathBuf),
+    /// The current directory does not exist or permissions are insufficient
+ /// to get access to it
+ GetCurrentDirError(std::io::Error),
+}
+
+/// A FindRoot error
+#[derive(Debug)]
+pub struct FindRootError {
+    /// Kind of error encountered by FindRoot
+ pub kind: FindRootErrorKind,
+}
+
+impl std::error::Error for FindRootError {}
+
+impl fmt::Display for FindRootError {
+ fn fmt(&self, _f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ unimplemented!()
+ }
+}
+
+/// Find the root of the repository by searching for a `.hg` directory
+/// in the current directory and its ancestors.
+pub struct FindRoot<'a> {
+ current_dir: Option<&'a Path>,
+}
+
+impl<'a> FindRoot<'a> {
+ pub fn new() -> Self {
+ Self { current_dir: None }
+ }
+
+ pub fn new_from_path(current_dir: &'a Path) -> Self {
+ Self {
+ current_dir: Some(current_dir),
+ }
+ }
+}
+
+impl<'a> Operation<PathBuf> for FindRoot<'a> {
+ type Error = FindRootError;
+
+ fn run(&self) -> Result<PathBuf, Self::Error> {
+ let current_dir = match self.current_dir {
+            None => std::env::current_dir().map_err(|e| FindRootError {
+                kind: FindRootErrorKind::GetCurrentDirError(e),
+            })?,
+ Some(path) => path.into(),
+ };
+
+ if current_dir.join(".hg").exists() {
+ return Ok(current_dir.into());
+ }
+        for ancestor in current_dir.ancestors() {
+            if ancestor.join(".hg").exists() {
+                return Ok(ancestor.into());
+            }
+        }
+ Err(FindRootError {
+ kind: FindRootErrorKind::RootNotFound(current_dir.to_path_buf()),
+ })
+ }
+}
+
+#[cfg(test)]
+mod tests {
+ use super::*;
+ use std::fs;
+ use tempfile;
+
+ #[test]
+ fn dot_hg_not_found() {
+ let tmp_dir = tempfile::tempdir().unwrap();
+ let path = tmp_dir.path();
+
+ let err = FindRoot::new_from_path(&path).run().unwrap_err();
+
+ // TODO do something better
+ assert!(match err {
+ FindRootError { kind } => match kind {
+ FindRootErrorKind::RootNotFound(p) => p == path.to_path_buf(),
+ _ => false,
+ },
+ })
+ }
+
+ #[test]
+ fn dot_hg_in_current_path() {
+ let tmp_dir = tempfile::tempdir().unwrap();
+ let root = tmp_dir.path();
+ fs::create_dir_all(root.join(".hg")).unwrap();
+
+ let result = FindRoot::new_from_path(&root).run().unwrap();
+
+ assert_eq!(result, root)
+ }
+
+ #[test]
+ fn dot_hg_in_parent() {
+ let tmp_dir = tempfile::tempdir().unwrap();
+ let root = tmp_dir.path();
+ fs::create_dir_all(root.join(".hg")).unwrap();
+
+ let result =
+ FindRoot::new_from_path(&root.join("some/nested/directory"))
+ .run()
+ .unwrap();
+
+ assert_eq!(result, root)
+ }
+}
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/rust/hg-core/src/operations/mod.rs Mon Jul 20 21:56:27 2020 +0530
@@ -0,0 +1,13 @@
+mod dirstate_status;
+mod find_root;
+pub use find_root::{FindRoot, FindRootError, FindRootErrorKind};
+
+/// An interface for high-level hg operations.
+///
+/// A distinction is made between operations and commands.
+/// An operation is what can be done, whereas a command is what is exposed by
+/// the CLI. A single command can use several operations to achieve its goal.
+pub trait Operation<T> {
+ type Error;
+ fn run(&self) -> Result<T, Self::Error>;
+}
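
A minimal sketch of an implementor, assuming the trait is consumed from the
`hg` crate as exported above (the operation itself is hypothetical):

    use hg::operations::Operation;

    /// A toy operation that always succeeds with a constant value.
    struct HelloOperation;

    impl Operation<String> for HelloOperation {
        // A real operation would use a meaningful error type here.
        type Error = ();

        fn run(&self) -> Result<String, Self::Error> {
            Ok(String::from("hello from an operation"))
        }
    }

Keeping `run` as the single entry point lets a command compose several
operations without caring how each one gathers its data.
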
--- a/rust/hg-core/src/re2/mod.rs Tue Jul 14 10:25:41 2020 +0200
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,21 +0,0 @@
-/// re2 module
-///
-/// The Python implementation of Mercurial uses the Re2 regex engine when
-/// possible and if the bindings are installed, falling back to Python's `re`
-/// in case of unsupported syntax (Re2 is a non-backtracking engine).
-///
-/// Using it from Rust is not ideal. We need C++ bindings, a C++ compiler,
-/// Re2 needs to be installed... why not just use the `regex` crate?
-///
-/// Using Re2 from the Rust implementation guarantees backwards compatibility.
-/// We know it will work out of the box without needing to figure out the
-/// subtle differences in syntax. For example, `regex` currently does not
-/// support empty alternations (regex like `a||b`) which happens more often
-/// than we might think. Old benchmarks also showed worse performance from
-/// regex than with Re2, but the methodology and results were lost, so take
-/// this with a grain of salt.
-///
-/// The idea is to use Re2 for now as a temporary phase and then investigate
-/// how much work would be needed to use `regex`.
-mod re2;
-pub use re2::Re2;
--- a/rust/hg-core/src/re2/re2.rs Tue Jul 14 10:25:41 2020 +0200
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,66 +0,0 @@
-/*
-re2.rs
-
-Rust FFI bindings to Re2.
-
-Copyright 2020 Valentin Gatien-Baron
-
-This software may be used and distributed according to the terms of the
-GNU General Public License version 2 or any later version.
-*/
-use libc::{c_int, c_void};
-
-type Re2Ptr = *const c_void;
-
-pub struct Re2(Re2Ptr);
-
-/// `re2.h` says:
-/// "An "RE2" object is safe for concurrent use by multiple threads."
-unsafe impl Sync for Re2 {}
-
-/// These bind to the C ABI in `rust_re2.cpp`.
-extern "C" {
- fn rust_re2_create(data: *const u8, len: usize) -> Re2Ptr;
- fn rust_re2_destroy(re2: Re2Ptr);
- fn rust_re2_ok(re2: Re2Ptr) -> bool;
- fn rust_re2_error(
- re2: Re2Ptr,
- outdata: *mut *const u8,
- outlen: *mut usize,
- ) -> bool;
- fn rust_re2_match(
- re2: Re2Ptr,
- data: *const u8,
- len: usize,
- anchor: c_int,
- ) -> bool;
-}
-
-impl Re2 {
- pub fn new(pattern: &[u8]) -> Result<Re2, String> {
- unsafe {
- let re2 = rust_re2_create(pattern.as_ptr(), pattern.len());
- if rust_re2_ok(re2) {
- Ok(Re2(re2))
- } else {
- let mut data: *const u8 = std::ptr::null();
- let mut len: usize = 0;
- rust_re2_error(re2, &mut data, &mut len);
- Err(String::from_utf8_lossy(std::slice::from_raw_parts(
- data, len,
- ))
- .to_string())
- }
- }
- }
-
- pub fn is_match(&self, data: &[u8]) -> bool {
- unsafe { rust_re2_match(self.0, data.as_ptr(), data.len(), 1) }
- }
-}
-
-impl Drop for Re2 {
- fn drop(&mut self) {
- unsafe { rust_re2_destroy(self.0) }
- }
-}
--- a/rust/hg-core/src/re2/rust_re2.cpp Tue Jul 14 10:25:41 2020 +0200
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,49 +0,0 @@
-/*
-rust_re2.cpp
-
-C ABI export of Re2's C++ interface for Rust FFI.
-
-Copyright 2020 Valentin Gatien-Baron
-
-This software may be used and distributed according to the terms of the
-GNU General Public License version 2 or any later version.
-*/
-
-#include <re2/re2.h>
-using namespace re2;
-
-extern "C" {
- RE2* rust_re2_create(const char* data, size_t len) {
- RE2::Options o;
- o.set_encoding(RE2::Options::Encoding::EncodingLatin1);
- o.set_log_errors(false);
- o.set_max_mem(50000000);
-
- return new RE2(StringPiece(data, len), o);
- }
-
- void rust_re2_destroy(RE2* re) {
- delete re;
- }
-
- bool rust_re2_ok(RE2* re) {
- return re->ok();
- }
-
- void rust_re2_error(RE2* re, const char** outdata, size_t* outlen) {
- const std::string& e = re->error();
- *outdata = e.data();
- *outlen = e.length();
- }
-
- bool rust_re2_match(RE2* re, char* data, size_t len, int ianchor) {
- const StringPiece sp = StringPiece(data, len);
-
- RE2::Anchor anchor =
- ianchor == 0 ? RE2::Anchor::UNANCHORED :
- (ianchor == 1 ? RE2::Anchor::ANCHOR_START :
- RE2::Anchor::ANCHOR_BOTH);
-
- return re->Match(sp, 0, len, anchor, NULL, 0);
- }
-}
--- a/rust/hg-core/src/revlog.rs Tue Jul 14 10:25:41 2020 +0200
+++ b/rust/hg-core/src/revlog.rs Mon Jul 20 21:56:27 2020 +0530
@@ -25,6 +25,7 @@
///
/// This is also equal to `i32::max_value()`, but it's better to spell
/// it out explicitly, same as in `mercurial.node`
+#[allow(clippy::unreadable_literal)]
pub const WORKING_DIRECTORY_REVISION: Revision = 0x7fffffff;
/// The simplest expression of what we need of Mercurial DAGs.
@@ -49,6 +50,10 @@
/// Total number of Revisions referenced in this index
fn len(&self) -> usize;
+ fn is_empty(&self) -> bool {
+ self.len() == 0
+ }
+
/// Return a reference to the Node or `None` if rev is out of bounds
///
/// `NULL_REVISION` is not considered to be out of bounds.
--- a/rust/hg-core/src/revlog/node.rs Tue Jul 14 10:25:41 2020 +0200
+++ b/rust/hg-core/src/revlog/node.rs Mon Jul 20 21:56:27 2020 +0530
@@ -208,6 +208,10 @@
}
}
+ pub fn is_empty(&self) -> bool {
+ self.len() == 0
+ }
+
pub fn is_prefix_of(&self, node: &Node) -> bool {
if self.is_odd {
let buf = self.buf;
@@ -242,13 +246,13 @@
} else {
buf.len()
};
- for i in 0..until {
- if buf[i] != node.data[i] {
- if buf[i] & 0xf0 == node.data[i] & 0xf0 {
- return Some(2 * i + 1);
+ for (i, item) in buf.iter().enumerate().take(until) {
+ if *item != node.data[i] {
+ return if *item & 0xf0 == node.data[i] & 0xf0 {
+ Some(2 * i + 1)
} else {
- return Some(2 * i);
- }
+ Some(2 * i)
+ };
}
}
if self.is_odd && buf[until] & 0xf0 != node.data[until] & 0xf0 {
--- a/rust/hg-core/src/revlog/nodemap.rs Tue Jul 14 10:25:41 2020 +0200
+++ b/rust/hg-core/src/revlog/nodemap.rs Mon Jul 20 21:56:27 2020 +0530
@@ -218,7 +218,7 @@
/// Not derivable for arrays of length >32 until const generics are stable
impl PartialEq for Block {
fn eq(&self, other: &Self) -> bool {
- &self.0[..] == &other.0[..]
+ self.0[..] == other.0[..]
}
}
@@ -343,14 +343,11 @@
///
/// We keep `readonly` and clone its root block if it isn't empty.
fn new(readonly: Box<dyn Deref<Target = [Block]> + Send>) -> Self {
- let root = readonly
- .last()
- .map(|b| b.clone())
- .unwrap_or_else(|| Block::new());
+ let root = readonly.last().cloned().unwrap_or_else(Block::new);
NodeTree {
- readonly: readonly,
+ readonly,
growable: Vec::new(),
- root: root,
+ root,
masked_inner_blocks: 0,
}
}
@@ -461,7 +458,7 @@
) -> NodeTreeVisitor<'n, 'p> {
NodeTreeVisitor {
nt: self,
- prefix: prefix,
+ prefix,
visit: self.len() - 1,
nybble_idx: 0,
done: false,
@@ -486,8 +483,7 @@
let glen = self.growable.len();
if idx < ro_len {
self.masked_inner_blocks += 1;
- // TODO OPTIM I think this makes two copies
- self.growable.push(ro_blocks[idx].clone());
+ self.growable.push(ro_blocks[idx]);
(glen + ro_len, &mut self.growable[glen], glen + 1)
} else if glen + ro_len == idx {
(idx, &mut self.root, glen)
@@ -674,8 +670,8 @@
Some(NodeTreeVisitItem {
block_idx: visit,
- nybble: nybble,
- element: element,
+ nybble,
+ element,
})
}
}
--- a/rust/hg-core/src/utils.rs Tue Jul 14 10:25:41 2020 +0200
+++ b/rust/hg-core/src/utils.rs Mon Jul 20 21:56:27 2020 +0530
@@ -68,6 +68,7 @@
fn drop_prefix(&self, needle: &Self) -> Option<&Self>;
}
+#[allow(clippy::trivially_copy_pass_by_ref)]
fn is_not_whitespace(c: &u8) -> bool {
!(*c as char).is_whitespace()
}
@@ -75,7 +76,7 @@
impl SliceExt for [u8] {
fn trim_end(&self) -> &[u8] {
if let Some(last) = self.iter().rposition(is_not_whitespace) {
- &self[..last + 1]
+ &self[..=last]
} else {
&[]
}
@@ -151,7 +152,7 @@
impl<'a, T: Escaped> Escaped for &'a [T] {
fn escaped_bytes(&self) -> Vec<u8> {
- self.iter().flat_map(|item| item.escaped_bytes()).collect()
+ self.iter().flat_map(Escaped::escaped_bytes).collect()
}
}
--- a/rust/hg-core/src/utils/files.rs Tue Jul 14 10:25:41 2020 +0200
+++ b/rust/hg-core/src/utils/files.rs Mon Jul 20 21:56:27 2020 +0530
@@ -98,7 +98,7 @@
///
/// The path itself isn't included unless it is b"" (meaning the root
/// directory.)
-pub fn find_dirs<'a>(path: &'a HgPath) -> Ancestors<'a> {
+pub fn find_dirs(path: &HgPath) -> Ancestors {
let mut dirs = Ancestors { next: Some(path) };
if !path.is_empty() {
dirs.next(); // skip itself
@@ -113,9 +113,7 @@
///
/// The path itself isn't included unless it is b"" (meaning the root
/// directory.)
-pub(crate) fn find_dirs_with_base<'a>(
- path: &'a HgPath,
-) -> AncestorsWithBase<'a> {
+pub(crate) fn find_dirs_with_base(path: &HgPath) -> AncestorsWithBase {
let mut dirs = AncestorsWithBase {
next: Some((path, HgPath::new(b""))),
};
@@ -214,9 +212,9 @@
if name != root && name.starts_with(&root) {
let name = name.strip_prefix(&root).unwrap();
auditor.audit_path(path_to_hg_path_buf(name)?)?;
- return Ok(name.to_owned());
+ Ok(name.to_owned())
} else if name == root {
- return Ok("".into());
+ Ok("".into())
} else {
// Determine whether `name' is in the hierarchy at or beneath `root',
// by iterating name=name.parent() until it returns `None` (can't
--- a/rust/hg-core/src/utils/hg_path.rs Tue Jul 14 10:25:41 2020 +0200
+++ b/rust/hg-core/src/utils/hg_path.rs Mon Jul 20 21:56:27 2020 +0530
@@ -208,7 +208,7 @@
}
pub fn join<T: ?Sized + AsRef<Self>>(&self, other: &T) -> HgPathBuf {
let mut inner = self.inner.to_owned();
- if inner.len() != 0 && inner.last() != Some(&b'/') {
+ if !inner.is_empty() && inner.last() != Some(&b'/') {
inner.push(b'/');
}
inner.extend(other.as_ref().bytes());
@@ -315,7 +315,7 @@
/// This generates fine-grained errors useful for debugging.
/// To simply check if the path is valid during tests, use `is_valid`.
pub fn check_state(&self) -> Result<(), HgPathError> {
- if self.len() == 0 {
+ if self.is_empty() {
return Ok(());
}
let bytes = self.as_bytes();
@@ -366,14 +366,14 @@
}
}
-#[derive(Eq, Ord, Clone, PartialEq, PartialOrd, Hash)]
+#[derive(Default, Eq, Ord, Clone, PartialEq, PartialOrd, Hash)]
pub struct HgPathBuf {
inner: Vec<u8>,
}
impl HgPathBuf {
pub fn new() -> Self {
- Self { inner: Vec::new() }
+ Default::default()
}
pub fn push(&mut self, byte: u8) {
self.inner.push(byte);
@@ -384,9 +384,6 @@
pub fn into_vec(self) -> Vec<u8> {
self.inner
}
- pub fn as_ref(&self) -> &[u8] {
- self.inner.as_ref()
- }
}
impl fmt::Debug for HgPathBuf {
--- a/rust/hg-core/src/utils/path_auditor.rs Tue Jul 14 10:25:41 2020 +0200
+++ b/rust/hg-core/src/utils/path_auditor.rs Mon Jul 20 21:56:27 2020 +0530
@@ -112,7 +112,7 @@
// accidentally traverse a symlink into some other filesystem (which
// is potentially expensive to access).
for index in 0..parts.len() {
- let prefix = &parts[..index + 1].join(&b'/');
+ let prefix = &parts[..=index].join(&b'/');
let prefix = HgPath::new(prefix);
if self.audited_dirs.read().unwrap().contains(prefix) {
continue;
--- a/rust/hg-cpython/Cargo.toml Tue Jul 14 10:25:41 2020 +0200
+++ b/rust/hg-cpython/Cargo.toml Mon Jul 20 21:56:27 2020 +0530
@@ -10,7 +10,6 @@
[features]
default = ["python27"]
-with-re2 = ["hg-core/with-re2"]
# Features to build an extension module:
python27 = ["cpython/python27-sys", "cpython/extension-module-2-7"]
--- a/rust/hg-cpython/src/cindex.rs Tue Jul 14 10:25:41 2020 +0200
+++ b/rust/hg-cpython/src/cindex.rs Mon Jul 20 21:56:27 2020 +0530
@@ -90,10 +90,7 @@
),
));
}
- Ok(Index {
- index: index,
- capi: capi,
- })
+ Ok(Index { index, capi })
}
/// return a reference to the CPython Index object in this Struct
@@ -158,7 +155,7 @@
unsafe { (self.capi.index_length)(self.index.as_ptr()) as usize }
}
- fn node<'a>(&'a self, rev: Revision) -> Option<&'a Node> {
+ fn node(&self, rev: Revision) -> Option<&Node> {
let raw = unsafe {
(self.capi.index_node)(self.index.as_ptr(), rev as c_int)
};
--- a/rust/hg-cpython/src/debug.rs Tue Jul 14 10:25:41 2020 +0200
+++ b/rust/hg-cpython/src/debug.rs Mon Jul 20 21:56:27 2020 +0530
@@ -16,8 +16,6 @@
m.add(py, "__package__", package)?;
m.add(py, "__doc__", "Rust debugging information")?;
- m.add(py, "re2_installed", cfg!(feature = "with-re2"))?;
-
let sys = PyModule::import(py, "sys")?;
let sys_modules: PyDict = sys.get(py, "modules")?.extract(py)?;
sys_modules.set_item(py, dotted_name, &m)?;
--- a/rust/hg-cpython/src/dirstate.rs Tue Jul 14 10:25:41 2020 +0200
+++ b/rust/hg-cpython/src/dirstate.rs Mon Jul 20 21:56:27 2020 +0530
@@ -133,7 +133,8 @@
last_normal_time: i64,
list_clean: bool,
list_ignored: bool,
- list_unknown: bool
+ list_unknown: bool,
+ collect_traversed_dirs: bool
)
),
)?;
--- a/rust/hg-cpython/src/dirstate/copymap.rs Tue Jul 14 10:25:41 2020 +0200
+++ b/rust/hg-cpython/src/dirstate/copymap.rs Mon Jul 20 21:56:27 2020 +0530
@@ -89,7 +89,7 @@
py: Python,
res: (&HgPathBuf, &HgPathBuf),
) -> PyResult<Option<PyBytes>> {
- Ok(Some(PyBytes::new(py, res.0.as_ref())))
+ Ok(Some(PyBytes::new(py, res.0.as_bytes())))
}
fn translate_key_value(
py: Python,
@@ -97,8 +97,8 @@
) -> PyResult<Option<(PyBytes, PyBytes)>> {
let (k, v) = res;
Ok(Some((
- PyBytes::new(py, k.as_ref()),
- PyBytes::new(py, v.as_ref()),
+ PyBytes::new(py, k.as_bytes()),
+ PyBytes::new(py, v.as_bytes()),
)))
}
}
--- a/rust/hg-cpython/src/dirstate/dirs_multiset.rs Tue Jul 14 10:25:41 2020 +0200
+++ b/rust/hg-cpython/src/dirstate/dirs_multiset.rs Mon Jul 20 21:56:27 2020 +0530
@@ -128,7 +128,7 @@
py: Python,
res: &HgPathBuf,
) -> PyResult<Option<PyBytes>> {
- Ok(Some(PyBytes::new(py, res.as_ref())))
+ Ok(Some(PyBytes::new(py, res.as_bytes())))
}
}
--- a/rust/hg-cpython/src/dirstate/dirstate_map.rs Tue Jul 14 10:25:41 2020 +0200
+++ b/rust/hg-cpython/src/dirstate/dirstate_map.rs Mon Jul 20 21:56:27 2020 +0530
@@ -179,7 +179,7 @@
"other_parent",
other_parent
.iter()
- .map(|v| PyBytes::new(py, v.as_ref()))
+ .map(|v| PyBytes::new(py, v.as_bytes()))
.collect::<Vec<PyBytes>>()
.to_py_object(py),
)?;
@@ -348,7 +348,11 @@
for (key, value) in
self.inner(py).borrow_mut().build_file_fold_map().iter()
{
- dict.set_item(py, key.as_ref().to_vec(), value.as_ref().to_vec())?;
+ dict.set_item(
+ py,
+ key.as_bytes().to_vec(),
+ value.as_bytes().to_vec(),
+ )?;
}
Ok(dict)
}
@@ -440,8 +444,8 @@
for (key, value) in self.inner(py).borrow().copy_map.iter() {
dict.set_item(
py,
- PyBytes::new(py, key.as_ref()),
- PyBytes::new(py, value.as_ref()),
+ PyBytes::new(py, key.as_bytes()),
+ PyBytes::new(py, value.as_bytes()),
)?;
}
Ok(dict)
@@ -450,7 +454,7 @@
def copymapgetitem(&self, key: PyObject) -> PyResult<PyBytes> {
let key = key.extract::<PyBytes>(py)?;
match self.inner(py).borrow().copy_map.get(HgPath::new(key.data(py))) {
- Some(copy) => Ok(PyBytes::new(py, copy.as_ref())),
+ Some(copy) => Ok(PyBytes::new(py, copy.as_bytes())),
None => Err(PyErr::new::<exc::KeyError, _>(
py,
String::from_utf8_lossy(key.data(py)),
@@ -485,7 +489,7 @@
.get(HgPath::new(key.data(py)))
{
Some(copy) => Ok(Some(
- PyBytes::new(py, copy.as_ref()).into_object(),
+ PyBytes::new(py, copy.as_bytes()).into_object(),
)),
None => Ok(default),
}
@@ -549,7 +553,7 @@
py: Python,
res: (&HgPathBuf, &DirstateEntry),
) -> PyResult<Option<PyBytes>> {
- Ok(Some(PyBytes::new(py, res.0.as_ref())))
+ Ok(Some(PyBytes::new(py, res.0.as_bytes())))
}
fn translate_key_value(
py: Python,
@@ -557,7 +561,7 @@
) -> PyResult<Option<(PyBytes, PyObject)>> {
let (f, entry) = res;
Ok(Some((
- PyBytes::new(py, f.as_ref()),
+ PyBytes::new(py, f.as_bytes()),
make_dirstate_tuple(py, entry)?,
)))
}
--- a/rust/hg-cpython/src/dirstate/non_normal_entries.rs Tue Jul 14 10:25:41 2020 +0200
+++ b/rust/hg-cpython/src/dirstate/non_normal_entries.rs Mon Jul 20 21:56:27 2020 +0530
@@ -62,7 +62,7 @@
py: Python,
key: &HgPathBuf,
) -> PyResult<Option<PyBytes>> {
- Ok(Some(PyBytes::new(py, key.as_ref())))
+ Ok(Some(PyBytes::new(py, key.as_bytes())))
}
}
--- a/rust/hg-cpython/src/dirstate/status.rs Tue Jul 14 10:25:41 2020 +0200
+++ b/rust/hg-cpython/src/dirstate/status.rs Mon Jul 20 21:56:27 2020 +0530
@@ -104,6 +104,7 @@
list_clean: bool,
list_ignored: bool,
list_unknown: bool,
+ collect_traversed_dirs: bool,
) -> PyResult<PyTuple> {
let bytes = root_dir.extract::<PyBytes>(py)?;
let root_dir = get_path_from_bytes(bytes.data(py));
@@ -126,7 +127,7 @@
let ((lookup, status_res), warnings) = status(
&dmap,
&matcher,
- &root_dir,
+ root_dir.to_path_buf(),
ignore_files,
StatusOptions {
check_exec,
@@ -134,6 +135,7 @@
list_clean,
list_ignored,
list_unknown,
+ collect_traversed_dirs,
},
)
.map_err(|e| handle_fallback(py, e))?;
@@ -162,7 +164,7 @@
let ((lookup, status_res), warnings) = status(
&dmap,
&matcher,
- &root_dir,
+ root_dir.to_path_buf(),
ignore_files,
StatusOptions {
check_exec,
@@ -170,6 +172,7 @@
list_clean,
list_ignored,
list_unknown,
+ collect_traversed_dirs,
},
)
.map_err(|e| handle_fallback(py, e))?;
@@ -216,7 +219,7 @@
let ((lookup, status_res), warnings) = status(
&dmap,
&matcher,
- &root_dir,
+ root_dir.to_path_buf(),
ignore_files,
StatusOptions {
check_exec,
@@ -224,6 +227,7 @@
list_clean,
list_ignored,
list_unknown,
+ collect_traversed_dirs,
},
)
.map_err(|e| handle_fallback(py, e))?;
@@ -232,12 +236,10 @@
build_response(py, lookup, status_res, all_warnings)
}
- e => {
- return Err(PyErr::new::<ValueError, _>(
- py,
- format!("Unsupported matcher {}", e),
- ));
- }
+ e => Err(PyErr::new::<ValueError, _>(
+ py,
+ format!("Unsupported matcher {}", e),
+ )),
}
}
@@ -256,6 +258,7 @@
let unknown = collect_pybytes_list(py, status_res.unknown.as_ref());
let lookup = collect_pybytes_list(py, lookup.as_ref());
let bad = collect_bad_matches(py, status_res.bad.as_ref())?;
+ let traversed = collect_pybytes_list(py, status_res.traversed.as_ref());
let py_warnings = PyList::new(py, &[]);
for warning in warnings.iter() {
// We use duck-typing on the Python side for dispatch, good enough for
@@ -292,6 +295,7 @@
unknown.into_object(),
py_warnings.into_object(),
bad.into_object(),
+ traversed.into_object(),
][..],
))
}
--- a/rust/hg-cpython/src/parsers.rs Tue Jul 14 10:25:41 2020 +0200
+++ b/rust/hg-cpython/src/parsers.rs Mon Jul 20 21:56:27 2020 +0530
@@ -37,15 +37,15 @@
for (filename, entry) in &dirstate_map {
dmap.set_item(
py,
- PyBytes::new(py, filename.as_ref()),
+ PyBytes::new(py, filename.as_bytes()),
make_dirstate_tuple(py, entry)?,
)?;
}
for (path, copy_path) in copies {
copymap.set_item(
py,
- PyBytes::new(py, path.as_ref()),
- PyBytes::new(py, copy_path.as_ref()),
+ PyBytes::new(py, path.as_bytes()),
+ PyBytes::new(py, copy_path.as_bytes()),
)?;
}
Ok(
@@ -116,7 +116,7 @@
for (filename, entry) in &dirstate_map {
dmap.set_item(
py,
- PyBytes::new(py, filename.as_ref()),
+ PyBytes::new(py, filename.as_bytes()),
make_dirstate_tuple(py, entry)?,
)?;
}
--- a/rust/hg-cpython/src/utils.rs Tue Jul 14 10:25:41 2020 +0200
+++ b/rust/hg-cpython/src/utils.rs Mon Jul 20 21:56:27 2020 +0530
@@ -32,10 +32,7 @@
/// Clone incoming Python bytes given as `PyBytes` as a `Node`,
/// doing the necessary checks.
-pub fn node_from_py_bytes<'a>(
- py: Python,
- bytes: &'a PyBytes,
-) -> PyResult<Node> {
+pub fn node_from_py_bytes(py: Python, bytes: &PyBytes) -> PyResult<Node> {
<NodeData>::try_from(bytes.data(py))
.map_err(|_| {
PyErr::new::<ValueError, _>(
@@ -43,5 +40,5 @@
format!("{}-byte hash required", NODE_BYTES_LENGTH),
)
})
- .map(|n| n.into())
+ .map(Into::into)
}
--- a/rust/hgcli/pyoxidizer.bzl Tue Jul 14 10:25:41 2020 +0200
+++ b/rust/hgcli/pyoxidizer.bzl Mon Jul 20 21:56:27 2020 +0530
@@ -3,19 +3,16 @@
# Code to run in Python interpreter.
RUN_CODE = "import hgdemandimport; hgdemandimport.enable(); from mercurial import dispatch; dispatch.run()"
-
set_build_path(ROOT + "/build/pyoxidizer")
-
def make_distribution():
return default_python_distribution()
-
def make_distribution_windows():
- return default_python_distribution(flavor="standalone_dynamic")
-
+ return default_python_distribution(flavor = "standalone_dynamic")
def make_exe(dist):
+ """Builds a Rust-wrapped Mercurial binary."""
config = PythonInterpreterConfig(
raw_allocator = "system",
run_eval = RUN_CODE,
@@ -58,23 +55,20 @@
# On Windows, we install extra packages for convenience.
if "windows" in BUILD_TARGET_TRIPLE:
exe.add_python_resources(
- dist.pip_install(["-r", ROOT + "/contrib/packaging/requirements_win32.txt"])
+ dist.pip_install(["-r", ROOT + "/contrib/packaging/requirements_win32.txt"]),
)
return exe
-
def make_manifest(dist, exe):
m = FileManifest()
m.add_python_resource(".", exe)
return m
-
def make_embedded_resources(exe):
return exe.to_embedded_resources()
-
register_target("distribution_posix", make_distribution)
register_target("distribution_windows", make_distribution_windows)
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/rust/rhg/Cargo.toml Mon Jul 20 21:56:27 2020 +0530
@@ -0,0 +1,10 @@
+[package]
+name = "rhg"
+version = "0.1.0"
+authors = ["Antoine Cezar <antoine.cezar@octobus.net>"]
+edition = "2018"
+
+[dependencies]
+hg-core = { path = "../hg-core"}
+clap = "2.33.1"
+
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/rust/rhg/README.md Mon Jul 20 21:56:27 2020 +0530
@@ -0,0 +1,4 @@
+# rhg
+
+This project provides a fast-path Rust implementation of the Mercurial (`hg`)
+version control tool.
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/rust/rhg/rustfmt.toml Mon Jul 20 21:56:27 2020 +0530
@@ -0,0 +1,3 @@
+max_width = 79
+wrap_comments = true
+error_on_line_overflow = true
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/rust/rhg/src/commands.rs Mon Jul 20 21:56:27 2020 +0530
@@ -0,0 +1,9 @@
+pub mod root;
+use crate::error::CommandError;
+
+/// The common trait for rhg commands
+///
+/// Normalize the interface of the commands provided by rhg
+pub trait Command {
+ fn run(&self) -> Result<(), CommandError>;
+}
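
A minimal sketch of an implementor (the command is hypothetical; the
`RootCommand` in the following `commands/root.rs` is the real first user):

    use crate::commands::Command;
    use crate::error::CommandError;

    /// A toy command that does nothing and reports success.
    struct NoopCommand;

    impl Command for NoopCommand {
        fn run(&self) -> Result<(), CommandError> {
            Ok(())
        }
    }
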
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/rust/rhg/src/commands/root.rs Mon Jul 20 21:56:27 2020 +0530
@@ -0,0 +1,76 @@
+use crate::commands::Command;
+use crate::error::{CommandError, CommandErrorKind};
+use crate::ui::Ui;
+use hg::operations::{FindRoot, FindRootError, FindRootErrorKind, Operation};
+use hg::utils::files::get_bytes_from_path;
+use std::path::PathBuf;
+
+pub const HELP_TEXT: &str = "
+Print the root directory of the current repository.
+
+Returns 0 on success.
+";
+
+pub struct RootCommand {
+ ui: Ui,
+}
+
+impl RootCommand {
+ pub fn new() -> Self {
+ RootCommand { ui: Ui::new() }
+ }
+
+ fn display_found_path(
+ &self,
+ path_buf: PathBuf,
+ ) -> Result<(), CommandError> {
+ let bytes = get_bytes_from_path(path_buf);
+
+        // TODO use formatting macro
+ self.ui.write_stdout(&[bytes.as_slice(), b"\n"].concat())?;
+
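+        // Not a real failure: `CommandErrorKind::Ok` travels the error
+        // channel so that `main` exits with the success code.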
+ Err(CommandErrorKind::Ok.into())
+ }
+
+ fn display_error(&self, error: FindRootError) -> Result<(), CommandError> {
+ match error.kind {
+ FindRootErrorKind::RootNotFound(path) => {
+ let bytes = get_bytes_from_path(path);
+
+                // TODO use formatting macro
+ self.ui.write_stderr(
+ &[
+ b"abort: no repository found in '",
+ bytes.as_slice(),
+ b"' (.hg not found)!\n",
+ ]
+ .concat(),
+ )?;
+
+ Err(CommandErrorKind::RootNotFound.into())
+ }
+ FindRootErrorKind::GetCurrentDirError(e) => {
+                // TODO use formatting macro
+ self.ui.write_stderr(
+ &[
+ b"abort: error getting current working directory: ",
+ e.to_string().as_bytes(),
+ b"\n",
+ ]
+ .concat(),
+ )?;
+
+ Err(CommandErrorKind::CurrentDirNotFound.into())
+ }
+ }
+ }
+}
+
+impl Command for RootCommand {
+ fn run(&self) -> Result<(), CommandError> {
+ match FindRoot::new().run() {
+ Ok(path_buf) => self.display_found_path(path_buf),
+ Err(e) => self.display_error(e),
+ }
+ }
+}
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/rust/rhg/src/error.rs Mon Jul 20 21:56:27 2020 +0530
@@ -0,0 +1,60 @@
+use crate::exitcode;
+use crate::ui::UiError;
+use std::convert::From;
+
+/// The kind of command error
+#[derive(Debug, PartialEq)]
+pub enum CommandErrorKind {
+ /// The command finished without error
+ Ok,
+ /// The root of the repository cannot be found
+ RootNotFound,
+ /// The current directory cannot be found
+ CurrentDirNotFound,
+ /// The standard output stream cannot be written to
+ StdoutError,
+ /// The standard error stream cannot be written to
+ StderrError,
+}
+
+impl CommandErrorKind {
+ pub fn get_exit_code(&self) -> exitcode::ExitCode {
+ match self {
+ CommandErrorKind::Ok => exitcode::OK,
+ CommandErrorKind::RootNotFound => exitcode::ABORT,
+ CommandErrorKind::CurrentDirNotFound => exitcode::ABORT,
+ CommandErrorKind::StdoutError => exitcode::ABORT,
+ CommandErrorKind::StderrError => exitcode::ABORT,
+ }
+ }
+}
+
+/// The error type for the Command trait
+#[derive(Debug, PartialEq)]
+pub struct CommandError {
+ pub kind: CommandErrorKind,
+}
+
+impl CommandError {
+    /// Exit the process with the corresponding exit code.
+    pub fn exit(&self) {
+ std::process::exit(self.kind.get_exit_code())
+ }
+}
+
+impl From<CommandErrorKind> for CommandError {
+ fn from(kind: CommandErrorKind) -> Self {
+ CommandError { kind }
+ }
+}
+
+impl From<UiError> for CommandError {
+ fn from(error: UiError) -> Self {
+ CommandError {
+ kind: match error {
+ UiError::StdoutError(_) => CommandErrorKind::StdoutError,
+ UiError::StderrError(_) => CommandErrorKind::StderrError,
+ },
+ }
+ }
+}
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/rust/rhg/src/exitcode.rs Mon Jul 20 21:56:27 2020 +0530
@@ -0,0 +1,10 @@
+pub type ExitCode = i32;
+
+/// Successful exit
+pub const OK: ExitCode = 0;
+
+/// Generic abort
+pub const ABORT: ExitCode = 255;
+
+/// Command not implemented by rhg
+pub const UNIMPLEMENTED_COMMAND: ExitCode = 252;
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/rust/rhg/src/main.rs Mon Jul 20 21:56:27 2020 +0530
@@ -0,0 +1,42 @@
+use clap::App;
+use clap::AppSettings;
+use clap::SubCommand;
+
+mod commands;
+mod error;
+mod exitcode;
+mod ui;
+use commands::Command;
+
+fn main() {
+ let mut app = App::new("rhg")
+ .setting(AppSettings::AllowInvalidUtf8)
+ .setting(AppSettings::SubcommandRequired)
+ .setting(AppSettings::VersionlessSubcommands)
+ .version("0.0.1")
+ .subcommand(
+ SubCommand::with_name("root").about(commands::root::HELP_TEXT),
+ );
+
+ let matches = app.clone().get_matches_safe().unwrap_or_else(|_| {
+ std::process::exit(exitcode::UNIMPLEMENTED_COMMAND)
+ });
+
+ let command_result = match matches.subcommand_name() {
+ Some(name) => match name {
+ "root" => commands::root::RootCommand::new().run(),
+ _ => std::process::exit(exitcode::UNIMPLEMENTED_COMMAND),
+ },
+ _ => {
+ match app.print_help() {
+ Ok(_) => std::process::exit(exitcode::OK),
+ Err(_) => std::process::exit(exitcode::ABORT),
+ };
+ }
+ };
+
+ match command_result {
+ Ok(_) => std::process::exit(exitcode::OK),
+ Err(e) => e.exit(),
+ }
+}
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/rust/rhg/src/ui.rs Mon Jul 20 21:56:27 2020 +0530
@@ -0,0 +1,54 @@
+use std::io;
+use std::io::Write;
+
+pub struct Ui {}
+
+/// The kind of user interface error
+pub enum UiError {
+ /// The standard output stream cannot be written to
+ StdoutError(io::Error),
+ /// The standard error stream cannot be written to
+ StderrError(io::Error),
+}
+
+/// The commandline user interface
+impl Ui {
+ pub fn new() -> Self {
+ Ui {}
+ }
+
+ /// Write bytes to stdout
+ pub fn write_stdout(&self, bytes: &[u8]) -> Result<(), UiError> {
+ let mut stdout = io::stdout();
+
+ self.write_stream(&mut stdout, bytes)
+ .or_else(|e| self.into_stdout_error(e))?;
+
+ stdout.flush().or_else(|e| self.into_stdout_error(e))
+ }
+
+ fn into_stdout_error(&self, error: io::Error) -> Result<(), UiError> {
+ self.write_stderr(
+ &[b"abort: ", error.to_string().as_bytes(), b"\n"].concat(),
+ )?;
+ Err(UiError::StdoutError(error))
+ }
+
+ /// Write bytes to stderr
+ pub fn write_stderr(&self, bytes: &[u8]) -> Result<(), UiError> {
+ let mut stderr = io::stderr();
+
+        self.write_stream(&mut stderr, bytes)
+            .map_err(UiError::StderrError)?;
+
+        stderr.flush().map_err(UiError::StderrError)
+ }
+
+ fn write_stream(
+ &self,
+ stream: &mut impl Write,
+ bytes: &[u8],
+ ) -> Result<(), io::Error> {
+ stream.write_all(bytes)
+ }
+}
--- a/setup.py Tue Jul 14 10:25:41 2020 +0200
+++ b/setup.py Mon Jul 20 21:56:27 2020 +0530
@@ -83,6 +83,43 @@
printf(error, file=sys.stderr)
sys.exit(1)
+import ssl
+
+try:
+ ssl.SSLContext
+except AttributeError:
+ error = """
+The `ssl` module does not have the `SSLContext` class. This indicates an old
+Python version which does not support modern security features (which were
+added to Python 2.7 as part of "PEP 466"). Please make sure you have installed
+at least Python 2.7.9 or a Python version with backports of these security
+features.
+"""
+ printf(error, file=sys.stderr)
+ sys.exit(1)
+
+# The ssl.HAS_TLSv1* attributes are the preferred way to check for support,
+# but they were only added in Python 3.7. Prior to CPython commit
+# 6e8cda91d92da72800d891b2fc2073ecbc134d98 (backported to the 3.7 branch),
+# ssl.PROTOCOL_TLSv1_1 / ssl.PROTOCOL_TLSv1_2 were defined only if compiled
+# against an OpenSSL version with TLS 1.1 / 1.2 support. At the mentioned
+# commit, they were unconditionally defined.
+_notset = object()
+has_tlsv1_1 = getattr(ssl, 'HAS_TLSv1_1', _notset)
+if has_tlsv1_1 is _notset:
+ has_tlsv1_1 = getattr(ssl, 'PROTOCOL_TLSv1_1', _notset) is not _notset
+has_tlsv1_2 = getattr(ssl, 'HAS_TLSv1_2', _notset)
+if has_tlsv1_2 is _notset:
+ has_tlsv1_2 = getattr(ssl, 'PROTOCOL_TLSv1_2', _notset) is not _notset
+if not (has_tlsv1_1 or has_tlsv1_2):
+ error = """
+The `ssl` module does not advertise support for TLS 1.1 or TLS 1.2.
+Please make sure that your Python installation was compiled against an OpenSSL
+version enabling these features (likely this requires the OpenSSL version to
+be at least 1.0.1).
+"""
+ printf(error, file=sys.stderr)
+ sys.exit(1)
+
if sys.version_info[0] >= 3:
DYLIB_SUFFIX = sysconfig.get_config_vars()['EXT_SUFFIX']
else:
@@ -1396,7 +1433,7 @@
env['HOME'] = pwd.getpwuid(os.getuid()).pw_dir
- cargocmd = ['cargo', 'rustc', '-vv', '--release']
+ cargocmd = ['cargo', 'rustc', '--release']
feature_flags = []
@@ -1658,6 +1695,9 @@
if dllexcludes:
py2exedllexcludes.extend(dllexcludes.split(' '))
+if os.environ.get('PYOXIDIZER'):
+ hgbuild.sub_commands.insert(0, ('build_hgextindex', None))
+
if os.name == 'nt':
# Windows binary file versions for exe/dll files must have the
# form W.X.Y.Z, where W,X,Y,Z are numbers in the range 0..65535
--- a/tests/fakemergerecord.py Tue Jul 14 10:25:41 2020 +0200
+++ b/tests/fakemergerecord.py Mon Jul 20 21:56:27 2020 +0530
@@ -5,7 +5,7 @@
from __future__ import absolute_import
from mercurial import (
- merge,
+ mergestate as mergestatemod,
registrar,
)
@@ -23,7 +23,7 @@
)
def fakemergerecord(ui, repo, *pats, **opts):
with repo.wlock():
- ms = merge.mergestate.read(repo)
+ ms = mergestatemod.mergestate.read(repo)
records = ms._makerecords()
if opts.get('mandatory'):
records.append((b'X', b'mandatory record'))
--- a/tests/hghave.py Tue Jul 14 10:25:41 2020 +0200
+++ b/tests/hghave.py Mon Jul 20 21:56:27 2020 +0530
@@ -591,7 +591,7 @@
@check("clang-format", "clang-format C code formatter")
def has_clang_format():
- m = matchoutput('clang-format --version', br'clang-format version (\d)')
+    m = matchoutput('clang-format --version', br'clang-format version (\d+)')
# style changed somewhere between 4.x and 6.x
return m and int(m.group(1)) >= 6
@@ -645,35 +645,11 @@
return False
-@check("sslcontext", "python >= 2.7.9 ssl")
-def has_sslcontext():
- try:
- import ssl
-
- ssl.SSLContext
- return True
- except (ImportError, AttributeError):
- return False
-
-
-@check("defaultcacerts", "can verify SSL certs by system's CA certs store")
-def has_defaultcacerts():
- from mercurial import sslutil, ui as uimod
-
- ui = uimod.ui.load()
- return sslutil._defaultcacerts(ui) or sslutil._canloaddefaultcerts
-
-
@check("defaultcacertsloaded", "detected presence of loaded system CA certs")
def has_defaultcacertsloaded():
import ssl
from mercurial import sslutil, ui as uimod
- if not has_defaultcacerts():
- return False
- if not has_sslcontext():
- return False
-
ui = uimod.ui.load()
cafile = sslutil._defaultcacerts(ui)
ctx = ssl.create_default_context()
@@ -707,6 +683,17 @@
return True
+@check("setprocname", "whether osutil.setprocname is available or not")
+def has_setprocname():
+ try:
+ from mercurial.utils import procutil
+
+ procutil.setprocname
+ return True
+ except AttributeError:
+ return False
+
+
@check("test-repo", "running tests from repository")
def has_test_repo():
t = os.environ["TESTDIR"]
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/tests/phabricator/phabupdate-change-6876.json Mon Jul 20 21:56:27 2020 +0530
@@ -0,0 +1,141 @@
+{
+ "version": 1,
+ "interactions": [
+ {
+ "response": {
+ "headers": {
+ "x-xss-protection": [
+ "1; mode=block"
+ ],
+ "expires": [
+ "Sat, 01 Jan 2000 00:00:00 GMT"
+ ],
+ "server": [
+ "Apache/2.4.10 (Debian)"
+ ],
+ "date": [
+ "Wed, 15 Jul 2020 17:23:27 GMT"
+ ],
+ "cache-control": [
+ "no-store"
+ ],
+ "content-type": [
+ "application/json"
+ ],
+ "transfer-encoding": [
+ "chunked"
+ ],
+ "strict-transport-security": [
+ "max-age=0; includeSubdomains; preload"
+ ],
+ "x-frame-options": [
+ "Deny"
+ ],
+ "referrer-policy": [
+ "no-referrer"
+ ],
+ "x-content-type-options": [
+ "nosniff"
+ ]
+ },
+ "body": {
+ "string": "{\"result\":[{\"id\":\"6876\",\"phid\":\"PHID-DREV-looitrxgt3omaau7a7qk\",\"title\":\"phabricator: support automatically obsoleting old revisions of pulled commits\",\"uri\":\"https:\\/\\/phab.mercurial-scm.org\\/D6876\",\"dateCreated\":\"1569388644\",\"dateModified\":\"1579887103\",\"authorPHID\":\"PHID-USER-tzhaient733lwrlbcag5\",\"status\":\"1\",\"statusName\":\"Needs Revision\",\"properties\":{\"draft.broadcast\":true,\"lines.added\":68,\"lines.removed\":1},\"branch\":null,\"summary\":\"This is basically an import of the `pullcreatemarkers` extension[1] from the FB\\nrepo, with minor adjustments to `getmatchingdiff()` to work with modern hg.\\nSince this is very phabricator specific, it makes more sense to me to bundle it\\ninto the existing extension. It wasn't very obvious from the old name what\\nfunctionality was provided, and it may make sense to do this in other scenarios\\nbesides `hg pull`.\\n\\nThere are two use cases that I can see- first, ensuring that old revisions are\\ncleaned up for a contributor (I seem to recall something I submitted recently\\nneeded to be explicitly pruned, though most submissions do clean up\\nautomatically). Second, any `hg phabread | hg import -` would otherwise need to\\nbe manually cleaned up. The latter is annoying enough that I tend not to grab\\nthe code and try it when reviewing.\\n\\nIt is currently guarded by a config option (off by default), because @marmoute\\nexpressed concerns about duplicate marker creation if the pushing reviewer also\\ncreates a marker. I don't think that's possible here, since the obsolete\\nrevisions are explicitly excluded. But maybe there are other reasons someone\\nwouldn't want older revisions obsoleted. The config name reflects the fact that\\nI'm not sure if other things like import should get this too.\\n\\nI suspect that we could wrap a function deeper in the pull sequence to improve\\nboth the code and the UX. For example, when pulling an obsolete marker, it can\\nprint out a warning that the working directory parent is obsolete, but that\\ndoesn't happen here. (It won't happen with this test. It *should* without the\\n`--bypass` option, but doesn't.) It should also be possible to not have to\\nquery the range of new revisions, and maybe it can be added to the existing\\ntransaction.\\n\\n[1] https:\\/\\/bitbucket.org\\/facebook\\/hg-experimental\\/src\\/default\\/hgext3rd\\/pullcreatemarkers.py\",\"testPlan\":\"\",\"lineCount\":\"69\",\"activeDiffPHID\":\"PHID-DIFF-jdpqpzciqcooaxf2kojh\",\"diffs\":[\"16604\"],\"commits\":[],\"reviewers\":{\"PHID-PROJ-3dvcxzznrjru2xmmses3\":\"PHID-PROJ-3dvcxzznrjru2xmmses3\",\"PHID-USER-cah4b6i3kszy6debh3bl\":\"PHID-USER-cah4b6i3kszy6debh3bl\"},\"ccs\":[\"PHID-USER-34jnztnonbr4lhwuybwl\",\"PHID-USER-e66t6wbudjtigdnqbl3e\",\"PHID-USER-5iy6mkoveguhm2zthvww\",\"PHID-USER-q42dn7cc3donqriafhjx\",\"PHID-USER-vflsibccj4unqydwfvne\"],\"hashes\":[],\"auxiliary\":{\"phabricator:projects\":[],\"phabricator:depends-on\":[\"PHID-DREV-2dbanvk64h5wguhxta2o\"]},\"repositoryPHID\":\"PHID-REPO-bvunnehri4u2isyr7bc3\",\"sourcePath\":null}],\"error_code\":null,\"error_info\":null}"
+ },
+ "status": {
+ "message": "OK",
+ "code": 200
+ }
+ },
+ "request": {
+ "uri": "https://phab.mercurial-scm.org//api/differential.query",
+ "body": "output=json¶ms=%7B%22__conduit__%22%3A+%7B%22token%22%3A+%22cli-hahayouwish%22%7D%2C+%22ids%22%3A+%5B6876%5D%7D&__conduit__=1",
+ "method": "POST",
+ "headers": {
+ "content-length": [
+ "146"
+ ],
+ "accept": [
+ "application/mercurial-0.1"
+ ],
+ "content-type": [
+ "application/x-www-form-urlencoded"
+ ],
+ "user-agent": [
+ "mercurial/proto-1.0 (Mercurial 5.4.2+207-8403cc54bc83+20200709)"
+ ],
+ "host": [
+ "phab.mercurial-scm.org"
+ ]
+ }
+ }
+ },
+ {
+ "response": {
+ "headers": {
+ "x-xss-protection": [
+ "1; mode=block"
+ ],
+ "expires": [
+ "Sat, 01 Jan 2000 00:00:00 GMT"
+ ],
+ "server": [
+ "Apache/2.4.10 (Debian)"
+ ],
+ "date": [
+ "Wed, 15 Jul 2020 17:23:28 GMT"
+ ],
+ "cache-control": [
+ "no-store"
+ ],
+ "content-type": [
+ "application/json"
+ ],
+ "transfer-encoding": [
+ "chunked"
+ ],
+ "strict-transport-security": [
+ "max-age=0; includeSubdomains; preload"
+ ],
+ "x-frame-options": [
+ "Deny"
+ ],
+ "referrer-policy": [
+ "no-referrer"
+ ],
+ "x-content-type-options": [
+ "nosniff"
+ ]
+ },
+ "body": {
+ "string": "{\"result\":{\"object\":{\"id\":6876,\"phid\":\"PHID-DREV-looitrxgt3omaau7a7qk\"},\"transactions\":[{\"phid\":\"PHID-XACT-DREV-g2xkgr2sjkrmhcr\"},{\"phid\":\"PHID-XACT-DREV-lgbrex6poz6x5pk\"}]},\"error_code\":null,\"error_info\":null}"
+ },
+ "status": {
+ "message": "OK",
+ "code": 200
+ }
+ },
+ "request": {
+ "uri": "https://phab.mercurial-scm.org//api/differential.revision.edit",
+ "body": "output=json¶ms=%7B%22__conduit__%22%3A+%7B%22token%22%3A+%22cli-hahayouwish%22%7D%2C+%22objectIdentifier%22%3A+%22PHID-DREV-looitrxgt3omaau7a7qk%22%2C+%22transactions%22%3A+%5B%7B%22type%22%3A+%22plan-changes%22%2C+%22value%22%3A+true%7D%5D%7D&__conduit__=1",
+ "method": "POST",
+ "headers": {
+ "content-length": [
+ "278"
+ ],
+ "accept": [
+ "application/mercurial-0.1"
+ ],
+ "content-type": [
+ "application/x-www-form-urlencoded"
+ ],
+ "user-agent": [
+ "mercurial/proto-1.0 (Mercurial 5.4.2+207-8403cc54bc83+20200709)"
+ ],
+ "host": [
+ "phab.mercurial-scm.org"
+ ]
+ }
+ }
+ }
+ ]
+}
\ No newline at end of file
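
For reference, the recorded request bodies in this cassette are plain
form-encoded conduit calls: the params field is a JSON blob carrying the API
arguments together with the auth token, and the whole thing is POSTed as
application/x-www-form-urlencoded. A minimal sketch of how such a body can be
produced (Python 3; the token and revision id are the dummy values from the
cassette, and conduit_body is a name made up for this illustration, not the
phabricator extension's code):

    import json
    from urllib.parse import urlencode

    def conduit_body(token, ids):
        # JSON-encode the conduit parameters, auth token included,
        # then form-encode the result as the POST body.
        params = {'__conduit__': {'token': token}, 'ids': ids}
        return urlencode(
            {'output': 'json', 'params': json.dumps(params), '__conduit__': 1}
        )

    print(conduit_body('cli-hahayouwish', [6876]))
    # output=json&params=%7B%22__conduit__%22%3A+...%7D&__conduit__=1
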
--- a/tests/run-tests.py Tue Jul 14 10:25:41 2020 +0200
+++ b/tests/run-tests.py Mon Jul 20 21:56:27 2020 +0530
@@ -1595,7 +1595,7 @@
casepath = b'#'.join(case)
self.name = '%s#%s' % (self.name, _bytes2sys(casepath))
self.errpath = b'%s#%s.err' % (self.errpath[:-4], casepath)
- self._tmpname += b'-%s' % casepath
+ self._tmpname += b'-%s' % casepath.replace(b'#', b'-')
self._have = {}
@property
@@ -2260,7 +2260,7 @@
'changes)'
)
else:
- self.stream.write('Accept this change? [n] ')
+ self.stream.write('Accept this change? [y/N] ')
self.stream.flush()
answer = sys.stdin.readline().strip()
if answer.lower() in ('y', 'yes'):
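
The _tmpname tweak above matters once a test combines several #testcases
dimensions: the case names are joined with b'#', which is fine for display but
awkward in a temporary directory name. A minimal sketch of the resulting
behaviour (tmpname_for is a hypothetical helper, not run-tests.py's actual
structure):

    def tmpname_for(base, case):
        # 'case' is the list of case names for one combination,
        # e.g. [b'flat', b'sidedata']; join them as run-tests.py
        # does, then flatten the b'#' separator for the tmp dir.
        casepath = b'#'.join(case)
        return base + b'-%s' % casepath.replace(b'#', b'-')

    assert tmpname_for(b'test-foo.t', [b'a', b'b']) == b'test-foo.t-a-b'
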
--- a/tests/test-absorb-unfinished.t Tue Jul 14 10:25:41 2020 +0200
+++ b/tests/test-absorb-unfinished.t Mon Jul 20 21:56:27 2020 +0530
@@ -20,7 +20,7 @@
rebasing 1:c3b6dc0e177a "foo 2" (tip)
merging foo.whole
warning: conflicts while merging foo.whole! (edit, then use 'hg resolve --mark')
- unresolved conflicts (see hg resolve, then hg rebase --continue)
+ unresolved conflicts (see 'hg resolve', then 'hg rebase --continue')
[1]
$ hg --config extensions.rebase= absorb
--- a/tests/test-absorb.t Tue Jul 14 10:25:41 2020 +0200
+++ b/tests/test-absorb.t Mon Jul 20 21:56:27 2020 +0530
@@ -97,7 +97,7 @@
84e5416 commit 5
ff5d556 commit 3
f548282 commit 1
- apply changes (yn)? y
+ apply changes (y/N)? y
saved backup bundle to * (glob)
3 of 3 chunk(s) applied
$ hg annotate a
@@ -490,6 +490,75 @@
+3
+Setting config rewrite.empty-successor=keep causes empty changesets to get committed:
+
+ $ cd ..
+ $ hg init repo4a
+ $ cd repo4a
+ $ cat > a <<EOF
+ > 1
+ > 2
+ > EOF
+ $ hg commit -m a12 -A a
+ $ cat > b <<EOF
+ > 1
+ > 2
+ > EOF
+ $ hg commit -m b12 -A b
+ $ echo 3 >> b
+ $ hg commit -m b3
+ $ echo 4 >> b
+ $ hg commit -m b4
+ $ hg commit -m empty --config ui.allowemptycommit=True
+ $ echo 1 > b
+ $ echo 3 >> a
+ $ hg absorb -pn
+ showing changes for a
+ @@ -2,0 +2,1 @@
+ bfafb49 +3
+ showing changes for b
+ @@ -1,3 +1,0 @@
+ 1154859 -2
+ 30970db -3
+ a393a58 -4
+
+ 4 changesets affected
+ a393a58 b4
+ 30970db b3
+ 1154859 b12
+ bfafb49 a12
+ $ hg absorb -av --config rewrite.empty-successor=keep | grep became
+ 0:bfafb49242db: 1 file(s) changed, became 5:1a2de97fc652
+ 1:115485984805: 2 file(s) changed, became 6:0c930dfab74c
+ 2:30970dbf7b40: 2 file(s) changed, became empty as 7:df6574ae635c
+ 3:a393a58b9a85: 2 file(s) changed, became empty as 8:ad4bd3462c9e
+ 4:1bb0e8cff87a: 2 file(s) changed, became 9:2dbed75af996
+ $ hg log -T '{rev} {desc}\n' -Gp
+ @ 9 empty
+ |
+ o 8 b4
+ |
+ o 7 b3
+ |
+ o 6 b12
+ | diff --git a/b b/b
+ | new file mode 100644
+ | --- /dev/null
+ | +++ b/b
+ | @@ -0,0 +1,1 @@
+ | +1
+ |
+ o 5 a12
+ diff --git a/a b/a
+ new file mode 100644
+ --- /dev/null
+ +++ b/a
+ @@ -0,0 +1,3 @@
+ +1
+ +2
+ +3
+
+
Use revert to make the current change and its parent disappear.
This should move us to the non-obsolete ancestor.
@@ -525,3 +594,83 @@
a: 1 of 1 chunk(s) applied
$ hg id
bfafb49242db tip
+
+ $ cd ..
+ $ hg init repo6
+ $ cd repo6
+ $ echo a1 > a
+ $ touch b
+ $ hg commit -m a -A a b
+ $ hg branch foo -q
+ $ echo b > b
+ $ hg commit -m foo # will become empty
+ $ hg branch bar -q
+ $ hg commit -m bar # is already empty
+ $ echo a2 > a
+ $ printf '' > b
+ $ hg absorb --apply-changes --verbose | grep became
+ 0:0cde1ae39321: 1 file(s) changed, became 3:fc7fcdd90fdb
+ 1:795dfb1adcef: 2 file(s) changed, became 4:a8740537aa53
+ 2:b02935f68891: 2 file(s) changed, became 5:59533e01c707
+ $ hg log -T '{rev} (branch: {branch}) {desc}\n' -G --stat
+ @ 5 (branch: bar) bar
+ |
+ o 4 (branch: foo) foo
+ |
+ o 3 (branch: default) a
+ a | 1 +
+ b | 0
+ 2 files changed, 1 insertions(+), 0 deletions(-)
+
+
+ $ cd ..
+ $ hg init repo7
+ $ cd repo7
+ $ echo a1 > a
+ $ touch b
+ $ hg commit -m a -A a b
+ $ echo b > b
+ $ hg commit -m foo --close-branch # will become empty
+ $ echo c > c
+ $ hg commit -m reopen -A c -q
+ $ hg commit -m bar --close-branch # is already empty
+ $ echo a2 > a
+ $ printf '' > b
+ $ hg absorb --apply-changes --verbose | grep became
+ 0:0cde1ae39321: 1 file(s) changed, became 4:fc7fcdd90fdb
+ 1:651b953d5764: 2 file(s) changed, became 5:0c9de988ecdc
+ 2:76017bba73f6: 2 file(s) changed, became 6:d53ac896eb25
+ 3:c7c1d67efc1d: 2 file(s) changed, became 7:66520267fe96
+ $ hg up null -q # to make closed heads visible
+ $ hg log -T '{rev} {desc}\n' -G --stat
+ _ 7 bar
+ |
+ o 6 reopen
+ | c | 1 +
+ | 1 files changed, 1 insertions(+), 0 deletions(-)
+ |
+ _ 5 foo
+ |
+ o 4 a
+ a | 1 +
+ b | 0
+ 2 files changed, 1 insertions(+), 0 deletions(-)
+
+
+ $ cd ..
+ $ hg init repo8
+ $ cd repo8
+ $ echo a1 > a
+ $ hg commit -m a -A a
+ $ hg commit -m empty --config ui.allowemptycommit=True
+ $ echo a2 > a
+ $ hg absorb --apply-changes --verbose | grep became
+ 0:ecf99a8d6699: 1 file(s) changed, became 2:7e3ccf8e2fa5
+ 1:97f72456ae0d: 1 file(s) changed, became 3:2df488325d6f
+ $ hg log -T '{rev} {desc}\n' -G --stat
+ @ 3 empty
+ |
+ o 2 a
+ a | 1 +
+ 1 files changed, 1 insertions(+), 0 deletions(-)
+
--- a/tests/test-alias.t Tue Jul 14 10:25:41 2020 +0200
+++ b/tests/test-alias.t Mon Jul 20 21:56:27 2020 +0530
@@ -182,7 +182,7 @@
-m --modified show only modified files
-a --added show only added files
-r --removed show only removed files
- -d --deleted show only deleted (but tracked) files
+ -d --deleted show only missing files
-c --clean show only files without changes
-u --unknown show only unknown (not tracked) files
-i --ignored show only ignored files
--- a/tests/test-bookmarks-rebase.t Tue Jul 14 10:25:41 2020 +0200
+++ b/tests/test-bookmarks-rebase.t Mon Jul 20 21:56:27 2020 +0530
@@ -80,7 +80,7 @@
rebasing 4:dd7c838e8362 "4" (three tip)
merging d
warning: conflicts while merging d! (edit, then use 'hg resolve --mark')
- unresolved conflicts (see hg resolve, then hg rebase --continue)
+ unresolved conflicts (see 'hg resolve', then 'hg rebase --continue')
[1]
$ hg rebase --abort
rebase aborted
@@ -95,7 +95,7 @@
rebasing 4:dd7c838e8362 "4" (three tip)
merging d
warning: conflicts while merging d! (edit, then use 'hg resolve --mark')
- unresolved conflicts (see hg resolve, then hg rebase --continue)
+ unresolved conflicts (see 'hg resolve', then 'hg rebase --continue')
[1]
$ hg bookmark -d three
$ hg rebase --abort
--- a/tests/test-bundle2-format.t Tue Jul 14 10:25:41 2020 +0200
+++ b/tests/test-bundle2-format.t Mon Jul 20 21:56:27 2020 +0530
@@ -22,8 +22,8 @@
> from mercurial import changegroup
> from mercurial import error
> from mercurial import obsolete
- > from mercurial import pycompat
> from mercurial import registrar
+ > from mercurial.utils import procutil
>
>
> try:
@@ -148,7 +148,7 @@
> bundler.newpart(b'output', data=genraise(), mandatory=False)
>
> if path is None:
- > file = pycompat.stdout
+ > file = procutil.stdout
> else:
> file = open(path, 'wb')
>
@@ -181,7 +181,7 @@
> lock = repo.lock()
> tr = repo.transaction(b'processbundle')
> try:
- > unbundler = bundle2.getunbundler(ui, pycompat.stdin)
+ > unbundler = bundle2.getunbundler(ui, procutil.stdin)
> op = bundle2.processbundle(repo, unbundler, lambda: tr)
> tr.close()
> except error.BundleValueError as exc:
@@ -192,7 +192,7 @@
> if tr is not None:
> tr.release()
> lock.release()
- > remains = pycompat.stdin.read()
+ > remains = procutil.stdin.read()
> ui.write(b'%i unread bytes\n' % len(remains))
> if op.records[b'song']:
> totalverses = sum(r[b'verses'] for r in op.records[b'song'])
@@ -207,7 +207,7 @@
> @command(b'statbundle2', [], b'')
> def cmdstatbundle2(ui, repo):
> """print statistic on the bundle2 container read from stdin"""
- > unbundler = bundle2.getunbundler(ui, pycompat.stdin)
+ > unbundler = bundle2.getunbundler(ui, procutil.stdin)
> try:
> params = unbundler.params
> except error.BundleValueError as exc:
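
The pycompat.stdout to procutil.stdout substitutions here (and in the other
tests below) follow the byte-oriented standard streams to their home in
mercurial.utils.procutil. The usage pattern the tests rely on, as a minimal
sketch assuming an importable Mercurial checkout:

    from mercurial.utils import procutil

    # procutil.stdout is a byte stream on both Python 2 and 3, so
    # helpers write bytes directly instead of going through the
    # text-mode sys.stdout.
    procutil.stdout.write(b'hello\n')
    procutil.stdout.flush()
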
--- a/tests/test-check-rust-format.t Tue Jul 14 10:25:41 2020 +0200
+++ b/tests/test-check-rust-format.t Mon Jul 20 21:56:27 2020 +0530
@@ -5,5 +5,5 @@
$ cd "$TESTDIR"/..
$ RUSTFMT=$(rustup which --toolchain nightly rustfmt)
$ for f in `testrepohg files 'glob:**/*.rs'` ; do
- > $RUSTFMT --check --unstable-features --color=never $f
+ > $RUSTFMT --check --edition=2018 --unstable-features --color=never $f
> done
--- a/tests/test-chg.t Tue Jul 14 10:25:41 2020 +0200
+++ b/tests/test-chg.t Mon Jul 20 21:56:27 2020 +0530
@@ -229,13 +229,13 @@
server.log.1
print only the last 10 lines, since we aren't sure how many records are
-preserved (since setprocname isn't available on py3, the 10th-most-recent line
-is different when using py3):
+preserved (since setprocname isn't available on py3 or in the pure version,
+the 10th-most-recent line is different when using py3):
$ cat log/server.log.1 log/server.log | tail -10 | filterlog
- YYYY/MM/DD HH:MM:SS (PID)> confighash = ... mtimehash = ... (py3 !)
+ YYYY/MM/DD HH:MM:SS (PID)> confighash = ... mtimehash = ... (no-setprocname !)
YYYY/MM/DD HH:MM:SS (PID)> forked worker process (pid=...)
- YYYY/MM/DD HH:MM:SS (PID)> setprocname: ... (no-py3 !)
+ YYYY/MM/DD HH:MM:SS (PID)> setprocname: ... (setprocname !)
YYYY/MM/DD HH:MM:SS (PID)> received fds: ...
YYYY/MM/DD HH:MM:SS (PID)> chdir to '$TESTTMP/extreload'
YYYY/MM/DD HH:MM:SS (PID)> setumask 18
--- a/tests/test-clone-uncompressed.t Tue Jul 14 10:25:41 2020 +0200
+++ b/tests/test-clone-uncompressed.t Mon Jul 20 21:56:27 2020 +0530
@@ -407,7 +407,7 @@
$ sleep 1
$ echo >> repo/f1
$ echo >> repo/f2
- $ hg -R repo ci -m "1"
+ $ hg -R repo ci -m "1" --config ui.timeout.warn=-1
$ wait
$ hg -R clone id
000000000000
--- a/tests/test-clonebundles.t Tue Jul 14 10:25:41 2020 +0200
+++ b/tests/test-clonebundles.t Mon Jul 20 21:56:27 2020 +0530
@@ -255,7 +255,8 @@
added 2 changesets with 2 changes to 2 files
new changesets 53245c60e682:aaff8d2ffbbf
-URLs requiring SNI are filtered in Python <2.7.9
+We require a Python version that supports SNI. Therefore, URLs requiring SNI
+are not filtered.
$ cp full.hg sni.hg
$ cat > server/.hg/clonebundles.manifest << EOF
@@ -263,9 +264,6 @@
> http://localhost:$HGPORT1/full.hg
> EOF
-#if sslcontext
-Python 2.7.9+ support SNI
-
$ hg clone -U http://localhost:$HGPORT sni-supported
applying clone bundle from http://localhost:$HGPORT1/sni.hg
adding changesets
@@ -276,20 +274,6 @@
searching for changes
no changes found
2 local changesets published
-#else
-Python <2.7.9 will filter SNI URLs
-
- $ hg clone -U http://localhost:$HGPORT sni-unsupported
- applying clone bundle from http://localhost:$HGPORT1/full.hg
- adding changesets
- adding manifests
- adding file changes
- added 2 changesets with 2 changes to 2 files
- finished applying clone bundle
- searching for changes
- no changes found
- 2 local changesets published
-#endif
Stream clone bundles are supported
@@ -567,3 +551,88 @@
searching for changes
no changes found
2 local changesets published
+ $ killdaemons.py
+
+A manifest with a gzip bundle that requires too much memory for a 16MB system
+but works on a 32MB system.
+
+ $ "$PYTHON" $TESTDIR/dumbhttp.py -p $HGPORT1 --pid http.pid
+ $ cat http.pid >> $DAEMON_PIDS
+ $ hg -R server serve -d -p $HGPORT --pid-file hg.pid --accesslog access.log
+ $ cat hg.pid >> $DAEMON_PIDS
+
+ $ cat > server/.hg/clonebundles.manifest << EOF
+ > http://localhost:$HGPORT1/gz-a.hg BUNDLESPEC=gzip-v2 REQUIREDRAM=12MB
+ > EOF
+
+ $ hg clone -U --debug --config ui.available-memory=16MB http://localhost:$HGPORT gzip-too-large
+ using http://localhost:$HGPORT/
+ sending capabilities command
+ sending clonebundles command
+ filtering http://localhost:$HGPORT1/gz-a.hg as it needs more than 2/3 of system memory
+ no compatible clone bundles available on server; falling back to regular clone
+ (you may want to report this to the server operator)
+ query 1; heads
+ sending batch command
+ requesting all changes
+ sending getbundle command
+ bundle2-input-bundle: with-transaction
+ bundle2-input-part: "changegroup" (params: 1 mandatory 1 advisory) supported
+ adding changesets
+ add changeset 53245c60e682
+ add changeset aaff8d2ffbbf
+ adding manifests
+ adding file changes
+ adding bar revisions
+ adding foo revisions
+ bundle2-input-part: total payload size 920
+ bundle2-input-part: "listkeys" (params: 1 mandatory) supported
+ bundle2-input-part: "phase-heads" supported
+ bundle2-input-part: total payload size 24
+ bundle2-input-part: "cache:rev-branch-cache" (advisory) supported
+ bundle2-input-part: total payload size 59
+ bundle2-input-bundle: 4 parts total
+ checking for updated bookmarks
+ updating the branch cache
+ added 2 changesets with 2 changes to 2 files
+ new changesets 53245c60e682:aaff8d2ffbbf
+ calling hook changegroup.lfiles: hgext.largefiles.reposetup.checkrequireslfiles
+ (sent 4 HTTP requests and * bytes; received * bytes in responses) (glob)
+
+ $ hg clone -U --debug --config ui.available-memory=32MB http://localhost:$HGPORT gzip-too-large2
+ using http://localhost:$HGPORT/
+ sending capabilities command
+ sending clonebundles command
+ applying clone bundle from http://localhost:$HGPORT1/gz-a.hg
+ bundle2-input-bundle: 1 params with-transaction
+ bundle2-input-part: "changegroup" (params: 1 mandatory 1 advisory) supported
+ adding changesets
+ add changeset 53245c60e682
+ add changeset aaff8d2ffbbf
+ adding manifests
+ adding file changes
+ adding bar revisions
+ adding foo revisions
+ bundle2-input-part: total payload size 920
+ bundle2-input-part: "cache:rev-branch-cache" (advisory) supported
+ bundle2-input-part: total payload size 59
+ bundle2-input-bundle: 2 parts total
+ updating the branch cache
+ added 2 changesets with 2 changes to 2 files
+ finished applying clone bundle
+ query 1; heads
+ sending batch command
+ searching for changes
+ all remote heads known locally
+ no changes found
+ sending getbundle command
+ bundle2-input-bundle: with-transaction
+ bundle2-input-part: "listkeys" (params: 1 mandatory) supported
+ bundle2-input-part: "phase-heads" supported
+ bundle2-input-part: total payload size 24
+ bundle2-input-bundle: 2 parts total
+ checking for updated bookmarks
+ 2 local changesets published
+ calling hook changegroup.lfiles: hgext.largefiles.reposetup.checkrequireslfiles
+ (sent 4 HTTP requests and * bytes; received * bytes in responses) (glob)
+ $ killdaemons.py
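
The new scenario exercises the REQUIREDRAM manifest attribute: judging by the
debug output above, the client skips a bundle whose declared memory
requirement exceeds two thirds of ui.available-memory and falls back to a
regular clone. A rough sketch of that filter (filterbundles and the entry
tuples are illustrative, not Mercurial's internals):

    def filterbundles(entries, available_memory):
        # Keep only bundles whose declared RAM requirement fits in
        # 2/3 of the memory the client believes it has.
        kept = []
        for url, requiredram in entries:
            if requiredram > 2 * available_memory // 3:
                print('filtering %s as it needs more than 2/3 of '
                      'system memory' % url)
                continue
            kept.append(url)
        return kept

    MB = 1024 * 1024
    # A 12MB bundle is filtered on a 16MB system, kept on a 32MB one.
    print(filterbundles([('http://localhost/gz-a.hg', 12 * MB)], 16 * MB))
    print(filterbundles([('http://localhost/gz-a.hg', 12 * MB)], 32 * MB))
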
--- a/tests/test-commandserver.t Tue Jul 14 10:25:41 2020 +0200
+++ b/tests/test-commandserver.t Mon Jul 20 21:56:27 2020 +0530
@@ -734,6 +734,51 @@
$ cd ..
+#if no-windows
+
+option to not shut down on SIGINT:
+
+ $ cat <<'EOF' > dbgint.py
+ > import os
+ > import signal
+ > import time
+ > from mercurial import commands, registrar
+ > cmdtable = {}
+ > command = registrar.command(cmdtable)
+ > @command(b"debugsleep", norepo=True)
+ > def debugsleep(ui):
+ > time.sleep(1)
+ > @command(b"debugsuicide", norepo=True)
+ > def debugsuicide(ui):
+ > os.kill(os.getpid(), signal.SIGINT)
+ > time.sleep(1)
+ > EOF
+
+ >>> import signal
+ >>> import time
+ >>> from hgclient import checkwith, readchannel, runcommand
+ >>> @checkwith(extraargs=[b'--config', b'cmdserver.shutdown-on-interrupt=False',
+ ... b'--config', b'extensions.dbgint=dbgint.py'])
+ ... def nointr(server):
+ ... readchannel(server)
+ ... server.send_signal(signal.SIGINT) # server won't be terminated
+ ... time.sleep(1)
+ ... runcommand(server, [b'debugsleep'])
+ ... server.send_signal(signal.SIGINT) # server won't be terminated
+ ... runcommand(server, [b'debugsleep'])
+ ... runcommand(server, [b'debugsuicide']) # command can be interrupted
+ ... server.send_signal(signal.SIGTERM) # server will be terminated
+ ... time.sleep(1)
+ *** runcommand debugsleep
+ *** runcommand debugsleep
+ *** runcommand debugsuicide
+ interrupted!
+ killed!
+ [255]
+
+#endif
+
+
structured message channel:
$ cat <<'EOF' >> repo2/.hg/hgrc
--- a/tests/test-completion.t Tue Jul 14 10:25:41 2020 +0200
+++ b/tests/test-completion.t Mon Jul 20 21:56:27 2020 +0530
@@ -74,6 +74,7 @@
Show debug commands if there are no other candidates
$ hg debugcomplete debug
debugancestor
+ debugantivirusrunning
debugapplystreamclonebundle
debugbackupbundle
debugbuilddag
@@ -121,6 +122,7 @@
debugrebuilddirstate
debugrebuildfncache
debugrename
+ debugrequires
debugrevlog
debugrevlogindex
debugrevspec
@@ -260,6 +262,7 @@
continue: dry-run
copy: forget, after, at-rev, force, include, exclude, dry-run
debugancestor:
+ debugantivirusrunning:
debugapplystreamclonebundle:
debugbackupbundle: recover, patch, git, limit, no-merges, stat, graph, style, template
debugbuilddag: mergeable-file, overwritten-file, new-file
@@ -306,6 +309,7 @@
debugrebuilddirstate: rev, minimal
debugrebuildfncache:
debugrename: rev
+ debugrequires:
debugrevlog: changelog, manifest, dir, dump
debugrevlogindex: changelog, manifest, dir, format
debugrevspec: optimize, show-revs, show-set, show-stage, no-optimized, verify-optimized
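
The newly listed debugrequires command prints the repository requirements; the
same tokens live in the .hg/requires file, one per line. For illustration, a
way to read them without the command (read_requires is a hypothetical helper):

    import os

    def read_requires(repopath):
        # .hg/requires lists one requirement token per line.
        with open(os.path.join(repopath, '.hg', 'requires')) as f:
            return {line.strip() for line in f if line.strip()}

    # e.g. {'dotencode', 'fncache', 'generaldelta', 'store', ...}
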
--- a/tests/test-contrib-perf.t Tue Jul 14 10:25:41 2020 +0200
+++ b/tests/test-contrib-perf.t Mon Jul 20 21:56:27 2020 +0530
@@ -180,7 +180,7 @@
perfvolatilesets
benchmark the computation of various volatile set
perfwalk (no help text available)
- perfwrite microbenchmark ui.write
+ perfwrite microbenchmark ui.write (and others)
(use 'hg help -v perf' to show built-in aliases and global options)
$ hg perfaddremove
--- a/tests/test-copies-chain-merge.t Tue Jul 14 10:25:41 2020 +0200
+++ b/tests/test-copies-chain-merge.t Mon Jul 20 21:56:27 2020 +0530
@@ -1,3 +1,5 @@
+#testcases filelog compatibility sidedata
+
=====================================================
Test Copy tracing for chain of copies involving merge
=====================================================
@@ -6,6 +8,7 @@
 are involved. It checks that we do not have unwanted changes of behavior and that the
different options to retrieve copies behave correctly.
+
Setup
=====
@@ -18,6 +21,22 @@
> logtemplate={rev} {desc}\n
> EOF
+#if compatibility
+ $ cat >> $HGRCPATH << EOF
+ > [experimental]
+ > copies.read-from = compatibility
+ > EOF
+#endif
+
+#if sidedata
+ $ cat >> $HGRCPATH << EOF
+ > [format]
+ > exp-use-side-data = yes
+ > exp-use-copies-side-data-changeset = yes
+ > EOF
+#endif
+
+
$ hg init repo-chain
$ cd repo-chain
@@ -453,17 +472,26 @@
0 4 0dd616bc7ab1 000000000000 000000000000
1 10 6da5a2eecb9c 000000000000 000000000000
2 19 eb806e34ef6b 0dd616bc7ab1 6da5a2eecb9c
+
+# Here the filelog based implementation is not looking at the rename
+# information (because the file exists on both sides). However the changelog
+# based one works fine, so we get different output.
+
$ hg status --copies --rev 'desc("a-2")' --rev 'desc("mAEm-0")'
M f
+ b (no-filelog !)
R b
$ hg status --copies --rev 'desc("a-2")' --rev 'desc("mEAm-0")'
M f
+ b (no-filelog !)
R b
$ hg status --copies --rev 'desc("e-2")' --rev 'desc("mAEm-0")'
M f
+ d (no-filelog !)
R d
$ hg status --copies --rev 'desc("e-2")' --rev 'desc("mEAm-0")'
M f
+ d (no-filelog !)
R d
$ hg status --copies --rev 'desc("i-2")' --rev 'desc("a-2")'
A f
@@ -473,6 +501,18 @@
A f
b
R b
+
+# From here, we run status against revisions where both source files exist.
+#
+# The filelog based implementation picks an arbitrary side based on revision
+# numbers. So the same side "wins" whatever the parents order is. This is
+# sub-optimal because depending on revision numbers means the result can be
+# different from one repository to the next.
+#
+# The changeset based algorithm uses the parent order to break ties on
+# conflicting information and will have a different order depending on who
+# is p1 and p2. That order is stable across repositories. (data from p1
+# prevails)
+
$ hg status --copies --rev 'desc("i-2")' --rev 'desc("mAEm-0")'
A f
d
@@ -480,7 +520,8 @@
R d
$ hg status --copies --rev 'desc("i-2")' --rev 'desc("mEAm-0")'
A f
- d
+ d (filelog !)
+ b (no-filelog !)
R b
R d
$ hg status --copies --rev 'desc("i-0")' --rev 'desc("mAEm-0")'
@@ -490,7 +531,8 @@
R b
$ hg status --copies --rev 'desc("i-0")' --rev 'desc("mEAm-0")'
A f
- a
+ a (filelog !)
+ b (no-filelog !)
R a
R b
@@ -563,21 +605,25 @@
R h
$ hg status --copies --rev 'desc("b-1")' --rev 'desc("mBFm-0")'
M d
+ h (no-filelog !)
R h
$ hg status --copies --rev 'desc("f-2")' --rev 'desc("mBFm-0")'
M b
$ hg status --copies --rev 'desc("f-1")' --rev 'desc("mBFm-0")'
M b
M d
+ i (no-filelog !)
R i
$ hg status --copies --rev 'desc("b-1")' --rev 'desc("mFBm-0")'
M d
+ h (no-filelog !)
R h
$ hg status --copies --rev 'desc("f-2")' --rev 'desc("mFBm-0")'
M b
$ hg status --copies --rev 'desc("f-1")' --rev 'desc("mFBm-0")'
M b
M d
+ i (no-filelog !)
R i
The following graphlog is wrong, the "a -> c -> d" chain was overwritten and should not appear.
@@ -645,9 +691,15 @@
|
o 0 i-0 initial commit: a b h
+One side of the merge has a long history with renames. The other side of the
+merge points to a new file with a smaller history. Each side is "valid".
+
+(and again the filelog based algorithm only explores one, with a pick based on
+revision numbers)
+
$ hg status --copies --rev 'desc("i-0")' --rev 'desc("mDGm-0")'
A d
- a
+ a (filelog !)
R a
$ hg status --copies --rev 'desc("i-0")' --rev 'desc("mGDm-0")'
A d
@@ -740,7 +792,8 @@
$ hg status --copies --rev 'desc("i-0")' --rev 'desc("mFGm-0")'
A d
- a
+ h (no-filelog !)
+ a (filelog !)
R a
R h
$ hg status --copies --rev 'desc("i-0")' --rev 'desc("mGFm-0")'
@@ -754,15 +807,19 @@
M d
$ hg status --copies --rev 'desc("f-1")' --rev 'desc("mFGm-0")'
M d
+ i (no-filelog !)
R i
$ hg status --copies --rev 'desc("f-1")' --rev 'desc("mGFm-0")'
M d
+ i (no-filelog !)
R i
$ hg status --copies --rev 'desc("g-1")' --rev 'desc("mFGm-0")'
M d
+ h (no-filelog !)
R h
$ hg status --copies --rev 'desc("g-1")' --rev 'desc("mGFm-0")'
M d
+ h (no-filelog !)
R h
$ hg log -Gfr 'desc("mFGm-0")' d
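
The comments added above come down to how ties between copy sources are
broken: the filelog based algorithm picks by revision number, which is
repository dependent, while the changeset based one lets data from p1 prevail,
which is stable across clones. A toy sketch of the two policies, with plain
dictionaries standing in for per-parent copy information (not the real copies
code):

    def merge_copies_changeset(p1_copies, p2_copies):
        # Changeset-based policy: on conflict, data from p1
        # prevails, so the result depends only on parent order.
        merged = dict(p2_copies)
        merged.update(p1_copies)
        return merged

    def merge_copies_filelog(copies_by_rev):
        # Filelog-based policy: the side with the higher revision
        # number wins, regardless of which parent it came from.
        merged = {}
        for rev, copies in sorted(copies_by_rev.items()):
            merged.update(copies)
        return merged

    p1 = {'f': 'd'}  # p1 says f was copied from d
    p2 = {'f': 'b'}  # p2 says f was copied from b
    print(merge_copies_changeset(p1, p2))          # {'f': 'd'}, p1 wins
    print(merge_copies_filelog({10: p1, 20: p2}))  # {'f': 'b'}, higher rev wins
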
--- a/tests/test-copies-in-changeset.t Tue Jul 14 10:25:41 2020 +0200
+++ b/tests/test-copies-in-changeset.t Mon Jul 20 21:56:27 2020 +0530
@@ -33,28 +33,30 @@
$ cd repo
#if sidedata
$ hg debugformat -v
- format-variant repo config default
- fncache: yes yes yes
- dotencode: yes yes yes
- generaldelta: yes yes yes
- sparserevlog: yes yes yes
- sidedata: yes yes no
- copies-sdc: yes yes no
- plain-cl-delta: yes yes yes
- compression: zlib zlib zlib
- compression-level: default default default
+ format-variant repo config default
+ fncache: yes yes yes
+ dotencode: yes yes yes
+ generaldelta: yes yes yes
+ sparserevlog: yes yes yes
+ sidedata: yes yes no
+ persistent-nodemap: no no no
+ copies-sdc: yes yes no
+ plain-cl-delta: yes yes yes
+ compression: zlib zlib zlib
+ compression-level: default default default
#else
$ hg debugformat -v
- format-variant repo config default
- fncache: yes yes yes
- dotencode: yes yes yes
- generaldelta: yes yes yes
- sparserevlog: yes yes yes
- sidedata: no no no
- copies-sdc: no no no
- plain-cl-delta: yes yes yes
- compression: zlib zlib zlib
- compression-level: default default default
+ format-variant repo config default
+ fncache: yes yes yes
+ dotencode: yes yes yes
+ generaldelta: yes yes yes
+ sparserevlog: yes yes yes
+ sidedata: no no no
+ persistent-nodemap: no no no
+ copies-sdc: no no no
+ plain-cl-delta: yes yes yes
+ compression: zlib zlib zlib
+ compression-level: default default default
#endif
$ echo a > a
$ hg add a
@@ -424,16 +426,17 @@
downgrading (keeping some sidedata)
$ hg debugformat -v
- format-variant repo config default
- fncache: yes yes yes
- dotencode: yes yes yes
- generaldelta: yes yes yes
- sparserevlog: yes yes yes
- sidedata: yes yes no
- copies-sdc: yes yes no
- plain-cl-delta: yes yes yes
- compression: zlib zlib zlib
- compression-level: default default default
+ format-variant repo config default
+ fncache: yes yes yes
+ dotencode: yes yes yes
+ generaldelta: yes yes yes
+ sparserevlog: yes yes yes
+ sidedata: yes yes no
+ persistent-nodemap: no no no
+ copies-sdc: yes yes no
+ plain-cl-delta: yes yes yes
+ compression: zlib zlib zlib
+ compression-level: default default default
$ hg debugsidedata -c -- 0
1 sidedata entries
entry-0012 size 1
@@ -448,16 +451,17 @@
> EOF
$ hg debugupgraderepo --run --quiet --no-backup > /dev/null
$ hg debugformat -v
- format-variant repo config default
- fncache: yes yes yes
- dotencode: yes yes yes
- generaldelta: yes yes yes
- sparserevlog: yes yes yes
- sidedata: yes yes no
- copies-sdc: no no no
- plain-cl-delta: yes yes yes
- compression: zlib zlib zlib
- compression-level: default default default
+ format-variant repo config default
+ fncache: yes yes yes
+ dotencode: yes yes yes
+ generaldelta: yes yes yes
+ sparserevlog: yes yes yes
+ sidedata: yes yes no
+ persistent-nodemap: no no no
+ copies-sdc: no no no
+ plain-cl-delta: yes yes yes
+ compression: zlib zlib zlib
+ compression-level: default default default
$ hg debugsidedata -c -- 0
$ hg debugsidedata -c -- 1
$ hg debugsidedata -m -- 0
@@ -470,16 +474,17 @@
> EOF
$ hg debugupgraderepo --run --quiet --no-backup > /dev/null
$ hg debugformat -v
- format-variant repo config default
- fncache: yes yes yes
- dotencode: yes yes yes
- generaldelta: yes yes yes
- sparserevlog: yes yes yes
- sidedata: yes yes no
- copies-sdc: yes yes no
- plain-cl-delta: yes yes yes
- compression: zlib zlib zlib
- compression-level: default default default
+ format-variant repo config default
+ fncache: yes yes yes
+ dotencode: yes yes yes
+ generaldelta: yes yes yes
+ sparserevlog: yes yes yes
+ sidedata: yes yes no
+ persistent-nodemap: no no no
+ copies-sdc: yes yes no
+ plain-cl-delta: yes yes yes
+ compression: zlib zlib zlib
+ compression-level: default default default
$ hg debugsidedata -c -- 0
1 sidedata entries
entry-0012 size 1
--- a/tests/test-copytrace-heuristics.t Tue Jul 14 10:25:41 2020 +0200
+++ b/tests/test-copytrace-heuristics.t Mon Jul 20 21:56:27 2020 +0530
@@ -91,7 +91,7 @@
file 'a' was deleted in local [dest] but was modified in other [source].
You can use (c)hanged version, leave (d)eleted, or leave (u)nresolved.
What do you want to do? u
- unresolved conflicts (see hg resolve, then hg rebase --continue)
+ unresolved conflicts (see 'hg resolve', then 'hg rebase --continue')
[1]
$ cd ..
@@ -248,7 +248,7 @@
file 'a' was deleted in local [dest] but was modified in other [source].
You can use (c)hanged version, leave (d)eleted, or leave (u)nresolved.
What do you want to do? u
- unresolved conflicts (see hg resolve, then hg rebase --continue)
+ unresolved conflicts (see 'hg resolve', then 'hg rebase --continue')
[1]
$ hg rebase --abort
@@ -710,7 +710,7 @@
file 'a' was deleted in local [dest] but was modified in other [source].
You can use (c)hanged version, leave (d)eleted, or leave (u)nresolved.
What do you want to do? u
- unresolved conflicts (see hg resolve, then hg rebase --continue)
+ unresolved conflicts (see 'hg resolve', then 'hg rebase --continue')
[1]
But when we have "sourcecommitlimit > (no. of drafts from base to c1)", we do
--- a/tests/test-debugcommands.t Tue Jul 14 10:25:41 2020 +0200
+++ b/tests/test-debugcommands.t Mon Jul 20 21:56:27 2020 +0530
@@ -573,11 +573,13 @@
$ cat > debugstacktrace.py << EOF
> from __future__ import absolute_import
> from mercurial import (
- > pycompat,
> util,
> )
+ > from mercurial.utils import (
+ > procutil,
+ > )
> def f():
- > util.debugstacktrace(f=pycompat.stdout)
+ > util.debugstacktrace(f=procutil.stdout)
> g()
> def g():
> util.dst(b'hello from g\\n', skip=1)
@@ -588,15 +590,15 @@
> EOF
$ "$PYTHON" debugstacktrace.py
stacktrace at:
- *debugstacktrace.py:14 in * (glob)
- *debugstacktrace.py:7 in f (glob)
+ *debugstacktrace.py:16 in * (glob)
+ *debugstacktrace.py:9 in f (glob)
hello from g at:
- *debugstacktrace.py:14 in * (glob)
- *debugstacktrace.py:8 in f (glob)
+ *debugstacktrace.py:16 in * (glob)
+ *debugstacktrace.py:10 in f (glob)
hi ...
from h hidden in g at:
- *debugstacktrace.py:8 in f (glob)
- *debugstacktrace.py:11 in g (glob)
+ *debugstacktrace.py:10 in f (glob)
+ *debugstacktrace.py:13 in g (glob)
Test debugcapabilities command:
--- a/tests/test-devel-warnings.t Tue Jul 14 10:25:41 2020 +0200
+++ b/tests/test-devel-warnings.t Mon Jul 20 21:56:27 2020 +0530
@@ -140,6 +140,7 @@
*/mercurial/commandserver.py:* in serveone (glob)
*/mercurial/chgserver.py:* in runcommand (glob)
*/mercurial/commandserver.py:* in runcommand (glob)
+ */mercurial/commandserver.py:* in _dispatchcommand (glob)
*/mercurial/dispatch.py:* in dispatch (glob)
*/mercurial/dispatch.py:* in _runcatch (glob)
*/mercurial/dispatch.py:* in _callcatch (glob)
@@ -220,6 +221,7 @@
*/mercurial/commandserver.py:* in serveone (glob)
*/mercurial/chgserver.py:* in runcommand (glob)
*/mercurial/commandserver.py:* in runcommand (glob)
+ */mercurial/commandserver.py:* in _dispatchcommand (glob)
*/mercurial/dispatch.py:* in dispatch (glob)
*/mercurial/dispatch.py:* in _runcatch (glob)
*/mercurial/dispatch.py:* in _callcatch (glob)
@@ -289,6 +291,7 @@
*/mercurial/commandserver.py:* in serveone (glob)
*/mercurial/chgserver.py:* in runcommand (glob)
*/mercurial/commandserver.py:* in runcommand (glob)
+ */mercurial/commandserver.py:* in _dispatchcommand (glob)
*/mercurial/dispatch.py:* in dispatch (glob)
*/mercurial/dispatch.py:* in _runcatch (glob)
*/mercurial/dispatch.py:* in _callcatch (glob)
--- a/tests/test-dirstate.t Tue Jul 14 10:25:41 2020 +0200
+++ b/tests/test-dirstate.t Mon Jul 20 21:56:27 2020 +0530
@@ -70,14 +70,15 @@
> from mercurial import (
> error,
> extensions,
- > merge,
+ > mergestate as mergestatemod,
> )
>
> def wraprecordupdates(*args):
> raise error.Abort("simulated error while recording dirstateupdates")
>
> def reposetup(ui, repo):
- > extensions.wrapfunction(merge, 'recordupdates', wraprecordupdates)
+ > extensions.wrapfunction(mergestatemod, 'recordupdates',
+ > wraprecordupdates)
> EOF
$ hg rm a
--- a/tests/test-extension.t Tue Jul 14 10:25:41 2020 +0200
+++ b/tests/test-extension.t Mon Jul 20 21:56:27 2020 +0530
@@ -152,21 +152,25 @@
> from __future__ import print_function
> import os
> from mercurial import exthelper
+ > from mercurial.utils import procutil
+ >
+ > write = procutil.stdout.write
> name = os.path.basename(__file__).rsplit('.', 1)[0]
- > print("1) %s imported" % name, flush=True)
+ > bytesname = name.encode('utf-8')
+ > write(b"1) %s imported\n" % bytesname)
> eh = exthelper.exthelper()
> @eh.uisetup
> def _uisetup(ui):
- > print("2) %s uisetup" % name, flush=True)
+ > write(b"2) %s uisetup\n" % bytesname)
> @eh.extsetup
> def _extsetup(ui):
- > print("3) %s extsetup" % name, flush=True)
+ > write(b"3) %s extsetup\n" % bytesname)
> @eh.uipopulate
> def _uipopulate(ui):
- > print("4) %s uipopulate" % name, flush=True)
+ > write(b"4) %s uipopulate\n" % bytesname)
> @eh.reposetup
> def _reposetup(ui, repo):
- > print("5) %s reposetup" % name, flush=True)
+ > write(b"5) %s reposetup\n" % bytesname)
>
> extsetup = eh.finalextsetup
> reposetup = eh.finalreposetup
@@ -174,7 +178,6 @@
> uisetup = eh.finaluisetup
> revsetpredicate = eh.revsetpredicate
>
- > bytesname = name.encode('utf-8')
> # custom predicate to check registration of functions at loading
> from mercurial import (
> smartset,
@@ -1556,8 +1559,8 @@
Enabled extensions:
+ strip internal
throw external 1.twentythree
- strip internal
$ hg version -q --config extensions.throw=throw.py
Mercurial Distributed SCM (version *) (glob)
@@ -1597,8 +1600,8 @@
$ hg version --config extensions.throw=throw.py --config extensions.strip= \
> -T'{extensions % "{name} {pad(ver, 16)} ({if(bundled, "internal", "external")})\n"}'
+ strip (internal)
throw 1.twentythree (external)
- strip (internal)
Refuse to load extensions with minimum version requirements
@@ -1852,17 +1855,6 @@
GREPME make sure that this is in the help!
$ cd ..
-Show deprecation warning for the use of cmdutil.command
-
- $ cat > nonregistrar.py <<EOF
- > from mercurial import cmdutil
- > cmdtable = {}
- > command = cmdutil.command(cmdtable)
- > @command(b'foo', [], norepo=True)
- > def foo(ui):
- > pass
- > EOF
-
Prohibit the use of unicode strings as the default value of options
$ hg init $TESTTMP/opt-unicode-default
--- a/tests/test-fix.t Tue Jul 14 10:25:41 2020 +0200
+++ b/tests/test-fix.t Mon Jul 20 21:56:27 2020 +0530
@@ -868,7 +868,7 @@
rebasing 1:c3b6dc0e177a "foo 2" (tip)
merging foo.whole
warning: conflicts while merging foo.whole! (edit, then use 'hg resolve --mark')
- unresolved conflicts (see hg resolve, then hg rebase --continue)
+ unresolved conflicts (see 'hg resolve', then 'hg rebase --continue')
[1]
$ hg --config extensions.rebase= fix --working-dir
--- a/tests/test-git-interop.t Tue Jul 14 10:25:41 2020 +0200
+++ b/tests/test-git-interop.t Mon Jul 20 21:56:27 2020 +0530
@@ -36,8 +36,12 @@
$ cd ..
Now globally enable extension for the rest of the test:
- $ echo "[extensions]" >> $HGRCPATH
- > echo "git=" >> $HGRCPATH
+ $ cat <<EOF >> $HGRCPATH
+ > [extensions]
+ > git=
+ > [git]
+ > log-index-cache-miss = yes
+ > EOF
Make a new repo with git:
$ mkdir foo
@@ -68,6 +72,7 @@
But if you run hg init --git, it works:
$ hg init --git
$ hg id --traceback
+ heads mismatch, rebuilding dagcache
3d9be8deba43 tip master
$ hg status
? gamma
@@ -167,9 +172,12 @@
$ hg ci -m 'more alpha' --traceback --date '1583522787 18000'
$ echo b >> beta
$ hg ci -m 'more beta'
+ heads mismatch, rebuilding dagcache
$ echo a >> alpha
$ hg ci -m 'even more alpha'
+ heads mismatch, rebuilding dagcache
$ hg log -G alpha
+ heads mismatch, rebuilding dagcache
@ changeset: 4:6626247b7dc8
: bookmark: master
: tag: tip
@@ -199,6 +207,9 @@
summary: Add beta
+ $ hg log -r "children(3d9be8deba43)" -T"{node|short} {children}\n"
+ a1983dd7fb19 3:d8ee22687733
+
hg annotate
$ hg annotate alpha
@@ -235,6 +246,7 @@
On branch master
nothing to commit, working tree clean
$ hg status
+ heads mismatch, rebuilding dagcache
node|shortest works correctly
@@ -248,3 +260,13 @@
$ hg log -r ae1ab744f95bfd5b07cf573baef98a778058537b --template "{shortest(node,1)}\n"
ae
+This covers changelog.findmissing()
+ $ hg merge --preview 3d9be8deba43
+
+This covers manifest.diff()
+ $ hg diff -c 3d9be8deba43
+ diff -r c5864c9d16fb -r 3d9be8deba43 beta
+ --- /dev/null Thu Jan 01 00:00:00 1970 +0000
+ +++ b/beta Mon Jan 01 00:00:11 2007 +0000
+ @@ -0,0 +1,1 @@
+ +beta
--- a/tests/test-githelp.t Tue Jul 14 10:25:41 2020 +0200
+++ b/tests/test-githelp.t Mon Jul 20 21:56:27 2020 +0530
@@ -318,3 +318,10 @@
hg journal --all
note: in hg commits can be deleted from repo but we always have backups
+
+ $ hg githelp -- git log -Gnarf
+ hg grep --diff narf
+ $ hg githelp -- git log -S narf
+ hg grep --diff narf
+ $ hg githelp -- git log --pickaxe-regex narf
+ hg grep --diff narf
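
All three new githelp cases normalize to the same command: -G<regex>,
-S<string> and --pickaxe-regex are git's pickaxe-style history searches, and
the nearest hg equivalent is hg grep --diff. A stripped-down sketch of the
mapping (just the shape of it, not githelp's actual table):

    def translate_git_log(args):
        # git's pickaxe flags all search the history of diffs,
        # which is what 'hg grep --diff' does.
        it = iter(args)
        for arg in it:
            if arg.startswith('-G'):
                return ['hg', 'grep', '--diff', arg[2:]]
            if arg in ('-S', '--pickaxe-regex'):
                return ['hg', 'grep', '--diff', next(it)]
        return ['hg', 'log']

    print(translate_git_log(['-Gnarf']))                   # hg grep --diff narf
    print(translate_git_log(['-S', 'narf']))               # hg grep --diff narf
    print(translate_git_log(['--pickaxe-regex', 'narf']))  # hg grep --diff narf
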
--- a/tests/test-graft-interrupted.t Tue Jul 14 10:25:41 2020 +0200
+++ b/tests/test-graft-interrupted.t Mon Jul 20 21:56:27 2020 +0530
@@ -249,17 +249,17 @@
[1]
$ hg graft --stop --continue
- abort: cannot use '--continue' and '--stop' together
+ abort: cannot specify both --stop and --continue
[255]
$ hg graft --stop -U
- abort: cannot specify any other flag with '--stop'
+ abort: cannot specify both --stop and --user
[255]
$ hg graft --stop --rev 4
- abort: cannot specify any other flag with '--stop'
+ abort: cannot specify both --stop and --rev
[255]
$ hg graft --stop --log
- abort: cannot specify any other flag with '--stop'
+ abort: cannot specify both --stop and --log
[255]
$ hg graft --stop
@@ -355,19 +355,19 @@
[1]
$ hg graft --continue --abort
- abort: cannot use '--continue' and '--abort' together
+ abort: cannot specify both --abort and --continue
[255]
$ hg graft --abort --stop
- abort: cannot use '--abort' and '--stop' together
+ abort: cannot specify both --abort and --stop
[255]
$ hg graft --abort --currentuser
- abort: cannot specify any other flag with '--abort'
+ abort: cannot specify both --abort and --user
[255]
$ hg graft --abort --edit
- abort: cannot specify any other flag with '--abort'
+ abort: cannot specify both --abort and --edit
[255]
#if abortcommand
@@ -553,15 +553,15 @@
Check reporting when --no-commit used with non-applicable options:
$ hg graft 1 --no-commit -e
- abort: cannot specify --no-commit and --edit together
+ abort: cannot specify both --no-commit and --edit
[255]
$ hg graft 1 --no-commit --log
- abort: cannot specify --no-commit and --log together
+ abort: cannot specify both --no-commit and --log
[255]
$ hg graft 1 --no-commit -D
- abort: cannot specify --no-commit and --currentdate together
+ abort: cannot specify both --no-commit and --currentdate
[255]
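
The reworded aborts above suggest the graft option checks were funneled
through a shared helper so that every conflicting pair reports uniformly as
"cannot specify both --X and --Y". A sketch of such a checker under that
assumption (names are hypothetical):

    class Abort(Exception):
        pass

    def check_incompatible(opts, first, others):
        # If 'first' is set, no option from 'others' may be set;
        # report the first clash with a uniform message.
        if opts.get(first):
            for other in others:
                if opts.get(other):
                    raise Abort('cannot specify both --%s and --%s'
                                % (first, other))

    # e.g. for graft, --stop excludes --continue, --rev, --user, --log;
    # here nothing clashes, so the call returns silently.
    check_incompatible({'stop': True, 'rev': []}, 'stop',
                       ['continue', 'rev', 'user', 'log'])
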
Test --no-commit is working:
--- a/tests/test-grep.t Tue Jul 14 10:25:41 2020 +0200
+++ b/tests/test-grep.t Mon Jul 20 21:56:27 2020 +0530
@@ -645,22 +645,45 @@
$ hg init sng
$ cd sng
$ echo "unmod" >> um
- $ hg ci -A -m "adds unmod to um"
- adding um
+ $ echo old > old
+ $ hg ci -q -A -m "adds unmod to um"
$ echo "something else" >> new
$ hg ci -A -m "second commit"
adding new
$ hg grep -r "." "unmod"
um:1:unmod
-Working directory is searched by default
+Existing tracked files in the working directory are searched by default
$ echo modified >> new
- $ hg grep mod
+ $ echo 'added' > added; hg add added
+ $ echo 'added, missing' > added-missing; hg add added-missing; rm added-missing
+ $ echo 'untracked' > untracked
+ $ hg rm old
+ $ hg grep ''
+ added:added
+ new:something else
new:modified
um:unmod
- which can be overridden by -rREV
+#if symlink
+Grepping a symlink greps its destination
+
+ $ rm -f added; ln -s symlink-added added
+ $ hg grep '' | grep added
+ added:symlink-added
+
+But we reject symlinks as directory components of a tracked file, as
+usual:
+
+ $ mkdir dir; touch dir/f; hg add dir/f
+ $ rm -rf dir; ln -s / dir
+ $ hg grep ''
+ abort: path 'dir/f' traverses symbolic link 'dir'
+ [255]
+#endif
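
The rejection just above is the usual path audit: before touching a tracked
file, every directory component of its path is checked, and a component that
turns out to be a symlink aborts the operation. A simplified stand-alone
version of the check (POSIX paths only; not the pathutil implementation):

    import os

    def audit_path(root, path):
        # Walk the directory components of 'path' under 'root' and
        # refuse any component that is a symbolic link.
        parts = path.split('/')
        for i in range(1, len(parts)):
            component = os.path.join(root, *parts[:i])
            if os.path.islink(component):
                raise OSError("path '%s' traverses symbolic link '%s'"
                              % (path, '/'.join(parts[:i])))

    # With 'dir' replaced by a symlink to /, audit_path('.', 'dir/f')
    # raises, matching the abort in the test above.
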
+
+But we can search files from some other revision with -rREV
$ hg grep -r. mod
um:1:unmod
@@ -670,17 +693,6 @@
$ cd ..
-Fix_Wdir(): test that passing wdir() t -r flag does greps on the
-files modified in the working directory
-
- $ cd a
- $ echo "abracadara" >> a
- $ hg add a
- $ hg grep -r "wdir()" "abra"
- a:2147483647:abracadara
-
- $ cd ..
-
 Change default of grep by ui.tweakdefaults, that is, the files not in current
 working directory should not be grepped on
--- a/tests/test-help.t Tue Jul 14 10:25:41 2020 +0200
+++ b/tests/test-help.t Mon Jul 20 21:56:27 2020 +0530
@@ -730,7 +730,7 @@
-m --modified show only modified files
-a --added show only added files
-r --removed show only removed files
- -d --deleted show only deleted (but tracked) files
+ -d --deleted show only missing files
-c --clean show only files without changes
-u --unknown show only unknown (not tracked) files
-i --ignored show only ignored files
@@ -971,6 +971,8 @@
debugancestor
find the ancestor revision of two revisions in a given index
+ debugantivirusrunning
+ attempt to trigger an antivirus scanner to see if one is active
debugapplystreamclonebundle
apply a stream clone bundle file
debugbackupbundle
@@ -1051,6 +1053,8 @@
debugrebuildfncache
rebuild the fncache file
debugrename dump rename information
+ debugrequires
+ print the current repo requirements
debugrevlog show data and statistics about a revlog
debugrevlogindex
dump the contents of a revlog index
@@ -1094,6 +1098,7 @@
To access a subtopic, use "hg help internals.{subtopic-name}"
+ bid-merge Bid Merge Algorithm
bundle2 Bundle2
bundles Bundles
cbor CBOR
@@ -1917,9 +1922,9 @@
> EOF
$ "$PYTHON" <<EOF | sh
- > from mercurial import pycompat
+ > from mercurial.utils import procutil
> upper = b"\x8bL\x98^"
- > pycompat.stdout.write(b"hg --encoding cp932 help -e ambiguous.%s\n" % upper)
+ > procutil.stdout.write(b"hg --encoding cp932 help -e ambiguous.%s\n" % upper)
> EOF
\x8bL\x98^ (esc)
----
@@ -1928,9 +1933,9 @@
$ "$PYTHON" <<EOF | sh
- > from mercurial import pycompat
+ > from mercurial.utils import procutil
> lower = b"\x8bl\x98^"
- > pycompat.stdout.write(b"hg --encoding cp932 help -e ambiguous.%s\n" % lower)
+ > procutil.stdout.write(b"hg --encoding cp932 help -e ambiguous.%s\n" % lower)
> EOF
\x8bl\x98^ (esc)
----
@@ -3439,6 +3444,13 @@
<tr><td colspan="2"><h2><a name="topics" href="#topics">Topics</a></h2></td></tr>
<tr><td>
+ <a href="/help/internals.bid-merge">
+ bid-merge
+ </a>
+ </td><td>
+ Bid Merge Algorithm
+ </td></tr>
+ <tr><td>
<a href="/help/internals.bundle2">
bundle2
</a>
--- a/tests/test-hgweb-non-interactive.t Tue Jul 14 10:25:41 2020 +0200
+++ b/tests/test-hgweb-non-interactive.t Mon Jul 20 21:56:27 2020 +0530
@@ -14,10 +14,12 @@
> dispatch,
> encoding,
> hg,
- > pycompat,
> ui as uimod,
> util,
> )
+ > from mercurial.utils import (
+ > procutil,
+ > )
> ui = uimod.ui
> from mercurial.hgweb import hgweb_mod
> stringio = util.stringio
@@ -69,8 +71,8 @@
> for c in i(env, startrsp):
> pass
> sys.stdout.flush()
- > pycompat.stdout.write(b'---- ERRORS\n')
- > pycompat.stdout.write(b'%s\n' % errors.getvalue())
+ > procutil.stdout.write(b'---- ERRORS\n')
+ > procutil.stdout.write(b'%s\n' % errors.getvalue())
> print('---- OS.ENVIRON wsgi variables')
> print(sorted([x for x in os.environ if x.startswith('wsgi')]))
> print('---- request.ENVIRON wsgi variables')
--- a/tests/test-histedit-edit.t Tue Jul 14 10:25:41 2020 +0200
+++ b/tests/test-histedit-edit.t Mon Jul 20 21:56:27 2020 +0530
@@ -373,6 +373,7 @@
transaction abort!
rollback completed
note: commit message saved in .hg/last-message.txt
+ note: use 'hg commit --logfile .hg/last-message.txt --edit' to reuse it
abort: pretxncommit.unexpectedabort hook exited with status 1
[255]
$ cat .hg/last-message.txt
@@ -397,6 +398,7 @@
transaction abort!
rollback completed
note: commit message saved in .hg/last-message.txt
+ note: use 'hg commit --logfile .hg/last-message.txt --edit' to reuse it
abort: pretxncommit.unexpectedabort hook exited with status 1
[255]
--- a/tests/test-hook.t Tue Jul 14 10:25:41 2020 +0200
+++ b/tests/test-hook.t Mon Jul 20 21:56:27 2020 +0530
@@ -443,7 +443,7 @@
HG_PENDING=$TESTTMP/a
transaction abort!
- txnabort Python hook: txnid,txnname
+ txnabort Python hook: changes,txnid,txnname
txnabort hook: HG_HOOKNAME=txnabort.1
HG_HOOKTYPE=txnabort
HG_TXNID=TXN:$ID$
--- a/tests/test-hooklib-changeset_obsoleted.t Tue Jul 14 10:25:41 2020 +0200
+++ b/tests/test-hooklib-changeset_obsoleted.t Mon Jul 20 21:56:27 2020 +0530
@@ -24,7 +24,7 @@
$ cat <<EOF >> b/.hg/hgrc
> [hooks]
> incoming.notify = python:hgext.notify.hook
- > pretxnclose.changeset_obsoleted = python:hgext.hooklib.changeset_obsoleted.hook
+ > txnclose.changeset_obsoleted = python:hgext.hooklib.changeset_obsoleted.hook
> EOF
$ hg --cwd b pull ../a | "$PYTHON" $TESTDIR/unwrap-message-id.py
pulling from ../a
@@ -72,6 +72,8 @@
pushing to ../b
searching for changes
no changes found
+ 1 new obsolescence markers
+ obsoleted 1 changesets
Subject: changeset abandoned
In-reply-to: <hg.81c297828fd2d5afaadf2775a6a71b74143b6451dfaac09fac939e9107a50d01@example.com>
Message-Id: <hg.d6329e9481594f0f3c8a84362b3511318bfbce50748ab1123f909eb6fbcab018@example.com>
@@ -80,5 +82,33 @@
To: baz@example.com
This changeset has been abandoned.
+
+Check that known changesets with known successors do not result in a mail.
+
+ $ hg init c
+ $ hg init d
+ $ cat <<EOF >> d/.hg/hgrc
+ > [hooks]
+ > incoming.notify = python:hgext.notify.hook
+ > txnclose.changeset_obsoleted = python:hgext.hooklib.changeset_obsoleted.hook
+ > EOF
+ $ hg --cwd c debugbuilddag '.:parent.*parent'
+ $ hg --cwd c push ../d -r 1
+ pushing to ../d
+ searching for changes
+ adding changesets
+ adding manifests
+ adding file changes
+ added 2 changesets with 0 changes to 0 files
+ $ hg --cwd c debugobsolete $(hg --cwd c log -T '{node}' -r 1) $(hg --cwd c log -T '{node}' -r 2)
1 new obsolescence markers
obsoleted 1 changesets
+ $ hg --cwd c push ../d | "$PYTHON" $TESTDIR/unwrap-message-id.py
+ pushing to ../d
+ searching for changes
+ adding changesets
+ adding manifests
+ adding file changes
+ added 1 changesets with 0 changes to 0 files (+1 heads)
+ 1 new obsolescence markers
+ obsoleted 1 changesets
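
Moving the hook from pretxnclose to txnclose means it now runs after the
transaction has committed, so the obsolescence markers added by that
transaction are visible to it; the second repository pair then checks that
pushing changesets whose successors are already known stays quiet. A
bare-bones python hook wired the same way, as a sketch (the body is
illustrative, not hooklib's logic):

    from mercurial.node import short

    def hook(ui, repo, hooktype, **kwargs):
        # txnclose fires after the transaction commits, so markers
        # added by it are already visible; the same code in a
        # pretxnclose hook would read pre-transaction state.
        unfi = repo.unfiltered()
        for rev in unfi.revs(b'obsolete()'):
            ui.write(b'changeset %s abandoned\n' % short(unfi[rev].node()))

    # hgrc wiring, as in the test:
    #   [hooks]
    #   txnclose.changeset_obsoleted = python:myhooks.hook
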
--- a/tests/test-https.t Tue Jul 14 10:25:41 2020 +0200
+++ b/tests/test-https.t Mon Jul 20 21:56:27 2020 +0530
@@ -34,7 +34,6 @@
cacert not found
$ hg in --config web.cacerts=no-such.pem https://localhost:$HGPORT/
- warning: connecting to localhost using legacy security technology (TLS 1.0); see https://mercurial-scm.org/wiki/SecureConnections for more info (?)
abort: could not find web.cacerts: no-such.pem
[255]
@@ -49,101 +48,47 @@
Our test cert is not signed by a trusted CA. It should fail to verify if
we are able to load CA certs.
-#if sslcontext defaultcacerts no-defaultcacertsloaded
+#if no-defaultcacertsloaded
$ hg clone https://localhost:$HGPORT/ copy-pull
(an attempt was made to load CA certificates but none were loaded; see https://mercurial-scm.org/wiki/SecureConnections for how to configure Mercurial to avoid this error)
abort: error: *certificate verify failed* (glob)
[255]
#endif
-#if no-sslcontext defaultcacerts
- $ hg clone https://localhost:$HGPORT/ copy-pull
- warning: connecting to localhost using legacy security technology (TLS 1.0); see https://mercurial-scm.org/wiki/SecureConnections for more info (?)
- (using CA certificates from *; if you see this message, your Mercurial install is not properly configured; see https://mercurial-scm.org/wiki/SecureConnections for how to configure Mercurial to avoid this message) (glob) (?)
- abort: error: *certificate verify failed* (glob)
- [255]
-#endif
-
-#if no-sslcontext windows
- $ hg clone https://localhost:$HGPORT/ copy-pull
- warning: connecting to localhost using legacy security technology (TLS 1.0); see https://mercurial-scm.org/wiki/SecureConnections for more info
- (unable to load Windows CA certificates; see https://mercurial-scm.org/wiki/SecureConnections for how to configure Mercurial to avoid this message)
- abort: error: *certificate verify failed* (glob)
- [255]
-#endif
-
-#if no-sslcontext osx
- $ hg clone https://localhost:$HGPORT/ copy-pull
- warning: connecting to localhost using legacy security technology (TLS 1.0); see https://mercurial-scm.org/wiki/SecureConnections for more info
- (unable to load CA certificates; see https://mercurial-scm.org/wiki/SecureConnections for how to configure Mercurial to avoid this message)
- abort: localhost certificate error: no certificate received
- (set hostsecurity.localhost:certfingerprints=sha256:20:de:b3:ad:b4:cd:a5:42:f0:74:41:1c:a2:70:1e:da:6e:c0:5c:16:9e:e7:22:0f:f1:b7:e5:6e:e4:92:af:7e config setting or use --insecure to connect insecurely)
- [255]
-#endif
-
#if defaultcacertsloaded
$ hg clone https://localhost:$HGPORT/ copy-pull
- warning: connecting to localhost using legacy security technology (TLS 1.0); see https://mercurial-scm.org/wiki/SecureConnections for more info (?)
- (using CA certificates from *; if you see this message, your Mercurial install is not properly configured; see https://mercurial-scm.org/wiki/SecureConnections for how to configure Mercurial to avoid this message) (glob) (?)
(the full certificate chain may not be available locally; see "hg help debugssl") (windows !)
abort: error: *certificate verify failed* (glob)
[255]
#endif
-#if no-defaultcacerts
- $ hg clone https://localhost:$HGPORT/ copy-pull
- warning: connecting to localhost using legacy security technology (TLS 1.0); see https://mercurial-scm.org/wiki/SecureConnections for more info (?)
- (unable to load * certificates; see https://mercurial-scm.org/wiki/SecureConnections for how to configure Mercurial to avoid this message) (glob) (?)
- abort: localhost certificate error: no certificate received
- (set hostsecurity.localhost:certfingerprints=sha256:20:de:b3:ad:b4:cd:a5:42:f0:74:41:1c:a2:70:1e:da:6e:c0:5c:16:9e:e7:22:0f:f1:b7:e5:6e:e4:92:af:7e config setting or use --insecure to connect insecurely)
- [255]
-#endif
-
Specifying a per-host certificate file that doesn't exist will abort. The full
C:/path/to/msysroot will print on Windows.
$ hg --config hostsecurity.localhost:verifycertsfile=/does/not/exist clone https://localhost:$HGPORT/
- warning: connecting to localhost using legacy security technology (TLS 1.0); see https://mercurial-scm.org/wiki/SecureConnections for more info (?)
abort: path specified by hostsecurity.localhost:verifycertsfile does not exist: */does/not/exist (glob)
[255]
A malformed per-host certificate file will raise an error
$ echo baddata > badca.pem
-#if sslcontext
$ hg --config hostsecurity.localhost:verifycertsfile=badca.pem clone https://localhost:$HGPORT/
- warning: connecting to localhost using legacy security technology (TLS 1.0); see https://mercurial-scm.org/wiki/SecureConnections for more info (?)
abort: error loading CA file badca.pem: * (glob)
(file is empty or malformed?)
[255]
-#else
- $ hg --config hostsecurity.localhost:verifycertsfile=badca.pem clone https://localhost:$HGPORT/
- warning: connecting to localhost using legacy security technology (TLS 1.0); see https://mercurial-scm.org/wiki/SecureConnections for more info (?)
- abort: error: * (glob)
- [255]
-#endif
A per-host certificate mismatching the server will fail verification
(modern ssl is able to discern whether the loaded cert is a CA cert)
-#if sslcontext
$ hg --config hostsecurity.localhost:verifycertsfile="$CERTSDIR/client-cert.pem" clone https://localhost:$HGPORT/
- warning: connecting to localhost using legacy security technology (TLS 1.0); see https://mercurial-scm.org/wiki/SecureConnections for more info (?)
(an attempt was made to load CA certificates but none were loaded; see https://mercurial-scm.org/wiki/SecureConnections for how to configure Mercurial to avoid this error)
(the full certificate chain may not be available locally; see "hg help debugssl") (windows !)
abort: error: *certificate verify failed* (glob)
[255]
-#else
- $ hg --config hostsecurity.localhost:verifycertsfile="$CERTSDIR/client-cert.pem" clone https://localhost:$HGPORT/
- warning: connecting to localhost using legacy security technology (TLS 1.0); see https://mercurial-scm.org/wiki/SecureConnections for more info (?)
- abort: error: *certificate verify failed* (glob)
- [255]
-#endif
A per-host certificate matching the server's cert will be accepted
$ hg --config hostsecurity.localhost:verifycertsfile="$CERTSDIR/pub.pem" clone -U https://localhost:$HGPORT/ perhostgood1
- warning: connecting to localhost using legacy security technology (TLS 1.0); see https://mercurial-scm.org/wiki/SecureConnections for more info (?)
requesting all changes
adding changesets
adding manifests
@@ -155,7 +100,6 @@
$ cat "$CERTSDIR/client-cert.pem" "$CERTSDIR/pub.pem" > perhost.pem
$ hg --config hostsecurity.localhost:verifycertsfile=perhost.pem clone -U https://localhost:$HGPORT/ perhostgood2
- warning: connecting to localhost using legacy security technology (TLS 1.0); see https://mercurial-scm.org/wiki/SecureConnections for more info (?)
requesting all changes
adding changesets
adding manifests
@@ -166,7 +110,6 @@
Defining both per-host certificate and a fingerprint will print a warning
$ hg --config hostsecurity.localhost:verifycertsfile="$CERTSDIR/pub.pem" --config hostsecurity.localhost:fingerprints=sha1:ecd87cd6b386d04fc1b8b41c9d8f5e168eef1c03 clone -U https://localhost:$HGPORT/ caandfingerwarning
- warning: connecting to localhost using legacy security technology (TLS 1.0); see https://mercurial-scm.org/wiki/SecureConnections for more info (?)
(hostsecurity.localhost:verifycertsfile ignored when host fingerprints defined; using host fingerprints for verification)
requesting all changes
adding changesets
@@ -180,13 +123,11 @@
Inability to verify peer certificate will result in abort
$ hg clone https://localhost:$HGPORT/ copy-pull $DISABLECACERTS
- warning: connecting to localhost using legacy security technology (TLS 1.0); see https://mercurial-scm.org/wiki/SecureConnections for more info (?)
abort: unable to verify security of localhost (no loaded CA certificates); refusing to connect
(see https://mercurial-scm.org/wiki/SecureConnections for how to configure Mercurial to avoid this error or set hostsecurity.localhost:fingerprints=sha256:20:de:b3:ad:b4:cd:a5:42:f0:74:41:1c:a2:70:1e:da:6e:c0:5c:16:9e:e7:22:0f:f1:b7:e5:6e:e4:92:af:7e to trust this server)
[255]
$ hg clone --insecure https://localhost:$HGPORT/ copy-pull
- warning: connecting to localhost using legacy security technology (TLS 1.0); see https://mercurial-scm.org/wiki/SecureConnections for more info (?)
warning: connection security to localhost is disabled per current settings; communication is susceptible to eavesdropping and tampering
requesting all changes
adding changesets
@@ -217,14 +158,12 @@
> EOF
$ hg pull $DISABLECACERTS
pulling from https://localhost:$HGPORT/
- warning: connecting to localhost using legacy security technology (TLS 1.0); see https://mercurial-scm.org/wiki/SecureConnections for more info (?)
abort: unable to verify security of localhost (no loaded CA certificates); refusing to connect
(see https://mercurial-scm.org/wiki/SecureConnections for how to configure Mercurial to avoid this error or set hostsecurity.localhost:fingerprints=sha256:20:de:b3:ad:b4:cd:a5:42:f0:74:41:1c:a2:70:1e:da:6e:c0:5c:16:9e:e7:22:0f:f1:b7:e5:6e:e4:92:af:7e to trust this server)
[255]
$ hg pull --insecure
pulling from https://localhost:$HGPORT/
- warning: connecting to localhost using legacy security technology (TLS 1.0); see https://mercurial-scm.org/wiki/SecureConnections for more info (?)
warning: connection security to localhost is disabled per current settings; communication is susceptible to eavesdropping and tampering
searching for changes
adding changesets
@@ -252,7 +191,6 @@
$ echo "cacerts=$CERTSDIR/pub.pem" >> copy-pull/.hg/hgrc
$ hg -R copy-pull pull
pulling from https://localhost:$HGPORT/
- warning: connecting to localhost using legacy security technology (TLS 1.0); see https://mercurial-scm.org/wiki/SecureConnections for more info (?)
searching for changes
no changes found
$ mv copy-pull/.hg/hgrc.bu copy-pull/.hg/hgrc
@@ -264,12 +202,10 @@
$ echo 'cacerts=$P/pub.pem' >> $HGRCPATH
$ P="$CERTSDIR" hg -R copy-pull pull
pulling from https://localhost:$HGPORT/
- warning: connecting to localhost using legacy security technology (TLS 1.0); see https://mercurial-scm.org/wiki/SecureConnections for more info (?)
searching for changes
no changes found
$ P="$CERTSDIR" hg -R copy-pull pull --insecure
pulling from https://localhost:$HGPORT/
- warning: connecting to localhost using legacy security technology (TLS 1.0); see https://mercurial-scm.org/wiki/SecureConnections for more info (?)
warning: connection security to localhost is disabled per current settings; communication is susceptible to eavesdropping and tampering
searching for changes
no changes found
@@ -278,47 +214,34 @@
$ touch emptycafile
-#if sslcontext
$ hg --config web.cacerts=emptycafile -R copy-pull pull
pulling from https://localhost:$HGPORT/
- warning: connecting to localhost using legacy security technology (TLS 1.0); see https://mercurial-scm.org/wiki/SecureConnections for more info (?)
abort: error loading CA file emptycafile: * (glob)
(file is empty or malformed?)
[255]
-#else
- $ hg --config web.cacerts=emptycafile -R copy-pull pull
- pulling from https://localhost:$HGPORT/
- warning: connecting to localhost using legacy security technology (TLS 1.0); see https://mercurial-scm.org/wiki/SecureConnections for more info (?)
- abort: error: * (glob)
- [255]
-#endif
cacert mismatch
$ hg -R copy-pull pull --config web.cacerts="$CERTSDIR/pub.pem" \
> https://$LOCALIP:$HGPORT/
pulling from https://*:$HGPORT/ (glob)
- warning: connecting to $LOCALIP using legacy security technology (TLS 1.0); see https://mercurial-scm.org/wiki/SecureConnections for more info (?)
abort: $LOCALIP certificate error: certificate is for localhost (glob)
(set hostsecurity.$LOCALIP:certfingerprints=sha256:20:de:b3:ad:b4:cd:a5:42:f0:74:41:1c:a2:70:1e:da:6e:c0:5c:16:9e:e7:22:0f:f1:b7:e5:6e:e4:92:af:7e config setting or use --insecure to connect insecurely)
[255]
$ hg -R copy-pull pull --config web.cacerts="$CERTSDIR/pub.pem" \
> https://$LOCALIP:$HGPORT/ --insecure
pulling from https://*:$HGPORT/ (glob)
- warning: connecting to $LOCALIP using legacy security technology (TLS 1.0); see https://mercurial-scm.org/wiki/SecureConnections for more info (?)
warning: connection security to $LOCALIP is disabled per current settings; communication is susceptible to eavesdropping and tampering (glob)
searching for changes
no changes found
$ hg -R copy-pull pull --config web.cacerts="$CERTSDIR/pub-other.pem"
pulling from https://localhost:$HGPORT/
- warning: connecting to localhost using legacy security technology (TLS 1.0); see https://mercurial-scm.org/wiki/SecureConnections for more info (?)
(the full certificate chain may not be available locally; see "hg help debugssl") (windows !)
abort: error: *certificate verify failed* (glob)
[255]
$ hg -R copy-pull pull --config web.cacerts="$CERTSDIR/pub-other.pem" \
> --insecure
pulling from https://localhost:$HGPORT/
- warning: connecting to localhost using legacy security technology (TLS 1.0); see https://mercurial-scm.org/wiki/SecureConnections for more info (?)
warning: connection security to localhost is disabled per current settings; communication is susceptible to eavesdropping and tampering
searching for changes
no changes found
@@ -330,7 +253,6 @@
$ hg -R copy-pull pull --config web.cacerts="$CERTSDIR/pub-not-yet.pem" \
> https://localhost:$HGPORT1/
pulling from https://localhost:$HGPORT1/
- warning: connecting to localhost using legacy security technology (TLS 1.0); see https://mercurial-scm.org/wiki/SecureConnections for more info (?)
(the full certificate chain may not be available locally; see "hg help debugssl") (windows !)
abort: error: *certificate verify failed* (glob)
[255]
@@ -342,40 +264,17 @@
$ hg -R copy-pull pull --config web.cacerts="$CERTSDIR/pub-expired.pem" \
> https://localhost:$HGPORT2/
pulling from https://localhost:$HGPORT2/
- warning: connecting to localhost using legacy security technology (TLS 1.0); see https://mercurial-scm.org/wiki/SecureConnections for more info (?)
(the full certificate chain may not be available locally; see "hg help debugssl") (windows !)
abort: error: *certificate verify failed* (glob)
[255]
-Disabling the TLS 1.0 warning works
- $ hg -R copy-pull id https://localhost:$HGPORT/ \
- > --config hostsecurity.localhost:fingerprints=sha1:ecd87cd6b386d04fc1b8b41c9d8f5e168eef1c03 \
- > --config hostsecurity.disabletls10warning=true
- 5fed3813f7f5
-
-Error message for setting ciphers is different depending on SSLContext support
-
-#if no-sslcontext
- $ P="$CERTSDIR" hg --config hostsecurity.ciphers=invalid -R copy-pull id https://localhost:$HGPORT/
- warning: connecting to localhost using legacy security technology (TLS 1.0); see https://mercurial-scm.org/wiki/SecureConnections for more info
- abort: *No cipher can be selected. (glob)
- [255]
-
- $ P="$CERTSDIR" hg --config hostsecurity.ciphers=HIGH -R copy-pull id https://localhost:$HGPORT/
- warning: connecting to localhost using legacy security technology (TLS 1.0); see https://mercurial-scm.org/wiki/SecureConnections for more info
- 5fed3813f7f5
-#endif
-
-#if sslcontext
Setting ciphers to an invalid value aborts
$ P="$CERTSDIR" hg --config hostsecurity.ciphers=invalid -R copy-pull id https://localhost:$HGPORT/
- warning: connecting to localhost using legacy security technology (TLS 1.0); see https://mercurial-scm.org/wiki/SecureConnections for more info (?)
abort: could not set ciphers: No cipher can be selected.
(change cipher string (invalid) in config)
[255]
$ P="$CERTSDIR" hg --config hostsecurity.localhost:ciphers=invalid -R copy-pull id https://localhost:$HGPORT/
- warning: connecting to localhost using legacy security technology (TLS 1.0); see https://mercurial-scm.org/wiki/SecureConnections for more info (?)
abort: could not set ciphers: No cipher can be selected.
(change cipher string (invalid) in config)
[255]
@@ -383,64 +282,52 @@
Changing the cipher string works
$ P="$CERTSDIR" hg --config hostsecurity.ciphers=HIGH -R copy-pull id https://localhost:$HGPORT/
- warning: connecting to localhost using legacy security technology (TLS 1.0); see https://mercurial-scm.org/wiki/SecureConnections for more info (?)
5fed3813f7f5
-#endif
Fingerprints
- works without cacerts (hostfingerprints)
$ hg -R copy-pull id https://localhost:$HGPORT/ --insecure --config hostfingerprints.localhost=ec:d8:7c:d6:b3:86:d0:4f:c1:b8:b4:1c:9d:8f:5e:16:8e:ef:1c:03
- warning: connecting to localhost using legacy security technology (TLS 1.0); see https://mercurial-scm.org/wiki/SecureConnections for more info (?)
(SHA-1 fingerprint for localhost found in legacy [hostfingerprints] section; if you trust this fingerprint, remove the old SHA-1 fingerprint from [hostfingerprints] and add the following entry to the new [hostsecurity] section: localhost:fingerprints=sha256:20:de:b3:ad:b4:cd:a5:42:f0:74:41:1c:a2:70:1e:da:6e:c0:5c:16:9e:e7:22:0f:f1:b7:e5:6e:e4:92:af:7e)
5fed3813f7f5
- works without cacerts (hostsecurity)
$ hg -R copy-pull id https://localhost:$HGPORT/ --config hostsecurity.localhost:fingerprints=sha1:ecd87cd6b386d04fc1b8b41c9d8f5e168eef1c03
- warning: connecting to localhost using legacy security technology (TLS 1.0); see https://mercurial-scm.org/wiki/SecureConnections for more info (?)
5fed3813f7f5
$ hg -R copy-pull id https://localhost:$HGPORT/ --config hostsecurity.localhost:fingerprints=sha256:20:de:b3:ad:b4:cd:a5:42:f0:74:41:1c:a2:70:1e:da:6e:c0:5c:16:9e:e7:22:0f:f1:b7:e5:6e:e4:92:af:7e
- warning: connecting to localhost using legacy security technology (TLS 1.0); see https://mercurial-scm.org/wiki/SecureConnections for more info (?)
5fed3813f7f5
- multiple fingerprints specified and first matches
$ hg --config 'hostfingerprints.localhost=ecd87cd6b386d04fc1b8b41c9d8f5e168eef1c03, deadbeefdeadbeefdeadbeefdeadbeefdeadbeef' -R copy-pull id https://localhost:$HGPORT/ --insecure
- warning: connecting to localhost using legacy security technology (TLS 1.0); see https://mercurial-scm.org/wiki/SecureConnections for more info (?)
(SHA-1 fingerprint for localhost found in legacy [hostfingerprints] section; if you trust this fingerprint, remove the old SHA-1 fingerprint from [hostfingerprints] and add the following entry to the new [hostsecurity] section: localhost:fingerprints=sha256:20:de:b3:ad:b4:cd:a5:42:f0:74:41:1c:a2:70:1e:da:6e:c0:5c:16:9e:e7:22:0f:f1:b7:e5:6e:e4:92:af:7e)
5fed3813f7f5
$ hg --config 'hostsecurity.localhost:fingerprints=sha1:ecd87cd6b386d04fc1b8b41c9d8f5e168eef1c03, sha1:deadbeefdeadbeefdeadbeefdeadbeefdeadbeef' -R copy-pull id https://localhost:$HGPORT/
- warning: connecting to localhost using legacy security technology (TLS 1.0); see https://mercurial-scm.org/wiki/SecureConnections for more info (?)
5fed3813f7f5
- multiple fingerprints specified and last matches
$ hg --config 'hostfingerprints.localhost=deadbeefdeadbeefdeadbeefdeadbeefdeadbeef, ecd87cd6b386d04fc1b8b41c9d8f5e168eef1c03' -R copy-pull id https://localhost:$HGPORT/ --insecure
- warning: connecting to localhost using legacy security technology (TLS 1.0); see https://mercurial-scm.org/wiki/SecureConnections for more info (?)
(SHA-1 fingerprint for localhost found in legacy [hostfingerprints] section; if you trust this fingerprint, remove the old SHA-1 fingerprint from [hostfingerprints] and add the following entry to the new [hostsecurity] section: localhost:fingerprints=sha256:20:de:b3:ad:b4:cd:a5:42:f0:74:41:1c:a2:70:1e:da:6e:c0:5c:16:9e:e7:22:0f:f1:b7:e5:6e:e4:92:af:7e)
5fed3813f7f5
$ hg --config 'hostsecurity.localhost:fingerprints=sha1:deadbeefdeadbeefdeadbeefdeadbeefdeadbeef, sha1:ecd87cd6b386d04fc1b8b41c9d8f5e168eef1c03' -R copy-pull id https://localhost:$HGPORT/
- warning: connecting to localhost using legacy security technology (TLS 1.0); see https://mercurial-scm.org/wiki/SecureConnections for more info (?)
5fed3813f7f5
- multiple fingerprints specified and none match
$ hg --config 'hostfingerprints.localhost=deadbeefdeadbeefdeadbeefdeadbeefdeadbeef, aeadbeefdeadbeefdeadbeefdeadbeefdeadbeef' -R copy-pull id https://localhost:$HGPORT/ --insecure
- warning: connecting to localhost using legacy security technology (TLS 1.0); see https://mercurial-scm.org/wiki/SecureConnections for more info (?)
abort: certificate for localhost has unexpected fingerprint ec:d8:7c:d6:b3:86:d0:4f:c1:b8:b4:1c:9d:8f:5e:16:8e:ef:1c:03
(check hostfingerprint configuration)
[255]
$ hg --config 'hostsecurity.localhost:fingerprints=sha1:deadbeefdeadbeefdeadbeefdeadbeefdeadbeef, sha1:aeadbeefdeadbeefdeadbeefdeadbeefdeadbeef' -R copy-pull id https://localhost:$HGPORT/
- warning: connecting to localhost using legacy security technology (TLS 1.0); see https://mercurial-scm.org/wiki/SecureConnections for more info (?)
abort: certificate for localhost has unexpected fingerprint sha1:ec:d8:7c:d6:b3:86:d0:4f:c1:b8:b4:1c:9d:8f:5e:16:8e:ef:1c:03
(check hostsecurity configuration)
[255]
- fails when cert doesn't match hostname (port is ignored)
$ hg -R copy-pull id https://localhost:$HGPORT1/ --config hostfingerprints.localhost=ecd87cd6b386d04fc1b8b41c9d8f5e168eef1c03
- warning: connecting to localhost using legacy security technology (TLS 1.0); see https://mercurial-scm.org/wiki/SecureConnections for more info (?)
abort: certificate for localhost has unexpected fingerprint f4:2f:5a:0c:3e:52:5b:db:e7:24:a8:32:1d:18:97:6d:69:b5:87:84
(check hostfingerprint configuration)
[255]
@@ -448,7 +335,6 @@
- ignores that certificate doesn't match hostname
$ hg -R copy-pull id https://$LOCALIP:$HGPORT/ --config hostfingerprints.$LOCALIP=ecd87cd6b386d04fc1b8b41c9d8f5e168eef1c03
- warning: connecting to $LOCALIP using legacy security technology (TLS 1.0); see https://mercurial-scm.org/wiki/SecureConnections for more info (?)
(SHA-1 fingerprint for $LOCALIP found in legacy [hostfingerprints] section; if you trust this fingerprint, remove the old SHA-1 fingerprint from [hostfingerprints] and add the following entry to the new [hostsecurity] section: $LOCALIP:fingerprints=sha256:20:de:b3:ad:b4:cd:a5:42:f0:74:41:1c:a2:70:1e:da:6e:c0:5c:16:9e:e7:22:0f:f1:b7:e5:6e:e4:92:af:7e)
5fed3813f7f5
@@ -458,7 +344,7 @@
$ killdaemons.py hg1.pid
$ killdaemons.py hg2.pid
-#if sslcontext tls1.2
+#if tls1.2
Start servers running supported TLS versions
$ cd test
@@ -572,7 +458,6 @@
$ http_proxy=http://localhost:$HGPORT1/ hg -R copy-pull pull --insecure
pulling from https://localhost:$HGPORT/
- warning: connecting to localhost using legacy security technology (TLS 1.0); see https://mercurial-scm.org/wiki/SecureConnections for more info (?)
warning: connection security to localhost is disabled per current settings; communication is susceptible to eavesdropping and tampering
searching for changes
no changes found
@@ -582,12 +467,10 @@
$ http_proxy=http://localhost:$HGPORT1/ hg -R copy-pull pull \
> --config web.cacerts="$CERTSDIR/pub.pem"
pulling from https://localhost:$HGPORT/
- warning: connecting to localhost using legacy security technology (TLS 1.0); see https://mercurial-scm.org/wiki/SecureConnections for more info (?)
searching for changes
no changes found
$ http_proxy=http://localhost:$HGPORT1/ hg -R copy-pull pull https://localhost:$HGPORT/ --config hostfingerprints.localhost=ecd87cd6b386d04fc1b8b41c9d8f5e168eef1c03 --trace
pulling from https://*:$HGPORT/ (glob)
- warning: connecting to localhost using legacy security technology (TLS 1.0); see https://mercurial-scm.org/wiki/SecureConnections for more info (?)
(SHA-1 fingerprint for localhost found in legacy [hostfingerprints] section; if you trust this fingerprint, remove the old SHA-1 fingerprint from [hostfingerprints] and add the following entry to the new [hostsecurity] section: localhost:fingerprints=sha256:20:de:b3:ad:b4:cd:a5:42:f0:74:41:1c:a2:70:1e:da:6e:c0:5c:16:9e:e7:22:0f:f1:b7:e5:6e:e4:92:af:7e)
searching for changes
no changes found
@@ -597,14 +480,12 @@
$ http_proxy=http://localhost:$HGPORT1/ hg -R copy-pull pull \
> --config web.cacerts="$CERTSDIR/pub-other.pem"
pulling from https://localhost:$HGPORT/
- warning: connecting to localhost using legacy security technology (TLS 1.0); see https://mercurial-scm.org/wiki/SecureConnections for more info (?)
(the full certificate chain may not be available locally; see "hg help debugssl") (windows !)
abort: error: *certificate verify failed* (glob)
[255]
$ http_proxy=http://localhost:$HGPORT1/ hg -R copy-pull pull \
> --config web.cacerts="$CERTSDIR/pub-expired.pem" https://localhost:$HGPORT2/
pulling from https://localhost:$HGPORT2/
- warning: connecting to localhost using legacy security technology (TLS 1.0); see https://mercurial-scm.org/wiki/SecureConnections for more info (?)
(the full certificate chain may not be available locally; see "hg help debugssl") (windows !)
abort: error: *certificate verify failed* (glob)
[255]
@@ -612,8 +493,6 @@
$ killdaemons.py hg0.pid
-#if sslcontext
-
$ cd test
Missing certificate file(s) are detected
@@ -638,7 +517,6 @@
without client certificate:
$ P="$CERTSDIR" hg id https://localhost:$HGPORT/
- warning: connecting to localhost using legacy security technology (TLS 1.0); see https://mercurial-scm.org/wiki/SecureConnections for more info (?)
abort: error: .*(\$ECONNRESET\$|certificate required|handshake failure).* (re)
[255]
@@ -653,16 +531,13 @@
$ P="$CERTSDIR" hg id https://localhost:$HGPORT/ \
> --config auth.l.key="$CERTSDIR/client-key-decrypted.pem"
- warning: connecting to localhost using legacy security technology (TLS 1.0); see https://mercurial-scm.org/wiki/SecureConnections for more info (?)
5fed3813f7f5
$ printf '1234\n' | env P="$CERTSDIR" hg id https://localhost:$HGPORT/ \
> --config ui.interactive=True --config ui.nontty=True
- warning: connecting to localhost using legacy security technology (TLS 1.0); see https://mercurial-scm.org/wiki/SecureConnections for more info (?)
passphrase for */client-key.pem: 5fed3813f7f5 (glob)
$ env P="$CERTSDIR" hg id https://localhost:$HGPORT/
- warning: connecting to localhost using legacy security technology (TLS 1.0); see https://mercurial-scm.org/wiki/SecureConnections for more info (?)
abort: error: * (glob)
[255]
@@ -677,5 +552,3 @@
abort: certificate file (*/missing/key) does not exist; cannot connect to localhost (glob)
(restore missing file or fix references in Mercurial config)
[255]
-
-#endif
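A note on the pins exercised above: the hostsecurity fingerprints are SHA-256 digests of the server's DER-encoded certificate. A minimal standalone sketch of computing such a pin follows; the localhost:443 endpoint is an assumption for illustration, not part of the test:

  import hashlib
  import ssl

  # fetch the peer certificate (PEM), convert it to DER, and hash it; the
  # result is the value pinned via hostsecurity.<host>:fingerprints=sha256:...
  pem = ssl.get_server_certificate(('localhost', 443))  # illustrative endpoint
  der = ssl.PEM_cert_to_DER_cert(pem)
  digest = hashlib.sha256(der).hexdigest()
  print('sha256:' + ':'.join(digest[i:i + 2] for i in range(0, len(digest), 2)))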
--- a/tests/test-import-context.t Tue Jul 14 10:25:41 2020 +0200
+++ b/tests/test-import-context.t Mon Jul 20 21:56:27 2020 +0530
@@ -19,9 +19,8 @@
> EOF
$ cat > cat.py <<EOF
> import sys
- > from mercurial import pycompat
- > from mercurial.utils import stringutil
- > pycompat.stdout.write(b'%s\n'
+ > from mercurial.utils import procutil, stringutil
+ > procutil.stdout.write(b'%s\n'
> % stringutil.pprint(open(sys.argv[1], 'rb').read()))
> EOF
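The pycompat.stdout to procutil.stdout switches in this changeset all follow the shape above. A self-contained sketch of the new-style byte output; the ImportError fallback is illustrative only, so the snippet also runs without Mercurial installed:

  import sys

  try:
      # Mercurial's byte-oriented stdout wrapper, as now used by these tests
      from mercurial.utils import procutil
      out = procutil.stdout
  except ImportError:
      # illustrative fallback: plain Python's binary stdout buffer
      out = getattr(sys.stdout, 'buffer', sys.stdout)

  out.write(b'example bytes\n')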
--- a/tests/test-install.t Tue Jul 14 10:25:41 2020 +0200
+++ b/tests/test-install.t Mon Jul 20 21:56:27 2020 +0530
@@ -18,7 +18,6 @@
checking available compression engines (*zlib*) (glob)
checking available compression engines for wire protocol (*zlib*) (glob)
checking "re2" regexp engine \((available|missing)\) (re)
- checking "re2" regexp engine Rust bindings \((installed|missing)\) (re) (rust !)
checking templates (*mercurial?templates)... (glob)
checking default template (*mercurial?templates?map-cmdline.default) (glob)
checking commit editor... (*) (glob)
@@ -78,7 +77,6 @@
checking available compression engines (*zlib*) (glob)
checking available compression engines for wire protocol (*zlib*) (glob)
checking "re2" regexp engine \((available|missing)\) (re)
- checking "re2" regexp engine Rust bindings \((installed|missing)\) (re) (rust !)
checking templates (*mercurial?templates)... (glob)
checking default template (*mercurial?templates?map-cmdline.default) (glob)
checking commit editor... (*) (glob)
@@ -126,7 +124,6 @@
checking available compression engines (*zlib*) (glob)
checking available compression engines for wire protocol (*zlib*) (glob)
checking "re2" regexp engine \((available|missing)\) (re)
- checking "re2" regexp engine Rust bindings \((installed|missing)\) (re) (rust !)
checking templates (*mercurial?templates)... (glob)
checking default template (*mercurial?templates?map-cmdline.default) (glob)
checking commit editor... ($TESTTMP/tools/testeditor.exe)
@@ -154,7 +151,6 @@
checking available compression engines (*zlib*) (glob)
checking available compression engines for wire protocol (*zlib*) (glob)
checking "re2" regexp engine \((available|missing)\) (re)
- checking "re2" regexp engine Rust bindings \((installed|missing)\) (re) (rust !)
checking templates (*mercurial?templates)... (glob)
checking default template (*mercurial?templates?map-cmdline.default) (glob)
checking commit editor... (c:\foo\bar\baz.exe) (windows !)
@@ -211,7 +207,6 @@
checking available compression engines (*) (glob)
checking available compression engines for wire protocol (*) (glob)
checking "re2" regexp engine \((available|missing)\) (re)
- checking "re2" regexp engine Rust bindings \((installed|missing)\) (re) (rust !)
checking templates ($TESTTMP/installenv/*/site-packages/mercurial/templates)... (glob)
checking default template ($TESTTMP/installenv/*/site-packages/mercurial/templates/map-cmdline.default) (glob)
checking commit editor... (*) (glob)
@@ -252,7 +247,6 @@
checking available compression engines (*) (glob)
checking available compression engines for wire protocol (*) (glob)
checking "re2" regexp engine \((available|missing)\) (re)
- checking "re2" regexp engine Rust bindings \((installed|missing)\) (re) (rust !)
checking templates ($TESTTMP/installenv/*/site-packages/mercurial/templates)... (glob)
checking default template ($TESTTMP/installenv/*/site-packages/mercurial/templates/map-cmdline.default) (glob)
checking commit editor... (*) (glob)
--- a/tests/test-largefiles-update.t Tue Jul 14 10:25:41 2020 +0200
+++ b/tests/test-largefiles-update.t Mon Jul 20 21:56:27 2020 +0530
@@ -593,7 +593,7 @@
what do you want to do? o
merging normal1
warning: conflicts while merging normal1! (edit, then use 'hg resolve --mark')
- unresolved conflicts (see hg resolve, then hg rebase --continue)
+ unresolved conflicts (see 'hg resolve', then 'hg rebase --continue')
[1]
$ cat .hglf/large1
58e24f733a964da346e2407a2bee99d9001184f5
--- a/tests/test-lfs-serve.t Tue Jul 14 10:25:41 2020 +0200
+++ b/tests/test-lfs-serve.t Mon Jul 20 21:56:27 2020 +0530
@@ -133,30 +133,6 @@
requirements
preserved: dotencode, fncache, generaldelta, revlogv1, sparserevlog, store
- beginning upgrade...
- repository locked and read-only
- creating temporary repository to stage migrated data: * (glob)
- (it is safe to interrupt this process any time before data migration completes)
- migrating 3 total revisions (1 in filelogs, 1 in manifests, 1 in changelog)
- migrating 324 bytes in store; 129 bytes tracked data
- migrating 1 filelogs containing 1 revisions (73 bytes in store; 8 bytes tracked data)
- finished migrating 1 filelog revisions across 1 filelogs; change in size: 0 bytes
- migrating 1 manifests containing 1 revisions (117 bytes in store; 52 bytes tracked data)
- finished migrating 1 manifest revisions across 1 manifests; change in size: 0 bytes
- migrating changelog containing 1 revisions (134 bytes in store; 69 bytes tracked data)
- finished migrating 1 changelog revisions; change in size: 0 bytes
- finished migrating 3 total revisions; total change in store size: 0 bytes
- copying phaseroots
- data fully migrated to temporary repository
- marking source repository as being upgraded; clients will be unable to read from repository
- starting in-place swap of repository data
- replaced files will be backed up at * (glob)
- replacing store...
- store replacement complete; repository was inconsistent for *s (glob)
- finalizing requirements file and making repository readable again
- removing temporary repository * (glob)
- copy of old repository backed up at * (glob)
- the old repository will not be deleted; remove it to free up disk space once the upgraded repository is verified
$ grep 'lfs' .hg/requires $SERVER_REQUIRES
[1]
--- a/tests/test-lfs.t Tue Jul 14 10:25:41 2020 +0200
+++ b/tests/test-lfs.t Mon Jul 20 21:56:27 2020 +0530
@@ -697,6 +697,7 @@
> revlog,
> )
> from mercurial.utils import (
+ > procutil,
> stringutil,
> )
> def hash(rawtext):
@@ -713,7 +714,7 @@
> texts = [fl.rawdata(i) for i in fl]
> flags = [int(fl._revlog.flags(i)) for i in fl]
> hashes = [hash(t) for t in texts]
- > pycompat.stdout.write(b' %s: rawsizes=%r flags=%r hashes=%s\n'
+ > procutil.stdout.write(b' %s: rawsizes=%r flags=%r hashes=%s\n'
> % (name, sizes, flags, stringutil.pprint(hashes)))
> EOF
--- a/tests/test-manifest.py Tue Jul 14 10:25:41 2020 +0200
+++ b/tests/test-manifest.py Mon Jul 20 21:56:27 2020 +0530
@@ -156,39 +156,6 @@
with self.assertRaises(KeyError):
m[b'foo']
- def testSetGetNodeSuffix(self):
- clean = self.parsemanifest(A_SHORT_MANIFEST)
- m = self.parsemanifest(A_SHORT_MANIFEST)
- h = m[b'foo']
- f = m.flags(b'foo')
- want = h + b'a'
- # Merge code wants to set 21-byte fake hashes at times
- m[b'foo'] = want
- self.assertEqual(want, m[b'foo'])
- self.assertEqual(
- [(b'bar/baz/qux.py', BIN_HASH_2), (b'foo', BIN_HASH_1 + b'a')],
- list(m.items()),
- )
- # Sometimes it even tries a 22-byte fake hash, but we can
- # return 21 and it'll work out
- m[b'foo'] = want + b'+'
- self.assertEqual(want, m[b'foo'])
- # make sure the suffix survives a copy
- match = matchmod.match(util.localpath(b'/repo'), b'', [b're:foo'])
- m2 = m._matches(match)
- self.assertEqual(want, m2[b'foo'])
- self.assertEqual(1, len(m2))
- m2 = m.copy()
- self.assertEqual(want, m2[b'foo'])
- # suffix with iteration
- self.assertEqual(
- [(b'bar/baz/qux.py', BIN_HASH_2), (b'foo', want)], list(m.items())
- )
-
- # shows up in diff
- self.assertEqual({b'foo': ((want, f), (h, b''))}, m.diff(clean))
- self.assertEqual({b'foo': ((h, b''), (want, f))}, clean.diff(m))
-
def testMatchException(self):
m = self.parsemanifest(A_SHORT_MANIFEST)
match = matchmod.match(util.localpath(b'/repo'), b'', [b're:.*'])
--- a/tests/test-merge-halt.t Tue Jul 14 10:25:41 2020 +0200
+++ b/tests/test-merge-halt.t Mon Jul 20 21:56:27 2020 +0530
@@ -27,7 +27,7 @@
merging b
merging a failed!
merging b failed!
- unresolved conflicts (see hg resolve, then hg rebase --continue)
+ unresolved conflicts (see 'hg resolve', then 'hg rebase --continue')
[1]
$ hg resolve --list
--- a/tests/test-mq-qfold.t Tue Jul 14 10:25:41 2020 +0200
+++ b/tests/test-mq-qfold.t Mon Jul 20 21:56:27 2020 +0530
@@ -230,6 +230,7 @@
HG: changed a
====
note: commit message saved in .hg/last-message.txt
+ note: use 'hg commit --logfile .hg/last-message.txt --edit' to reuse it
transaction abort!
rollback completed
qrefresh interrupted while patch was popped! (revert --all, qpush to recover)
--- a/tests/test-mq-qnew.t Tue Jul 14 10:25:41 2020 +0200
+++ b/tests/test-mq-qnew.t Mon Jul 20 21:56:27 2020 +0530
@@ -308,6 +308,7 @@
transaction abort!
rollback completed
note: commit message saved in .hg/last-message.txt
+ note: use 'hg commit --logfile .hg/last-message.txt --edit' to reuse it
abort: pretxncommit.unexpectedabort hook exited with status 1
[255]
$ cat .hg/last-message.txt
--- a/tests/test-mq-qrefresh-replace-log-message.t Tue Jul 14 10:25:41 2020 +0200
+++ b/tests/test-mq-qrefresh-replace-log-message.t Mon Jul 20 21:56:27 2020 +0530
@@ -186,6 +186,7 @@
HG: added file2
====
note: commit message saved in .hg/last-message.txt
+ note: use 'hg commit --logfile .hg/last-message.txt --edit' to reuse it
transaction abort!
rollback completed
qrefresh interrupted while patch was popped! (revert --all, qpush to recover)
@@ -229,6 +230,7 @@
A file2
====
note: commit message saved in .hg/last-message.txt
+ note: use 'hg commit --logfile .hg/last-message.txt --edit' to reuse it
transaction abort!
rollback completed
qrefresh interrupted while patch was popped! (revert --all, qpush to recover)
--- a/tests/test-narrow-rebase.t Tue Jul 14 10:25:41 2020 +0200
+++ b/tests/test-narrow-rebase.t Mon Jul 20 21:56:27 2020 +0530
@@ -72,7 +72,7 @@
$ hg rebase -d 'desc("modify inside/f1")' 2>&1 | egrep -v '(warning:|incomplete!)'
rebasing 6:cdce97fbf653 "conflicting inside/f1" (tip)
merging inside/f1
- unresolved conflicts (see hg resolve, then hg rebase --continue)
+ unresolved conflicts (see 'hg resolve', then 'hg rebase --continue')
$ echo modified3 > inside/f1
$ hg resolve -m 2>&1 | grep -v continue:
(no more unresolved files)
--- a/tests/test-obsolete-divergent.t Tue Jul 14 10:25:41 2020 +0200
+++ b/tests/test-obsolete-divergent.t Mon Jul 20 21:56:27 2020 +0530
@@ -118,7 +118,9 @@
$ hg push ../other
pushing to ../other
searching for changes
- abort: push includes content-divergent changeset: 392fd25390da!
+ abort: push includes unstable changesets:
+ 82623d38b9ba (content-divergent)
+ 392fd25390da (content-divergent)
[255]
$ cd ..
--- a/tests/test-obsolete.t Tue Jul 14 10:25:41 2020 +0200
+++ b/tests/test-obsolete.t Mon Jul 20 21:56:27 2020 +0530
@@ -251,7 +251,8 @@
$ hg push ../tmpa
pushing to ../tmpa
searching for changes
- abort: push includes phase-divergent changeset: 5601fb93a350!
+ abort: push includes unstable changesets:
+ 5601fb93a350 (phase-divergent)
[255]
Fixing "bumped" situation
@@ -616,7 +617,8 @@
$ hg push ../tmpc/ -r 'desc("original_d")'
pushing to ../tmpc/
searching for changes
- abort: push includes obsolete changeset: 94b33453f93b!
+ abort: push includes obsolete changesets:
+ 94b33453f93b
[255]
refuse to push an unstable changeset
@@ -624,9 +626,52 @@
$ hg push ../tmpc/
pushing to ../tmpc/
searching for changes
- abort: push includes orphan changeset: cda648ca50f5!
+ abort: push includes obsolete changesets:
+ 94b33453f93b
+ push includes unstable changesets:
+ cda648ca50f5 (orphan)
[255]
+with --force it will work anyway
+
+ $ hg push ../tmpc/ --force
+ pushing to ../tmpc/
+ searching for changes
+ adding changesets
+ adding manifests
+ adding file changes
+ added 2 changesets with 2 changes to 2 files
+ 1 new obsolescence markers
+ 1 new orphan changesets
+
+if the orphan changeset is already on the server, pushing should work
+
+ $ hg push ../tmpc/
+ pushing to ../tmpc/
+ searching for changes
+ no changes found
+ [1]
+
+pushing should work even if the outgoing changes contain an unrelated changeset
+(neither obsolete nor unstable) (issue6372)
+
+ $ hg up 1 -q
+ $ hg branch new -q
+ $ mkcommit c
+
+ $ hg push ../tmpc/ --new-branch
+ pushing to ../tmpc/
+ searching for changes
+ adding changesets
+ adding manifests
+ adding file changes
+ added 1 changesets with 1 changes to 1 files (+1 heads)
+
+make later tests work unmodified
+
+ $ hg --config extensions.strip= strip tip -q
+ $ hg up 5 -q
+
Test that extinct changesets are properly detected
$ hg log -r 'extinct()'
@@ -1176,6 +1221,14 @@
phase-divergent: immutable predecessor 245b
content-divergent: predecessor 245b
+ $ hg push ../tmpf -r 50c51b361e60
+ pushing to ../tmpf
+ searching for changes
+ abort: push includes unstable changesets:
+ 50c51b361e60 (orphan, phase-divergent, content-divergent)
+ [255]
+
+
#if serve
$ hg serve -n test -p $HGPORT -d --pid-file=hg.pid -A access.log -E errors.log
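The push aborts above now group the offending changesets under one heading per category instead of naming a single node. A hypothetical helper reproducing that layout (the function name and input shapes are assumptions for illustration; the real logic lives in Mercurial's exchange code):

  def format_push_abort(obsolete_nodes, unstable_nodes):
      # obsolete_nodes: iterable of short hashes
      # unstable_nodes: iterable of (short_hash, [instability, ...]) pairs
      lines = []
      if obsolete_nodes:
          lines.append('push includes obsolete changesets:')
          lines.extend('  %s' % n for n in obsolete_nodes)
      if unstable_nodes:
          lines.append('push includes unstable changesets:')
          lines.extend('  %s (%s)' % (n, ', '.join(kinds))
                       for n, kinds in unstable_nodes)
      return '\n'.join(lines)

  # example matching the test output above:
  print(format_push_abort(['94b33453f93b'], [('cda648ca50f5', ['orphan'])]))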
--- a/tests/test-parseindex.t Tue Jul 14 10:25:41 2020 +0200
+++ b/tests/test-parseindex.t Mon Jul 20 21:56:27 2020 +0530
@@ -185,7 +185,7 @@
> ops = [
> ('reachableroots',
> lambda: cl.index.reachableroots2(0, [1], [0], False)),
- > ('compute_phases_map_sets', lambda: cl.computephases([[0], []])),
+ > ('compute_phases_map_sets', lambda: cl.computephases({1: {cl.node(0)}})),
> ('index_headrevs', lambda: cl.headrevs()),
> ('find_gca_candidates', lambda: cl.commonancestorsheads(n0, n1)),
> ('find_deepest', lambda: cl.ancestor(n0, n1)),
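The computephases benchmark now passes phase roots as a mapping: a dict keyed by phase number whose values are sets of root nodes, as the updated call shows. A hypothetical builder for that shape (DRAFT/SECRET follow Mercurial's public=0, draft=1, secret=2 numbering; everything else here is illustrative):

  DRAFT, SECRET = 1, 2

  def phaseroots(cl, draft_revs=(), secret_revs=()):
      # build the {phase: {root_node, ...}} mapping computephases() now takes
      return {
          DRAFT: {cl.node(r) for r in draft_revs},
          SECRET: {cl.node(r) for r in secret_revs},
      }

  # usage mirroring the test: cl.computephases(phaseroots(cl, draft_revs=[0]))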
--- a/tests/test-patchbomb-tls.t Tue Jul 14 10:25:41 2020 +0200
+++ b/tests/test-patchbomb-tls.t Mon Jul 20 21:56:27 2020 +0530
@@ -39,7 +39,7 @@
Our test cert is not signed by a trusted CA. It should fail to verify if
we are able to load CA certs:
-#if sslcontext defaultcacerts no-defaultcacertsloaded
+#if no-defaultcacertsloaded
$ try
this patch series consists of 1 patches.
@@ -49,41 +49,17 @@
[255]
#endif
-#if no-sslcontext defaultcacerts
- $ try
- this patch series consists of 1 patches.
-
-
- warning: connecting to localhost using legacy security technology (TLS 1.0); see https://mercurial-scm.org/wiki/SecureConnections for more info
- (using CA certificates from *; if you see this message, your Mercurial install is not properly configured; see https://mercurial-scm.org/wiki/SecureConnections for how to configure Mercurial to avoid this message) (glob) (?)
- (?i)abort: .*?certificate.verify.failed.* (re)
- [255]
-#endif
-
#if defaultcacertsloaded
$ try
this patch series consists of 1 patches.
- warning: connecting to localhost using legacy security technology (TLS 1.0); see https://mercurial-scm.org/wiki/SecureConnections for more info (?)
- (using CA certificates from *; if you see this message, your Mercurial install is not properly configured; see https://mercurial-scm.org/wiki/SecureConnections for how to configure Mercurial to avoid this message) (glob) (?)
(the full certificate chain may not be available locally; see "hg help debugssl") (windows !)
(?i)abort: .*?certificate.verify.failed.* (re)
[255]
#endif
-#if no-defaultcacerts
- $ try
- this patch series consists of 1 patches.
-
-
- (unable to load * certificates; see https://mercurial-scm.org/wiki/SecureConnections for how to configure Mercurial to avoid this message) (glob) (?)
- abort: localhost certificate error: no certificate received
- (set hostsecurity.localhost:certfingerprints=sha256:62:09:97:2f:97:60:e3:65:8f:12:5d:78:9e:35:a1:36:7a:65:4b:0e:9f:ac:db:c3:bc:6e:b6:a3:c0:16:e0:30 config setting or use --insecure to connect insecurely)
- [255]
-#endif
-
$ DISABLECACERTS="--config devel.disableloaddefaultcerts=true"
Without certificates:
@@ -94,7 +70,6 @@
(using smtps)
sending mail: smtp host localhost, port * (glob)
- warning: connecting to localhost using legacy security technology (TLS 1.0); see https://mercurial-scm.org/wiki/SecureConnections for more info (?)
(verifying remote certificate)
abort: unable to verify security of localhost (no loaded CA certificates); refusing to connect
(see https://mercurial-scm.org/wiki/SecureConnections for how to configure Mercurial to avoid this error or set hostsecurity.localhost:fingerprints=sha256:20:de:b3:ad:b4:cd:a5:42:f0:74:41:1c:a2:70:1e:da:6e:c0:5c:16:9e:e7:22:0f:f1:b7:e5:6e:e4:92:af:7e to trust this server)
@@ -108,7 +83,6 @@
(using smtps)
sending mail: smtp host localhost, port * (glob)
- warning: connecting to localhost using legacy security technology (TLS 1.0); see https://mercurial-scm.org/wiki/SecureConnections for more info (?)
(verifying remote certificate)
sending [PATCH] a ...
@@ -118,7 +92,6 @@
this patch series consists of 1 patches.
- warning: connecting to localhost using legacy security technology (TLS 1.0); see https://mercurial-scm.org/wiki/SecureConnections for more info (?)
(the full certificate chain may not be available locally; see "hg help debugssl") (windows !)
(?i)abort: .*?certificate.verify.failed.* (re)
[255]
--- a/tests/test-persistent-nodemap.t Tue Jul 14 10:25:41 2020 +0200
+++ b/tests/test-persistent-nodemap.t Mon Jul 20 21:56:27 2020 +0530
@@ -2,20 +2,34 @@
Test the persistent on-disk nodemap
===================================
- $ hg init test-repo
- $ cd test-repo
- $ cat << EOF >> .hg/hgrc
- > [experimental]
- > exp-persistent-nodemap=yes
+ $ cat << EOF >> $HGRCPATH
+ > [format]
+ > use-persistent-nodemap=yes
> [devel]
> persistent-nodemap=yes
> EOF
- $ hg debugbuilddag .+5000
+ $ hg init test-repo
+ $ cd test-repo
+ $ hg debugformat
+ format-variant repo
+ fncache: yes
+ dotencode: yes
+ generaldelta: yes
+ sparserevlog: yes
+ sidedata: no
+ persistent-nodemap: yes
+ copies-sdc: no
+ plain-cl-delta: yes
+ compression: zlib
+ compression-level: default
+ $ hg debugbuilddag .+5000 --new-file --config "storage.revlog.nodemap.mode=warn"
+ persistent nodemap in strict mode without efficient method (no-rust no-pure !)
+ persistent nodemap in strict mode without efficient method (no-rust no-pure !)
$ hg debugnodemap --metadata
uid: ???????????????? (glob)
tip-rev: 5000
- tip-node: 06ddac466af534d365326c13c3879f97caca3cb1
- data-length: 122880
+ tip-node: 6b02b8c7b96654c25e86ba69eda198d7e6ad8b3c
+ data-length: 121088
data-unused: 0
data-unused: 0.000%
$ f --size .hg/store/00changelog.n
@@ -31,53 +45,56 @@
#if rust
$ f --sha256 .hg/store/00changelog-*.nd
- .hg/store/00changelog-????????????????.nd: sha256=1e38e9ffaa45cad13f15c1a9880ad606f4241e8beea2f61b4d5365abadfb55f6 (glob)
+ .hg/store/00changelog-????????????????.nd: sha256=2e029d3200bd1a986b32784fc2ef1a3bd60dc331f025718bcf5ff44d93f026fd (glob)
+
+ $ f --sha256 .hg/store/00manifest-*.nd
+ .hg/store/00manifest-????????????????.nd: sha256=97117b1c064ea2f86664a124589e47db0e254e8d34739b5c5cc5bf31c9da2b51 (glob)
$ hg debugnodemap --dump-new | f --sha256 --size
- size=122880, sha256=1e38e9ffaa45cad13f15c1a9880ad606f4241e8beea2f61b4d5365abadfb55f6
+ size=121088, sha256=2e029d3200bd1a986b32784fc2ef1a3bd60dc331f025718bcf5ff44d93f026fd
$ hg debugnodemap --dump-disk | f --sha256 --bytes=256 --hexdump --size
- size=122880, sha256=1e38e9ffaa45cad13f15c1a9880ad606f4241e8beea2f61b4d5365abadfb55f6
- 0000: 00 00 00 76 00 00 01 65 00 00 00 95 00 00 01 34 |...v...e.......4|
- 0010: 00 00 00 19 00 00 01 69 00 00 00 ab 00 00 00 4b |.......i.......K|
- 0020: 00 00 00 07 00 00 01 4c 00 00 00 f8 00 00 00 8f |.......L........|
- 0030: 00 00 00 c0 00 00 00 a7 00 00 00 89 00 00 01 46 |...............F|
- 0040: 00 00 00 92 00 00 01 bc 00 00 00 71 00 00 00 ac |...........q....|
- 0050: 00 00 00 af 00 00 00 b4 00 00 00 34 00 00 01 ca |...........4....|
- 0060: 00 00 00 23 00 00 01 45 00 00 00 2d 00 00 00 b2 |...#...E...-....|
- 0070: 00 00 00 56 00 00 01 0f 00 00 00 4e 00 00 02 4c |...V.......N...L|
- 0080: 00 00 00 e7 00 00 00 cd 00 00 01 5b 00 00 00 78 |...........[...x|
- 0090: 00 00 00 e3 00 00 01 8e 00 00 00 4f 00 00 00 b1 |...........O....|
- 00a0: 00 00 00 30 00 00 00 11 00 00 00 25 00 00 00 d2 |...0.......%....|
- 00b0: 00 00 00 ec 00 00 00 69 00 00 01 2b 00 00 01 2e |.......i...+....|
- 00c0: 00 00 00 aa 00 00 00 15 00 00 00 3a 00 00 01 4e |...........:...N|
- 00d0: 00 00 00 4d 00 00 00 9d 00 00 00 8e 00 00 00 a4 |...M............|
- 00e0: 00 00 00 c3 00 00 00 eb 00 00 00 29 00 00 00 ad |...........)....|
- 00f0: 00 00 01 3a 00 00 01 32 00 00 00 04 00 00 00 53 |...:...2.......S|
+ size=121088, sha256=2e029d3200bd1a986b32784fc2ef1a3bd60dc331f025718bcf5ff44d93f026fd
+ 0000: 00 00 00 91 00 00 00 20 00 00 00 bb 00 00 00 e7 |....... ........|
+ 0010: 00 00 00 66 00 00 00 a1 00 00 01 13 00 00 01 22 |...f..........."|
+ 0020: 00 00 00 23 00 00 00 fc 00 00 00 ba 00 00 00 5e |...#...........^|
+ 0030: 00 00 00 df 00 00 01 4e 00 00 01 65 00 00 00 ab |.......N...e....|
+ 0040: 00 00 00 a9 00 00 00 95 00 00 00 73 00 00 00 38 |...........s...8|
+ 0050: 00 00 00 cc 00 00 00 92 00 00 00 90 00 00 00 69 |...............i|
+ 0060: 00 00 00 ec 00 00 00 8d 00 00 01 4f 00 00 00 12 |...........O....|
+ 0070: 00 00 02 0c 00 00 00 77 00 00 00 9c 00 00 00 8f |.......w........|
+ 0080: 00 00 00 d5 00 00 00 6b 00 00 00 48 00 00 00 b3 |.......k...H....|
+ 0090: 00 00 00 e5 00 00 00 b5 00 00 00 8e 00 00 00 ad |................|
+ 00a0: 00 00 00 7b 00 00 00 7c 00 00 00 0b 00 00 00 2b |...{...|.......+|
+ 00b0: 00 00 00 c6 00 00 00 1e 00 00 01 08 00 00 00 11 |................|
+ 00c0: 00 00 01 30 00 00 00 26 00 00 01 9c 00 00 00 35 |...0...&.......5|
+ 00d0: 00 00 00 b8 00 00 01 31 00 00 00 2c 00 00 00 55 |.......1...,...U|
+ 00e0: 00 00 00 8a 00 00 00 9a 00 00 00 0c 00 00 01 1e |................|
+ 00f0: 00 00 00 a4 00 00 00 83 00 00 00 c9 00 00 00 8c |................|
#else
$ f --sha256 .hg/store/00changelog-*.nd
- .hg/store/00changelog-????????????????.nd: sha256=b961925120e1c9bc345c199b2cc442abc477029fdece37ef9d99cbe59c0558b7 (glob)
+ .hg/store/00changelog-????????????????.nd: sha256=f544f5462ff46097432caf6d764091f6d8c46d6121be315ead8576d548c9dd79 (glob)
$ hg debugnodemap --dump-new | f --sha256 --size
- size=122880, sha256=b961925120e1c9bc345c199b2cc442abc477029fdece37ef9d99cbe59c0558b7
+ size=121088, sha256=f544f5462ff46097432caf6d764091f6d8c46d6121be315ead8576d548c9dd79
$ hg debugnodemap --dump-disk | f --sha256 --bytes=256 --hexdump --size
- size=122880, sha256=b961925120e1c9bc345c199b2cc442abc477029fdece37ef9d99cbe59c0558b7
+ size=121088, sha256=f544f5462ff46097432caf6d764091f6d8c46d6121be315ead8576d548c9dd79
0000: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff |................|
- 0010: ff ff ff ff ff ff ff ff ff ff fa c2 ff ff ff ff |................|
- 0020: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff |................|
- 0030: ff ff ff ff ff ff ed b3 ff ff ff ff ff ff ff ff |................|
- 0040: ff ff ff ff ff ff ee 34 00 00 00 00 ff ff ff ff |.......4........|
- 0050: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff |................|
- 0060: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff |................|
+ 0010: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff |................|
+ 0020: ff ff ff ff ff ff f5 06 ff ff ff ff ff ff f3 e7 |................|
+ 0030: ff ff ef ca ff ff ff ff ff ff ff ff ff ff ff ff |................|
+ 0040: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff |................|
+ 0050: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ed 08 |................|
+ 0060: ff ff ed 66 ff ff ff ff ff ff ff ff ff ff ff ff |...f............|
0070: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff |................|
- 0080: ff ff ff ff ff ff f8 50 ff ff ff ff ff ff ff ff |.......P........|
- 0090: ff ff ff ff ff ff ff ff ff ff ec c7 ff ff ff ff |................|
- 00a0: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff |................|
- 00b0: ff ff ff ff ff ff fa be ff ff f2 fc ff ff ff ff |................|
- 00c0: ff ff ff ff ff ff ef ea ff ff ff ff ff ff f9 17 |................|
+ 0080: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff |................|
+ 0090: ff ff ff ff ff ff ff ff ff ff ff ff ff ff f6 ed |................|
+ 00a0: ff ff ff ff ff ff fe 61 ff ff ff ff ff ff ff ff |.......a........|
+ 00b0: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff |................|
+ 00c0: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff |................|
00d0: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff |................|
- 00e0: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff |................|
- 00f0: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff |................|
+ 00e0: ff ff ff ff ff ff ff ff ff ff ff ff ff ff f1 02 |................|
+ 00f0: ff ff ff ff ff ff ed 1b ff ff ff ff ff ff ff ff |................|
#endif
@@ -88,27 +105,38 @@
add a new commit
$ hg up
- 0 files updated, 0 files merged, 0 files removed, 0 files unresolved
+ 5001 files updated, 0 files merged, 0 files removed, 0 files unresolved
$ echo foo > foo
$ hg add foo
+
+#if no-pure no-rust
+
+ $ hg ci -m 'foo' --config "storage.revlog.nodemap.mode=strict"
+ transaction abort!
+ rollback completed
+ abort: persistent nodemap in strict mode without efficient method
+ [255]
+
+#endif
+
$ hg ci -m 'foo'
#if no-pure no-rust
$ hg debugnodemap --metadata
uid: ???????????????? (glob)
tip-rev: 5001
- tip-node: 2dd9b5258caa46469ff07d4a3da1eb3529a51f49
- data-length: 122880
+ tip-node: 16395c3cf7e231394735e6b1717823ada303fb0c
+ data-length: 121088
data-unused: 0
data-unused: 0.000%
#else
$ hg debugnodemap --metadata
uid: ???????????????? (glob)
tip-rev: 5001
- tip-node: 2dd9b5258caa46469ff07d4a3da1eb3529a51f49
- data-length: 123072
- data-unused: 192
- data-unused: 0.156%
+ tip-node: 16395c3cf7e231394735e6b1717823ada303fb0c
+ data-length: 121344
+ data-unused: 256
+ data-unused: 0.211%
#endif
$ f --size .hg/store/00changelog.n
@@ -118,17 +146,17 @@
#if pure
$ f --sha256 .hg/store/00changelog-*.nd --size
- .hg/store/00changelog-????????????????.nd: size=123072, sha256=136472751566c8198ff09e306a7d2f9bd18bd32298d614752b73da4d6df23340 (glob)
+ .hg/store/00changelog-????????????????.nd: size=121344, sha256=cce54c5da5bde3ad72a4938673ed4064c86231b9c64376b082b163fdb20f8f66 (glob)
#endif
#if rust
$ f --sha256 .hg/store/00changelog-*.nd --size
- .hg/store/00changelog-????????????????.nd: size=123072, sha256=ccc8a43310ace13812fcc648683e259346754ef934c12dd238cf9b7fadfe9a4b (glob)
+ .hg/store/00changelog-????????????????.nd: size=121344, sha256=952b042fcf614ceb37b542b1b723e04f18f83efe99bee4e0f5ccd232ef470e58 (glob)
#endif
#if no-pure no-rust
$ f --sha256 .hg/store/00changelog-*.nd --size
- .hg/store/00changelog-????????????????.nd: size=122880, sha256=bfafebd751c4f6d116a76a37a1dee2a251747affe7efbcc4f4842ccc746d4db9 (glob)
+ .hg/store/00changelog-????????????????.nd: size=121088, sha256=df7c06a035b96cb28c7287d349d603baef43240be7736fe34eea419a49702e17 (glob)
#endif
$ hg debugnodemap --check
@@ -140,12 +168,12 @@
$ echo bar > bar
$ hg add bar
- $ hg ci -m 'bar' --config experimental.exp-persistent-nodemap.mmap=no
+ $ hg ci -m 'bar' --config storage.revlog.nodemap.mmap=no
- $ hg debugnodemap --check --config experimental.exp-persistent-nodemap.mmap=yes
+ $ hg debugnodemap --check --config storage.revlog.nodemap.mmap=yes
revision in index: 5003
revision in nodemap: 5003
- $ hg debugnodemap --check --config experimental.exp-persistent-nodemap.mmap=no
+ $ hg debugnodemap --check --config storage.revlog.nodemap.mmap=no
revision in index: 5003
revision in nodemap: 5003
@@ -154,34 +182,34 @@
$ hg debugnodemap --metadata
uid: ???????????????? (glob)
tip-rev: 5002
- tip-node: 6ce944fafcee85af91f29ea5b51654cc6101ad7e
- data-length: 123328
- data-unused: 384
- data-unused: 0.311%
+ tip-node: 880b18d239dfa9f632413a2071bfdbcc4806a4fd
+ data-length: 121600
+ data-unused: 512
+ data-unused: 0.421%
$ f --sha256 .hg/store/00changelog-*.nd --size
- .hg/store/00changelog-????????????????.nd: size=123328, sha256=10d26e9776b6596af0f89143a54eba8cc581e929c38242a02a7b0760698c6c70 (glob)
+ .hg/store/00changelog-????????????????.nd: size=121600, sha256=def52503d049ccb823974af313a98a935319ba61f40f3aa06a8be4d35c215054 (glob)
#endif
#if rust
$ hg debugnodemap --metadata
uid: ???????????????? (glob)
tip-rev: 5002
- tip-node: 6ce944fafcee85af91f29ea5b51654cc6101ad7e
- data-length: 123328
- data-unused: 384
- data-unused: 0.311%
+ tip-node: 880b18d239dfa9f632413a2071bfdbcc4806a4fd
+ data-length: 121600
+ data-unused: 512
+ data-unused: 0.421%
$ f --sha256 .hg/store/00changelog-*.nd --size
- .hg/store/00changelog-????????????????.nd: size=123328, sha256=081eec9eb6708f2bf085d939b4c97bc0b6762bc8336bc4b93838f7fffa1516bf (glob)
+ .hg/store/00changelog-????????????????.nd: size=121600, sha256=dacf5b5f1d4585fee7527d0e67cad5b1ba0930e6a0928f650f779aefb04ce3fb (glob)
#endif
#if no-pure no-rust
$ hg debugnodemap --metadata
uid: ???????????????? (glob)
tip-rev: 5002
- tip-node: 6ce944fafcee85af91f29ea5b51654cc6101ad7e
- data-length: 122944
+ tip-node: 880b18d239dfa9f632413a2071bfdbcc4806a4fd
+ data-length: 121088
data-unused: 0
data-unused: 0.000%
$ f --sha256 .hg/store/00changelog-*.nd --size
- .hg/store/00changelog-????????????????.nd: size=122944, sha256=755976b22b64ab680401b45395953504e64e7fa8c31ac570f58dee21e15f9bc0 (glob)
+ .hg/store/00changelog-????????????????.nd: size=121088, sha256=59fcede3e3cc587755916ceed29e3c33748cd1aa7d2f91828ac83e7979d935e8 (glob)
#endif
Test force warming the cache
@@ -193,16 +221,16 @@
$ hg debugnodemap --metadata
uid: ???????????????? (glob)
tip-rev: 5002
- tip-node: 6ce944fafcee85af91f29ea5b51654cc6101ad7e
- data-length: 122944
+ tip-node: 880b18d239dfa9f632413a2071bfdbcc4806a4fd
+ data-length: 121088
data-unused: 0
data-unused: 0.000%
#else
$ hg debugnodemap --metadata
uid: ???????????????? (glob)
tip-rev: 5002
- tip-node: 6ce944fafcee85af91f29ea5b51654cc6101ad7e
- data-length: 122944
+ tip-node: 880b18d239dfa9f632413a2071bfdbcc4806a4fd
+ data-length: 121088
data-unused: 0
data-unused: 0.000%
#endif
@@ -231,22 +259,22 @@
$ hg debugnodemap --metadata
uid: ???????????????? (glob)
tip-rev: 5003
- tip-node: 5c049e9c4a4af159bdcd65dce1b6bf303a0da6cf
- data-length: 123200 (pure !)
- data-length: 123200 (rust !)
- data-length: 122944 (no-rust no-pure !)
- data-unused: 256 (pure !)
- data-unused: 256 (rust !)
+ tip-node: c9329770f979ade2d16912267c38ba5f82fd37b3
+ data-length: 121344 (pure !)
+ data-length: 121344 (rust !)
+ data-length: 121152 (no-rust no-pure !)
+ data-unused: 192 (pure !)
+ data-unused: 192 (rust !)
data-unused: 0 (no-rust no-pure !)
- data-unused: 0.208% (pure !)
- data-unused: 0.208% (rust !)
+ data-unused: 0.158% (pure !)
+ data-unused: 0.158% (rust !)
data-unused: 0.000% (no-rust no-pure !)
$ cp -f ../tmp-copies/* .hg/store/
$ hg debugnodemap --metadata
uid: ???????????????? (glob)
tip-rev: 5002
- tip-node: 6ce944fafcee85af91f29ea5b51654cc6101ad7e
- data-length: 122944
+ tip-node: 880b18d239dfa9f632413a2071bfdbcc4806a4fd
+ data-length: 121088
data-unused: 0
data-unused: 0.000%
$ hg log -r "$NODE" -T '{rev}\n'
@@ -260,7 +288,7 @@
compatible with the persistent nodemap. We need to detect that.
$ hg up "$NODE~5"
- 0 files updated, 0 files merged, 2 files removed, 0 files unresolved
+ 0 files updated, 0 files merged, 4 files removed, 0 files unresolved
$ echo bar > babar
$ hg add babar
$ hg ci -m 'babar'
@@ -276,23 +304,23 @@
$ hg debugnodemap --metadata
uid: ???????????????? (glob)
tip-rev: 5002
- tip-node: 42bf3068c7ddfdfded53c4eb11d02266faeebfee
- data-length: 123456 (pure !)
- data-length: 123008 (rust !)
- data-length: 123008 (no-pure no-rust !)
+ tip-node: b355ef8adce0949b8bdf6afc72ca853740d65944
+ data-length: 121536 (pure !)
+ data-length: 121088 (rust !)
+ data-length: 121088 (no-pure no-rust !)
data-unused: 448 (pure !)
data-unused: 0 (rust !)
data-unused: 0 (no-pure no-rust !)
data-unused: 0.000% (rust !)
- data-unused: 0.363% (pure !)
+ data-unused: 0.369% (pure !)
data-unused: 0.000% (no-pure no-rust !)
$ cp -f ../tmp-copies/* .hg/store/
$ hg debugnodemap --metadata
uid: ???????????????? (glob)
tip-rev: 5002
- tip-node: 6ce944fafcee85af91f29ea5b51654cc6101ad7e
- data-length: 122944
+ tip-node: 880b18d239dfa9f632413a2071bfdbcc4806a4fd
+ data-length: 121088
data-unused: 0
data-unused: 0.000%
$ hg log -r "$OTHERNODE" -T '{rev}\n'
@@ -309,36 +337,36 @@
$ hg debugnodemap --metadata
uid: ???????????????? (glob)
tip-rev: 5003
- tip-node: c91af76d172f1053cca41b83f7c2e4e514fe2bcf
- data-length: 123008
+ tip-node: a52c5079765b5865d97b993b303a18740113bbb2
+ data-length: 121088
data-unused: 0
data-unused: 0.000%
$ echo babar2 > babar
$ hg ci -m 'babar2' --config "hooks.pretxnclose.nodemap-test=hg debugnodemap --metadata"
uid: ???????????????? (glob)
tip-rev: 5004
- tip-node: ba87cd9559559e4b91b28cb140d003985315e031
- data-length: 123328 (pure !)
- data-length: 123328 (rust !)
- data-length: 123136 (no-pure no-rust !)
+ tip-node: 2f5fb1c06a16834c5679d672e90da7c5f3b1a984
+ data-length: 121280 (pure !)
+ data-length: 121280 (rust !)
+ data-length: 121088 (no-pure no-rust !)
data-unused: 192 (pure !)
data-unused: 192 (rust !)
data-unused: 0 (no-pure no-rust !)
- data-unused: 0.156% (pure !)
- data-unused: 0.156% (rust !)
+ data-unused: 0.158% (pure !)
+ data-unused: 0.158% (rust !)
data-unused: 0.000% (no-pure no-rust !)
$ hg debugnodemap --metadata
uid: ???????????????? (glob)
tip-rev: 5004
- tip-node: ba87cd9559559e4b91b28cb140d003985315e031
- data-length: 123328 (pure !)
- data-length: 123328 (rust !)
- data-length: 123136 (no-pure no-rust !)
+ tip-node: 2f5fb1c06a16834c5679d672e90da7c5f3b1a984
+ data-length: 121280 (pure !)
+ data-length: 121280 (rust !)
+ data-length: 121088 (no-pure no-rust !)
data-unused: 192 (pure !)
data-unused: 192 (rust !)
data-unused: 0 (no-pure no-rust !)
- data-unused: 0.156% (pure !)
- data-unused: 0.156% (rust !)
+ data-unused: 0.158% (pure !)
+ data-unused: 0.158% (rust !)
data-unused: 0.000% (no-pure no-rust !)
Another process does not see the pending nodemap content during the run.
@@ -356,28 +384,28 @@
> wait-on-file 20 sync-txn-close sync-repo-read
uid: ???????????????? (glob)
tip-rev: 5004
- tip-node: ba87cd9559559e4b91b28cb140d003985315e031
- data-length: 123328 (pure !)
- data-length: 123328 (rust !)
- data-length: 123136 (no-pure no-rust !)
+ tip-node: 2f5fb1c06a16834c5679d672e90da7c5f3b1a984
+ data-length: 121280 (pure !)
+ data-length: 121280 (rust !)
+ data-length: 121088 (no-pure no-rust !)
data-unused: 192 (pure !)
data-unused: 192 (rust !)
data-unused: 0 (no-pure no-rust !)
- data-unused: 0.156% (pure !)
- data-unused: 0.156% (rust !)
+ data-unused: 0.158% (pure !)
+ data-unused: 0.158% (rust !)
data-unused: 0.000% (no-pure no-rust !)
$ hg debugnodemap --metadata
uid: ???????????????? (glob)
tip-rev: 5005
- tip-node: bae4d45c759e30f1cb1a40e1382cf0e0414154db
- data-length: 123584 (pure !)
- data-length: 123584 (rust !)
- data-length: 123136 (no-pure no-rust !)
+ tip-node: 90d5d3ba2fc47db50f712570487cb261a68c8ffe
+ data-length: 121536 (pure !)
+ data-length: 121536 (rust !)
+ data-length: 121088 (no-pure no-rust !)
data-unused: 448 (pure !)
data-unused: 448 (rust !)
data-unused: 0 (no-pure no-rust !)
- data-unused: 0.363% (pure !)
- data-unused: 0.363% (rust !)
+ data-unused: 0.369% (pure !)
+ data-unused: 0.369% (rust !)
data-unused: 0.000% (no-pure no-rust !)
$ cat output.txt
@@ -386,9 +414,9 @@
$ echo plakfe > a
$ f --size --sha256 .hg/store/00changelog-*.nd
- .hg/store/00changelog-????????????????.nd: size=123584, sha256=8c6cef6fd3d3fac291968793ee19a4be6d0b8375e9508bd5c7d4a8879e8df180 (glob) (pure !)
- .hg/store/00changelog-????????????????.nd: size=123584, sha256=eb9e9a4bcafdb5e1344bc8a0cbb3288b2106413b8efae6265fb8a7973d7e97f9 (glob) (rust !)
- .hg/store/00changelog-????????????????.nd: size=123136, sha256=4f504f5a834db3811ced50ab3e9e80bcae3581bb0f9b13a7a9f94b7fc34bcebe (glob) (no-pure no-rust !)
+ .hg/store/00changelog-????????????????.nd: size=121536, sha256=bb414468d225cf52d69132e1237afba34d4346ee2eb81b505027e6197b107f03 (glob) (pure !)
+ .hg/store/00changelog-????????????????.nd: size=121536, sha256=909ac727bc4d1c0fda5f7bff3c620c98bd4a2967c143405a1503439e33b377da (glob) (rust !)
+ .hg/store/00changelog-????????????????.nd: size=121088, sha256=342d36d30d86dde67d3cb6c002606c4a75bcad665595d941493845066d9c8ee0 (glob) (no-pure no-rust !)
$ hg ci -m a3 --config "extensions.abort=$RUNTESTDIR/testlib/crash_transaction_late.py"
transaction abort!
rollback completed
@@ -397,20 +425,20 @@
$ hg debugnodemap --metadata
uid: ???????????????? (glob)
tip-rev: 5005
- tip-node: bae4d45c759e30f1cb1a40e1382cf0e0414154db
- data-length: 123584 (pure !)
- data-length: 123584 (rust !)
- data-length: 123136 (no-pure no-rust !)
+ tip-node: 90d5d3ba2fc47db50f712570487cb261a68c8ffe
+ data-length: 121536 (pure !)
+ data-length: 121536 (rust !)
+ data-length: 121088 (no-pure no-rust !)
data-unused: 448 (pure !)
data-unused: 448 (rust !)
data-unused: 0 (no-pure no-rust !)
- data-unused: 0.363% (pure !)
- data-unused: 0.363% (rust !)
+ data-unused: 0.369% (pure !)
+ data-unused: 0.369% (rust !)
data-unused: 0.000% (no-pure no-rust !)
$ f --size --sha256 .hg/store/00changelog-*.nd
- .hg/store/00changelog-????????????????.nd: size=123584, sha256=8c6cef6fd3d3fac291968793ee19a4be6d0b8375e9508bd5c7d4a8879e8df180 (glob) (pure !)
- .hg/store/00changelog-????????????????.nd: size=123584, sha256=eb9e9a4bcafdb5e1344bc8a0cbb3288b2106413b8efae6265fb8a7973d7e97f9 (glob) (rust !)
- .hg/store/00changelog-????????????????.nd: size=123136, sha256=4f504f5a834db3811ced50ab3e9e80bcae3581bb0f9b13a7a9f94b7fc34bcebe (glob) (no-pure no-rust !)
+ .hg/store/00changelog-????????????????.nd: size=121536, sha256=bb414468d225cf52d69132e1237afba34d4346ee2eb81b505027e6197b107f03 (glob) (pure !)
+ .hg/store/00changelog-????????????????.nd: size=121536, sha256=909ac727bc4d1c0fda5f7bff3c620c98bd4a2967c143405a1503439e33b377da (glob) (rust !)
+ .hg/store/00changelog-????????????????.nd: size=121088, sha256=342d36d30d86dde67d3cb6c002606c4a75bcad665595d941493845066d9c8ee0 (glob) (no-pure no-rust !)
Check that removing content does not confuse the nodemap
--------------------------------------------------------
@@ -423,7 +451,7 @@
repository tip rolled back to revision 5005 (undo commit)
working directory now based on revision 5005
$ hg id -r .
- bae4d45c759e tip
+ 90d5d3ba2fc4 tip
removing data with strip
@@ -432,4 +460,100 @@
$ hg --config extensions.strip= strip -r . --no-backup
1 files updated, 0 files merged, 0 files removed, 0 files unresolved
$ hg id -r . --traceback
- bae4d45c759e tip
+ 90d5d3ba2fc4 tip
+
+Test upgrade / downgrade
+========================
+
+downgrading
+
+ $ cat << EOF >> .hg/hgrc
+ > [format]
+ > use-persistent-nodemap=no
+ > EOF
+ $ hg debugformat -v
+ format-variant repo config default
+ fncache: yes yes yes
+ dotencode: yes yes yes
+ generaldelta: yes yes yes
+ sparserevlog: yes yes yes
+ sidedata: no no no
+ persistent-nodemap: yes no no
+ copies-sdc: no no no
+ plain-cl-delta: yes yes yes
+ compression: zlib zlib zlib
+ compression-level: default default default
+ $ hg debugupgraderepo --run --no-backup --quiet
+ upgrade will perform the following actions:
+
+ requirements
+ preserved: dotencode, fncache, generaldelta, revlogv1, sparserevlog, store
+ removed: persistent-nodemap
+
+ $ ls -1 .hg/store/ | egrep '00(changelog|manifest)(\.n|-.*\.nd)'
+ [1]
+ $ hg debugnodemap --metadata
+
+
+upgrading
+
+ $ cat << EOF >> .hg/hgrc
+ > [format]
+ > use-persistent-nodemap=yes
+ > EOF
+ $ hg debugformat -v
+ format-variant repo config default
+ fncache: yes yes yes
+ dotencode: yes yes yes
+ generaldelta: yes yes yes
+ sparserevlog: yes yes yes
+ sidedata: no no no
+ persistent-nodemap: no yes no
+ copies-sdc: no no no
+ plain-cl-delta: yes yes yes
+ compression: zlib zlib zlib
+ compression-level: default default default
+ $ hg debugupgraderepo --run --no-backup --quiet
+ upgrade will perform the following actions:
+
+ requirements
+ preserved: dotencode, fncache, generaldelta, revlogv1, sparserevlog, store
+ added: persistent-nodemap
+
+ $ ls -1 .hg/store/ | egrep '00(changelog|manifest)(\.n|-.*\.nd)'
+ 00changelog-*.nd (glob)
+ 00changelog.n
+ 00manifest-*.nd (glob)
+ 00manifest.n
+
+ $ hg debugnodemap --metadata
+ uid: * (glob)
+ tip-rev: 5005
+ tip-node: 90d5d3ba2fc47db50f712570487cb261a68c8ffe
+ data-length: 121088
+ data-unused: 0
+ data-unused: 0.000%
+
+Running unrelated upgrade
+
+ $ hg debugupgraderepo --run --no-backup --quiet --optimize re-delta-all
+ upgrade will perform the following actions:
+
+ requirements
+ preserved: dotencode, fncache, generaldelta, persistent-nodemap, revlogv1, sparserevlog, store
+
+ optimisations: re-delta-all
+
+ $ ls -1 .hg/store/ | egrep '00(changelog|manifest)(\.n|-.*\.nd)'
+ 00changelog-*.nd (glob)
+ 00changelog.n
+ 00manifest-*.nd (glob)
+ 00manifest.n
+
+ $ hg debugnodemap --metadata
+ uid: * (glob)
+ tip-rev: 5005
+ tip-node: 90d5d3ba2fc47db50f712570487cb261a68c8ffe
+ data-length: 121088
+ data-unused: 0
+ data-unused: 0.000%
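For scripting around the upgraded format, the same docket metadata these tests assert on is available from the debug command. A small sketch that shells out to it, assuming hg is on PATH and the working directory is inside a repository:

  import subprocess

  # read the on-disk nodemap docket via the debug command exercised above,
  # then parse the 'key: value' lines into a dict
  raw = subprocess.run(
      ['hg', 'debugnodemap', '--metadata'],
      capture_output=True, text=True, check=True,
  ).stdout
  meta = dict(line.split(': ', 1) for line in raw.splitlines() if ': ' in line)
  print(meta.get('tip-rev'), meta.get('data-length'))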
--- a/tests/test-phabricator.t Tue Jul 14 10:25:41 2020 +0200
+++ b/tests/test-phabricator.t Mon Jul 20 21:56:27 2020 +0530
@@ -86,6 +86,10 @@
[255]
$ hg phabupdate --accept D7913 -m 'LGTM' --test-vcr "$VCR/accept-7913.json"
+phabupdate with --plan-changes:
+
+ $ hg phabupdate --plan-changes D6876 --test-vcr "$VCR/phabupdate-change-6876.json"
+
Create a differential diff:
$ HGENCODING=utf-8; export HGENCODING
$ echo alpha > alpha
@@ -592,7 +596,7 @@
Phabsend requires a linear range of commits
$ hg phabsend -r 0+2+3
- abort: cannot phabsend multiple head revisions: c44b38f24a45
+ abort: cannot phabsend multiple head revisions: c44b38f24a45 aaef04066140
(the revisions must form a linear chain)
[255]
@@ -670,7 +674,7 @@
NEW - a959a3f69d8d: one: first commit to review
NEW - 24a4438154ba: two: second commit to review
NEW - d235829e802c: 3: a commit with no detailed message
- Send the above changes to https://phab.mercurial-scm.org/ (yn)? y
+ Send the above changes to https://phab.mercurial-scm.org/ (Y/n)? y
D8387 - created - a959a3f69d8d: one: first commit to review
D8387 - created - 24a4438154ba: two: second commit to review
D8387 - created - d235829e802c: 3: a commit with no detailed message
@@ -734,7 +738,7 @@
D8387 - 602c4e738243: one: first commit to review
D8387 - 0124e5474c88: two: second commit to review
D8387 - e4edb1fe3565: 3: a commit with no detailed message
- Send the above changes to https://phab.mercurial-scm.org/ (yn)? y
+ Send the above changes to https://phab.mercurial-scm.org/ (Y/n)? y
D8387 - updated - 602c4e738243: one: first commit to review
D8387 - updated - 0124e5474c88: two: second commit to review
D8387 - updated - e4edb1fe3565: 3: a commit with no detailed message
--- a/tests/test-progress.t Tue Jul 14 10:25:41 2020 +0200
+++ b/tests/test-progress.t Mon Jul 20 21:56:27 2020 +0530
@@ -18,7 +18,8 @@
> @command(b'loop',
> [(b'', b'total', b'', b'override for total'),
> (b'', b'nested', False, b'show nested results'),
- > (b'', b'parallel', False, b'show parallel sets of results')],
+ > (b'', b'parallel', False, b'show parallel sets of results'),
+ > (b'', b'warn', False, b'show warning if step divisible by 3')],
> b'hg loop LOOPS',
> norepo=True)
> def loop(ui, loops, **opts):
@@ -32,6 +33,7 @@
> if opts.get('nested', None):
> nested = True
> loops = abs(loops)
+ > showwarn = opts.get('warn', False)
>
> progress = ui.makeprogress(topiclabel, unit=b'loopnum', total=total)
> other = ui.makeprogress(b'other', unit=b'othernum', total=total)
@@ -48,6 +50,8 @@
> for j in range(nested_steps):
> nested.update(j, item=b'nested.%d' % j)
> nested.complete()
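+ > # warn on every third step so the test can observe how the progress
+ > # bar is cleared before ui.warn output and redrawn afterwards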
+ > if showwarn and i % 3 == 0:
+ > ui.warn(b'reached step %d\n' % i)
> progress.complete()
>
> topiclabel = b'loop'
@@ -179,6 +183,42 @@
loop [ <=> ] 5/4\r (no-eol) (esc)
\r (no-eol) (esc)
+test interaction with ui.warn
+
+ $ hg loop --warn 6
+ \r (no-eol) (esc)
+ loop [ ] 0/6\r (no-eol) (esc)
+ \r (no-eol) (esc)
+ reached step 0
+ \r (no-eol) (esc)
+ loop [=======> ] 1/6\r (no-eol) (esc)
+ loop [===============> ] 2/6\r (no-eol) (esc)
+ loop [=======================> ] 3/6\r (no-eol) (esc)
+ \r (no-eol) (esc)
+ reached step 3
+ \r (no-eol) (esc)
+ loop [===============================> ] 4/6\r (no-eol) (esc)
+ loop [=======================================> ] 5/6\r (no-eol) (esc)
+ \r (no-eol) (esc)
+
+test interaction with ui.timestamp-output
+
+ $ hg loop --warn --config ui.timestamp-output=true 6
+ \r (no-eol) (esc)
+ loop [ ] 0/6\r (no-eol) (esc)
+ \r (no-eol) (esc)
+ \[20[2-9][0-9]-[01][0-9]-[0-3][0-9]T[0-5][0-9]:[0-5][0-9]:[0-5][0-9]\.[0-9][0-9][0-9][0-9][0-9][0-9]\] reached step 0 (re)
+ \r (no-eol) (esc)
+ loop [=======> ] 1/6\r (no-eol) (esc)
+ loop [===============> ] 2/6\r (no-eol) (esc)
+ loop [=======================> ] 3/6\r (no-eol) (esc)
+ \r (no-eol) (esc)
+ \[20[2-9][0-9]-[01][0-9]-[0-3][0-9]T[0-5][0-9]:[0-5][0-9]:[0-5][0-9]\.[0-9][0-9][0-9][0-9][0-9][0-9]\] reached step 3 (re)
+ \r (no-eol) (esc)
+ loop [===============================> ] 4/6\r (no-eol) (esc)
+ loop [=======================================> ] 5/6\r (no-eol) (esc)
+ \r (no-eol) (esc)
+
test immediate progress completion
$ hg -y loop 0
--- a/tests/test-pull.t Tue Jul 14 10:25:41 2020 +0200
+++ b/tests/test-pull.t Mon Jul 20 21:56:27 2020 +0530
@@ -142,9 +142,9 @@
pulling from ssh://fakehost%7Ctouch%24%7BIFS%7Downed/path
abort: no suitable response from remote hg!
[255]
- $ hg pull 'ssh://fakehost%7Ctouch%20owned/path'
- pulling from ssh://fakehost%7Ctouch%20owned/path
- abort: no suitable response from remote hg!
+ $ hg --config ui.timestamp-output=true pull 'ssh://fakehost%7Ctouch%20owned/path'
+ \[20[2-9][0-9]-[01][0-9]-[0-3][0-9]T[0-5][0-9]:[0-5][0-9]:[0-5][0-9]\.[0-9][0-9][0-9][0-9][0-9][0-9]\] pulling from ssh://fakehost%7Ctouch%20owned/path (re)
+ \[20[2-9][0-9]-[01][0-9]-[0-3][0-9]T[0-5][0-9]:[0-5][0-9]:[0-5][0-9]\.[0-9][0-9][0-9][0-9][0-9][0-9]\] abort: no suitable response from remote hg! (re)
[255]
$ [ ! -f owned ] || echo 'you got owned'
--- a/tests/test-rebase-abort.t Tue Jul 14 10:25:41 2020 +0200
+++ b/tests/test-rebase-abort.t Mon Jul 20 21:56:27 2020 +0530
@@ -81,7 +81,7 @@
rebasing 4:46f0b057b5c0 "L2" (tip)
merging common
warning: conflicts while merging common! (edit, then use 'hg resolve --mark')
- unresolved conflicts (see hg resolve, then hg rebase --continue)
+ unresolved conflicts (see 'hg resolve', then 'hg rebase --continue')
[1]
Insert unsupported advisory merge record:
@@ -148,7 +148,7 @@
rebasing 4:46f0b057b5c0 "L2" (tip)
merging common
warning: conflicts while merging common! (edit, then use 'hg resolve --mark')
- unresolved conflicts (see hg resolve, then hg rebase --continue)
+ unresolved conflicts (see 'hg resolve', then 'hg rebase --continue')
[1]
$ mv .hg/rebasestate .hg/rebasestate.back
@@ -218,7 +218,7 @@
rebasing 4:145842775fec "C1" (tip)
merging c
warning: conflicts while merging c! (edit, then use 'hg resolve --mark')
- unresolved conflicts (see hg resolve, then hg rebase --continue)
+ unresolved conflicts (see 'hg resolve', then 'hg rebase --continue')
[1]
$ hg tglog
@@ -277,7 +277,7 @@
rebasing 3:6c0f977a22d8 "C" (foo tip)
merging c
warning: conflicts while merging c! (edit, then use 'hg resolve --mark')
- unresolved conflicts (see hg resolve, then hg rebase --continue)
+ unresolved conflicts (see 'hg resolve', then 'hg rebase --continue')
[1]
$ hg abort
rebase aborted
@@ -315,7 +315,7 @@
$ hg rebase -d @ -b foo --tool=internal:fail
rebasing 2:070cf4580bb5 "b2" (foo tip)
- unresolved conflicts (see hg resolve, then hg rebase --continue)
+ unresolved conflicts (see 'hg resolve', then 'hg rebase --continue')
[1]
$ mv .hg/rebasestate ./ # so we're allowed to hg up like in mercurial <2.6.3
@@ -462,7 +462,7 @@
$ hg rebase -d 1 --tool 'internal:fail'
rebasing 2:e4ea5cdc9789 "conflicting 1"
- unresolved conflicts (see hg resolve, then hg rebase --continue)
+ unresolved conflicts (see 'hg resolve', then 'hg rebase --continue')
[1]
$ hg abort
rebase aborted
@@ -503,7 +503,7 @@
$ hg rebase -d 'public()' --tool :merge -q
note: not rebasing 3:0682fd3dabf5 "disappear draft", its destination already has all its changes
warning: conflicts while merging root! (edit, then use 'hg resolve --mark')
- unresolved conflicts (see hg resolve, then hg rebase --continue)
+ unresolved conflicts (see 'hg resolve', then 'hg rebase --continue')
[1]
$ hg abort
rebase aborted
--- a/tests/test-rebase-backup.t Tue Jul 14 10:25:41 2020 +0200
+++ b/tests/test-rebase-backup.t Mon Jul 20 21:56:27 2020 +0530
@@ -126,7 +126,7 @@
rebasing 6:f8bc7d28e573 "c"
merging c
warning: conflicts while merging c! (edit, then use 'hg resolve --mark')
- unresolved conflicts (see hg resolve, then hg rebase --continue)
+ unresolved conflicts (see 'hg resolve', then 'hg rebase --continue')
[1]
$ hg rebase --abort
saved backup bundle to $TESTTMP/repo1/.hg/strip-backup/818c1a43c916-2b644d96-backup.hg
@@ -142,7 +142,7 @@
rebasing 6:f8bc7d28e573 "c"
merging c
warning: conflicts while merging c! (edit, then use 'hg resolve --mark')
- unresolved conflicts (see hg resolve, then hg rebase --continue)
+ unresolved conflicts (see 'hg resolve', then 'hg rebase --continue')
[1]
$ hg rebase --abort
rebase aborted
--- a/tests/test-rebase-bookmarks.t Tue Jul 14 10:25:41 2020 +0200
+++ b/tests/test-rebase-bookmarks.t Mon Jul 20 21:56:27 2020 +0530
@@ -172,7 +172,7 @@
rebasing 3:3d5fa227f4b5 "C" (Y Z)
merging c
warning: conflicts while merging c! (edit, then use 'hg resolve --mark')
- unresolved conflicts (see hg resolve, then hg rebase --continue)
+ unresolved conflicts (see 'hg resolve', then 'hg rebase --continue')
[1]
$ echo 'c' > c
$ hg resolve --mark c
--- a/tests/test-rebase-check-restore.t Tue Jul 14 10:25:41 2020 +0200
+++ b/tests/test-rebase-check-restore.t Mon Jul 20 21:56:27 2020 +0530
@@ -69,7 +69,7 @@
rebasing 2:965c486023db "C"
merging A
warning: conflicts while merging A! (edit, then use 'hg resolve --mark')
- unresolved conflicts (see hg resolve, then hg rebase --continue)
+ unresolved conflicts (see 'hg resolve', then 'hg rebase --continue')
[1]
Solve the conflict and go on:
@@ -126,7 +126,7 @@
rebasing 5:01e6ebbd8272 "F" (tip)
merging A
warning: conflicts while merging A! (edit, then use 'hg resolve --mark')
- unresolved conflicts (see hg resolve, then hg rebase --continue)
+ unresolved conflicts (see 'hg resolve', then 'hg rebase --continue')
[1]
Solve the conflict and go on:
--- a/tests/test-rebase-collapse.t Tue Jul 14 10:25:41 2020 +0200
+++ b/tests/test-rebase-collapse.t Mon Jul 20 21:56:27 2020 +0530
@@ -291,7 +291,7 @@
file 'E' was deleted in local [dest] but was modified in other [source].
You can use (c)hanged version, leave (d)eleted, or leave (u)nresolved.
What do you want to do? u
- unresolved conflicts (see hg resolve, then hg rebase --continue)
+ unresolved conflicts (see 'hg resolve', then 'hg rebase --continue')
[1]
$ echo F > E
@@ -658,7 +658,7 @@
rebasing 1:81e5401e4d37 "B" (B)
merging A
warning: conflicts while merging A! (edit, then use 'hg resolve --mark')
- unresolved conflicts (see hg resolve, then hg rebase --continue)
+ unresolved conflicts (see 'hg resolve', then 'hg rebase --continue')
[1]
$ rm A.orig
$ hg resolve --mark A
@@ -705,7 +705,7 @@
rebasing 1:f899f3910ce7 "B" (B)
merging A
warning: conflicts while merging A! (edit, then use 'hg resolve --mark')
- unresolved conflicts (see hg resolve, then hg rebase --continue)
+ unresolved conflicts (see 'hg resolve', then 'hg rebase --continue')
[1]
$ hg tglog
o 3: 63668d570d21 'C'
@@ -733,7 +733,7 @@
rebasing 3:63668d570d21 "C" (C tip)
merging A
warning: conflicts while merging A! (edit, then use 'hg resolve --mark')
- unresolved conflicts (see hg resolve, then hg rebase --continue)
+ unresolved conflicts (see 'hg resolve', then 'hg rebase --continue')
[1]
$ hg tglog
% 3: 63668d570d21 'C'
--- a/tests/test-rebase-conflicts.t Tue Jul 14 10:25:41 2020 +0200
+++ b/tests/test-rebase-conflicts.t Mon Jul 20 21:56:27 2020 +0530
@@ -66,7 +66,7 @@
rebasing 4:46f0b057b5c0 "L2"
merging common
warning: conflicts while merging common! (edit, then use 'hg resolve --mark')
- unresolved conflicts (see hg resolve, then hg rebase --continue)
+ unresolved conflicts (see 'hg resolve', then 'hg rebase --continue')
[1]
$ hg status --config commands.status.verbose=1
@@ -344,7 +344,7 @@
rebasing 13:7bc217434fc1 "abc" (tip)
merging a
warning: conflicts while merging a! (edit, then use 'hg resolve --mark')
- unresolved conflicts (see hg resolve, then hg rebase --continue)
+ unresolved conflicts (see 'hg resolve', then 'hg rebase --continue')
[1]
$ hg diff
diff -r 328e4ab1f7cc a
@@ -364,7 +364,7 @@
rebasing 13:7bc217434fc1 "abc" (tip)
merging a
warning: conflicts while merging a! (edit, then use 'hg resolve --mark')
- unresolved conflicts (see hg resolve, then hg rebase --continue)
+ unresolved conflicts (see 'hg resolve', then 'hg rebase --continue')
[1]
$ hg diff
diff -r 328e4ab1f7cc a
@@ -402,7 +402,7 @@
rebasing 1:112478962961 "B" (B)
merging B
warning: conflicts while merging B! (edit, then use 'hg resolve --mark')
- unresolved conflicts (see hg resolve, then hg rebase --continue)
+ unresolved conflicts (see 'hg resolve', then 'hg rebase --continue')
[1]
$ echo 4 > B
@@ -454,7 +454,7 @@
rebasing 5:9a6b91dc2044 "F" (F tip)
merging conflict
warning: conflicts while merging conflict! (edit, then use 'hg resolve --mark')
- unresolved conflicts (see hg resolve, then hg rebase --continue)
+ unresolved conflicts (see 'hg resolve', then 'hg rebase --continue')
[1]
$ hg tglog
@ 8:draft 'E'
--- a/tests/test-rebase-dest.t Tue Jul 14 10:25:41 2020 +0200
+++ b/tests/test-rebase-dest.t Mon Jul 20 21:56:27 2020 +0530
@@ -48,7 +48,7 @@
rebasing 3:0537f6b50def "dc" (tip)
merging c
warning: conflicts while merging c! (edit, then use 'hg resolve --mark')
- unresolved conflicts (see hg resolve, then hg rebase --continue)
+ unresolved conflicts (see 'hg resolve', then 'hg rebase --continue')
[1]
$ echo d > c
$ hg resolve --mark --all
--- a/tests/test-rebase-detach.t Tue Jul 14 10:25:41 2020 +0200
+++ b/tests/test-rebase-detach.t Mon Jul 20 21:56:27 2020 +0530
@@ -297,7 +297,7 @@
rebasing 3:17b4880d2402 "B2" (tip)
merging B
warning: conflicts while merging B! (edit, then use 'hg resolve --mark')
- unresolved conflicts (see hg resolve, then hg rebase --continue)
+ unresolved conflicts (see 'hg resolve', then 'hg rebase --continue')
[1]
$ hg resolve --all -t internal:local
(no more unresolved files)
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/tests/test-rebase-empty-successor.t Mon Jul 20 21:56:27 2020 +0530
@@ -0,0 +1,44 @@
+ $ cat << EOF >> $HGRCPATH
+ > [extensions]
+ > rebase=
+ > [alias]
+ > tglog = log -G -T "{rev} '{desc}'\n"
+ > EOF
+
+ $ hg init
+
+ $ echo a > a; hg add a; hg ci -m a
+ $ echo b > b; hg add b; hg ci -m b1
+ $ hg up 0 -q
+ $ echo b > b; hg add b; hg ci -m b2 -q
+
+ $ hg tglog
+ @ 2 'b2'
+ |
+ | o 1 'b1'
+ |/
+ o 0 'a'
+
+
+With rewrite.empty-successor=skip, b2 is skipped because it would become empty.
+
+ $ hg rebase -s 2 -d 1 --config rewrite.empty-successor=skip --dry-run
+ starting dry-run rebase; repository will not be changed
+ rebasing 2:6e2aad5e0f3c "b2" (tip)
+ note: not rebasing 2:6e2aad5e0f3c "b2" (tip), its destination already has all its changes
+ dry-run rebase completed successfully; run without -n/--dry-run to perform this rebase
+
+With rewrite.empty-successor=keep, b2 is recreated even though it becomes empty.
+
+ $ hg rebase -s 2 -d 1 --config rewrite.empty-successor=keep
+ rebasing 2:6e2aad5e0f3c "b2" (tip)
+ note: created empty successor for 2:6e2aad5e0f3c "b2" (tip), its destination already has all its changes
+ saved backup bundle to $TESTTMP/.hg/strip-backup/6e2aad5e0f3c-7d7c8801-rebase.hg
+
+ $ hg tglog
+ @ 2 'b2'
+ |
+ o 1 'b1'
+ |
+ o 0 'a'
+
--- a/tests/test-rebase-emptycommit.t Tue Jul 14 10:25:41 2020 +0200
+++ b/tests/test-rebase-emptycommit.t Mon Jul 20 21:56:27 2020 +0530
@@ -129,7 +129,16 @@
> B
> EOS
- $ hg rebase -r '(A::)-(B::)-A' -d H
+Previously, the empty-commit check compared the parent's branch name against
+the working-directory branch name instead of the commit's actual branch name
+(which must stay unchanged when --keepbranches is passed). The check
+therefore saw a spurious branch change and wrongly created an otherwise
+empty changeset.
+
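+A minimal sketch of the corrected comparison (names are illustrative, not
+Mercurial's actual internals):
+
+  def becomesempty(origctx, wctx, parentctx, keepbranches):
+      # With --keepbranches the rewritten commit keeps origctx's branch, so
+      # that branch must be compared against the new parent's branch; using
+      # the working directory's branch here was the bug.
+      newbranch = origctx.branch() if keepbranches else wctx.branch()
+      return not wctx.files() and newbranch == parentctx.branch()
+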
+ $ hg update H -q
+ $ hg branch foo -q
+
+ $ hg rebase -r '(A::)-(B::)-A' -d H --keepbranches
rebasing 2:dc0947a82db8 "C" (BOOK-C)
note: not rebasing 2:dc0947a82db8 "C" (BOOK-C), its destination already has all its changes
rebasing 3:b18e25de2cf5 "D" (BOOK-D)
@@ -137,6 +146,7 @@
rebasing 4:86a1f6686812 "E" (BOOK-E E)
note: not rebasing 4:86a1f6686812 "E" (BOOK-E E), its destination already has all its changes
saved backup bundle to $TESTTMP/merge1/.hg/strip-backup/b18e25de2cf5-1fd0a4ba-rebase.hg
+ $ hg update null -q
$ hg log -G -T '{rev} {desc} {bookmarks}'
o 4 H BOOK-C BOOK-D BOOK-E
--- a/tests/test-rebase-inmemory.t Tue Jul 14 10:25:41 2020 +0200
+++ b/tests/test-rebase-inmemory.t Mon Jul 20 21:56:27 2020 +0530
@@ -471,7 +471,7 @@
rebasing 4:e860deea161a "e"
merging e
warning: conflicts while merging e! (edit, then use 'hg resolve --mark')
- unresolved conflicts (see hg resolve, then hg rebase --continue)
+ unresolved conflicts (see 'hg resolve', then 'hg rebase --continue')
[1]
$ hg rebase --abort
saved backup bundle to $TESTTMP/repo3/.hg/strip-backup/c1e524d4287c-f91f82e1-backup.hg
@@ -863,7 +863,7 @@
rebasing 2:fb62b706688e "add b to foo" (tip)
merging foo
warning: conflicts while merging foo! (edit, then use 'hg resolve --mark')
- unresolved conflicts (see hg resolve, then hg rebase --continue)
+ unresolved conflicts (see 'hg resolve', then 'hg rebase --continue')
[1]
$ cd $TESTTMP
@@ -897,7 +897,7 @@
rebasing 2:b4d249fbf8dd "bye from foo"
merging foo
warning: conflicts while merging foo! (edit, then use 'hg resolve --mark')
- unresolved conflicts (see hg resolve, then hg rebase --continue)
+ unresolved conflicts (see 'hg resolve', then 'hg rebase --continue')
[1]
$ hg rebase -r 3 -d 1 -t:merge3
abort: rebase in progress
@@ -920,3 +920,46 @@
|/ foo
o 0: r0
r0
+
+ $ cd ..
+
+Changesets that become empty should not be committed. Merges are not empty by
+definition.
+
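+A sketch of that rule (hypothetical helper, not the real implementation):
+
+  def maybeskip(ctx):
+      # A merge joins two parents and stays meaningful even when it touches
+      # no files, so only non-merge changesets can become empty.
+      return len(ctx.parents()) < 2 and not ctx.files()
+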
+ $ hg init keep_merge
+ $ cd keep_merge
+ $ echo base > base; hg add base; hg ci -m base
+ $ echo test > test; hg add test; hg ci -m a
+ $ hg up 0 -q
+ $ echo test > test; hg add test; hg ci -m b -q
+ $ hg up 0 -q
+ $ echo test > test; hg add test; hg ci -m c -q
+ $ hg up 1 -q
+ $ hg merge 2 -q
+ $ hg ci -m merge
+ $ hg up null -q
+ $ hg tglog
+ o 4: 59c8292117b1 'merge'
+ |\
+ | | o 3: 531f80391e4a 'c'
+ | | |
+ | o | 2: 0194f1db184a 'b'
+ | |/
+ o / 1: 6f252845ea45 'a'
+ |/
+ o 0: d20a80d4def3 'base'
+
+ $ hg rebase -s 2 -d 3
+ rebasing 2:0194f1db184a "b"
+ note: not rebasing 2:0194f1db184a "b", its destination already has all its changes
+ rebasing 4:59c8292117b1 "merge" (tip)
+ saved backup bundle to $TESTTMP/keep_merge/.hg/strip-backup/0194f1db184a-aee31d03-rebase.hg
+ $ hg tglog
+ o 3: 506e2454484b 'merge'
+ |\
+ | o 2: 531f80391e4a 'c'
+ | |
+ o | 1: 6f252845ea45 'a'
+ |/
+ o 0: d20a80d4def3 'base'
+
--- a/tests/test-rebase-interruptions.t Tue Jul 14 10:25:41 2020 +0200
+++ b/tests/test-rebase-interruptions.t Mon Jul 20 21:56:27 2020 +0530
@@ -61,7 +61,7 @@
rebasing 2:965c486023db "C"
merging A
warning: conflicts while merging A! (edit, then use 'hg resolve --mark')
- unresolved conflicts (see hg resolve, then hg rebase --continue)
+ unresolved conflicts (see 'hg resolve', then 'hg rebase --continue')
[1]
Force a commit on C during the interruption:
@@ -98,7 +98,7 @@
rebasing 2:965c486023db "C"
merging A
warning: conflicts while merging A! (edit, then use 'hg resolve --mark')
- unresolved conflicts (see hg resolve, then hg rebase --continue)
+ unresolved conflicts (see 'hg resolve', then 'hg rebase --continue')
[1]
Solve the conflict and go on:
@@ -157,7 +157,7 @@
rebasing 2:965c486023db "C"
merging A
warning: conflicts while merging A! (edit, then use 'hg resolve --mark')
- unresolved conflicts (see hg resolve, then hg rebase --continue)
+ unresolved conflicts (see 'hg resolve', then 'hg rebase --continue')
[1]
Force a commit on B' during the interruption:
@@ -229,7 +229,7 @@
rebasing 2:965c486023db "C"
merging A
warning: conflicts while merging A! (edit, then use 'hg resolve --mark')
- unresolved conflicts (see hg resolve, then hg rebase --continue)
+ unresolved conflicts (see 'hg resolve', then 'hg rebase --continue')
[1]
Change phase on B and B'
@@ -302,7 +302,7 @@
rebasing 2:965c486023db "C"
merging A
warning: conflicts while merging A! (edit, then use 'hg resolve --mark')
- unresolved conflicts (see hg resolve, then hg rebase --continue)
+ unresolved conflicts (see 'hg resolve', then 'hg rebase --continue')
[1]
$ hg tglog
@ 5: 45396c49d53b 'B'
@@ -505,7 +505,7 @@
rebasing 1:fdaca8533b86 "b"
merging a
warning: conflicts while merging a! (edit, then use 'hg resolve --mark')
- unresolved conflicts (see hg resolve, then hg rebase --continue)
+ unresolved conflicts (see 'hg resolve', then 'hg rebase --continue')
[1]
$ echo a > a
$ echo c >> a
@@ -525,7 +525,7 @@
rebasing 2:fdaca8533b86 "b" (tip)
merging a
warning: conflicts while merging a! (edit, then use 'hg resolve --mark')
- unresolved conflicts (see hg resolve, then hg rebase --continue)
+ unresolved conflicts (see 'hg resolve', then 'hg rebase --continue')
[1]
$ echo a > a
$ echo c >> a
--- a/tests/test-rebase-legacy.t Tue Jul 14 10:25:41 2020 +0200
+++ b/tests/test-rebase-legacy.t Mon Jul 20 21:56:27 2020 +0530
@@ -26,26 +26,30 @@
> \|/
> R
> EOF
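+
+Resolve the node id of each changeset drawn above so the hand-written
+rebasestate below does not depend on hard-coded hashes.
+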
+ $ for r in A B C D E F G H R Z
+ > do
+ > eval node_$r=$(hg log -r $r -T '{node}')
+ > done
rebasestate generated by a legacy client running "hg rebase -r B+D+E+G+H -d Z"
$ touch .hg/last-message.txt
$ cat > .hg/rebasestate <<EOF
> 0000000000000000000000000000000000000000
- > f424eb6a8c01c4a0c0fba9f863f79b3eb5b4b69f
+ > $node_Z
> 0000000000000000000000000000000000000000
> 0
> 0
> 0
>
- > 21a6c45028857f500f56ae84fbf40689c429305b:-2
- > de008c61a447fcfd93f808ef527d933a84048ce7:0000000000000000000000000000000000000000
- > c1e6b162678d07d0b204e5c8267d51b4e03b633c:0000000000000000000000000000000000000000
- > aeba276fcb7df8e10153a07ee728d5540693f5aa:-3
- > bd5548558fcf354d37613005737a143871bf3723:-3
- > d2fa1c02b2401b0e32867f26cce50818a4bd796a:0000000000000000000000000000000000000000
- > 6f7a236de6852570cd54649ab62b1012bb78abc8:0000000000000000000000000000000000000000
- > 6582e6951a9c48c236f746f186378e36f59f4928:0000000000000000000000000000000000000000
+ > $node_A:-2
+ > $node_E:0000000000000000000000000000000000000000
+ > $node_B:0000000000000000000000000000000000000000
+ > $node_F:-3
+ > $node_C:-3
+ > $node_G:0000000000000000000000000000000000000000
+ > $node_D:0000000000000000000000000000000000000000
+ > $node_H:0000000000000000000000000000000000000000
> EOF
#if continuecommand
--- a/tests/test-rebase-mq-skip.t Tue Jul 14 10:25:41 2020 +0200
+++ b/tests/test-rebase-mq-skip.t Mon Jul 20 21:56:27 2020 +0530
@@ -155,7 +155,7 @@
rebasing 3:6ff5b8feed8e "r3" (r3)
note: not rebasing 3:6ff5b8feed8e "r3" (r3), its destination already has all its changes
rebasing 4:094320fec554 "r4" (r4)
- unresolved conflicts (see hg resolve, then hg rebase --continue)
+ unresolved conflicts (see 'hg resolve', then 'hg rebase --continue')
[1]
$ HGMERGE=internal:local hg resolve --all
--- a/tests/test-rebase-mq.t Tue Jul 14 10:25:41 2020 +0200
+++ b/tests/test-rebase-mq.t Mon Jul 20 21:56:27 2020 +0530
@@ -62,7 +62,7 @@
rebasing 2:3504f44bffc0 "P0" (f.patch qbase)
merging f
warning: conflicts while merging f! (edit, then use 'hg resolve --mark')
- unresolved conflicts (see hg resolve, then hg rebase --continue)
+ unresolved conflicts (see 'hg resolve', then 'hg rebase --continue')
[1]
Fix the 1st conflict:
@@ -76,7 +76,7 @@
rebasing 3:929394423cd3 "P1" (f2.patch qtip tip)
merging f
warning: conflicts while merging f! (edit, then use 'hg resolve --mark')
- unresolved conflicts (see hg resolve, then hg rebase --continue)
+ unresolved conflicts (see 'hg resolve', then 'hg rebase --continue')
[1]
Fix the 2nd conflict:
--- a/tests/test-rebase-obsolete.t Tue Jul 14 10:25:41 2020 +0200
+++ b/tests/test-rebase-obsolete.t Mon Jul 20 21:56:27 2020 +0530
@@ -1032,7 +1032,7 @@
rebasing 19:b82fb57ea638 "willconflict second version"
merging willconflict
warning: conflicts while merging willconflict! (edit, then use 'hg resolve --mark')
- unresolved conflicts (see hg resolve, then hg rebase --continue)
+ unresolved conflicts (see 'hg resolve', then 'hg rebase --continue')
[1]
$ hg resolve --mark willconflict
@@ -1787,7 +1787,7 @@
rebasing 1:2ec65233581b "B"
merging D
warning: conflicts while merging D! (edit, then use 'hg resolve --mark')
- unresolved conflicts (see hg resolve, then hg rebase --continue)
+ unresolved conflicts (see 'hg resolve', then 'hg rebase --continue')
[1]
$ cp -R . $TESTTMP/hidden-state2
@@ -1872,7 +1872,7 @@
rebasing 3:055a42cdd887 "d"
merging d
warning: conflicts while merging d! (edit, then use 'hg resolve --mark')
- unresolved conflicts (see hg resolve, then hg rebase --continue)
+ unresolved conflicts (see 'hg resolve', then 'hg rebase --continue')
[1]
$ hg rebase --stop
1 new orphan changesets
@@ -1934,7 +1934,7 @@
rebasing 3:055a42cdd887 "d"
merging d
warning: conflicts while merging d! (edit, then use 'hg resolve --mark')
- unresolved conflicts (see hg resolve, then hg rebase --continue)
+ unresolved conflicts (see 'hg resolve', then 'hg rebase --continue')
[1]
$ hg rebase --stop
abort: cannot remove original changesets with unrebased descendants
@@ -1952,7 +1952,7 @@
rebasing 3:055a42cdd887 "d"
merging d
warning: conflicts while merging d! (edit, then use 'hg resolve --mark')
- unresolved conflicts (see hg resolve, then hg rebase --continue)
+ unresolved conflicts (see 'hg resolve', then 'hg rebase --continue')
[1]
$ hg rebase --stop
$ hg log -G --template "{rev}:{short(node)} {person(author)}\n{firstline(desc)} {topic}\n\n"
@@ -2014,7 +2014,7 @@
rebasing 3:055a42cdd887 "d"
merging d
warning: conflicts while merging d! (edit, then use 'hg resolve --mark')
- unresolved conflicts (see hg resolve, then hg rebase --continue)
+ unresolved conflicts (see 'hg resolve', then 'hg rebase --continue')
[1]
$ hg rebase --stop
abort: cannot stop in --collapse session
@@ -2047,7 +2047,7 @@
rebasing 3:055a42cdd887 "d"
merging d
warning: conflicts while merging d! (edit, then use 'hg resolve --mark')
- unresolved conflicts (see hg resolve, then hg rebase --continue)
+ unresolved conflicts (see 'hg resolve', then 'hg rebase --continue')
[1]
$ hg rebase --stop --dry-run
abort: cannot specify both --stop and --dry-run
@@ -2115,7 +2115,7 @@
rebasing 3:67a385d4e6f2 "D" (Z)
merging d
warning: conflicts while merging d! (edit, then use 'hg resolve --mark')
- unresolved conflicts (see hg resolve, then hg rebase --continue)
+ unresolved conflicts (see 'hg resolve', then 'hg rebase --continue')
[1]
$ hg rebase --stop
1 new orphan changesets
--- a/tests/test-rebase-parameters.t Tue Jul 14 10:25:41 2020 +0200
+++ b/tests/test-rebase-parameters.t Mon Jul 20 21:56:27 2020 +0530
@@ -479,7 +479,7 @@
$ hg rebase -s 2 -d 1 --tool internal:fail
rebasing 2:e4e3f3546619 "c2b" (tip)
- unresolved conflicts (see hg resolve, then hg rebase --continue)
+ unresolved conflicts (see 'hg resolve', then 'hg rebase --continue')
[1]
$ hg summary
--- a/tests/test-rebase-partial.t Tue Jul 14 10:25:41 2020 +0200
+++ b/tests/test-rebase-partial.t Mon Jul 20 21:56:27 2020 +0530
@@ -84,7 +84,7 @@
rebasing 2:ef8c0fe0897b "D" (D)
merging file
warning: conflicts while merging file! (edit, then use 'hg resolve --mark')
- unresolved conflicts (see hg resolve, then hg rebase --continue)
+ unresolved conflicts (see 'hg resolve', then 'hg rebase --continue')
[1]
$ hg rebase --abort
rebase aborted
--- a/tests/test-rebase-transaction.t Tue Jul 14 10:25:41 2020 +0200
+++ b/tests/test-rebase-transaction.t Mon Jul 20 21:56:27 2020 +0530
@@ -107,7 +107,7 @@
rebasing 3:c26739dbe603 "C" (C)
merging conflict
warning: conflicts while merging conflict! (edit, then use 'hg resolve --mark')
- unresolved conflicts (see hg resolve, then hg rebase --continue)
+ unresolved conflicts (see 'hg resolve', then 'hg rebase --continue')
[1]
$ hg tglog
o 5: D
--- a/tests/test-requires.t Tue Jul 14 10:25:41 2020 +0200
+++ b/tests/test-requires.t Mon Jul 20 21:56:27 2020 +0530
@@ -48,6 +48,14 @@
> # enable extension locally
> supportlocally = $TESTTMP/supported-locally/supportlocally.py
> EOF
+ $ hg -R supported debugrequirements
+ dotencode
+ featuresetup-test
+ fncache
+ generaldelta
+ revlogv1
+ sparserevlog
+ store
$ hg -R supported status
$ hg init push-dst
--- a/tests/test-resolve.t Tue Jul 14 10:25:41 2020 +0200
+++ b/tests/test-resolve.t Mon Jul 20 21:56:27 2020 +0530
@@ -92,7 +92,7 @@
$ cat > $TESTTMP/markdriver.py << EOF
> '''mark and unmark files as driver-resolved'''
> from mercurial import (
- > merge,
+ > mergestate,
> pycompat,
> registrar,
> scmutil,
@@ -106,7 +106,7 @@
> wlock = repo.wlock()
> opts = pycompat.byteskwargs(opts)
> try:
- > ms = merge.mergestate.read(repo)
+ > ms = mergestate.mergestate.read(repo)
> m = scmutil.match(repo[None], pats, opts)
> for f in ms:
> if not m(f):
@@ -520,7 +520,7 @@
warning: conflicts while merging emp1! (edit, then use 'hg resolve --mark')
warning: conflicts while merging emp2! (edit, then use 'hg resolve --mark')
warning: conflicts while merging emp3! (edit, then use 'hg resolve --mark')
- unresolved conflicts (see hg resolve, then hg rebase --continue)
+ unresolved conflicts (see 'hg resolve', then 'hg rebase --continue')
[1]
Test when commands.resolve.confirm config option is not set:
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/tests/test-revert-interactive-curses.t Mon Jul 20 21:56:27 2020 +0530
@@ -0,0 +1,55 @@
+#require tic
+
+Revert interactive tests with the Curses interface
+
+ $ cat <<EOF >> $HGRCPATH
+ > [ui]
+ > interactive = true
+ > interface = curses
+ > [experimental]
+ > crecordtest = testModeCommands
+ > EOF
+
+TODO: Make a curses version of the other tests from test-revert-interactive.t.
+
+When a line without EOL is selected during "revert -i"
+
+ $ hg init $TESTTMP/revert-i-curses-eol
+ $ cd $TESTTMP/revert-i-curses-eol
+ $ echo 0 > a
+ $ hg ci -qAm 0
+ $ printf 1 >> a
+ $ hg ci -qAm 1
+ $ cat a
+ 0
+ 1 (no-eol)
+
+ $ cat <<EOF >testModeCommands
+ > c
+ > EOF
+
+ $ hg revert -ir'.^'
+ reverting a
+ $ cat a
+ 0
+
+When a selected line is reverted to have no EOL
+
+ $ hg init $TESTTMP/revert-i-curses-eol2
+ $ cd $TESTTMP/revert-i-curses-eol2
+ $ printf 0 > a
+ $ hg ci -qAm 0
+ $ echo 0 > a
+ $ hg ci -qAm 1
+ $ cat a
+ 0
+
+ $ cat <<EOF >testModeCommands
+ > c
+ > EOF
+
+ $ hg revert -ir'.^'
+ reverting a
+ $ cat a
+ 0 (no-eol)
+
--- a/tests/test-revset.t Tue Jul 14 10:25:41 2020 +0200
+++ b/tests/test-revset.t Mon Jul 20 21:56:27 2020 +0530
@@ -1864,12 +1864,12 @@
$ log 'id(2)'
$ log 'id(8)'
3
- $ hg --config experimental.revisions.prefixhexnode=yes log --template '{rev}\n' -r 'id(x8)'
+ $ hg log --template '{rev}\n' -r 'id(x8)'
3
- $ hg --config experimental.revisions.prefixhexnode=yes log --template '{rev}\n' -r 'x8'
+ $ hg log --template '{rev}\n' -r 'x8'
3
- $ hg --config experimental.revisions.prefixhexnode=yes log --template '{rev}\n' -r 'id(x)'
- $ hg --config experimental.revisions.prefixhexnode=yes log --template '{rev}\n' -r 'x'
+ $ hg log --template '{rev}\n' -r 'id(x)'
+ $ hg log --template '{rev}\n' -r 'x'
abort: 00changelog.i@: ambiguous identifier!
[255]
$ log 'id(23268)'
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/tests/test-rhg.t Mon Jul 20 21:56:27 2020 +0530
@@ -0,0 +1,26 @@
+#require rust
+
+ $ rhg() {
+ > if [ -f "$RUNTESTDIR/../rust/target/debug/rhg" ]; then
+ > "$RUNTESTDIR/../rust/target/debug/rhg" "$@"
+ > else
+ > echo "skipped: Cannot find rhg. Try to run cargo build in rust/rhg."
+ > exit 80
+ > fi
+ > }
+ $ rhg unimplemented-command
+ [252]
+ $ rhg root
+ abort: no repository found in '$TESTTMP' (.hg not found)!
+ [255]
+ $ hg init repository
+ $ cd repository
+ $ rhg root
+ $TESTTMP/repository
+ $ rhg root > /dev/full
+ abort: No space left on device (os error 28)
+ [255]
+ $ rm -rf `pwd`
+ $ rhg root
+ abort: error getting current working directory: $ENOENT$
+ [255]
--- a/tests/test-rollback.t Tue Jul 14 10:25:41 2020 +0200
+++ b/tests/test-rollback.t Mon Jul 20 21:56:27 2020 +0530
@@ -116,6 +116,7 @@
transaction abort!
rollback completed
note: commit message saved in .hg/last-message.txt
+ note: use 'hg commit --logfile .hg/last-message.txt --edit' to reuse it
abort: pretxncommit hook exited with status * (glob)
[255]
$ cat .hg/last-message.txt
--- a/tests/test-run-tests.t Tue Jul 14 10:25:41 2020 +0200
+++ b/tests/test-run-tests.t Mon Jul 20 21:56:27 2020 +0530
@@ -747,7 +747,7 @@
This is a noop statement so that
this test is still more bytes than success.
pad pad pad pad............................................................
- Accept this change? [n]
+ Accept this change? [y/N]
ERROR: test-failure.t output changed
!.
Failed test-failure.t: output changed
@@ -772,7 +772,7 @@
$ echo 'n' | rt -i --view echo
running 2 tests using 1 parallel processes
$TESTTMP/test-failure.t $TESTTMP/test-failure.t.err
- Accept this change? [n]* (glob)
+ Accept this change? [y/N]* (glob)
ERROR: test-failure.t output changed
!.
Failed test-failure.t: output changed
@@ -823,7 +823,7 @@
+ saved backup bundle to $TESTTMP/foo.hg
$ echo 'saved backup bundle to $TESTTMP/foo.hg'
saved backup bundle to $TESTTMP/*.hg (glob)
- Accept this change? [n] ..
+ Accept this change? [y/N] ..
# Ran 2 tests, 0 skipped, 0 failed.
$ sed -e 's,(glob)$,&<,g' test-failure.t
@@ -900,7 +900,7 @@
#endif
#if b
$ echo 2
- Accept this change? [n] .
+ Accept this change? [y/N] .
--- $TESTTMP/test-cases.t
+++ $TESTTMP/test-cases.t#b.err
@@ -5,4 +5,5 @@
@@ -909,7 +909,7 @@
$ echo 2
+ 2
#endif
- Accept this change? [n] .
+ Accept this change? [y/N] .
# Ran 2 tests, 0 skipped, 0 failed.
$ cat test-cases.t
@@ -1285,7 +1285,7 @@
This is a noop statement so that
this test is still more bytes than success.
pad pad pad pad............................................................
- Accept this change? [n] ..s
+ Accept this change? [y/N] ..s
Skipped test-skip.t: missing feature: nail clipper
# Ran 2 tests, 1 skipped, 0 failed.
--- a/tests/test-serve.t Tue Jul 14 10:25:41 2020 +0200
+++ b/tests/test-serve.t Mon Jul 20 21:56:27 2020 +0530
@@ -103,7 +103,10 @@
issue6362: Previously, this crashed on Python 3
- $ hg serve -a 0.0.0.0 -d
- listening at http://*:$HGPORT1/ (bound to *:$HGPORT1) (glob)
+ $ hg serve -a 0.0.0.0 -d --pid-file=hg.pid
+ listening at http://*:$HGPORT1/ (bound to *:$HGPORT1) (glob) (?)
+
+ $ cat hg.pid > "$DAEMON_PIDS"
+ $ "$PYTHON" $RUNTESTDIR/killdaemons.py $DAEMON_PIDS
$ cd ..
--- a/tests/test-setdiscovery.t Tue Jul 14 10:25:41 2020 +0200
+++ b/tests/test-setdiscovery.t Mon Jul 20 21:56:27 2020 +0530
@@ -1112,3 +1112,40 @@
* @5d0b986a083e0d91f116de4691e2aaa54d5bbec0 (*)> found 101 common and 1 unknown server heads, 1 roundtrips in *.????s (glob)
* @5d0b986a083e0d91f116de4691e2aaa54d5bbec0 (*)> -R r1 outgoing r2 *-T{rev} * --config *extensions.blackbox=* exited 0 after *.?? seconds (glob)
$ cd ..
+
+Even if the set of revs to discover is restricted with --rev, unrelated revs
+may be returned as common heads: discovery starts from the heads the remote
+advertises, and any advertised head that is already known locally is
+classified as common regardless of the requested set.
+
+ $ mkdir ancestorsof
+ $ cd ancestorsof
+ $ hg init a
+ $ hg clone a b -q
+ $ cd b
+ $ hg debugbuilddag '.:root *root *root'
+ $ hg log -G -T '{node|short}'
+ o fa942426a6fd
+ |
+ | o 66f7d451a68b
+ |/
+ o 1ea73414a91b
+
+ $ hg push -r 66f7d451a68b -q
+ $ hg debugdiscovery --verbose --rev fa942426a6fd
+ comparing with $TESTTMP/ancestorsof/a
+ searching for changes
+ elapsed time: * seconds (glob)
+ heads summary:
+ total common heads: 1
+ also local heads: 1
+ also remote heads: 1
+ both: 1
+ local heads: 2
+ common: 1
+ missing: 1
+ remote heads: 1
+ common: 1
+ unknown: 0
+ local changesets: 3
+ common: 2
+ missing: 1
+ common heads: 66f7d451a68b
--- a/tests/test-sidedata.t Tue Jul 14 10:25:41 2020 +0200
+++ b/tests/test-sidedata.t Mon Jul 20 21:56:27 2020 +0530
@@ -50,27 +50,29 @@
$ hg init up-no-side-data --config format.exp-use-side-data=no
$ hg debugformat -v -R up-no-side-data
- format-variant repo config default
- fncache: yes yes yes
- dotencode: yes yes yes
- generaldelta: yes yes yes
- sparserevlog: yes yes yes
- sidedata: no no no
- copies-sdc: no no no
- plain-cl-delta: yes yes yes
- compression: zlib zlib zlib
- compression-level: default default default
+ format-variant repo config default
+ fncache: yes yes yes
+ dotencode: yes yes yes
+ generaldelta: yes yes yes
+ sparserevlog: yes yes yes
+ sidedata: no no no
+ persistent-nodemap: no no no
+ copies-sdc: no no no
+ plain-cl-delta: yes yes yes
+ compression: zlib zlib zlib
+ compression-level: default default default
$ hg debugformat -v -R up-no-side-data --config format.exp-use-side-data=yes
- format-variant repo config default
- fncache: yes yes yes
- dotencode: yes yes yes
- generaldelta: yes yes yes
- sparserevlog: yes yes yes
- sidedata: no yes no
- copies-sdc: no no no
- plain-cl-delta: yes yes yes
- compression: zlib zlib zlib
- compression-level: default default default
+ format-variant repo config default
+ fncache: yes yes yes
+ dotencode: yes yes yes
+ generaldelta: yes yes yes
+ sparserevlog: yes yes yes
+ sidedata: no yes no
+ persistent-nodemap: no no no
+ copies-sdc: no no no
+ plain-cl-delta: yes yes yes
+ compression: zlib zlib zlib
+ compression-level: default default default
$ hg debugupgraderepo -R up-no-side-data --config format.exp-use-side-data=yes > /dev/null
Check that we can downgrade from sidedata
@@ -78,25 +80,27 @@
$ hg init up-side-data --config format.exp-use-side-data=yes
$ hg debugformat -v -R up-side-data
- format-variant repo config default
- fncache: yes yes yes
- dotencode: yes yes yes
- generaldelta: yes yes yes
- sparserevlog: yes yes yes
- sidedata: yes no no
- copies-sdc: no no no
- plain-cl-delta: yes yes yes
- compression: zlib zlib zlib
- compression-level: default default default
+ format-variant repo config default
+ fncache: yes yes yes
+ dotencode: yes yes yes
+ generaldelta: yes yes yes
+ sparserevlog: yes yes yes
+ sidedata: yes no no
+ persistent-nodemap: no no no
+ copies-sdc: no no no
+ plain-cl-delta: yes yes yes
+ compression: zlib zlib zlib
+ compression-level: default default default
$ hg debugformat -v -R up-side-data --config format.exp-use-side-data=no
- format-variant repo config default
- fncache: yes yes yes
- dotencode: yes yes yes
- generaldelta: yes yes yes
- sparserevlog: yes yes yes
- sidedata: yes no no
- copies-sdc: no no no
- plain-cl-delta: yes yes yes
- compression: zlib zlib zlib
- compression-level: default default default
+ format-variant repo config default
+ fncache: yes yes yes
+ dotencode: yes yes yes
+ generaldelta: yes yes yes
+ sparserevlog: yes yes yes
+ sidedata: yes no no
+ persistent-nodemap: no no no
+ copies-sdc: no no no
+ plain-cl-delta: yes yes yes
+ compression: zlib zlib zlib
+ compression-level: default default default
$ hg debugupgraderepo -R up-side-data --config format.exp-use-side-data=no > /dev/null
--- a/tests/test-sparse-profiles.t Tue Jul 14 10:25:41 2020 +0200
+++ b/tests/test-sparse-profiles.t Mon Jul 20 21:56:27 2020 +0530
@@ -200,7 +200,7 @@
merging data.py
warning: conflicts while merging backend.sparse! (edit, then use 'hg resolve --mark')
warning: conflicts while merging data.py! (edit, then use 'hg resolve --mark')
- unresolved conflicts (see hg resolve, then hg rebase --continue)
+ unresolved conflicts (see 'hg resolve', then 'hg rebase --continue')
[1]
$ rm *.orig
$ ls -A
--- a/tests/test-sparse.t Tue Jul 14 10:25:41 2020 +0200
+++ b/tests/test-sparse.t Mon Jul 20 21:56:27 2020 +0530
@@ -200,7 +200,7 @@
temporarily included 2 file(s) in the sparse checkout for merging
merging hide
warning: conflicts while merging hide! (edit, then use 'hg resolve --mark')
- unresolved conflicts (see hg resolve, then hg rebase --continue)
+ unresolved conflicts (see 'hg resolve', then 'hg rebase --continue')
[1]
$ hg debugsparse
--- a/tests/test-ssh.t Tue Jul 14 10:25:41 2020 +0200
+++ b/tests/test-ssh.t Mon Jul 20 21:56:27 2020 +0530
@@ -46,6 +46,10 @@
remote: abort: repository nonexistent not found!
abort: no suitable response from remote hg!
[255]
+ $ hg clone -q -e "\"$PYTHON\" \"$TESTDIR/dummyssh\"" ssh://user@dummy/nonexistent local
+ remote: abort: repository nonexistent not found!
+ abort: no suitable response from remote hg!
+ [255]
non-existent absolute path
@@ -553,6 +557,7 @@
$ cat dummylog
Got arguments 1:user@dummy 2:hg -R nonexistent serve --stdio
+ Got arguments 1:user@dummy 2:hg -R nonexistent serve --stdio
Got arguments 1:user@dummy 2:hg -R $TESTTMP/nonexistent serve --stdio
Got arguments 1:user@dummy 2:hg -R remote serve --stdio
Got arguments 1:user@dummy 2:hg -R local-stream serve --stdio (no-reposimplestore !)
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/tests/test-state-extension.t Mon Jul 20 21:56:27 2020 +0530
@@ -0,0 +1,136 @@
+Test that extensions can add support for their own unfinished states.
+ $ mkdir chainify
+ $ cd chainify
+ $ cat >> chainify.py <<EOF
+ > from mercurial import cmdutil, error, extensions, exthelper, node, scmutil, state
+ > from hgext import rebase
+ >
+ > eh = exthelper.exthelper()
+ >
+ > extsetup = eh.finalextsetup
+ > cmdtable = eh.cmdtable
+ >
+ > # Rebase calls addunfinished in uisetup, so we have to call it in extsetup.
+ > # Ideally there'd be an 'extensions.afteruisetup()' just like
+ > # 'extensions.afterloaded()' to allow nesting multiple commands.
+ > @eh.extsetup
+ > def _extsetup(ui):
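+ > # Register 'chainify' as an unfinished operation: continueflag adds a
+ > # --continue flag, and childopnames lets rebase run (and be resumed)
+ > # as a child operation of chainify.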
+ > state.addunfinished(
+ > b'chainify',
+ > b'chainify.state',
+ > continueflag=True,
+ > childopnames=[b'rebase'])
+ >
+ > def _node(repo, arg):
+ > return node.hex(scmutil.revsingle(repo, arg).node())
+ >
+ > @eh.command(
+ > b'chainify',
+ > [(b'r', b'revs', [], b'revs to chain', b'REV'),
+ > (b'', b'continue', False, b'continue op')],
+ > b'chainify [-r REV] +',
+ > inferrepo=True)
+ > def chainify(ui, repo, **opts):
+ > """Rebases r1, r2, r3, etc. into a chain."""
+ > with repo.wlock(), repo.lock():
+ > cmdstate = state.cmdstate(repo, b'chainify.state')
+ > if opts['continue']:
+ > if not cmdstate.exists():
+ > raise error.Abort(b'no chainify in progress')
+ > else:
+ > cmdutil.checkunfinished(repo)
+ > data = {
+ > b'tip': _node(repo, opts['revs'][0]),
+ > b'revs': b','.join(_node(repo, r) for r in opts['revs'][1:]),
+ > }
+ > cmdstate.save(1, data)
+ >
+ > data = cmdstate.read()
+ > while data[b'revs']:
+ > tip = data[b'tip']
+ > revs = data[b'revs'].split(b',')
+ > with state.delegating(repo, b'chainify', b'rebase'):
+ > ui.status(b'rebasing %s onto %s\n' % (revs[0][:12], tip[:12]))
+ > if state.ischildunfinished(repo, b'chainify', b'rebase'):
+ > rc = state.continuechild(ui, repo, b'chainify', b'rebase')
+ > else:
+ > rc = rebase.rebase(ui, repo, rev=[revs[0]], dest=tip)
+ > if rc and rc != 0:
+ > raise error.Abort(b'rebase failed (rc: %d)' % rc)
+ > data[b'tip'] = _node(repo, b'tip')
+ > data[b'revs'] = b','.join(revs[1:])
+ > cmdstate.save(1, data)
+ > cmdstate.delete()
+ > ui.status(b'done chainifying\n')
+ > EOF
+
+ $ chainifypath=`pwd`/chainify.py
+ $ echo '[extensions]' >> $HGRCPATH
+ $ echo "chainify = $chainifypath" >> $HGRCPATH
+ $ echo "rebase =" >> $HGRCPATH
+
+ $ cd $TESTTMP
+ $ hg init a
+ $ cd a
+ $ echo base > base.txt
+ $ hg commit -Aqm 'base commit'
+ $ echo foo > file1
+ $ hg commit -Aqm 'add file'
+ $ hg co -q ".^"
+ $ echo bar > file2
+ $ hg commit -Aqm 'add other file'
+ $ hg co -q ".^"
+ $ echo foo2 > file1
+ $ hg commit -Aqm 'add conflicting file'
+ $ hg co -q ".^"
+ $ hg log --graph --template '{rev} {files}'
+ o 3 file1
+ |
+ | o 2 file2
+ |/
+ | o 1 file1
+ |/
+ @ 0 base.txt
+
+ $ hg chainify -r 8430cfdf77c2 -r f8596309dff8 -r a858b338b3e9
+ rebasing f8596309dff8 onto 8430cfdf77c2
+ rebasing 2:f8596309dff8 "add other file"
+ saved backup bundle to $TESTTMP/* (glob)
+ rebasing a858b338b3e9 onto 83c722183a8e
+ rebasing 2:a858b338b3e9 "add conflicting file"
+ merging file1
+ warning: conflicts while merging file1! (edit, then use 'hg resolve --mark')
+ unresolved conflicts (see 'hg resolve', then 'hg chainify --continue')
+ [1]
+ $ hg status --config commands.status.verbose=True
+ M file1
+ ? file1.orig
+ # The repository is in an unfinished *chainify* state.
+
+ # Unresolved merge conflicts:
+ #
+ # file1
+ #
+ # To mark files as resolved: hg resolve --mark FILE
+
+ # To continue: hg chainify --continue
+ # To abort: hg chainify --abort
+
+ $ echo foo3 > file1
+ $ hg resolve --mark file1
+ (no more unresolved files)
+ continue: hg chainify --continue
+ $ hg chainify --continue
+ rebasing a858b338b3e9 onto 83c722183a8e
+ rebasing 2:a858b338b3e9 "add conflicting file"
+ saved backup bundle to $TESTTMP/* (glob)
+ done chainifying
+ $ hg log --graph --template '{rev} {files}'
+ o 3 file1
+ |
+ o 2 file2
+ |
+ o 1 file1
+ |
+ @ 0 base.txt
+
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/tests/test-stdio.py Mon Jul 20 21:56:27 2020 +0530
@@ -0,0 +1,365 @@
+#!/usr/bin/env python
+"""
+Tests the buffering behavior of stdio streams in `mercurial.utils.procutil`.
+"""
+from __future__ import absolute_import
+
+import contextlib
+import errno
+import os
+import signal
+import subprocess
+import sys
+import tempfile
+import unittest
+
+from mercurial import pycompat, util
+
+
+if pycompat.ispy3:
+
+ def set_noninheritable(fd):
+ # On Python 3, file descriptors are non-inheritable by default.
+ pass
+
+
+else:
+ if pycompat.iswindows:
+ # unused
+ set_noninheritable = None
+ else:
+ import fcntl
+
+ def set_noninheritable(fd):
+ old = fcntl.fcntl(fd, fcntl.F_GETFD)
+ fcntl.fcntl(fd, fcntl.F_SETFD, old | fcntl.FD_CLOEXEC)
+
+
+TEST_BUFFERING_CHILD_SCRIPT = r'''
+import os
+
+from mercurial import dispatch
+from mercurial.utils import procutil
+
+dispatch.initstdio()
+procutil.{stream}.write(b'aaa')
+os.write(procutil.{stream}.fileno(), b'[written aaa]')
+procutil.{stream}.write(b'bbb\n')
+os.write(procutil.{stream}.fileno(), b'[written bbb\\n]')
+'''
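+# Expected interleavings of the buffered writes (b'aaa', b'bbb\n') with the
+# direct os.write() markers, one constant per stream buffering mode.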
+UNBUFFERED = b'aaa[written aaa]bbb\n[written bbb\\n]'
+LINE_BUFFERED = b'[written aaa]aaabbb\n[written bbb\\n]'
+FULLY_BUFFERED = b'[written aaa][written bbb\\n]aaabbb\n'
+
+
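+# The child writes 1 MiB in a single call and records write()'s return value
+# in a file, so the parent can verify both that an interrupted write is
+# retried to completion and that the full byte count is reported.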
+TEST_LARGE_WRITE_CHILD_SCRIPT = r'''
+import os
+import signal
+import sys
+
+from mercurial import dispatch
+from mercurial.utils import procutil
+
+signal.signal(signal.SIGINT, lambda *x: None)
+dispatch.initstdio()
+write_result = procutil.{stream}.write(b'x' * 1048576)
+with os.fdopen(
+ os.open({write_result_fn!r}, os.O_WRONLY | getattr(os, 'O_TEMPORARY', 0)),
+ 'w',
+) as write_result_f:
+ write_result_f.write(str(write_result))
+'''
+
+
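+# The child blocks on stdin until the parent has closed the read end of the
+# stream under test, then writes into the broken pipe and pickles the
+# resulting EnvironmentError for the parent to inspect.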
+TEST_BROKEN_PIPE_CHILD_SCRIPT = r'''
+import os
+import pickle
+
+from mercurial import dispatch
+from mercurial.utils import procutil
+
+dispatch.initstdio()
+procutil.stdin.read(1) # wait until parent process closed pipe
+try:
+ procutil.{stream}.write(b'test')
+ procutil.{stream}.flush()
+except EnvironmentError as e:
+ with os.fdopen(
+ os.open(
+ {err_fn!r},
+ os.O_WRONLY
+ | getattr(os, 'O_BINARY', 0)
+ | getattr(os, 'O_TEMPORARY', 0),
+ ),
+ 'wb',
+ ) as err_f:
+ pickle.dump(e, err_f)
+# Exit early to suppress further broken pipe errors at interpreter shutdown.
+os._exit(0)
+'''
+
+
+@contextlib.contextmanager
+def _closing(fds):
+ try:
+ yield
+ finally:
+ for fd in fds:
+ try:
+ os.close(fd)
+ except EnvironmentError:
+ pass
+
+
+# In the following, we set the FDs non-inheritable mainly to make it possible
+# for tests to close the receiving end of the pipe / PTYs.
+
+
+@contextlib.contextmanager
+def _devnull():
+ devnull = os.open(os.devnull, os.O_WRONLY)
+ # We don't have a receiving end, so it's not worth the effort on Python 2
+ # on Windows to make the FD non-inheritable.
+ with _closing([devnull]):
+ yield (None, devnull)
+
+
+@contextlib.contextmanager
+def _pipes():
+ rwpair = os.pipe()
+ # Pipes are already non-inheritable on Windows.
+ if not pycompat.iswindows:
+ set_noninheritable(rwpair[0])
+ set_noninheritable(rwpair[1])
+ with _closing(rwpair):
+ yield rwpair
+
+
+@contextlib.contextmanager
+def _ptys():
+ if pycompat.iswindows:
+ raise unittest.SkipTest("PTYs are not supported on Windows")
+ import pty
+ import tty
+
+ rwpair = pty.openpty()
+ set_noninheritable(rwpair[0])
+ set_noninheritable(rwpair[1])
+ with _closing(rwpair):
+ tty.setraw(rwpair[0])
+ yield rwpair
+
+
+def _readall(fd, buffer_size, initial_buf=None):
+ buf = initial_buf or []
+ while True:
+ try:
+ s = os.read(fd, buffer_size)
+ except OSError as e:
+ if e.errno == errno.EIO:
+ # If the child-facing PTY got closed, reading from the
+ # parent-facing PTY raises EIO.
+ break
+ raise
+ if not s:
+ break
+ buf.append(s)
+ return b''.join(buf)
+
+
+class TestStdio(unittest.TestCase):
+ def _test(
+ self,
+ child_script,
+ stream,
+ rwpair_generator,
+ check_output,
+ python_args=[],
+ post_child_check=None,
+ stdin_generator=None,
+ ):
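+ # Run child_script in a subprocess with the stream under test attached
+ # to the write end produced by rwpair_generator, then let check_output
+ # inspect what arrives on the read end.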
+ assert stream in ('stdout', 'stderr')
+ if stdin_generator is None:
+ stdin_generator = open(os.devnull, 'rb')
+ with rwpair_generator() as (
+ stream_receiver,
+ child_stream,
+ ), stdin_generator as child_stdin:
+ proc = subprocess.Popen(
+ [sys.executable] + python_args + ['-c', child_script],
+ stdin=child_stdin,
+ stdout=child_stream if stream == 'stdout' else None,
+ stderr=child_stream if stream == 'stderr' else None,
+ )
+ try:
+ os.close(child_stream)
+ if stream_receiver is not None:
+ check_output(stream_receiver, proc)
+ except: # re-raises
+ proc.terminate()
+ raise
+ finally:
+ retcode = proc.wait()
+ self.assertEqual(retcode, 0)
+ if post_child_check is not None:
+ post_child_check()
+
+ def _test_buffering(
+ self, stream, rwpair_generator, expected_output, python_args=[]
+ ):
+ def check_output(stream_receiver, proc):
+ self.assertEqual(_readall(stream_receiver, 1024), expected_output)
+
+ self._test(
+ TEST_BUFFERING_CHILD_SCRIPT.format(stream=stream),
+ stream,
+ rwpair_generator,
+ check_output,
+ python_args,
+ )
+
+ def test_buffering_stdout_devnull(self):
+ self._test_buffering('stdout', _devnull, None)
+
+ def test_buffering_stdout_pipes(self):
+ self._test_buffering('stdout', _pipes, FULLY_BUFFERED)
+
+ def test_buffering_stdout_ptys(self):
+ self._test_buffering('stdout', _ptys, LINE_BUFFERED)
+
+ def test_buffering_stdout_devnull_unbuffered(self):
+ self._test_buffering('stdout', _devnull, None, python_args=['-u'])
+
+ def test_buffering_stdout_pipes_unbuffered(self):
+ self._test_buffering('stdout', _pipes, UNBUFFERED, python_args=['-u'])
+
+ def test_buffering_stdout_ptys_unbuffered(self):
+ self._test_buffering('stdout', _ptys, UNBUFFERED, python_args=['-u'])
+
+ if not pycompat.ispy3 and not pycompat.iswindows:
+ # On Python 2 on non-Windows, we manually open stdout in line-buffered
+ # mode if connected to a TTY. We should check if Python was configured
+ # to use unbuffered stdout, but it's hard to do that.
+ test_buffering_stdout_ptys_unbuffered = unittest.expectedFailure(
+ test_buffering_stdout_ptys_unbuffered
+ )
+
+ def _test_large_write(self, stream, rwpair_generator, python_args=[]):
+ if not pycompat.ispy3 and pycompat.isdarwin:
+ # Python 2 doesn't always retry on EINTR, but the libc might retry.
+ # So far, it was observed only on macOS that EINTR is raised at the
+ # Python level. As Python 2 support will be dropped soon-ish, we
+ # won't attempt to fix it.
+ raise unittest.SkipTest("raises EINTR on macOS")
+
+ def check_output(stream_receiver, proc):
+ if not pycompat.iswindows:
+ # On Unix, we can provoke a partial write() by interrupting it
+ # by a signal handler as soon as a bit of data was written.
+ # We test that write() is called until all data is written.
+ buf = [os.read(stream_receiver, 1)]
+ proc.send_signal(signal.SIGINT)
+ else:
+ # On Windows, there doesn't seem to be a way to cause partial
+ # writes.
+ buf = []
+ self.assertEqual(
+ _readall(stream_receiver, 131072, buf), b'x' * 1048576
+ )
+
+ def post_child_check():
+ write_result_str = write_result_f.read()
+ if pycompat.ispy3:
+ # On Python 3, we test that the correct number of bytes is
+ # claimed to have been written.
+ expected_write_result_str = '1048576'
+ else:
+ # On Python 2, we only check that the large write does not
+ # crash.
+ expected_write_result_str = 'None'
+ self.assertEqual(write_result_str, expected_write_result_str)
+
+ with tempfile.NamedTemporaryFile('r') as write_result_f:
+ self._test(
+ TEST_LARGE_WRITE_CHILD_SCRIPT.format(
+ stream=stream, write_result_fn=write_result_f.name
+ ),
+ stream,
+ rwpair_generator,
+ check_output,
+ python_args,
+ post_child_check=post_child_check,
+ )
+
+ def test_large_write_stdout_devnull(self):
+ self._test_large_write('stdout', _devnull)
+
+ def test_large_write_stdout_pipes(self):
+ self._test_large_write('stdout', _pipes)
+
+ def test_large_write_stdout_ptys(self):
+ self._test_large_write('stdout', _ptys)
+
+ def test_large_write_stdout_devnull_unbuffered(self):
+ self._test_large_write('stdout', _devnull, python_args=['-u'])
+
+ def test_large_write_stdout_pipes_unbuffered(self):
+ self._test_large_write('stdout', _pipes, python_args=['-u'])
+
+ def test_large_write_stdout_ptys_unbuffered(self):
+ self._test_large_write('stdout', _ptys, python_args=['-u'])
+
+ def test_large_write_stderr_devnull(self):
+ self._test_large_write('stderr', _devnull)
+
+ def test_large_write_stderr_pipes(self):
+ self._test_large_write('stderr', _pipes)
+
+ def test_large_write_stderr_ptys(self):
+ self._test_large_write('stderr', _ptys)
+
+ def test_large_write_stderr_devnull_unbuffered(self):
+ self._test_large_write('stderr', _devnull, python_args=['-u'])
+
+ def test_large_write_stderr_pipes_unbuffered(self):
+ self._test_large_write('stderr', _pipes, python_args=['-u'])
+
+ def test_large_write_stderr_ptys_unbuffered(self):
+ self._test_large_write('stderr', _ptys, python_args=['-u'])
+
+ def _test_broken_pipe(self, stream):
+ assert stream in ('stdout', 'stderr')
+
+ def check_output(stream_receiver, proc):
+ os.close(stream_receiver)
+ proc.stdin.write(b'x')
+ proc.stdin.close()
+
+ def post_child_check():
+ err = util.pickle.load(err_f)
+ self.assertEqual(err.errno, errno.EPIPE)
+ self.assertEqual(err.strerror, "Broken pipe")
+
+ with tempfile.NamedTemporaryFile('rb') as err_f:
+ self._test(
+ TEST_BROKEN_PIPE_CHILD_SCRIPT.format(
+ stream=stream, err_fn=err_f.name
+ ),
+ stream,
+ _pipes,
+ check_output,
+ post_child_check=post_child_check,
+ stdin_generator=util.nullcontextmanager(subprocess.PIPE),
+ )
+
+ def test_broken_pipe_stdout(self):
+ self._test_broken_pipe('stdout')
+
+ def test_broken_pipe_stderr(self):
+ self._test_broken_pipe('stderr')
+
+
+if __name__ == '__main__':
+ import silenttestrunner
+
+ silenttestrunner.main(__name__)
--- a/tests/test-tag.t Tue Jul 14 10:25:41 2020 +0200
+++ b/tests/test-tag.t Mon Jul 20 21:56:27 2020 +0530
@@ -323,6 +323,7 @@
transaction abort!
rollback completed
note: commit message saved in .hg/last-message.txt
+ note: use 'hg commit --logfile .hg/last-message.txt --edit' to reuse it
abort: pretxncommit.unexpectedabort hook exited with status 1
[255]
$ cat .hg/last-message.txt
--- a/tests/test-template-functions.t Tue Jul 14 10:25:41 2020 +0200
+++ b/tests/test-template-functions.t Mon Jul 20 21:56:27 2020 +0530
@@ -1575,15 +1575,15 @@
> from __future__ import absolute_import
> from mercurial import (
> dispatch,
- > pycompat,
> )
> from mercurial.utils import (
> cborutil,
+ > procutil,
> stringutil,
> )
> dispatch.initstdio()
- > items = cborutil.decodeall(pycompat.stdin.read())
- > pycompat.stdout.write(stringutil.pprint(items, indent=1) + b'\n')
+ > items = cborutil.decodeall(procutil.stdin.read())
+ > procutil.stdout.write(stringutil.pprint(items, indent=1) + b'\n')
> EOF
$ hg log -T "{rev|cbor}" -R a -l2 | "$PYTHON" "$TESTTMP/decodecbor.py"
--- a/tests/test-template-map.t Tue Jul 14 10:25:41 2020 +0200
+++ b/tests/test-template-map.t Mon Jul 20 21:56:27 2020 +0530
@@ -675,20 +675,20 @@
> from __future__ import absolute_import
> from mercurial import (
> dispatch,
- > pycompat,
> )
> from mercurial.utils import (
> cborutil,
+ > procutil,
> stringutil,
> )
> dispatch.initstdio()
- > data = pycompat.stdin.read()
+ > data = procutil.stdin.read()
> # our CBOR decoder doesn't support parsing indefinite-length arrays,
> # but the log output is indefinite stream by nature.
> assert data[:1] == cborutil.BEGIN_INDEFINITE_ARRAY
> assert data[-1:] == cborutil.BREAK
> items = cborutil.decodeall(data[1:-1])
- > pycompat.stdout.write(stringutil.pprint(items, indent=1) + b'\n')
+ > procutil.stdout.write(stringutil.pprint(items, indent=1) + b'\n')
> EOF
$ hg log -k nosuch -Tcbor | "$PYTHON" "$TESTTMP/decodecborarray.py"
--- a/tests/test-upgrade-repo.t Tue Jul 14 10:25:41 2020 +0200
+++ b/tests/test-upgrade-repo.t Mon Jul 20 21:56:27 2020 +0530
@@ -52,49 +52,53 @@
$ hg init empty
$ cd empty
$ hg debugformat
- format-variant repo
- fncache: yes
- dotencode: yes
- generaldelta: yes
- sparserevlog: yes
- sidedata: no
- copies-sdc: no
- plain-cl-delta: yes
- compression: zlib
- compression-level: default
+ format-variant repo
+ fncache: yes
+ dotencode: yes
+ generaldelta: yes
+ sparserevlog: yes
+ sidedata: no
+ persistent-nodemap: no
+ copies-sdc: no
+ plain-cl-delta: yes
+ compression: zlib
+ compression-level: default
$ hg debugformat --verbose
- format-variant repo config default
- fncache: yes yes yes
- dotencode: yes yes yes
- generaldelta: yes yes yes
- sparserevlog: yes yes yes
- sidedata: no no no
- copies-sdc: no no no
- plain-cl-delta: yes yes yes
- compression: zlib zlib zlib
- compression-level: default default default
+ format-variant repo config default
+ fncache: yes yes yes
+ dotencode: yes yes yes
+ generaldelta: yes yes yes
+ sparserevlog: yes yes yes
+ sidedata: no no no
+ persistent-nodemap: no no no
+ copies-sdc: no no no
+ plain-cl-delta: yes yes yes
+ compression: zlib zlib zlib
+ compression-level: default default default
$ hg debugformat --verbose --config format.usefncache=no
- format-variant repo config default
- fncache: yes no yes
- dotencode: yes no yes
- generaldelta: yes yes yes
- sparserevlog: yes yes yes
- sidedata: no no no
- copies-sdc: no no no
- plain-cl-delta: yes yes yes
- compression: zlib zlib zlib
- compression-level: default default default
+ format-variant repo config default
+ fncache: yes no yes
+ dotencode: yes no yes
+ generaldelta: yes yes yes
+ sparserevlog: yes yes yes
+ sidedata: no no no
+ persistent-nodemap: no no no
+ copies-sdc: no no no
+ plain-cl-delta: yes yes yes
+ compression: zlib zlib zlib
+ compression-level: default default default
$ hg debugformat --verbose --config format.usefncache=no --color=debug
- format-variant repo config default
- [formatvariant.name.mismatchconfig|fncache: ][formatvariant.repo.mismatchconfig| yes][formatvariant.config.special| no][formatvariant.default| yes]
- [formatvariant.name.mismatchconfig|dotencode: ][formatvariant.repo.mismatchconfig| yes][formatvariant.config.special| no][formatvariant.default| yes]
- [formatvariant.name.uptodate|generaldelta: ][formatvariant.repo.uptodate| yes][formatvariant.config.default| yes][formatvariant.default| yes]
- [formatvariant.name.uptodate|sparserevlog: ][formatvariant.repo.uptodate| yes][formatvariant.config.default| yes][formatvariant.default| yes]
- [formatvariant.name.uptodate|sidedata: ][formatvariant.repo.uptodate| no][formatvariant.config.default| no][formatvariant.default| no]
- [formatvariant.name.uptodate|copies-sdc: ][formatvariant.repo.uptodate| no][formatvariant.config.default| no][formatvariant.default| no]
- [formatvariant.name.uptodate|plain-cl-delta: ][formatvariant.repo.uptodate| yes][formatvariant.config.default| yes][formatvariant.default| yes]
- [formatvariant.name.uptodate|compression: ][formatvariant.repo.uptodate| zlib][formatvariant.config.default| zlib][formatvariant.default| zlib]
- [formatvariant.name.uptodate|compression-level:][formatvariant.repo.uptodate| default][formatvariant.config.default| default][formatvariant.default| default]
+ format-variant repo config default
+ [formatvariant.name.mismatchconfig|fncache: ][formatvariant.repo.mismatchconfig| yes][formatvariant.config.special| no][formatvariant.default| yes]
+ [formatvariant.name.mismatchconfig|dotencode: ][formatvariant.repo.mismatchconfig| yes][formatvariant.config.special| no][formatvariant.default| yes]
+ [formatvariant.name.uptodate|generaldelta: ][formatvariant.repo.uptodate| yes][formatvariant.config.default| yes][formatvariant.default| yes]
+ [formatvariant.name.uptodate|sparserevlog: ][formatvariant.repo.uptodate| yes][formatvariant.config.default| yes][formatvariant.default| yes]
+ [formatvariant.name.uptodate|sidedata: ][formatvariant.repo.uptodate| no][formatvariant.config.default| no][formatvariant.default| no]
+ [formatvariant.name.uptodate|persistent-nodemap:][formatvariant.repo.uptodate| no][formatvariant.config.default| no][formatvariant.default| no]
+ [formatvariant.name.uptodate|copies-sdc: ][formatvariant.repo.uptodate| no][formatvariant.config.default| no][formatvariant.default| no]
+ [formatvariant.name.uptodate|plain-cl-delta: ][formatvariant.repo.uptodate| yes][formatvariant.config.default| yes][formatvariant.default| yes]
+ [formatvariant.name.uptodate|compression: ][formatvariant.repo.uptodate| zlib][formatvariant.config.default| zlib][formatvariant.default| zlib]
+ [formatvariant.name.uptodate|compression-level: ][formatvariant.repo.uptodate| default][formatvariant.config.default| default][formatvariant.default| default]
$ hg debugformat -Tjson
[
{
@@ -130,6 +134,12 @@
{
"config": false,
"default": false,
+ "name": "persistent-nodemap",
+ "repo": false
+ },
+ {
+ "config": false,
+ "default": false,
"name": "copies-sdc",
"repo": false
},
@@ -174,6 +184,11 @@
every revision will be re-added as if it was new content. It will go through the full storage mechanism giving extensions a chance to process it (eg. lfs). This is similar to "re-delta-all" but even slower since more logic is involved.
+ $ hg debugupgraderepo --quiet
+ requirements
+ preserved: dotencode, fncache, generaldelta, revlogv1, sparserevlog, store
+
+
--optimize can be used to add optimizations
$ hg debugupgrade --optimize redeltaparent
@@ -183,6 +198,8 @@
requirements
preserved: dotencode, fncache, generaldelta, revlogv1, sparserevlog, store
+ optimisations: re-delta-parent
+
re-delta-parent
deltas within internal storage will choose a new base revision if needed
@@ -207,6 +224,8 @@
requirements
preserved: dotencode, fncache, generaldelta, revlogv1, sparserevlog, store
+ optimisations: re-delta-parent
+
re-delta-parent
deltas within internal storage will choose a new base revision if needed
@@ -221,6 +240,12 @@
re-delta-fulladd
every revision will be re-added as if it was new content. It will go through the full storage mechanism giving extensions a chance to process it (eg. lfs). This is similar to "re-delta-all" but even slower since more logic is involved.
+ $ hg debugupgrade --optimize re-delta-parent --quiet
+ requirements
+ preserved: dotencode, fncache, generaldelta, revlogv1, sparserevlog, store
+
+ optimisations: re-delta-parent
+
unknown optimization:
@@ -237,49 +262,53 @@
> EOF
$ hg debugformat
- format-variant repo
- fncache: no
- dotencode: no
- generaldelta: no
- sparserevlog: no
- sidedata: no
- copies-sdc: no
- plain-cl-delta: yes
- compression: zlib
- compression-level: default
+ format-variant repo
+ fncache: no
+ dotencode: no
+ generaldelta: no
+ sparserevlog: no
+ sidedata: no
+ persistent-nodemap: no
+ copies-sdc: no
+ plain-cl-delta: yes
+ compression: zlib
+ compression-level: default
$ hg debugformat --verbose
- format-variant repo config default
- fncache: no yes yes
- dotencode: no yes yes
- generaldelta: no yes yes
- sparserevlog: no yes yes
- sidedata: no no no
- copies-sdc: no no no
- plain-cl-delta: yes yes yes
- compression: zlib zlib zlib
- compression-level: default default default
+ format-variant repo config default
+ fncache: no yes yes
+ dotencode: no yes yes
+ generaldelta: no yes yes
+ sparserevlog: no yes yes
+ sidedata: no no no
+ persistent-nodemap: no no no
+ copies-sdc: no no no
+ plain-cl-delta: yes yes yes
+ compression: zlib zlib zlib
+ compression-level: default default default
$ hg debugformat --verbose --config format.usegeneraldelta=no
- format-variant repo config default
- fncache: no yes yes
- dotencode: no yes yes
- generaldelta: no no yes
- sparserevlog: no no yes
- sidedata: no no no
- copies-sdc: no no no
- plain-cl-delta: yes yes yes
- compression: zlib zlib zlib
- compression-level: default default default
+ format-variant repo config default
+ fncache: no yes yes
+ dotencode: no yes yes
+ generaldelta: no no yes
+ sparserevlog: no no yes
+ sidedata: no no no
+ persistent-nodemap: no no no
+ copies-sdc: no no no
+ plain-cl-delta: yes yes yes
+ compression: zlib zlib zlib
+ compression-level: default default default
$ hg debugformat --verbose --config format.usegeneraldelta=no --color=debug
- format-variant repo config default
- [formatvariant.name.mismatchconfig|fncache: ][formatvariant.repo.mismatchconfig| no][formatvariant.config.default| yes][formatvariant.default| yes]
- [formatvariant.name.mismatchconfig|dotencode: ][formatvariant.repo.mismatchconfig| no][formatvariant.config.default| yes][formatvariant.default| yes]
- [formatvariant.name.mismatchdefault|generaldelta: ][formatvariant.repo.mismatchdefault| no][formatvariant.config.special| no][formatvariant.default| yes]
- [formatvariant.name.mismatchdefault|sparserevlog: ][formatvariant.repo.mismatchdefault| no][formatvariant.config.special| no][formatvariant.default| yes]
- [formatvariant.name.uptodate|sidedata: ][formatvariant.repo.uptodate| no][formatvariant.config.default| no][formatvariant.default| no]
- [formatvariant.name.uptodate|copies-sdc: ][formatvariant.repo.uptodate| no][formatvariant.config.default| no][formatvariant.default| no]
- [formatvariant.name.uptodate|plain-cl-delta: ][formatvariant.repo.uptodate| yes][formatvariant.config.default| yes][formatvariant.default| yes]
- [formatvariant.name.uptodate|compression: ][formatvariant.repo.uptodate| zlib][formatvariant.config.default| zlib][formatvariant.default| zlib]
- [formatvariant.name.uptodate|compression-level:][formatvariant.repo.uptodate| default][formatvariant.config.default| default][formatvariant.default| default]
+ format-variant repo config default
+ [formatvariant.name.mismatchconfig|fncache: ][formatvariant.repo.mismatchconfig| no][formatvariant.config.default| yes][formatvariant.default| yes]
+ [formatvariant.name.mismatchconfig|dotencode: ][formatvariant.repo.mismatchconfig| no][formatvariant.config.default| yes][formatvariant.default| yes]
+ [formatvariant.name.mismatchdefault|generaldelta: ][formatvariant.repo.mismatchdefault| no][formatvariant.config.special| no][formatvariant.default| yes]
+ [formatvariant.name.mismatchdefault|sparserevlog: ][formatvariant.repo.mismatchdefault| no][formatvariant.config.special| no][formatvariant.default| yes]
+ [formatvariant.name.uptodate|sidedata: ][formatvariant.repo.uptodate| no][formatvariant.config.default| no][formatvariant.default| no]
+ [formatvariant.name.uptodate|persistent-nodemap:][formatvariant.repo.uptodate| no][formatvariant.config.default| no][formatvariant.default| no]
+ [formatvariant.name.uptodate|copies-sdc: ][formatvariant.repo.uptodate| no][formatvariant.config.default| no][formatvariant.default| no]
+ [formatvariant.name.uptodate|plain-cl-delta: ][formatvariant.repo.uptodate| yes][formatvariant.config.default| yes][formatvariant.default| yes]
+ [formatvariant.name.uptodate|compression: ][formatvariant.repo.uptodate| zlib][formatvariant.config.default| zlib][formatvariant.default| zlib]
+ [formatvariant.name.uptodate|compression-level: ][formatvariant.repo.uptodate| default][formatvariant.config.default| default][formatvariant.default| default]
$ hg debugupgraderepo
repository lacks features recommended by current config options:
@@ -328,6 +357,11 @@
re-delta-fulladd
every revision will be re-added as if it was new content. It will go through the full storage mechanism giving extensions a chance to process it (eg. lfs). This is similar to "re-delta-all" but even slower since more logic is involved.
+ $ hg debugupgraderepo --quiet
+ requirements
+ preserved: revlogv1, store
+ added: dotencode, fncache, generaldelta, sparserevlog
+
$ hg --config format.dotencode=false debugupgraderepo
repository lacks features recommended by current config options:
@@ -569,6 +603,8 @@
requirements
preserved: dotencode, fncache, generaldelta, revlogv1, sparserevlog, store
+ optimisations: re-delta-parent
+
re-delta-parent
deltas within internal storage will choose a new base revision if needed
@@ -643,6 +679,8 @@
requirements
preserved: dotencode, fncache, generaldelta, revlogv1, sparserevlog, store
+ optimisations: re-delta-parent
+
re-delta-parent
deltas within internal storage will choose a new base revision if needed
@@ -689,6 +727,8 @@
requirements
preserved: dotencode, fncache, generaldelta, revlogv1, sparserevlog, store
+ optimisations: re-delta-parent
+
re-delta-parent
deltas within internal storage will choose a new base revision if needed
@@ -735,6 +775,8 @@
requirements
preserved: dotencode, fncache, generaldelta, revlogv1, sparserevlog, store
+ optimisations: re-delta-parent
+
re-delta-parent
deltas within internal storage will choose a new base revision if needed
@@ -786,6 +828,8 @@
preserved: dotencode, fncache, generaldelta, revlogv1, store
removed: sparserevlog
+ optimisations: re-delta-parent
+
re-delta-parent
deltas within internal storage will choose a new base revision if needed
@@ -835,6 +879,8 @@
preserved: dotencode, fncache, generaldelta, revlogv1, store
added: sparserevlog
+ optimisations: re-delta-parent
+
sparserevlog
Revlog supports delta chain with more unused data between payload. These gaps will be skipped at read time. This allows for better delta chains, making a better compression and faster exchange with server.
@@ -923,6 +969,8 @@
requirements
preserved: dotencode, fncache, generaldelta, revlogv1, sparserevlog, store
+ optimisations: re-delta-fulladd
+
re-delta-fulladd
each revision will be added as new content to the internal storage; this will likely drastically slow down execution time, but some extensions might need it
@@ -1135,6 +1183,8 @@
requirements
preserved: dotencode, fncache, generaldelta, revlogv1, sparserevlog, store
+ optimisations: re-delta-all
+
re-delta-all
deltas within internal storage will be fully recomputed; this will likely drastically slow down execution time
@@ -1190,9 +1240,13 @@
store
Check that we can add the sparse-revlog format requirement
- $ hg --config format.sparse-revlog=yes debugupgraderepo --run >/dev/null
- copy of old repository backed up at $TESTTMP/sparserevlogrepo/.hg/upgradebackup.* (glob)
- the old repository will not be deleted; remove it to free up disk space once the upgraded repository is verified
+ $ hg --config format.sparse-revlog=yes debugupgraderepo --run --quiet
+ upgrade will perform the following actions:
+
+ requirements
+ preserved: dotencode, fncache, generaldelta, revlogv1, store
+ added: sparserevlog
+
$ cat .hg/requires
dotencode
fncache
@@ -1202,9 +1256,13 @@
store
Check that we can remove the sparse-revlog format requirement
- $ hg --config format.sparse-revlog=no debugupgraderepo --run >/dev/null
- copy of old repository backed up at $TESTTMP/sparserevlogrepo/.hg/upgradebackup.* (glob)
- the old repository will not be deleted; remove it to free up disk space once the upgraded repository is verified
+ $ hg --config format.sparse-revlog=no debugupgraderepo --run --quiet
+ upgrade will perform the following actions:
+
+ requirements
+ preserved: dotencode, fncache, generaldelta, revlogv1, store
+ removed: sparserevlog
+
$ cat .hg/requires
dotencode
fncache
@@ -1219,18 +1277,25 @@
upgrade
- $ hg --config format.revlog-compression=zstd debugupgraderepo --run --no-backup >/dev/null
+ $ hg --config format.revlog-compression=zstd debugupgraderepo --run --no-backup --quiet
+ upgrade will perform the following actions:
+
+ requirements
+ preserved: dotencode, fncache, generaldelta, revlogv1, store
+ added: revlog-compression-zstd, sparserevlog
+
$ hg debugformat -v
- format-variant repo config default
- fncache: yes yes yes
- dotencode: yes yes yes
- generaldelta: yes yes yes
- sparserevlog: yes yes yes
- sidedata: no no no
- copies-sdc: no no no
- plain-cl-delta: yes yes yes
- compression: zstd zlib zlib
- compression-level: default default default
+ format-variant repo config default
+ fncache: yes yes yes
+ dotencode: yes yes yes
+ generaldelta: yes yes yes
+ sparserevlog: yes yes yes
+ sidedata: no no no
+ persistent-nodemap: no no no
+ copies-sdc: no no no
+ plain-cl-delta: yes yes yes
+ compression: zstd zlib zlib
+ compression-level: default default default
$ cat .hg/requires
dotencode
fncache
@@ -1242,18 +1307,25 @@
downgrade
- $ hg debugupgraderepo --run --no-backup > /dev/null
+ $ hg debugupgraderepo --run --no-backup --quiet
+ upgrade will perform the following actions:
+
+ requirements
+ preserved: dotencode, fncache, generaldelta, revlogv1, sparserevlog, store
+ removed: revlog-compression-zstd
+
$ hg debugformat -v
- format-variant repo config default
- fncache: yes yes yes
- dotencode: yes yes yes
- generaldelta: yes yes yes
- sparserevlog: yes yes yes
- sidedata: no no no
- copies-sdc: no no no
- plain-cl-delta: yes yes yes
- compression: zlib zlib zlib
- compression-level: default default default
+ format-variant repo config default
+ fncache: yes yes yes
+ dotencode: yes yes yes
+ generaldelta: yes yes yes
+ sparserevlog: yes yes yes
+ sidedata: no no no
+ persistent-nodemap: no no no
+ copies-sdc: no no no
+ plain-cl-delta: yes yes yes
+ compression: zlib zlib zlib
+ compression-level: default default default
$ cat .hg/requires
dotencode
fncache
@@ -1268,18 +1340,25 @@
> [format]
> revlog-compression=zstd
> EOF
- $ hg debugupgraderepo --run --no-backup > /dev/null
+ $ hg debugupgraderepo --run --no-backup --quiet
+ upgrade will perform the following actions:
+
+ requirements
+ preserved: dotencode, fncache, generaldelta, revlogv1, sparserevlog, store
+ added: revlog-compression-zstd
+
$ hg debugformat -v
- format-variant repo config default
- fncache: yes yes yes
- dotencode: yes yes yes
- generaldelta: yes yes yes
- sparserevlog: yes yes yes
- sidedata: no no no
- copies-sdc: no no no
- plain-cl-delta: yes yes yes
- compression: zstd zstd zlib
- compression-level: default default default
+ format-variant repo config default
+ fncache: yes yes yes
+ dotencode: yes yes yes
+ generaldelta: yes yes yes
+ sparserevlog: yes yes yes
+ sidedata: no no no
+ persistent-nodemap: no no no
+ copies-sdc: no no no
+ plain-cl-delta: yes yes yes
+ compression: zstd zstd zlib
+ compression-level: default default default
$ cat .hg/requires
dotencode
fncache
@@ -1296,19 +1375,28 @@
upgrade
- $ hg --config format.exp-use-side-data=yes debugupgraderepo --run --no-backup --config "extensions.sidedata=$TESTDIR/testlib/ext-sidedata.py" >/dev/null
+ $ hg --config format.exp-use-side-data=yes debugupgraderepo --run --no-backup --config "extensions.sidedata=$TESTDIR/testlib/ext-sidedata.py" --quiet
+ upgrade will perform the following actions:
+
+ requirements
+ preserved: dotencode, fncache, generaldelta, revlogv1, store (no-zstd !)
+ preserved: dotencode, fncache, generaldelta, revlog-compression-zstd, revlogv1, sparserevlog, store (zstd !)
+ added: exp-sidedata-flag (zstd !)
+ added: exp-sidedata-flag, sparserevlog (no-zstd !)
+
$ hg debugformat -v
- format-variant repo config default
- fncache: yes yes yes
- dotencode: yes yes yes
- generaldelta: yes yes yes
- sparserevlog: yes yes yes
- sidedata: yes no no
- copies-sdc: no no no
- plain-cl-delta: yes yes yes
- compression: zstd zstd zlib (zstd !)
- compression: zlib zlib zlib (no-zstd !)
- compression-level: default default default
+ format-variant repo config default
+ fncache: yes yes yes
+ dotencode: yes yes yes
+ generaldelta: yes yes yes
+ sparserevlog: yes yes yes
+ sidedata: yes no no
+ persistent-nodemap: no no no
+ copies-sdc: no no no
+ plain-cl-delta: yes yes yes
+ compression: zlib zlib zlib (no-zstd !)
+ compression: zstd zstd zlib (zstd !)
+ compression-level: default default default
$ cat .hg/requires
dotencode
exp-sidedata-flag
@@ -1325,19 +1413,27 @@
downgrade
- $ hg debugupgraderepo --config format.exp-use-side-data=no --run --no-backup > /dev/null
+ $ hg debugupgraderepo --config format.exp-use-side-data=no --run --no-backup --quiet
+ upgrade will perform the following actions:
+
+ requirements
+ preserved: dotencode, fncache, generaldelta, revlogv1, sparserevlog, store (no-zstd !)
+ preserved: dotencode, fncache, generaldelta, revlog-compression-zstd, revlogv1, sparserevlog, store (zstd !)
+ removed: exp-sidedata-flag
+
$ hg debugformat -v
- format-variant repo config default
- fncache: yes yes yes
- dotencode: yes yes yes
- generaldelta: yes yes yes
- sparserevlog: yes yes yes
- sidedata: no no no
- copies-sdc: no no no
- plain-cl-delta: yes yes yes
- compression: zstd zstd zlib (zstd !)
- compression: zlib zlib zlib (no-zstd !)
- compression-level: default default default
+ format-variant repo config default
+ fncache: yes yes yes
+ dotencode: yes yes yes
+ generaldelta: yes yes yes
+ sparserevlog: yes yes yes
+ sidedata: no no no
+ persistent-nodemap: no no no
+ copies-sdc: no no no
+ plain-cl-delta: yes yes yes
+ compression: zlib zlib zlib (no-zstd !)
+ compression: zstd zstd zlib (zstd !)
+ compression-level: default default default
$ cat .hg/requires
dotencode
fncache
@@ -1354,19 +1450,27 @@
> [format]
> exp-use-side-data=yes
> EOF
- $ hg debugupgraderepo --run --no-backup > /dev/null
+ $ hg debugupgraderepo --run --no-backup --quiet
+ upgrade will perform the following actions:
+
+ requirements
+ preserved: dotencode, fncache, generaldelta, revlogv1, sparserevlog, store (no-zstd !)
+ preserved: dotencode, fncache, generaldelta, revlog-compression-zstd, revlogv1, sparserevlog, store (zstd !)
+ added: exp-sidedata-flag
+
$ hg debugformat -v
- format-variant repo config default
- fncache: yes yes yes
- dotencode: yes yes yes
- generaldelta: yes yes yes
- sparserevlog: yes yes yes
- sidedata: yes yes no
- copies-sdc: no no no
- plain-cl-delta: yes yes yes
- compression: zstd zstd zlib (zstd !)
- compression: zlib zlib zlib (no-zstd !)
- compression-level: default default default
+ format-variant repo config default
+ fncache: yes yes yes
+ dotencode: yes yes yes
+ generaldelta: yes yes yes
+ sparserevlog: yes yes yes
+ sidedata: yes yes no
+ persistent-nodemap: no no no
+ copies-sdc: no no no
+ plain-cl-delta: yes yes yes
+ compression: zlib zlib zlib (no-zstd !)
+ compression: zstd zstd zlib (zstd !)
+ compression-level: default default default
$ cat .hg/requires
dotencode
exp-sidedata-flag
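An aside on the -Tjson output shown earlier in this file: it lends itself to scripted checks of format variants such as the new persistent-nodemap row. A hedged sketch, not part of the test suite, run from inside a repository:

    import json
    import subprocess

    # `hg debugformat -Tjson` emits a list of objects with "name",
    # "repo", "config" and "default" keys, as in the test output above
    variants = json.loads(subprocess.check_output(
        ['hg', 'debugformat', '-Tjson']))
    row = next(v for v in variants if v['name'] == 'persistent-nodemap')
    print(row['repo'], row['config'], row['default'])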
--- a/tests/test-wireproto-command-lookup.t Tue Jul 14 10:25:41 2020 +0200
+++ b/tests/test-wireproto-command-lookup.t Mon Jul 20 21:56:27 2020 +0530
@@ -15,6 +15,7 @@
> |
> A
> EOF
+ $ root_node=$(hg log -r A -T '{node}')
$ hg serve -p $HGPORT -d --pid-file hg.pid -E error.log
$ cat hg.pid > $DAEMON_PIDS
@@ -23,7 +24,7 @@
$ sendhttpv2peer << EOF
> command lookup
- > key 426bada5c67598ca65036d57d9e4b64b0c1ce7a0
+ > key $root_node
> EOF
creating http peer for wire protocol version 2
sending lookup command
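The hunk above swaps a hard-coded changeset hash for one resolved at run time via hg log -r A -T '{node}', so the test no longer depends on the exact hash drawdag produces. The same idea in Python, as an illustrative sketch (the subprocess call is an assumption for illustration, not part of the patch):

    import subprocess

    # resolve the node of drawdag revision 'A' at run time instead of
    # hard-coding the 40-character hash
    root_node = subprocess.check_output(
        ['hg', 'log', '-r', 'A', '-T', '{node}']
    ).decode('ascii')
    assert len(root_node) == 40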
--- a/tests/test-wireproto-exchangev2-shallow.t Tue Jul 14 10:25:41 2020 +0200
+++ b/tests/test-wireproto-exchangev2-shallow.t Mon Jul 20 21:56:27 2020 +0530
@@ -265,13 +265,14 @@
received frame(size=9; request=1; stream=2; streamflags=stream-begin; type=stream-settings; flags=eos)
received frame(size=11; request=1; stream=2; streamflags=encoded; type=command-response; flags=continuation)
received frame(size=1170; request=1; stream=2; streamflags=encoded; type=command-response; flags=continuation)
- received frame(size=0; request=1; stream=2; streamflags=; type=command-response; flags=eos)
+ received frame(size=0; request=1; stream=2; streamflags=; type=command-response; flags=eos) (?)
add changeset 3390ef850073
add changeset b709380892b1
add changeset 47fe012ab237
add changeset 97765fc3cd62
add changeset dc666cf9ecf3
add changeset 93a8bd067ed2
+ received frame(size=0; request=1; stream=2; streamflags=; type=command-response; flags=eos) (?)
checking for updated bookmarks
sending 1 commands
sending command manifestdata: {