--- a/Makefile Wed Jan 09 20:00:35 2019 -0800
+++ b/Makefile Fri Jan 18 13:28:22 2019 -0500
@@ -72,6 +72,8 @@
rm -rf build mercurial/locale
$(MAKE) -C doc clean
$(MAKE) -C contrib/chg distclean
+ rm -rf rust/target
+ rm -f mercurial/rustext.so
clean: cleanbutpackages
rm -rf packages
@@ -178,6 +180,7 @@
docker-fedora20 \
docker-fedora21 \
docker-fedora28 \
+ docker-fedora29 \
docker-ubuntu-trusty \
docker-ubuntu-trusty-ppa \
docker-ubuntu-xenial \
@@ -189,6 +192,7 @@
fedora20 \
fedora21 \
fedora28 \
+ fedora29 \
linux-wheels \
linux-wheels-x86_64 \
linux-wheels-i686 \
--- a/contrib/all-revsets.txt Wed Jan 09 20:00:35 2019 -0800
+++ b/contrib/all-revsets.txt Fri Jan 18 13:28:22 2019 -0500
@@ -139,3 +139,18 @@
# test finding common ancestors
heads(commonancestors(last(head(), 2)))
heads(commonancestors(head()))
+
+# more heads testing
+heads(all())
+heads(-10000:-1)
+(-5000:-1000) and heads(-10000:-1)
+heads(matching(tip, "author"))
+heads(matching(tip, "author")) and -10000:-1
+(-10000:-1) and heads(matching(tip, "author"))
+# more roots testing
+roots(all())
+roots(-10000:-1)
+(-5000:-1000) and roots(-10000:-1)
+roots(matching(tip, "author"))
+roots(matching(tip, "author")) and -10000:-1
+(-10000:-1) and roots(matching(tip, "author"))
--- a/contrib/catapipe.py Wed Jan 09 20:00:35 2019 -0800
+++ b/contrib/catapipe.py Fri Jan 18 13:28:22 2019 -0500
@@ -6,6 +6,14 @@
# GNU General Public License version 2 or any later version.
"""Tool read primitive events from a pipe to produce a catapult trace.
+Usage:
+ Terminal 1: $ catapipe.py /tmp/mypipe /tmp/trace.json
+ Terminal 2: $ HGCATAPULTSERVERPIPE=/tmp/mypipe hg root
+ <ctrl-c catapipe.py in Terminal 1>
+ $ catapult/tracing/bin/trace2html /tmp/trace.json # produce /tmp/trace.html
+ <open trace.html in your browser of choice; the WASD keys are very useful>
+ (catapult is located at https://github.com/catapult-project/catapult)
+
For now the event stream supports
START $SESSIONID ...
@@ -24,7 +32,7 @@
Typically you'll want to place the path to the named pipe in the
HGCATAPULTSERVERPIPE environment variable, which both run-tests and hg
-understand.
+understand. To trace *only* run-tests, use HGTESTCATAPULTSERVERPIPE instead.
"""
from __future__ import absolute_import, print_function
--- a/contrib/check-commit Wed Jan 09 20:00:35 2019 -0800
+++ b/contrib/check-commit Fri Jan 18 13:28:22 2019 -0500
@@ -34,7 +34,7 @@
(commitheader + r"(?!merge with )[^#]\S+[^:] ",
"summary line doesn't start with 'topic: '"),
(afterheader + r"[A-Z][a-z]\S+", "don't capitalize summary lines"),
- (afterheader + r"[^\n]*: *[A-Z][a-z]\S+", "don't capitalize summary lines"),
+ (afterheader + r"^\S+: *[A-Z][a-z]\S+", "don't capitalize summary lines"),
(afterheader + r"\S*[^A-Za-z0-9-_]\S*: ",
"summary keyword should be most user-relevant one-word command or topic"),
(afterheader + r".*\.\s*\n", "don't add trailing period on summary line"),
--- a/contrib/clang-format-ignorelist Wed Jan 09 20:00:35 2019 -0800
+++ b/contrib/clang-format-ignorelist Fri Jan 18 13:28:22 2019 -0500
@@ -3,7 +3,6 @@
mercurial/cext/dirs.c
mercurial/cext/manifest.c
mercurial/cext/osutil.c
-mercurial/cext/revlog.c
# Vendored code that we should never format:
contrib/python-zstandard/c-ext/bufferutil.c
contrib/python-zstandard/c-ext/compressionchunker.c
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/contrib/discovery-helper.sh Fri Jan 18 13:28:22 2019 -0500
@@ -0,0 +1,64 @@
+#!/bin/bash
+#
+# produces two repositories with different common and missing subsets
+#
+# $ discovery-helper.sh REPO NBHEADS DEPTH
+#
+# The goal is to produce two repositories with some common part and some
+# exclusive part on each side. Given a source repository REPO, the script
+# produces two repositories, REPO-left and REPO-right.
+#
+# Each repository will be missing some revisions exclusive to NBHEADS of the
+# repository's topological heads. These heads and the revisions exclusive to
+# them (up to DEPTH generations) are stripped.
+#
+# The "left" repository will use the NBHEADS first heads (sorted by
+# description). The "right" use the last NBHEADS one.
+#
+# To find out how many topological heads a repo has, use:
+#
+# $ hg heads -t -T '{rev}\n' | wc -l
+#
+# Example:
+#
+# The `pypy-2018-09-01` repository has 192 heads. To produce two repositories
+# with 92 common heads and ~50 exclusive heads on each side, run:
+#
+# $ ./discovery-helper.sh pypy-2018-08-01 50 10
+
+set -euo pipefail
+
+if [ $# -lt 3 ]; then
+ echo "usage: `basename $0` REPO NBHEADS DEPTH"
+ exit 64
+fi
+
+repo="$1"
+shift
+
+nbheads="$1"
+shift
+
+depth="$1"
+shift
+
+leftrepo="${repo}-left"
+rightrepo="${repo}-right"
+
+left="first(sort(heads(all()), 'desc'), $nbheads)"
+right="last(sort(heads(all()), 'desc'), $nbheads)"
+
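+# Note: "only(X, Y)" selects ancestors of X that are not ancestors of Y, so
+# each subset below is the part of the graph exclusive to its chosen heads,
+# limited to DEPTH generations by "ancestors(..., $depth)".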
+leftsubset="ancestors($left, $depth) and only($left, heads(all() - $left))"
+rightsubset="ancestors($right, $depth) and only($right, heads(all() - $right))"
+
+echo '### building left repository:' "${leftrepo}"
+echo '# cloning'
+hg clone --noupdate "${repo}" "${leftrepo}"
+echo '# stripping:' '"'${leftsubset}'"'
+hg -R "${leftrepo}" --config extensions.strip= strip --rev "$leftsubset" --no-backup
+
+echo '### building right repository:' "${rightrepo}"
+echo '# cloning'
+hg clone --noupdate "${repo}" "${rightrepo}"
+echo '# stripping:' '"'${rightsubset}'"'
+hg -R "${rightrepo}" --config extensions.strip= strip --rev "$rightsubset" --no-backup
--- a/contrib/fuzz/Makefile Wed Jan 09 20:00:35 2019 -0800
+++ b/contrib/fuzz/Makefile Fri Jan 18 13:28:22 2019 -0500
@@ -4,7 +4,7 @@
all: bdiff mpatch xdiff
fuzzutil.o: fuzzutil.cc fuzzutil.h
- $(CXX) $(CXXFLAGS) -g -O1 -fsanitize=fuzzer-no-link,address \
+ $(CXX) $(CXXFLAGS) -g -O1 \
-std=c++17 \
-I../../mercurial -c -o fuzzutil.o fuzzutil.cc
@@ -12,6 +12,11 @@
$(CXX) $(CXXFLAGS) -std=c++17 \
-I../../mercurial -c -o fuzzutil-oss-fuzz.o fuzzutil.cc
+pyutil.o: pyutil.cc pyutil.h
+ $(CXX) $(CXXFLAGS) -g -O1 \
+ `$$OUT/sanpy/bin/python-config --cflags` \
+ -I../../mercurial -c -o pyutil.o pyutil.cc
+
bdiff.o: ../../mercurial/bdiff.c
$(CC) $(CFLAGS) -fsanitize=fuzzer-no-link,address -c -o bdiff.o \
../../mercurial/bdiff.c
@@ -70,59 +75,86 @@
fuzz-xdiffi.o fuzz-xprepare.o fuzz-xutils.o fuzzutil-oss-fuzz.o \
-lFuzzingEngine -o $$OUT/xdiff_fuzzer
-# TODO use the $OUT env var instead of hardcoding /out
-/out/sanpy/bin/python:
- cd /Python-2.7.15/ && ./configure --without-pymalloc --prefix=$$OUT/sanpy CFLAGS='-O1 -fno-omit-frame-pointer -g -fwrapv -fstack-protector-strong' LDFLAGS=-lasan && ASAN_OPTIONS=detect_leaks=0 make && make install
-
-sanpy: /out/sanpy/bin/python
-
-manifest.o: sanpy ../../mercurial/cext/manifest.c
+manifest.o: ../../mercurial/cext/manifest.c
$(CC) $(CFLAGS) `$$OUT/sanpy/bin/python-config --cflags` \
-I../../mercurial \
-c -o manifest.o ../../mercurial/cext/manifest.c
-charencode.o: sanpy ../../mercurial/cext/charencode.c
+charencode.o: ../../mercurial/cext/charencode.c
$(CC) $(CFLAGS) `$$OUT/sanpy/bin/python-config --cflags` \
-I../../mercurial \
-c -o charencode.o ../../mercurial/cext/charencode.c
-parsers.o: sanpy ../../mercurial/cext/parsers.c
+parsers.o: ../../mercurial/cext/parsers.c
$(CC) $(CFLAGS) `$$OUT/sanpy/bin/python-config --cflags` \
-I../../mercurial \
-c -o parsers.o ../../mercurial/cext/parsers.c
-dirs.o: sanpy ../../mercurial/cext/dirs.c
+dirs.o: ../../mercurial/cext/dirs.c
$(CC) $(CFLAGS) `$$OUT/sanpy/bin/python-config --cflags` \
-I../../mercurial \
-c -o dirs.o ../../mercurial/cext/dirs.c
-pathencode.o: sanpy ../../mercurial/cext/pathencode.c
+pathencode.o: ../../mercurial/cext/pathencode.c
$(CC) $(CFLAGS) `$$OUT/sanpy/bin/python-config --cflags` \
-I../../mercurial \
-c -o pathencode.o ../../mercurial/cext/pathencode.c
-revlog.o: sanpy ../../mercurial/cext/revlog.c
+revlog.o: ../../mercurial/cext/revlog.c
$(CC) $(CFLAGS) `$$OUT/sanpy/bin/python-config --cflags` \
-I../../mercurial \
-c -o revlog.o ../../mercurial/cext/revlog.c
-manifest_fuzzer: sanpy manifest.cc manifest.o charencode.o parsers.o dirs.o pathencode.o revlog.o
+manifest_fuzzer: manifest.cc manifest.o charencode.o parsers.o dirs.o pathencode.o revlog.o pyutil.o
$(CXX) $(CXXFLAGS) `$$OUT/sanpy/bin/python-config --cflags` \
-Wno-register -Wno-macro-redefined \
-I../../mercurial manifest.cc \
- manifest.o charencode.o parsers.o dirs.o pathencode.o revlog.o \
+ manifest.o charencode.o parsers.o dirs.o pathencode.o revlog.o pyutil.o \
-lFuzzingEngine `$$OUT/sanpy/bin/python-config --ldflags` \
-o $$OUT/manifest_fuzzer
manifest_corpus.zip:
python manifest_corpus.py $$OUT/manifest_fuzzer_seed_corpus.zip
+revlog_fuzzer: revlog.cc manifest.o charencode.o parsers.o dirs.o pathencode.o revlog.o pyutil.o
+ $(CXX) $(CXXFLAGS) `$$OUT/sanpy/bin/python-config --cflags` \
+ -Wno-register -Wno-macro-redefined \
+ -I../../mercurial revlog.cc \
+ manifest.o charencode.o parsers.o dirs.o pathencode.o revlog.o pyutil.o \
+ -lFuzzingEngine `$$OUT/sanpy/bin/python-config --ldflags` \
+ -o $$OUT/revlog_fuzzer
+
+revlog_corpus.zip:
+ python revlog_corpus.py $$OUT/revlog_fuzzer_seed_corpus.zip
+
+dirstate_fuzzer: dirstate.cc manifest.o charencode.o parsers.o dirs.o pathencode.o revlog.o pyutil.o
+ $(CXX) $(CXXFLAGS) `$$OUT/sanpy/bin/python-config --cflags` \
+ -Wno-register -Wno-macro-redefined \
+ -I../../mercurial dirstate.cc \
+ manifest.o charencode.o parsers.o dirs.o pathencode.o revlog.o pyutil.o \
+ -lFuzzingEngine `$$OUT/sanpy/bin/python-config --ldflags` \
+ -o $$OUT/dirstate_fuzzer
+
+dirstate_corpus.zip:
+ python dirstate_corpus.py $$OUT/dirstate_fuzzer_seed_corpus.zip
+
+fm1readmarkers_fuzzer: fm1readmarkers.cc manifest.o charencode.o parsers.o dirs.o pathencode.o revlog.o pyutil.o
+ $(CXX) $(CXXFLAGS) `$$OUT/sanpy/bin/python-config --cflags` \
+ -Wno-register -Wno-macro-redefined \
+ -I../../mercurial fm1readmarkers.cc \
+ manifest.o charencode.o parsers.o dirs.o pathencode.o revlog.o pyutil.o \
+ -lFuzzingEngine `$$OUT/sanpy/bin/python-config --ldflags` \
+ -o $$OUT/fm1readmarkers_fuzzer
+
+fm1readmarkers_corpus.zip:
+ python fm1readmarkers_corpus.py $$OUT/fm1readmarkers_fuzzer_seed_corpus.zip
+
clean:
$(RM) *.o *_fuzzer \
bdiff \
mpatch \
xdiff
-oss-fuzz: bdiff_fuzzer mpatch_fuzzer mpatch_corpus.zip xdiff_fuzzer manifest_fuzzer manifest_corpus.zip
+oss-fuzz: bdiff_fuzzer mpatch_fuzzer mpatch_corpus.zip xdiff_fuzzer manifest_fuzzer manifest_corpus.zip revlog_fuzzer revlog_corpus.zip dirstate_fuzzer dirstate_corpus.zip fm1readmarkers_fuzzer fm1readmarkers_corpus.zip
-.PHONY: all clean oss-fuzz sanpy
+.PHONY: all clean oss-fuzz
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/contrib/fuzz/dirstate.cc Fri Jan 18 13:28:22 2019 -0500
@@ -0,0 +1,48 @@
+#include <Python.h>
+#include <assert.h>
+#include <stdlib.h>
+#include <unistd.h>
+
+#include <string>
+
+#include "pyutil.h"
+
+extern "C" {
+
+static PyCodeObject *code;
+
+extern "C" int LLVMFuzzerInitialize(int *argc, char ***argv)
+{
+ contrib::initpy(*argv[0]);
+ code = (PyCodeObject *)Py_CompileString(R"py(
+from parsers import parse_dirstate
+try:
+ dmap = {}
+ copymap = {}
+ p = parse_dirstate(dmap, copymap, data)
+except Exception as e:
+ pass
+ # uncomment this print if you're editing this Python code
+ # to debug failures.
+ # print e
+)py",
+ "fuzzer", Py_file_input);
+ return 0;
+}
+
+int LLVMFuzzerTestOneInput(const uint8_t *Data, size_t Size)
+{
+ PyObject *text =
+ PyBytes_FromStringAndSize((const char *)Data, (Py_ssize_t)Size);
+ PyObject *locals = PyDict_New();
+ PyDict_SetItemString(locals, "data", text);
+ PyObject *res = PyEval_EvalCode(code, contrib::pyglobals(), locals);
+ if (!res) {
+ PyErr_Print();
+ }
+ Py_XDECREF(res);
+ Py_DECREF(locals);
+ Py_DECREF(text);
+ return 0; // Non-zero return values are reserved for future use.
+}
+}
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/contrib/fuzz/dirstate_corpus.py Fri Jan 18 13:28:22 2019 -0500
@@ -0,0 +1,18 @@
+from __future__ import absolute_import, print_function
+
+import argparse
+import os
+import zipfile
+
+ap = argparse.ArgumentParser()
+ap.add_argument("out", metavar="some.zip", type=str, nargs=1)
+args = ap.parse_args()
+
+reporoot = os.path.normpath(os.path.join(os.path.dirname(__file__),
+ '..', '..'))
+dirstate = os.path.join(reporoot, '.hg', 'dirstate')
+
+with zipfile.ZipFile(args.out[0], "w", zipfile.ZIP_STORED) as zf:
+ if os.path.exists(dirstate):
+ with open(dirstate) as f:
+ zf.writestr("dirstate", f.read())
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/contrib/fuzz/fm1readmarkers.cc Fri Jan 18 13:28:22 2019 -0500
@@ -0,0 +1,60 @@
+#include <Python.h>
+#include <assert.h>
+#include <stdlib.h>
+#include <unistd.h>
+
+#include <string>
+
+#include "pyutil.h"
+
+extern "C" {
+
+static PyCodeObject *code;
+
+extern "C" int LLVMFuzzerInitialize(int *argc, char ***argv)
+{
+ contrib::initpy(*argv[0]);
+ code = (PyCodeObject *)Py_CompileString(R"py(
+from parsers import fm1readmarkers
+def maybeint(s, default):
+ try:
+ return int(s)
+ except ValueError:
+ return default
+try:
+ parts = data.split('\0', 2)
+ if len(parts) == 3:
+ offset, stop, data = parts
+ elif len(parts) == 2:
+ stop, data = parts
+ offset = 0
+ else:
+ offset = stop = 0
+ offset, stop = maybeint(offset, 0), maybeint(stop, len(data))
+ fm1readmarkers(data, offset, stop)
+except Exception as e:
+ pass
+ # uncomment this print if you're editing this Python code
+ # to debug failures.
+ # print e
+)py",
+ "fuzzer", Py_file_input);
+ return 0;
+}
+
+int LLVMFuzzerTestOneInput(const uint8_t *Data, size_t Size)
+{
+ PyObject *text =
+ PyBytes_FromStringAndSize((const char *)Data, (Py_ssize_t)Size);
+ PyObject *locals = PyDict_New();
+ PyDict_SetItemString(locals, "data", text);
+ PyObject *res = PyEval_EvalCode(code, contrib::pyglobals(), locals);
+ if (!res) {
+ PyErr_Print();
+ }
+ Py_XDECREF(res);
+ Py_DECREF(locals);
+ Py_DECREF(text);
+ return 0; // Non-zero return values are reserved for future use.
+}
+}
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/contrib/fuzz/fm1readmarkers_corpus.py Fri Jan 18 13:28:22 2019 -0500
@@ -0,0 +1,36 @@
+from __future__ import absolute_import, print_function
+
+import argparse
+import zipfile
+
+ap = argparse.ArgumentParser()
+ap.add_argument("out", metavar="some.zip", type=str, nargs=1)
+args = ap.parse_args()
+
+with zipfile.ZipFile(args.out[0], "w", zipfile.ZIP_STORED) as zf:
+ zf.writestr(
+ 'smallish_obsstore',
+ (
+ # header: fm1readmarkers should start at offset 1, and
+ # read until byte 597.
+ '1\x00597\x00'
+ # body of obsstore file
+ '\x01\x00\x00\x00vA\xd7\x02+C\x1a<)\x01,\x00\x00\x01\x03\x03\xe6'
+ '\x92\xde)x\x16\xd1Xph\xc7\xa7[\xe5\xe2\x1a\xab\x1e6e\xaf\xc2\xae'
+ '\xe7\xbc\x83\xe1\x88\xa5\xda\xce>O\xbd\x04\xe9\x03\xc4o\xeb\x03'
+ '\x01\t\x05\x04\x1fef18operationamenduserAugie Fackler <raf@duri'
+ 'n42.com>\x00\x00\x00vA\xd7\x02-\x8aD\xaf-\x01,\x00\x00\x01\x03\x03'
+ '\x17*\xca\x8f\x9e}i\xe0i\xbb\xdf\x9fb\x03\xd2XG?\xd3h\x98\x89\x1a'
+ '=2\xeb\xc3\xc5<\xb3\x9e\xcc\x0e;#\xee\xc3\x10ux\x03\x01\t\x05\x04'
+ '\x1fef18operationamenduserAugie Fackler <raf@durin42.com>\x00\x00'
+ '\x00vA\xd7\x02Mn\xd9%\xea\x01,\x00\x00\x01\x03\x03\x98\x89\x1a='
+ '2\xeb\xc3\xc5<\xb3\x9e\xcc\x0e;#\xee\xc3\x10ux\xe0*\xcaT\x86Z8J'
+ '\x85)\x97\xff7\xcc)\xc1\x7f\x19\x0c\x01\x03\x01\t\x05\x04\x1fef'
+ '18operationamenduserAugie Fackler <raf@durin42.com>\x00\x00\x00'
+ 'yA\xd7\x02MtA\xbfj\x01,\x00\x00\x01\x03\x03\xe0*\xcaT\x86Z8J\x85'
+ ')\x97\xff7\xcc)\xc1\x7f\x19\x0c\x01\x00\x94\x01\xa9\n\xf80\x92\xa3'
+ 'j\xc5X\xb1\xc9:\xd51\xb8*\xa9\x03\x01\t\x08\x04\x1fef11operatio'
+ 'nhistedituserAugie Fackler <raf@durin42.com>\x00\x00\x00yA\xd7\x02'
+ 'MtA\xd4\xe1\x01,\x00\x00\x01\x03\x03"\xa5\xcb\x86\xb6\xf4\xbaO\xa0'
+ 'sH\xe7?\xcb\x9b\xc2n\xcfI\x9e\x14\xf0D\xf0!\x18DN\xcd\x97\x016\xa5'
+ '\xef\xa06\xcb\x884\x8a\x03\x01\t\x08\x04\x1fef14operationhisted'))
--- a/contrib/fuzz/manifest.cc Wed Jan 09 20:00:35 2019 -0800
+++ b/contrib/fuzz/manifest.cc Fri Jan 18 13:28:22 2019 -0500
@@ -3,43 +3,17 @@
#include <stdlib.h>
#include <unistd.h>
+#include "pyutil.h"
+
#include <string>
extern "C" {
-/* TODO: use Python 3 for this fuzzing? */
-PyMODINIT_FUNC initparsers(void);
-
-static char cpypath[8192] = "\0";
-
static PyCodeObject *code;
-static PyObject *mainmod;
-static PyObject *globals;
extern "C" int LLVMFuzzerInitialize(int *argc, char ***argv)
{
- const std::string subdir = "/sanpy/lib/python2.7";
- /* HACK ALERT: we need a full Python installation built without
- pymalloc and with ASAN, so we dump one in
- $OUT/sanpy/lib/python2.7. This helps us wire that up. */
- std::string selfpath(*argv[0]);
- std::string pypath;
- auto pos = selfpath.rfind("/");
- if (pos == std::string::npos) {
- char wd[8192];
- getcwd(wd, 8192);
- pypath = std::string(wd) + subdir;
- } else {
- pypath = selfpath.substr(0, pos) + subdir;
- }
- strncpy(cpypath, pypath.c_str(), pypath.size());
- setenv("PYTHONPATH", cpypath, 1);
- setenv("PYTHONNOUSERSITE", "1", 1);
- /* prevent Python from looking up users in the fuzz environment */
- setenv("PYTHONUSERBASE", cpypath, 1);
- Py_SetPythonHome(cpypath);
- Py_InitializeEx(0);
- initparsers();
+ contrib::initpy(*argv[0]);
code = (PyCodeObject *)Py_CompileString(R"py(
from parsers import lazymanifest
try:
@@ -60,8 +34,6 @@
# print e
)py",
"fuzzer", Py_file_input);
- mainmod = PyImport_AddModule("__main__");
- globals = PyModule_GetDict(mainmod);
return 0;
}
@@ -71,7 +43,7 @@
PyBytes_FromStringAndSize((const char *)Data, (Py_ssize_t)Size);
PyObject *locals = PyDict_New();
PyDict_SetItemString(locals, "mdata", mtext);
- PyObject *res = PyEval_EvalCode(code, globals, locals);
+ PyObject *res = PyEval_EvalCode(code, contrib::pyglobals(), locals);
if (!res) {
PyErr_Print();
}
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/contrib/fuzz/pyutil.cc Fri Jan 18 13:28:22 2019 -0500
@@ -0,0 +1,49 @@
+#include "pyutil.h"
+
+#include <string>
+
+namespace contrib
+{
+
+static char cpypath[8192] = "\0";
+
+static PyObject *mainmod;
+static PyObject *globals;
+
+/* TODO: use Python 3 for this fuzzing? */
+PyMODINIT_FUNC initparsers(void);
+
+void initpy(const char *cselfpath)
+{
+ const std::string subdir = "/sanpy/lib/python2.7";
+ /* HACK ALERT: we need a full Python installation built without
+ pymalloc and with ASAN, so we dump one in
+ $OUT/sanpy/lib/python2.7. This helps us wire that up. */
+ std::string selfpath(cselfpath);
+ std::string pypath;
+ auto pos = selfpath.rfind("/");
+ if (pos == std::string::npos) {
+ char wd[8192];
+ getcwd(wd, 8192);
+ pypath = std::string(wd) + subdir;
+ } else {
+ pypath = selfpath.substr(0, pos) + subdir;
+ }
+ strncpy(cpypath, pypath.c_str(), pypath.size());
+ setenv("PYTHONPATH", cpypath, 1);
+ setenv("PYTHONNOUSERSITE", "1", 1);
+ /* prevent Python from looking up users in the fuzz environment */
+ setenv("PYTHONUSERBASE", cpypath, 1);
+ Py_SetPythonHome(cpypath);
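+	/* 0 = skip installing Python's signal handlers */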
+ Py_InitializeEx(0);
+ mainmod = PyImport_AddModule("__main__");
+ globals = PyModule_GetDict(mainmod);
+ initparsers();
+}
+
+PyObject *pyglobals()
+{
+ return globals;
+}
+
+} // namespace contrib
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/contrib/fuzz/pyutil.h Fri Jan 18 13:28:22 2019 -0500
@@ -0,0 +1,9 @@
+#include <Python.h>
+
+namespace contrib
+{
+
+void initpy(const char *cselfpath);
+PyObject *pyglobals();
+
+} /* namespace contrib */
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/contrib/fuzz/revlog.cc Fri Jan 18 13:28:22 2019 -0500
@@ -0,0 +1,47 @@
+#include <Python.h>
+#include <assert.h>
+#include <stdlib.h>
+#include <unistd.h>
+
+#include <string>
+
+#include "pyutil.h"
+
+extern "C" {
+
+static PyCodeObject *code;
+
+extern "C" int LLVMFuzzerInitialize(int *argc, char ***argv)
+{
+ contrib::initpy(*argv[0]);
+ code = (PyCodeObject *)Py_CompileString(R"py(
+from parsers import parse_index2
+for inline in (True, False):
+ try:
+ index, cache = parse_index2(data, inline)
+ except Exception as e:
+ pass
+ # uncomment this print if you're editing this Python code
+ # to debug failures.
+ # print e
+)py",
+ "fuzzer", Py_file_input);
+ return 0;
+}
+
+int LLVMFuzzerTestOneInput(const uint8_t *Data, size_t Size)
+{
+ PyObject *text =
+ PyBytes_FromStringAndSize((const char *)Data, (Py_ssize_t)Size);
+ PyObject *locals = PyDict_New();
+ PyDict_SetItemString(locals, "data", text);
+ PyObject *res = PyEval_EvalCode(code, contrib::pyglobals(), locals);
+ if (!res) {
+ PyErr_Print();
+ }
+ Py_XDECREF(res);
+ Py_DECREF(locals);
+ Py_DECREF(text);
+ return 0; // Non-zero return values are reserved for future use.
+}
+}
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/contrib/fuzz/revlog_corpus.py Fri Jan 18 13:28:22 2019 -0500
@@ -0,0 +1,28 @@
+from __future__ import absolute_import, print_function
+
+import argparse
+import os
+import zipfile
+
+ap = argparse.ArgumentParser()
+ap.add_argument("out", metavar="some.zip", type=str, nargs=1)
+args = ap.parse_args()
+
+reporoot = os.path.normpath(os.path.join(os.path.dirname(__file__),
+ '..', '..'))
+# typically a standalone index
+changelog = os.path.join(reporoot, '.hg', 'store', '00changelog.i')
+# an inline revlog with only a few revisions
+contributing = os.path.join(
+ reporoot, '.hg', 'store', 'data', 'contrib', 'fuzz', 'mpatch.cc.i')
+
+print(changelog, os.path.exists(changelog))
+print(contributing, os.path.exists(contributing))
+
+with zipfile.ZipFile(args.out[0], "w", zipfile.ZIP_STORED) as zf:
+ if os.path.exists(changelog):
+ with open(changelog) as f:
+ zf.writestr("00changelog.i", f.read())
+ if os.path.exists(contributing):
+ with open(contributing) as f:
+ zf.writestr("contributing.i", f.read())
--- a/contrib/fuzz/xdiff.cc Wed Jan 09 20:00:35 2019 -0800
+++ b/contrib/fuzz/xdiff.cc Fri Jan 18 13:28:22 2019 -0500
@@ -22,6 +22,11 @@
int LLVMFuzzerTestOneInput(const uint8_t *Data, size_t Size)
{
+ // Don't allow fuzzer inputs larger than 100k, since we'll just bog
+ // down and not accomplish much.
+ if (Size > 100000) {
+ return 0;
+ }
auto maybe_inputs = SplitInputs(Data, Size);
if (!maybe_inputs) {
return 0;
--- a/contrib/hgclient.py Wed Jan 09 20:00:35 2019 -0800
+++ b/contrib/hgclient.py Fri Jan 18 13:28:22 2019 -0500
@@ -27,12 +27,18 @@
stringio = cStringIO.StringIO
bprint = print
-def connectpipe(path=None):
+def connectpipe(path=None, extraargs=()):
cmdline = [b'hg', b'serve', b'--cmdserver', b'pipe']
if path:
cmdline += [b'-R', path]
+ cmdline.extend(extraargs)
- server = subprocess.Popen(cmdline, stdin=subprocess.PIPE,
+ def tonative(cmdline):
+ if os.name != r'nt':
+ return cmdline
+ return [arg.decode("utf-8") for arg in cmdline]
+
+ server = subprocess.Popen(tonative(cmdline), stdin=subprocess.PIPE,
stdout=subprocess.PIPE)
return server
@@ -114,6 +120,8 @@
writeblock(server, input.read(data))
elif ch == b'L':
writeblock(server, input.readline(data))
+ elif ch == b'm':
+ bprint(b"message: %r" % data)
elif ch == b'r':
ret, = struct.unpack('>i', data)
if ret != 0:
@@ -132,3 +140,8 @@
finally:
server.stdin.close()
server.wait()
+
+def checkwith(connect=connectpipe, **kwargs):
+ def wrap(func):
+ return check(func, lambda: connect(**kwargs))
+ return wrap
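+
+# Hypothetical usage sketch (the test body below is illustrative only):
+#
+#   @checkwith(extraargs=[b'--config', b'ui.interactive=True'])
+#   def mytest(server):
+#       readchannel(server)
+#       runcommand(server, [b'id'])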
--- a/contrib/import-checker.py Wed Jan 09 20:00:35 2019 -0800
+++ b/contrib/import-checker.py Fri Jan 18 13:28:22 2019 -0500
@@ -40,8 +40,6 @@
# third-party imports should be directly imported
'mercurial.thirdparty',
'mercurial.thirdparty.attr',
- 'mercurial.thirdparty.cbor',
- 'mercurial.thirdparty.cbor.cbor2',
'mercurial.thirdparty.zope',
'mercurial.thirdparty.zope.interface',
)
@@ -260,10 +258,12 @@
break
else:
stdlib_prefixes.add(dirname)
+ sourceroot = os.path.abspath(os.path.dirname(os.path.dirname(__file__)))
for libpath in sys.path:
- # We want to walk everything in sys.path that starts with
- # something in stdlib_prefixes.
- if not any(libpath.startswith(p) for p in stdlib_prefixes):
+ # We want to walk everything in sys.path that starts with something in
+ # stdlib_prefixes, but not directories from the hg sources.
+ if (os.path.abspath(libpath).startswith(sourceroot)
+ or not any(libpath.startswith(p) for p in stdlib_prefixes)):
continue
for top, dirs, files in os.walk(libpath):
for i, d in reversed(list(enumerate(dirs))):
@@ -674,6 +674,8 @@
# "starts" is "line number" (1-origin), but embedded() is
# expected to return "line offset" (0-origin). Therefore, this
# yields "starts - 1".
+ if not isinstance(modname, str):
+ modname = modname.decode('utf8')
yield code, "%s[%d]" % (modname, starts), name, starts - 1
def sources(f, modname):
@@ -694,7 +696,7 @@
if py or f.endswith('.t'):
with open(f, 'rb') as src:
for script, modname, t, line in embedded(f, modname, src):
- yield script, modname, t, line
+ yield script, modname.encode('utf8'), t, line
def main(argv):
if len(argv) < 2 or (argv[1] == '-' and len(argv) > 2):
--- a/contrib/packaging/Makefile Wed Jan 09 20:00:35 2019 -0800
+++ b/contrib/packaging/Makefile Fri Jan 18 13:28:22 2019 -0500
@@ -14,7 +14,8 @@
FEDORA_RELEASES := \
20 \
21 \
- 28
+ 28 \
+ 29
CENTOS_RELEASES := \
5 \
--- a/contrib/packaging/docker/centos5 Wed Jan 09 20:00:35 2019 -0800
+++ b/contrib/packaging/docker/centos5 Fri Jan 18 13:28:22 2019 -0500
@@ -1,7 +1,7 @@
FROM centos:centos5
-RUN groupadd -g 1000 build && \
- useradd -u 1000 -g 1000 -s /bin/bash -d /build -m build
+RUN groupadd -g %GID% build && \
+ useradd -u %UID% -g %GID% -s /bin/bash -d /build -m build
RUN \
sed -i 's/^mirrorlist/#mirrorlist/' /etc/yum.repos.d/*.repo && \
--- a/contrib/packaging/docker/centos6 Wed Jan 09 20:00:35 2019 -0800
+++ b/contrib/packaging/docker/centos6 Fri Jan 18 13:28:22 2019 -0500
@@ -1,7 +1,7 @@
FROM centos:centos6
-RUN groupadd -g 1000 build && \
- useradd -u 1000 -g 1000 -s /bin/bash -d /build -m build
+RUN groupadd -g %GID% build && \
+ useradd -u %UID% -g %GID% -s /bin/bash -d /build -m build
RUN yum install -y \
gcc \
--- a/contrib/packaging/docker/centos7 Wed Jan 09 20:00:35 2019 -0800
+++ b/contrib/packaging/docker/centos7 Fri Jan 18 13:28:22 2019 -0500
@@ -1,7 +1,7 @@
FROM centos:centos7
-RUN groupadd -g 1000 build && \
- useradd -u 1000 -g 1000 -s /bin/bash -d /build -m build
+RUN groupadd -g %GID% build && \
+ useradd -u %UID% -g %GID% -s /bin/bash -d /build -m build
RUN yum install -y \
gcc \
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/contrib/packaging/docker/fedora29 Fri Jan 18 13:28:22 2019 -0500
@@ -0,0 +1,15 @@
+FROM fedora:29
+
+RUN groupadd -g 1000 build && \
+ useradd -u 1000 -g 1000 -s /bin/bash -d /build -m build
+
+RUN dnf install -y \
+ gcc \
+ gettext \
+ make \
+ python-devel \
+ python-docutils \
+ rpm-build
+
+# For creating repo meta data
+RUN dnf install -y createrepo
--- a/contrib/packaging/dockerrpm Wed Jan 09 20:00:35 2019 -0800
+++ b/contrib/packaging/dockerrpm Fri Jan 18 13:28:22 2019 -0500
@@ -10,7 +10,15 @@
CONTAINER=hg-docker-$PLATFORM
-$BUILDDIR/hg-docker build $BUILDDIR/docker/$PLATFORM $CONTAINER
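+# Build with uid/gid 1000 by default; set HG_DOCKER_OWN_USER to any non-empty
+# value to use the invoking user's uid and gid instead.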
+if [[ -z "${HG_DOCKER_OWN_USER}" ]]; then
+ DOCKERUID=1000
+ DOCKERGID=1000
+else
+ DOCKERUID=$(id -u)
+ DOCKERGID=$(id -g)
+fi
+
+$BUILDDIR/hg-docker build --build-arg UID=$DOCKERUID --build-arg GID=$DOCKERGID $BUILDDIR/docker/$PLATFORM $CONTAINER
RPMBUILDDIR=$ROOTDIR/packages/$PLATFORM
$ROOTDIR/contrib/packaging/buildrpm --rpmbuilddir $RPMBUILDDIR --prepare $*
--- a/contrib/packaging/hg-docker Wed Jan 09 20:00:35 2019 -0800
+++ b/contrib/packaging/hg-docker Fri Jan 18 13:28:22 2019 -0500
@@ -47,7 +47,7 @@
df = fh.read()
for k, v in args:
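+        # build args such as UID/GID replace the corresponding %UID%/%GID%
+        # placeholders in the Dockerfile (see contrib/packaging/docker/*)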
- df = df.replace(b'%%%s%%' % k, v)
+ df = df.replace(bytes('%%%s%%' % k.decode(), 'utf-8'), v)
return df
@@ -72,7 +72,12 @@
]
print('executing: %r' % args)
- subprocess.run(args, input=dockerfile, check=True)
+ p = subprocess.Popen(args, stdin=subprocess.PIPE)
+ p.communicate(input=dockerfile)
+ if p.returncode:
+        raise subprocess.CalledProcessError(
+ p.returncode, 'failed to build docker image: %s %s' \
+ % (p.stdout, p.stderr))
def command_build(args):
build_args = []
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/contrib/perf-utils/perf-revlog-write-plot.py Fri Jan 18 13:28:22 2019 -0500
@@ -0,0 +1,127 @@
+#!/usr/bin/env python
+#
+# Copyright 2018 Paul Morelle <Paul.Morelle@octobus.net>
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2 or any later version.
+#
+# This script uses the output of `hg perfrevlogwrite -T json --details` to
+# draw various plots related to write performance in a revlog.
+#
+# usage: perf-revlog-write-plot.py details.json
+from __future__ import absolute_import, print_function
+import json
+import re
+
+import numpy as np
+import scipy.signal
+
+from matplotlib import (
+ pyplot as plt,
+ ticker as mticker,
+)
+
+
+def plot(data, title=None):
+ items = {}
+ re_title = re.compile(r'^revisions #\d+ of \d+, rev (\d+)$')
+ for item in data:
+ m = re_title.match(item['title'])
+ if m is None:
+ continue
+
+ rev = int(m.group(1))
+ items[rev] = item
+
+ min_rev = min(items.keys())
+ max_rev = max(items.keys())
+ ary = np.empty((2, max_rev - min_rev + 1))
+ for rev, item in items.items():
+ ary[0][rev - min_rev] = rev
+ ary[1][rev - min_rev] = item['wall']
+
+ fig = plt.figure()
+ comb_plt = fig.add_subplot(211)
+ other_plt = fig.add_subplot(212)
+
+ comb_plt.plot(ary[0],
+ np.cumsum(ary[1]),
+ color='red',
+ linewidth=1,
+ label='comb')
+
+ plots = []
+ p = other_plt.plot(ary[0],
+ ary[1],
+ color='red',
+ linewidth=1,
+ label='wall')
+ plots.append(p)
+
+ colors = {
+ 10: ('green', 'xkcd:grass green'),
+ 100: ('blue', 'xkcd:bright blue'),
+ 1000: ('purple', 'xkcd:dark pink'),
+ }
+ for n, color in colors.items():
+ avg_n = np.convolve(ary[1], np.full(n, 1. / n), 'valid')
+ p = other_plt.plot(ary[0][n - 1:],
+ avg_n,
+ color=color[0],
+ linewidth=1,
+ label='avg time last %d' % n)
+ plots.append(p)
+
+ med_n = scipy.signal.medfilt(ary[1], n + 1)
+ p = other_plt.plot(ary[0],
+ med_n,
+ color=color[1],
+ linewidth=1,
+ label='median time last %d' % n)
+ plots.append(p)
+
+ formatter = mticker.ScalarFormatter()
+ formatter.set_scientific(False)
+ formatter.set_useOffset(False)
+
+ comb_plt.grid()
+ comb_plt.xaxis.set_major_formatter(formatter)
+ comb_plt.legend()
+
+ other_plt.grid()
+ other_plt.xaxis.set_major_formatter(formatter)
+ leg = other_plt.legend()
+ leg2plot = {}
+ for legline, plot in zip(leg.get_lines(), plots):
+ legline.set_picker(5)
+ leg2plot[legline] = plot
+
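+    # clicking a legend entry toggles visibility of the corresponding plot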
+ def onpick(event):
+ legline = event.artist
+ plot = leg2plot[legline]
+ visible = not plot[0].get_visible()
+ for l in plot:
+ l.set_visible(visible)
+
+ if visible:
+ legline.set_alpha(1.0)
+ else:
+ legline.set_alpha(0.2)
+ fig.canvas.draw()
+ if title is not None:
+ fig.canvas.set_window_title(title)
+ fig.canvas.mpl_connect('pick_event', onpick)
+
+ plt.show()
+
+
+if __name__ == '__main__':
+ import sys
+
+ if len(sys.argv) > 1:
+ print('reading from %r' % sys.argv[1])
+ with open(sys.argv[1], 'r') as fp:
+ plot(json.load(fp), title=sys.argv[1])
+ else:
+ print('reading from stdin')
+ plot(json.load(sys.stdin))
--- a/contrib/perf.py Wed Jan 09 20:00:35 2019 -0800
+++ b/contrib/perf.py Fri Jan 18 13:28:22 2019 -0500
@@ -24,8 +24,10 @@
import gc
import os
import random
+import shutil
import struct
import sys
+import tempfile
import threading
import time
from mercurial import (
@@ -35,6 +37,7 @@
copies,
error,
extensions,
+ hg,
mdiff,
merge,
revlog,
@@ -65,6 +68,11 @@
from mercurial import scmutil # since 1.9 (or 8b252e826c68)
except ImportError:
pass
+try:
+ from mercurial import setdiscovery # since 1.9 (or cb98fed52495)
+except ImportError:
+ pass
+
def identity(a):
return a
@@ -273,7 +281,9 @@
displayall = ui.configbool(b"perf", b"all-timing", False)
return functools.partial(_timer, fm, displayall=displayall), fm
-def stub_timer(fm, func, title=None):
+def stub_timer(fm, func, setup=None, title=None):
+ if setup is not None:
+ setup()
func()
@contextlib.contextmanager
@@ -287,12 +297,14 @@
a, b = ostart, ostop
r.append((cstop - cstart, b[0] - a[0], b[1]-a[1]))
-def _timer(fm, func, title=None, displayall=False):
+def _timer(fm, func, setup=None, title=None, displayall=False):
gc.collect()
results = []
begin = util.timer()
count = 0
while True:
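+        # the optional setup callback runs before each timed call so every
+        # iteration starts from the same state (e.g. with caches cleared)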
+ if setup is not None:
+ setup()
with timeone() as item:
r = func()
count += 1
@@ -453,11 +465,19 @@
# utilities to clear cache
-def clearfilecache(repo, attrname):
- unfi = repo.unfiltered()
- if attrname in vars(unfi):
- delattr(unfi, attrname)
- unfi._filecache.pop(attrname, None)
+def clearfilecache(obj, attrname):
+ unfiltered = getattr(obj, 'unfiltered', None)
+ if unfiltered is not None:
+ obj = obj.unfiltered()
+ if attrname in vars(obj):
+ delattr(obj, attrname)
+ obj._filecache.pop(attrname, None)
+
+def clearchangelog(repo):
+ if repo is not repo.unfiltered():
+ object.__setattr__(repo, r'_clcachekey', None)
+ object.__setattr__(repo, r'_clcache', None)
+ clearfilecache(repo.unfiltered(), 'changelog')
# perf commands
@@ -524,23 +544,23 @@
timer(d)
fm.end()
-@command(b'perftags', formatteropts)
+@command(b'perftags', formatteropts +
+ [
+ (b'', b'clear-revlogs', False, b'refresh changelog and manifest'),
+ ])
def perftags(ui, repo, **opts):
- import mercurial.changelog
- import mercurial.manifest
-
opts = _byteskwargs(opts)
timer, fm = gettimer(ui, opts)
- svfs = getsvfs(repo)
repocleartagscache = repocleartagscachefunc(repo)
+ clearrevlogs = opts[b'clear_revlogs']
+ def s():
+ if clearrevlogs:
+ clearchangelog(repo)
+ clearfilecache(repo.unfiltered(), 'manifest')
+ repocleartagscache()
def t():
- repo.changelog = mercurial.changelog.changelog(svfs)
- rootmanifest = mercurial.manifest.manifestrevlog(svfs)
- repo.manifestlog = mercurial.manifest.manifestlog(svfs, repo,
- rootmanifest)
- repocleartagscache()
return len(repo.tags())
- timer(t)
+ timer(t, setup=s)
fm.end()
@command(b'perfancestors', formatteropts)
@@ -567,15 +587,38 @@
timer(d)
fm.end()
-@command(b'perfbookmarks', formatteropts)
+@command(b'perfdiscovery', formatteropts, b'PATH')
+def perfdiscovery(ui, repo, path, **opts):
+ """benchmark discovery between local repo and the peer at given path
+ """
+ repos = [repo, None]
+ timer, fm = gettimer(ui, opts)
+ path = ui.expandpath(path)
+
+ def s():
+ repos[1] = hg.peer(ui, opts, path)
+ def d():
+ setdiscovery.findcommonheads(ui, *repos)
+ timer(d, setup=s)
+ fm.end()
+
+@command(b'perfbookmarks', formatteropts +
+ [
+ (b'', b'clear-revlogs', False, b'refresh changelog and manifest'),
+ ])
def perfbookmarks(ui, repo, **opts):
"""benchmark parsing bookmarks from disk to memory"""
opts = _byteskwargs(opts)
timer, fm = gettimer(ui, opts)
- def d():
+
+ clearrevlogs = opts[b'clear_revlogs']
+ def s():
+ if clearrevlogs:
+ clearchangelog(repo)
clearfilecache(repo, b'_bookmarks')
+ def d():
repo._bookmarks
- timer(d)
+ timer(d, setup=s)
fm.end()
@command(b'perfbundleread', formatteropts, b'BUNDLE')
@@ -697,9 +740,9 @@
fm.end()
@command(b'perfchangegroupchangelog', formatteropts +
- [(b'', b'version', b'02', b'changegroup version'),
+ [(b'', b'cgversion', b'02', b'changegroup version'),
(b'r', b'rev', b'', b'revisions to add to changegroup')])
-def perfchangegroupchangelog(ui, repo, version=b'02', rev=None, **opts):
+def perfchangegroupchangelog(ui, repo, cgversion=b'02', rev=None, **opts):
"""Benchmark producing a changelog group for a changegroup.
This measures the time spent processing the changelog during a
@@ -712,7 +755,7 @@
opts = _byteskwargs(opts)
cl = repo.changelog
nodes = [cl.lookup(r) for r in repo.revs(rev or b'all()')]
- bundler = changegroup.getbundler(version, repo)
+ bundler = changegroup.getbundler(cgversion, repo)
def d():
state, chunks = bundler._generatechangelog(cl, nodes)
@@ -819,6 +862,7 @@
@command(b'perfpathcopies', [], b"REV REV")
def perfpathcopies(ui, repo, rev1, rev2, **opts):
+ """benchmark the copy tracing logic"""
opts = _byteskwargs(opts)
timer, fm = gettimer(ui, opts)
ctx1 = scmutil.revsingle(repo, rev1, rev1)
@@ -952,18 +996,48 @@
timer(d)
fm.end()
-@command(b'perfindex', formatteropts)
+@command(b'perfignore', formatteropts)
+def perfignore(ui, repo, **opts):
+ """benchmark operation related to computing ignore"""
+ opts = _byteskwargs(opts)
+ timer, fm = gettimer(ui, opts)
+ dirstate = repo.dirstate
+
+ def setupone():
+ dirstate.invalidate()
+ clearfilecache(dirstate, b'_ignore')
+
+ def runone():
+ dirstate._ignore
+
+ timer(runone, setup=setupone, title=b"load")
+ fm.end()
+
+@command(b'perfindex', [
+ (b'', b'rev', b'', b'revision to be looked up (default tip)'),
+ ] + formatteropts)
def perfindex(ui, repo, **opts):
import mercurial.revlog
opts = _byteskwargs(opts)
timer, fm = gettimer(ui, opts)
mercurial.revlog._prereadsize = 2**24 # disable lazy parser in old hg
- n = repo[b"tip"].node()
- svfs = getsvfs(repo)
+ if opts[b'rev'] is None:
+ n = repo[b"tip"].node()
+ else:
+ rev = scmutil.revsingle(repo, opts[b'rev'])
+ n = repo[rev].node()
+
+ unfi = repo.unfiltered()
+ # find the filecache func directly
+    # This avoids polluting the benchmark with the filecache logic
+ makecl = unfi.__class__.changelog.func
+ def setup():
+ # probably not necessary, but for good measure
+ clearchangelog(unfi)
def d():
- cl = mercurial.revlog.revlog(svfs, b"00changelog.i")
+ cl = makecl(unfi)
cl.rev(n)
- timer(d)
+ timer(d, setup=setup)
fm.end()
@command(b'perfstartup', formatteropts)
@@ -1144,6 +1218,82 @@
timer(format)
fm.end()
+@command(b'perfhelper-pathcopies', formatteropts +
+ [
+ (b'r', b'revs', [], b'restrict search to these revisions'),
+ (b'', b'timing', False, b'provides extra data (costly)'),
+ ])
+def perfhelperpathcopies(ui, repo, revs=[], **opts):
+ """find statistic about potential parameters for the `perftracecopies`
+
+ This command find source-destination pair relevant for copytracing testing.
+ It report value for some of the parameters that impact copy tracing time.
+
+ If `--timing` is set, rename detection is run and the associated timing
+ will be reported. The extra details comes at the cost of a slower command
+ execution.
+
+ Since the rename detection is only run once, other factors might easily
+ affect the precision of the timing. However it should give a good
+ approximation of which revision pairs are very costly.
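+
+    Example (hypothetical invocation):
+
+      $ hg perfhelper-pathcopies --revs 'last(all(), 1000)' --timing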
+ """
+ opts = _byteskwargs(opts)
+ fm = ui.formatter(b'perf', opts)
+ dotiming = opts[b'timing']
+
+ if dotiming:
+ header = '%12s %12s %12s %12s %12s %12s\n'
+ output = ("%(source)12s %(destination)12s "
+ "%(nbrevs)12d %(nbmissingfiles)12d "
+ "%(nbrenamedfiles)12d %(time)18.5f\n")
+ header_names = ("source", "destination", "nb-revs", "nb-files",
+ "nb-renames", "time")
+ fm.plain(header % header_names)
+ else:
+ header = '%12s %12s %12s %12s\n'
+ output = ("%(source)12s %(destination)12s "
+ "%(nbrevs)12d %(nbmissingfiles)12d\n")
+ fm.plain(header % ("source", "destination", "nb-revs", "nb-files"))
+
+ if not revs:
+ revs = ['all()']
+ revs = scmutil.revrange(repo, revs)
+
+ roi = repo.revs('merge() and %ld', revs)
+ for r in roi:
+ ctx = repo[r]
+ p1 = ctx.p1().rev()
+ p2 = ctx.p2().rev()
+ bases = repo.changelog._commonancestorsheads(p1, p2)
+ for p in (p1, p2):
+ for b in bases:
+ base = repo[b]
+ parent = repo[p]
+ missing = copies._computeforwardmissing(base, parent)
+ if not missing:
+ continue
+ data = {
+ b'source': base.hex(),
+ b'destination': parent.hex(),
+ b'nbrevs': len(repo.revs('%d::%d', b, p)),
+ b'nbmissingfiles': len(missing),
+ }
+ if dotiming:
+ begin = util.timer()
+ renames = copies.pathcopies(base, parent)
+ end = util.timer()
+ # not very stable timing since we did only one run
+ data['time'] = end - begin
+ data['nbrenamedfiles'] = len(renames)
+ fm.startitem()
+ fm.data(**data)
+ out = data.copy()
+ out['source'] = fm.hexfunc(base.node())
+ out['destination'] = fm.hexfunc(parent.node())
+ fm.plain(output % out)
+
+ fm.end()
+
@command(b'perfcca', formatteropts)
def perfcca(ui, repo, **opts):
opts = _byteskwargs(opts)
@@ -1402,7 +1552,7 @@
ui.popbuffer()
diffopt = diffopt.encode('ascii')
title = b'diffopts: %s' % (diffopt and (b'-' + diffopt) or b'none')
- timer(d, title)
+ timer(d, title=title)
fm.end()
@command(b'perfrevlogindex', revlogopts + formatteropts,
@@ -1553,7 +1703,7 @@
dist = opts[b'dist']
if reverse:
- beginrev, endrev = endrev, beginrev
+ beginrev, endrev = endrev - 1, beginrev - 1
dist = -1 * dist
for x in _xrange(beginrev, endrev, dist):
@@ -1565,6 +1715,241 @@
timer(d)
fm.end()
+@command(b'perfrevlogwrite', revlogopts + formatteropts +
+ [(b's', b'startrev', 1000, b'revision to start writing at'),
+ (b'', b'stoprev', -1, b'last revision to write'),
+          (b'', b'count', 3, b'number of passes to perform'),
+          (b'', b'details', False, b'print timing for every revision tested'),
+          (b'', b'source', b'full', b'the kind of data fed into the revlog'),
+ (b'', b'lazydeltabase', True, b'try the provided delta first'),
+ (b'', b'clear-caches', True, b'clear revlog cache between calls'),
+ ],
+ b'-c|-m|FILE')
+def perfrevlogwrite(ui, repo, file_=None, startrev=1000, stoprev=-1, **opts):
+ """Benchmark writing a series of revisions to a revlog.
+
+ Possible source values are:
+ * `full`: add from a full text (default).
+ * `parent-1`: add from a delta to the first parent
+ * `parent-2`: add from a delta to the second parent if it exists
+ (use a delta from the first parent otherwise)
+ * `parent-smallest`: add from the smallest delta (either p1 or p2)
+ * `storage`: add from the existing precomputed deltas
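+
+    Example (hypothetical invocation, benchmarking manifest writes):
+
+      $ hg perfrevlogwrite -m --source parent-smallest --count 5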
+ """
+ opts = _byteskwargs(opts)
+
+ rl = cmdutil.openrevlog(repo, b'perfrevlogwrite', file_, opts)
+ rllen = getlen(ui)(rl)
+ if startrev < 0:
+ startrev = rllen + startrev
+ if stoprev < 0:
+ stoprev = rllen + stoprev
+
+ lazydeltabase = opts['lazydeltabase']
+ source = opts['source']
+ clearcaches = opts['clear_caches']
+ validsource = (b'full', b'parent-1', b'parent-2', b'parent-smallest',
+ b'storage')
+ if source not in validsource:
+ raise error.Abort('invalid source type: %s' % source)
+
+ ### actually gather results
+ count = opts['count']
+ if count <= 0:
+        raise error.Abort('invalid run count: %d' % count)
+ allresults = []
+ for c in range(count):
+ timing = _timeonewrite(ui, rl, source, startrev, stoprev, c + 1,
+ lazydeltabase=lazydeltabase,
+ clearcaches=clearcaches)
+ allresults.append(timing)
+
+ ### consolidate the results in a single list
+ results = []
+ for idx, (rev, t) in enumerate(allresults[0]):
+ ts = [t]
+ for other in allresults[1:]:
+ orev, ot = other[idx]
+ assert orev == rev
+ ts.append(ot)
+ results.append((rev, ts))
+ resultcount = len(results)
+
+ ### Compute and display relevant statistics
+
+ # get a formatter
+ fm = ui.formatter(b'perf', opts)
+ displayall = ui.configbool(b"perf", b"all-timing", False)
+
+ # print individual details if requested
+ if opts['details']:
+ for idx, item in enumerate(results, 1):
+ rev, data = item
+ title = 'revisions #%d of %d, rev %d' % (idx, resultcount, rev)
+ formatone(fm, data, title=title, displayall=displayall)
+
+ # sorts results by median time
+ results.sort(key=lambda x: sorted(x[1])[len(x[1]) // 2])
+    # list of (name, index) to display
+ relevants = [
+ ("min", 0),
+ ("10%", resultcount * 10 // 100),
+ ("25%", resultcount * 25 // 100),
+ ("50%", resultcount * 70 // 100),
+ ("75%", resultcount * 75 // 100),
+ ("90%", resultcount * 90 // 100),
+ ("95%", resultcount * 95 // 100),
+ ("99%", resultcount * 99 // 100),
+ ("99.9%", resultcount * 999 // 1000),
+ ("99.99%", resultcount * 9999 // 10000),
+ ("99.999%", resultcount * 99999 // 100000),
+ ("max", -1),
+ ]
+ if not ui.quiet:
+ for name, idx in relevants:
+ data = results[idx]
+ title = '%s of %d, rev %d' % (name, resultcount, data[0])
+ formatone(fm, data[1], title=title, displayall=displayall)
+
+    # XXX summing that many floats will not be very precise; we ignore this
+    # fact for now
+ totaltime = []
+ for item in allresults:
+ totaltime.append((sum(x[1][0] for x in item),
+ sum(x[1][1] for x in item),
+ sum(x[1][2] for x in item),)
+ )
+ formatone(fm, totaltime, title="total time (%d revs)" % resultcount,
+ displayall=displayall)
+ fm.end()
+
+class _faketr(object):
+ def add(s, x, y, z=None):
+ return None
+
+def _timeonewrite(ui, orig, source, startrev, stoprev, runidx=None,
+ lazydeltabase=True, clearcaches=True):
+ timings = []
+ tr = _faketr()
+ with _temprevlog(ui, orig, startrev) as dest:
+ dest._lazydeltabase = lazydeltabase
+ revs = list(orig.revs(startrev, stoprev))
+ total = len(revs)
+ topic = 'adding'
+ if runidx is not None:
+ topic += ' (run #%d)' % runidx
+ # Support both old and new progress API
+ if util.safehasattr(ui, 'makeprogress'):
+ progress = ui.makeprogress(topic, unit='revs', total=total)
+ def updateprogress(pos):
+ progress.update(pos)
+ def completeprogress():
+ progress.complete()
+ else:
+ def updateprogress(pos):
+ ui.progress(topic, pos, unit='revs', total=total)
+ def completeprogress():
+ ui.progress(topic, None, unit='revs', total=total)
+
+ for idx, rev in enumerate(revs):
+ updateprogress(idx)
+ addargs, addkwargs = _getrevisionseed(orig, rev, tr, source)
+ if clearcaches:
+ dest.index.clearcaches()
+ dest.clearcaches()
+ with timeone() as r:
+ dest.addrawrevision(*addargs, **addkwargs)
+ timings.append((rev, r[0]))
+ updateprogress(total)
+ completeprogress()
+ return timings
+
+def _getrevisionseed(orig, rev, tr, source):
+ from mercurial.node import nullid
+
+ linkrev = orig.linkrev(rev)
+ node = orig.node(rev)
+ p1, p2 = orig.parents(node)
+ flags = orig.flags(rev)
+ cachedelta = None
+ text = None
+
+ if source == b'full':
+ text = orig.revision(rev)
+ elif source == b'parent-1':
+ baserev = orig.rev(p1)
+ cachedelta = (baserev, orig.revdiff(p1, rev))
+ elif source == b'parent-2':
+ parent = p2
+ if p2 == nullid:
+ parent = p1
+ baserev = orig.rev(parent)
+ cachedelta = (baserev, orig.revdiff(parent, rev))
+ elif source == b'parent-smallest':
+ p1diff = orig.revdiff(p1, rev)
+ parent = p1
+ diff = p1diff
+ if p2 != nullid:
+ p2diff = orig.revdiff(p2, rev)
+ if len(p1diff) > len(p2diff):
+ parent = p2
+ diff = p2diff
+ baserev = orig.rev(parent)
+ cachedelta = (baserev, diff)
+ elif source == b'storage':
+ baserev = orig.deltaparent(rev)
+ cachedelta = (baserev, orig.revdiff(orig.node(baserev), rev))
+
+ return ((text, tr, linkrev, p1, p2),
+ {'node': node, 'flags': flags, 'cachedelta': cachedelta})
+
+@contextlib.contextmanager
+def _temprevlog(ui, orig, truncaterev):
+ from mercurial import vfs as vfsmod
+
+ if orig._inline:
+ raise error.Abort('not supporting inline revlog (yet)')
+
+ origindexpath = orig.opener.join(orig.indexfile)
+ origdatapath = orig.opener.join(orig.datafile)
+ indexname = 'revlog.i'
+ dataname = 'revlog.d'
+
+ tmpdir = tempfile.mkdtemp(prefix='tmp-hgperf-')
+ try:
+ # copy the data file in a temporary directory
+ ui.debug('copying data in %s\n' % tmpdir)
+ destindexpath = os.path.join(tmpdir, 'revlog.i')
+ destdatapath = os.path.join(tmpdir, 'revlog.d')
+ shutil.copyfile(origindexpath, destindexpath)
+ shutil.copyfile(origdatapath, destdatapath)
+
+ # remove the data we want to add again
+ ui.debug('truncating data to be rewritten\n')
+ with open(destindexpath, 'ab') as index:
+ index.seek(0)
+ index.truncate(truncaterev * orig._io.size)
+ with open(destdatapath, 'ab') as data:
+ data.seek(0)
+ data.truncate(orig.start(truncaterev))
+
+ # instantiate a new revlog from the temporary copy
+        ui.debug('instantiating revlog from the truncated copy\n')
+ vfs = vfsmod.vfs(tmpdir)
+ vfs.options = getattr(orig.opener, 'options', None)
+
+ dest = revlog.revlog(vfs,
+ indexfile=indexname,
+ datafile=dataname)
+ if dest._inline:
+ raise error.Abort('not supporting inline revlog (yet)')
+ # make sure internals are initialized
+ dest.revision(len(dest) - 1)
+ yield dest
+ del dest, vfs
+ finally:
+ shutil.rmtree(tmpdir, True)
+
@command(b'perfrevlogchunks', revlogopts + formatteropts +
[(b'e', b'engines', b'', b'compression engines to use'),
(b's', b'startrev', 0, b'revision to start at')],
@@ -1692,10 +2077,11 @@
Obtaining a revlog revision consists of roughly the following steps:
1. Compute the delta chain
- 2. Obtain the raw chunks for that delta chain
- 3. Decompress each raw chunk
- 4. Apply binary patches to obtain fulltext
- 5. Verify hash of fulltext
+ 2. Slice the delta chain if applicable
+ 3. Obtain the raw chunks for that delta chain
+ 4. Decompress each raw chunk
+ 5. Apply binary patches to obtain fulltext
+ 6. Verify hash of fulltext
This command measures the time spent in each of these phases.
"""
@@ -1723,17 +2109,18 @@
inline = r._inline
iosize = r._io.size
buffer = util.buffer
- offset = start(chain[0])
chunks = []
ladd = chunks.append
-
- for rev in chain:
- chunkstart = start(rev)
- if inline:
- chunkstart += (rev + 1) * iosize
- chunklength = length(rev)
- ladd(buffer(data, chunkstart - offset, chunklength))
+ for idx, item in enumerate(chain):
+ offset = start(item[0])
+ bits = data[idx]
+ for rev in item:
+ chunkstart = start(rev)
+ if inline:
+ chunkstart += (rev + 1) * iosize
+ chunklength = length(rev)
+ ladd(buffer(bits, chunkstart - offset, chunklength))
return chunks
@@ -1745,7 +2132,12 @@
def doread(chain):
if not cache:
r.clearcaches()
- segmentforrevs(chain[0], chain[-1])
+ for item in slicedchain:
+ segmentforrevs(item[0], item[-1])
+
+ def doslice(r, chain, size):
+ for s in slicechunk(r, chain, targetsize=size):
+ pass
def dorawchunks(data, chain):
if not cache:
@@ -1772,9 +2164,19 @@
r.clearcaches()
r.revision(node)
+ try:
+ from mercurial.revlogutils.deltas import slicechunk
+ except ImportError:
+ slicechunk = getattr(revlog, '_slicechunk', None)
+
+ size = r.length(rev)
chain = r._deltachain(rev)[0]
- data = segmentforrevs(chain[0], chain[-1])[1]
- rawchunks = getrawchunks(data, chain)
+ if not getattr(r, '_withsparseread', False):
+ slicedchain = (chain,)
+ else:
+ slicedchain = tuple(slicechunk(r, chain, targetsize=size))
+ data = [segmentforrevs(seg[0], seg[-1])[1] for seg in slicedchain]
+ rawchunks = getrawchunks(data, slicedchain)
bins = r._chunks(chain)
text = bytes(bins[0])
bins = bins[1:]
@@ -1784,16 +2186,23 @@
(lambda: dorevision(), b'full'),
(lambda: dodeltachain(rev), b'deltachain'),
(lambda: doread(chain), b'read'),
- (lambda: dorawchunks(data, chain), b'rawchunks'),
+ ]
+
+ if getattr(r, '_withsparseread', False):
+ slicing = (lambda: doslice(r, chain, size), b'slice-sparse-chain')
+ benches.append(slicing)
+
+ benches.extend([
+ (lambda: dorawchunks(data, slicedchain), b'rawchunks'),
(lambda: dodecompress(rawchunks), b'decompress'),
(lambda: dopatch(text, bins), b'patch'),
(lambda: dohash(text), b'hash'),
- ]
+ ])
+ timer, fm = gettimer(ui, opts)
for fn, title in benches:
- timer, fm = gettimer(ui, opts)
timer(fn, title=title)
- fm.end()
+ fm.end()
@command(b'perfrevset',
[(b'C', b'clear', False, b'clear volatile cache between each call.'),
@@ -1929,13 +2338,120 @@
branchcachewrite.restore()
fm.end()
+@command(b'perfbranchmapupdate', [
+ (b'', b'base', [], b'subset of revision to start from'),
+ (b'', b'target', [], b'subset of revision to end with'),
+ (b'', b'clear-caches', False, b'clear cache between each runs')
+ ] + formatteropts)
+def perfbranchmapupdate(ui, repo, base=(), target=(), **opts):
+ """benchmark branchmap update from for <base> revs to <target> revs
+
+ If `--clear-caches` is passed, the following items will be reset before
+ each update:
+ * the changelog instance and associated indexes
+ * the rev-branch-cache instance
+
+ Examples:
+
+    # update for the last revision
+ $ hg perfbranchmapupdate --base 'not tip' --target 'tip'
+
+    # update for changes coming with a new branch
+ $ hg perfbranchmapupdate --base 'stable' --target 'default'
+ """
+ from mercurial import branchmap
+ from mercurial import repoview
+ opts = _byteskwargs(opts)
+ timer, fm = gettimer(ui, opts)
+ clearcaches = opts[b'clear_caches']
+ unfi = repo.unfiltered()
+    x = [None] # used to pass data between closures
+
+    # we use a `list` here to avoid possible side effects from smartset
+ baserevs = list(scmutil.revrange(repo, base))
+ targetrevs = list(scmutil.revrange(repo, target))
+ if not baserevs:
+ raise error.Abort(b'no revisions selected for --base')
+ if not targetrevs:
+ raise error.Abort(b'no revisions selected for --target')
+
+ # make sure the target branchmap also contains the one in the base
+ targetrevs = list(set(baserevs) | set(targetrevs))
+ targetrevs.sort()
+
+ cl = repo.changelog
+ allbaserevs = list(cl.ancestors(baserevs, inclusive=True))
+ allbaserevs.sort()
+ alltargetrevs = frozenset(cl.ancestors(targetrevs, inclusive=True))
+
+ newrevs = list(alltargetrevs.difference(allbaserevs))
+ newrevs.sort()
+
+ allrevs = frozenset(unfi.changelog.revs())
+ basefilterrevs = frozenset(allrevs.difference(allbaserevs))
+ targetfilterrevs = frozenset(allrevs.difference(alltargetrevs))
+
+ def basefilter(repo, visibilityexceptions=None):
+ return basefilterrevs
+
+ def targetfilter(repo, visibilityexceptions=None):
+ return targetfilterrevs
+
+ msg = b'benchmark of branchmap with %d revisions with %d new ones\n'
+ ui.status(msg % (len(allbaserevs), len(newrevs)))
+ if targetfilterrevs:
+ msg = b'(%d revisions still filtered)\n'
+ ui.status(msg % len(targetfilterrevs))
+
+ try:
+ repoview.filtertable[b'__perf_branchmap_update_base'] = basefilter
+ repoview.filtertable[b'__perf_branchmap_update_target'] = targetfilter
+
+ baserepo = repo.filtered(b'__perf_branchmap_update_base')
+ targetrepo = repo.filtered(b'__perf_branchmap_update_target')
+
+ # try to find an existing branchmap to reuse
+ subsettable = getbranchmapsubsettable()
+ candidatefilter = subsettable.get(None)
+ while candidatefilter is not None:
+ candidatebm = repo.filtered(candidatefilter).branchmap()
+ if candidatebm.validfor(baserepo):
+ filtered = repoview.filterrevs(repo, candidatefilter)
+ missing = [r for r in allbaserevs if r in filtered]
+ base = candidatebm.copy()
+ base.update(baserepo, missing)
+ break
+ candidatefilter = subsettable.get(candidatefilter)
+ else:
+            # no suitable subset was found
+ base = branchmap.branchcache()
+ base.update(baserepo, allbaserevs)
+
+ def setup():
+ x[0] = base.copy()
+ if clearcaches:
+ unfi._revbranchcache = None
+ clearchangelog(repo)
+
+ def bench():
+ x[0].update(targetrepo, newrevs)
+
+ timer(bench, setup=setup)
+ fm.end()
+ finally:
+ repoview.filtertable.pop(b'__perf_branchmap_update_base', None)
+ repoview.filtertable.pop(b'__perf_branchmap_update_target', None)
+
@command(b'perfbranchmapload', [
(b'f', b'filter', b'', b'Specify repoview filter'),
+    (b'', b'list', False, b'List branchmap filter caches'),
+ (b'', b'clear-revlogs', False, b'refresh changelog and manifest'),
] + formatteropts)
-def perfbranchmapread(ui, repo, filter=b'', list=False, **opts):
+def perfbranchmapload(ui, repo, filter=b'', list=False, **opts):
"""benchmark reading the branchmap"""
opts = _byteskwargs(opts)
+ clearrevlogs = opts[b'clear_revlogs']
if list:
for name, kind, st in repo.cachevfs.readdir(stat=True):
@@ -1944,16 +2460,31 @@
ui.status(b'%s - %s\n'
% (filtername, util.bytecount(st.st_size)))
return
- if filter:
+ if not filter:
+ filter = None
+ subsettable = getbranchmapsubsettable()
+ if filter is None:
+ repo = repo.unfiltered()
+ else:
repo = repoview.repoview(repo, filter)
- else:
- repo = repo.unfiltered()
+
+    repo.branchmap() # make sure we have a relevant, up-to-date branchmap
+
+ currentfilter = filter
# try once without timer, the filter may not be cached
- if branchmap.read(repo) is None:
- raise error.Abort(b'No brachmap cached for %s repo'
- % (filter or b'unfiltered'))
+ while branchmap.read(repo) is None:
+ currentfilter = subsettable.get(currentfilter)
+ if currentfilter is None:
+ raise error.Abort(b'No branchmap cached for %s repo'
+ % (filter or b'unfiltered'))
+ repo = repo.filtered(currentfilter)
timer, fm = gettimer(ui, opts)
- timer(lambda: branchmap.read(repo) and None)
+ def setup():
+ if clearrevlogs:
+ clearchangelog(repo)
+ def bench():
+ branchmap.read(repo)
+ timer(bench, setup=setup)
fm.end()
@command(b'perfloadmarkers')
@@ -2124,3 +2655,21 @@
hint=b"use 3.5 or later")
return orig(repo, cmd, file_, opts)
extensions.wrapfunction(cmdutil, b'openrevlog', openrevlog)
+
+@command(b'perfprogress', formatteropts + [
+ (b'', b'topic', b'topic', b'topic for progress messages'),
+ (b'c', b'total', 1000000, b'total value we are progressing to'),
+], norepo=True)
+def perfprogress(ui, topic=None, total=None, **opts):
+ """printing of progress bars"""
+ opts = _byteskwargs(opts)
+
+ timer, fm = gettimer(ui, opts)
+
+ def doprogress():
+ with ui.makeprogress(topic, total=total) as progress:
+ for i in pycompat.xrange(total):
+ progress.increment()
+
+ timer(doprogress)
+ fm.end()
--- a/contrib/python3-whitelist Wed Jan 09 20:00:35 2019 -0800
+++ b/contrib/python3-whitelist Fri Jan 18 13:28:22 2019 -0500
@@ -1,4 +1,5 @@
test-abort-checkin.t
+test-absorb-edit-lines.t
test-absorb-filefixupstate.py
test-absorb-phase.t
test-absorb-rename.t
@@ -30,6 +31,7 @@
test-bisect2.t
test-bisect3.t
test-blackbox.t
+test-bookflow.t
test-bookmarks-current.t
test-bookmarks-merge.t
test-bookmarks-pushpull.t
@@ -62,6 +64,7 @@
test-check-config.py
test-check-config.t
test-check-execute.t
+test-check-help.t
test-check-interfaces.py
test-check-module-imports.t
test-check-py3-compat.t
@@ -116,6 +119,7 @@
test-copy-move-merge.t
test-copy.t
test-copytrace-heuristics.t
+test-custom-filters.t
test-debugbuilddag.t
test-debugbundle.t
test-debugcommands.t
@@ -193,9 +197,18 @@
test-export.t
test-extdata.t
test-extdiff.t
+test-extension-timing.t
test-extensions-afterloaded.t
test-extensions-wrapfunction.py
test-extra-filelog-entry.t
+test-fastannotate-corrupt.t
+test-fastannotate-diffopts.t
+test-fastannotate-hg.t
+test-fastannotate-perfhack.t
+test-fastannotate-protocol.t
+test-fastannotate-renames.t
+test-fastannotate-revmap.py
+test-fastannotate.t
test-fetch.t
test-filebranch.t
test-filecache.py
@@ -206,6 +219,19 @@
test-fix.t
test-flags.t
test-fncache.t
+test-gendoc-da.t
+test-gendoc-de.t
+test-gendoc-el.t
+test-gendoc-fr.t
+test-gendoc-it.t
+test-gendoc-ja.t
+test-gendoc-pt_BR.t
+test-gendoc-ro.t
+test-gendoc-ru.t
+test-gendoc-sv.t
+test-gendoc-zh_CN.t
+test-gendoc-zh_TW.t
+test-gendoc.t
test-generaldelta.t
test-getbundle.t
test-git-export.t
@@ -217,6 +243,7 @@
test-graft.t
test-grep.t
test-hardlinks.t
+test-help-hide.t
test-help.t
test-hg-parseurl.py
test-hghave.t
@@ -261,6 +288,7 @@
test-identify.t
test-impexp-branch.t
test-import-bypass.t
+test-import-context.t
test-import-eol.t
test-import-merge.t
test-import-unknown.t
@@ -301,16 +329,22 @@
test-largefiles-misc.t
test-largefiles-small-disk.t
test-largefiles-update.t
+test-largefiles-wireproto.t
test-largefiles.t
+test-lfconvert.t
+test-lfs-bundle.t
test-lfs-largefiles.t
test-lfs-pointer.py
+test-lfs.t
test-linelog.py
test-linerange.py
test-locate.t
test-lock-badness.t
+test-log-exthook.t
test-log-linerange.t
test-log.t
test-logexchange.t
+test-logtoprocess.t
test-lrucachedict.py
test-mactext.t
test-mailmap.t
@@ -394,6 +428,8 @@
test-narrow-rebase.t
test-narrow-shallow-merges.t
test-narrow-shallow.t
+test-narrow-share.t
+test-narrow-sparse.t
test-narrow-strip.t
test-narrow-trackedcmd.t
test-narrow-update.t
@@ -474,6 +510,7 @@
test-push-checkheads-unpushed-D6.t
test-push-checkheads-unpushed-D7.t
test-push-http.t
+test-push-race.t
test-push-warn.t
test-push.t
test-pushvars.t
@@ -512,6 +549,28 @@
test-releasenotes-merging.t
test-releasenotes-parsing.t
test-relink.t
+test-remotefilelog-bad-configs.t
+test-remotefilelog-bgprefetch.t
+test-remotefilelog-blame.t
+test-remotefilelog-bundle2.t
+test-remotefilelog-bundles.t
+test-remotefilelog-cacheprocess.t
+test-remotefilelog-clone-tree.t
+test-remotefilelog-clone.t
+test-remotefilelog-gcrepack.t
+test-remotefilelog-http.t
+test-remotefilelog-keepset.t
+test-remotefilelog-local.t
+test-remotefilelog-log.t
+test-remotefilelog-partial-shallow.t
+test-remotefilelog-permissions.t
+test-remotefilelog-permisssions.t
+test-remotefilelog-prefetch.t
+test-remotefilelog-pull-noshallow.t
+test-remotefilelog-share.t
+test-remotefilelog-sparse.t
+test-remotefilelog-tags.t
+test-remotefilelog-wireproto.t
test-remove.t
test-removeemptydirs.t
test-rename-after-merge.t
@@ -541,11 +600,13 @@
test-rollback.t
test-run-tests.py
test-run-tests.t
+test-rust-ancestor.py
test-schemes.t
test-serve.t
test-setdiscovery.t
test-share.t
test-shelve.t
+test-shelve2.t
test-show-stack.t
test-show-work.t
test-show.t
--- a/contrib/revsetbenchmarks.py Wed Jan 09 20:00:35 2019 -0800
+++ b/contrib/revsetbenchmarks.py Fri Jan 18 13:28:22 2019 -0500
@@ -56,9 +56,11 @@
def perf(revset, target=None, contexts=False):
"""run benchmark for this very revset"""
try:
- args = ['perfrevset', revset]
+ args = ['perfrevset']
if contexts:
args.append('--contexts')
+ args.append('--')
+ args.append(revset)
output = hg(args, repo=target)
return parseoutput(output)
except subprocess.CalledProcessError as exc:
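
The inserted '--' matters because benchmarked revsets often begin with a dash
(for example '-5:-1') and would otherwise be consumed as command options. The
same behaviour can be reproduced with argparse, standing in here for hg's own
option parser::

    import argparse

    parser = argparse.ArgumentParser()
    parser.add_argument('--contexts', action='store_true')
    parser.add_argument('revset')

    # parser.parse_args(['-5:-1']) would fail: the leading dash makes
    # argparse treat the revset as an (unknown) option.
    args = parser.parse_args(['--', '-5:-1'])
    print(args.revset)  # -> -5:-1
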
--- a/contrib/wix/help.wxs Wed Jan 09 20:00:35 2019 -0800
+++ b/contrib/wix/help.wxs Fri Jan 18 13:28:22 2019 -0500
@@ -47,6 +47,7 @@
<File Id="internals.censor.txt" Name="censor.txt" />
<File Id="internals.changegroups.txt" Name="changegroups.txt" />
<File Id="internals.config.txt" Name="config.txt" />
+ <File Id="internals.extensions.txt" Name="extensions.txt" />
<File Id="internals.linelog.txt" Name="linelog.txt" />
<File Id="internals.requirements.txt" Name="requirements.txt" />
<File Id="internals.revlogs.txt" Name="revlogs.txt" />
--- a/doc/docchecker Wed Jan 09 20:00:35 2019 -0800
+++ b/doc/docchecker Fri Jan 18 13:28:22 2019 -0500
@@ -9,18 +9,28 @@
from __future__ import absolute_import, print_function
+import os
import re
import sys
-leadingline = re.compile(r'(^\s*)(\S.*)$')
+try:
+ import msvcrt
+ msvcrt.setmode(sys.stdout.fileno(), os.O_BINARY)
+ msvcrt.setmode(sys.stderr.fileno(), os.O_BINARY)
+except ImportError:
+ pass
+
+stdout = getattr(sys.stdout, 'buffer', sys.stdout)
+
+leadingline = re.compile(br'(^\s*)(\S.*)$')
checks = [
- (r""":hg:`[^`]*'[^`]*`""",
- """warning: please avoid nesting ' in :hg:`...`"""),
- (r'\w:hg:`',
- 'warning: please have a space before :hg:'),
- (r"""(?:[^a-z][^'.])hg ([^,;"`]*'(?!hg)){2}""",
- '''warning: please use " instead of ' for hg ... "..."'''),
+ (br""":hg:`[^`]*'[^`]*`""",
+ b"""warning: please avoid nesting ' in :hg:`...`"""),
+ (br'\w:hg:`',
+ b'warning: please have a space before :hg:'),
+ (br"""(?:[^a-z][^'.])hg ([^,;"`]*'(?!hg)){2}""",
+ b'''warning: please use " instead of ' for hg ... "..."'''),
]
def check(line):
@@ -29,25 +39,25 @@
if re.search(match, line):
messages.append(msg)
if messages:
- print(line)
+ stdout.write(b'%s\n' % line)
for msg in messages:
- print(msg)
+ stdout.write(b'%s\n' % msg)
def work(file):
- (llead, lline) = ('', '')
+ (llead, lline) = (b'', b'')
for line in file:
# this section unwraps lines
match = leadingline.match(line)
if not match:
check(lline)
- (llead, lline) = ('', '')
+ (llead, lline) = (b'', b'')
continue
lead, line = match.group(1), match.group(2)
if (lead == llead):
- if (lline != ''):
- lline += ' ' + line
+ if (lline != b''):
+ lline += b' ' + line
else:
lline = line
else:
@@ -58,9 +68,9 @@
def main():
for f in sys.argv[1:]:
try:
- with open(f) as file:
+ with open(f, 'rb') as file:
work(file)
except BaseException as e:
- print("failed to process %s: %s" % (f, e))
+ sys.stdout.write("failed to process %s: %s\n" % (f, e))
main()
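
The pattern, the accumulated lines, and the file mode all flip to bytes
together because a bytes regex only accepts bytes input on Python 3::

    import re

    leadingline = re.compile(br'(^\s*)(\S.*)$')
    print(leadingline.match(b'  some text').group(2))  # -> b'some text'
    # leadingline.match('  some text') raises TypeError on Python 3:
    # "cannot use a bytes pattern on a string-like object"
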
--- a/doc/gendoc.py Wed Jan 09 20:00:35 2019 -0800
+++ b/doc/gendoc.py Fri Jan 18 13:28:22 2019 -0500
@@ -10,11 +10,18 @@
import sys
import textwrap
+try:
+ import msvcrt
+ msvcrt.setmode(sys.stdout.fileno(), os.O_BINARY)
+ msvcrt.setmode(sys.stderr.fileno(), os.O_BINARY)
+except ImportError:
+ pass
+
# This script is executed during installs and may not have C extensions
# available. Relax C module requirements.
-os.environ['HGMODULEPOLICY'] = 'allow'
+os.environ[r'HGMODULEPOLICY'] = r'allow'
# import from the live mercurial repo
-sys.path.insert(0, "..")
+sys.path.insert(0, r"..")
from mercurial import demandimport; demandimport.enable()
# Load util so that the locale path is set by i18n.setdatapath() before
# calling _().
@@ -22,9 +29,11 @@
util.datapath
from mercurial import (
commands,
+ encoding,
extensions,
help,
minirst,
+ pycompat,
ui as uimod,
)
from mercurial.i18n import (
@@ -39,19 +48,19 @@
def get_desc(docstr):
if not docstr:
- return "", ""
+ return b"", b""
# sanitize
- docstr = docstr.strip("\n")
+ docstr = docstr.strip(b"\n")
docstr = docstr.rstrip()
shortdesc = docstr.splitlines()[0].strip()
- i = docstr.find("\n")
+ i = docstr.find(b"\n")
if i != -1:
desc = docstr[i + 2:]
else:
desc = shortdesc
- desc = textwrap.dedent(desc)
+ desc = textwrap.dedent(desc.decode('latin1')).encode('latin1')
return (shortdesc, desc)
@@ -61,91 +70,93 @@
shortopt, longopt, default, desc, optlabel = opt
else:
shortopt, longopt, default, desc = opt
- optlabel = _("VALUE")
+ optlabel = _(b"VALUE")
allopts = []
if shortopt:
- allopts.append("-%s" % shortopt)
+ allopts.append(b"-%s" % shortopt)
if longopt:
- allopts.append("--%s" % longopt)
+ allopts.append(b"--%s" % longopt)
if isinstance(default, list):
- allopts[-1] += " <%s[+]>" % optlabel
+ allopts[-1] += b" <%s[+]>" % optlabel
elif (default is not None) and not isinstance(default, bool):
- allopts[-1] += " <%s>" % optlabel
- if '\n' in desc:
+ allopts[-1] += b" <%s>" % optlabel
+ if b'\n' in desc:
# only remove line breaks and indentation
- desc = ' '.join(l.lstrip() for l in desc.split('\n'))
- desc += default and _(" (default: %s)") % default or ""
- yield (", ".join(allopts), desc)
+ desc = b' '.join(l.lstrip() for l in desc.split(b'\n'))
+ desc += default and _(b" (default: %s)") % bytes(default) or b""
+ yield (b", ".join(allopts), desc)
def get_cmd(cmd, cmdtable):
d = {}
attr = cmdtable[cmd]
- cmds = cmd.lstrip("^").split("|")
+ cmds = cmd.lstrip(b"^").split(b"|")
- d['cmd'] = cmds[0]
- d['aliases'] = cmd.split("|")[1:]
- d['desc'] = get_desc(gettext(attr[0].__doc__))
- d['opts'] = list(get_opts(attr[1]))
+ d[b'cmd'] = cmds[0]
+ d[b'aliases'] = cmd.split(b"|")[1:]
+ d[b'desc'] = get_desc(gettext(pycompat.getdoc(attr[0])))
+ d[b'opts'] = list(get_opts(attr[1]))
- s = 'hg ' + cmds[0]
+ s = b'hg ' + cmds[0]
if len(attr) > 2:
- if not attr[2].startswith('hg'):
- s += ' ' + attr[2]
+ if not attr[2].startswith(b'hg'):
+ s += b' ' + attr[2]
else:
s = attr[2]
- d['synopsis'] = s.strip()
+ d[b'synopsis'] = s.strip()
return d
def showdoc(ui):
# print options
- ui.write(minirst.section(_("Options")))
+ ui.write(minirst.section(_(b"Options")))
multioccur = False
for optstr, desc in get_opts(globalopts):
- ui.write("%s\n %s\n\n" % (optstr, desc))
- if optstr.endswith("[+]>"):
+ ui.write(b"%s\n %s\n\n" % (optstr, desc))
+ if optstr.endswith(b"[+]>"):
multioccur = True
if multioccur:
- ui.write(_("\n[+] marked option can be specified multiple times\n"))
- ui.write("\n")
+ ui.write(_(b"\n[+] marked option can be specified multiple times\n"))
+ ui.write(b"\n")
# print cmds
- ui.write(minirst.section(_("Commands")))
+ ui.write(minirst.section(_(b"Commands")))
commandprinter(ui, table, minirst.subsection)
# print help topics
# The config help topic is included in the hgrc.5 man page.
- helpprinter(ui, helptable, minirst.section, exclude=['config'])
+ helpprinter(ui, helptable, minirst.section, exclude=[b'config'])
- ui.write(minirst.section(_("Extensions")))
- ui.write(_("This section contains help for extensions that are "
- "distributed together with Mercurial. Help for other "
- "extensions is available in the help system."))
- ui.write(("\n\n"
- ".. contents::\n"
- " :class: htmlonly\n"
- " :local:\n"
- " :depth: 1\n\n"))
+ ui.write(minirst.section(_(b"Extensions")))
+ ui.write(_(b"This section contains help for extensions that are "
+ b"distributed together with Mercurial. Help for other "
+ b"extensions is available in the help system."))
+ ui.write((b"\n\n"
+ b".. contents::\n"
+ b" :class: htmlonly\n"
+ b" :local:\n"
+ b" :depth: 1\n\n"))
for extensionname in sorted(allextensionnames()):
mod = extensions.load(ui, extensionname, None)
ui.write(minirst.subsection(extensionname))
- ui.write("%s\n\n" % gettext(mod.__doc__))
+ ui.write(b"%s\n\n" % gettext(pycompat.getdoc(mod)))
cmdtable = getattr(mod, 'cmdtable', None)
if cmdtable:
- ui.write(minirst.subsubsection(_('Commands')))
+ ui.write(minirst.subsubsection(_(b'Commands')))
commandprinter(ui, cmdtable, minirst.subsubsubsection)
def showtopic(ui, topic):
extrahelptable = [
- (["common"], '', loaddoc('common'), help.TOPIC_CATEGORY_MISC),
- (["hg.1"], '', loaddoc('hg.1'), help.TOPIC_CATEGORY_CONFIG),
- (["hg-ssh.8"], '', loaddoc('hg-ssh.8'), help.TOPIC_CATEGORY_CONFIG),
- (["hgignore.5"], '', loaddoc('hgignore.5'), help.TOPIC_CATEGORY_CONFIG),
- (["hgrc.5"], '', loaddoc('hgrc.5'), help.TOPIC_CATEGORY_CONFIG),
- (["hgignore.5.gendoc"], '', loaddoc('hgignore'),
+ ([b"common"], b'', loaddoc(b'common'), help.TOPIC_CATEGORY_MISC),
+ ([b"hg.1"], b'', loaddoc(b'hg.1'), help.TOPIC_CATEGORY_CONFIG),
+ ([b"hg-ssh.8"], b'', loaddoc(b'hg-ssh.8'), help.TOPIC_CATEGORY_CONFIG),
+ ([b"hgignore.5"], b'', loaddoc(b'hgignore.5'),
help.TOPIC_CATEGORY_CONFIG),
- (["hgrc.5.gendoc"], '', loaddoc('config'), help.TOPIC_CATEGORY_CONFIG),
+ ([b"hgrc.5"], b'', loaddoc(b'hgrc.5'), help.TOPIC_CATEGORY_CONFIG),
+ ([b"hgignore.5.gendoc"], b'', loaddoc(b'hgignore'),
+ help.TOPIC_CATEGORY_CONFIG),
+ ([b"hgrc.5.gendoc"], b'', loaddoc(b'config'),
+ help.TOPIC_CATEGORY_CONFIG),
]
helpprinter(ui, helptable + extrahelptable, None, include=[topic])
@@ -157,74 +168,73 @@
if include and names[0] not in include:
continue
for name in names:
- ui.write(".. _%s:\n" % name)
- ui.write("\n")
+ ui.write(b".. _%s:\n" % name)
+ ui.write(b"\n")
if sectionfunc:
ui.write(sectionfunc(sec))
if callable(doc):
doc = doc(ui)
ui.write(doc)
- ui.write("\n")
+ ui.write(b"\n")
def commandprinter(ui, cmdtable, sectionfunc):
h = {}
for c, attr in cmdtable.items():
- f = c.split("|")[0]
- f = f.lstrip("^")
+ f = c.split(b"|")[0]
+ f = f.lstrip(b"^")
h[f] = c
cmds = h.keys()
- cmds.sort()
- for f in cmds:
- if f.startswith("debug"):
+ for f in sorted(cmds):
+ if f.startswith(b"debug"):
continue
d = get_cmd(h[f], cmdtable)
- ui.write(sectionfunc(d['cmd']))
+ ui.write(sectionfunc(d[b'cmd']))
# short description
- ui.write(d['desc'][0])
+ ui.write(d[b'desc'][0])
# synopsis
- ui.write("::\n\n")
- synopsislines = d['synopsis'].splitlines()
+ ui.write(b"::\n\n")
+ synopsislines = d[b'synopsis'].splitlines()
for line in synopsislines:
# some commands (such as rebase) have a multi-line
# synopsis
- ui.write(" %s\n" % line)
- ui.write('\n')
+ ui.write(b" %s\n" % line)
+ ui.write(b'\n')
# description
- ui.write("%s\n\n" % d['desc'][1])
+ ui.write(b"%s\n\n" % d[b'desc'][1])
# options
- opt_output = list(d['opts'])
+ opt_output = list(d[b'opts'])
if opt_output:
opts_len = max([len(line[0]) for line in opt_output])
- ui.write(_("Options:\n\n"))
+ ui.write(_(b"Options:\n\n"))
multioccur = False
for optstr, desc in opt_output:
if desc:
- s = "%-*s %s" % (opts_len, optstr, desc)
+ s = b"%-*s %s" % (opts_len, optstr, desc)
else:
s = optstr
- ui.write("%s\n" % s)
- if optstr.endswith("[+]>"):
+ ui.write(b"%s\n" % s)
+ if optstr.endswith(b"[+]>"):
multioccur = True
if multioccur:
- ui.write(_("\n[+] marked option can be specified"
- " multiple times\n"))
- ui.write("\n")
+ ui.write(_(b"\n[+] marked option can be specified"
+ b" multiple times\n"))
+ ui.write(b"\n")
# aliases
- if d['aliases']:
- ui.write(_(" aliases: %s\n\n") % " ".join(d['aliases']))
+ if d[b'aliases']:
+ ui.write(_(b" aliases: %s\n\n") % b" ".join(d[b'aliases']))
def allextensionnames():
- return extensions.enabled().keys() + extensions.disabled().keys()
+ return set(extensions.enabled().keys()) | set(extensions.disabled().keys())
if __name__ == "__main__":
- doc = 'hg.1.gendoc'
+ doc = b'hg.1.gendoc'
if len(sys.argv) > 1:
- doc = sys.argv[1]
+ doc = encoding.strtolocal(sys.argv[1])
ui = uimod.ui.load()
- if doc == 'hg.1.gendoc':
+ if doc == b'hg.1.gendoc':
showdoc(ui)
else:
- showtopic(ui, sys.argv[1])
+ showtopic(ui, encoding.strtolocal(sys.argv[1]))
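
pycompat.getdoc() is used above because __doc__ is a native str on Python 3
while this script emits bytes. A simplified stand-in for what such a helper
does (the real one lives in mercurial/pycompat.py)::

    import sys

    def getdoc(obj):
        """Return an object's docstring as bytes on Python 3 (simplified
        sketch of mercurial.pycompat.getdoc)."""
        doc = getattr(obj, '__doc__', None)
        if doc is None or sys.version_info[0] < 3:
            return doc
        return doc.encode('utf-8')

    print(getdoc(getdoc)[:24])  # -> b"Return an object's docst"
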
--- a/hgext/absorb.py Wed Jan 09 20:00:35 2019 -0800
+++ b/hgext/absorb.py Fri Jan 18 13:28:22 2019 -0500
@@ -489,7 +489,8 @@
if l[colonpos - 1:colonpos + 2] != ' : ':
raise error.Abort(_('malformed line: %s') % l)
linecontent = l[colonpos + 2:]
- for i, ch in enumerate(l[leftpadpos:colonpos - 1]):
+ for i, ch in enumerate(
+ pycompat.bytestr(l[leftpadpos:colonpos - 1])):
if ch == 'y':
contents[visiblefctxs[i][0]] += linecontent
# chunkstats is hard to calculate if anything changes, therefore
@@ -971,9 +972,10 @@
label='absorb.description')
fm.end()
if not opts.get('dry_run'):
- if not opts.get('apply_changes'):
- if ui.promptchoice("apply changes (yn)? $$ &Yes $$ &No", default=1):
- raise error.Abort(_('absorb cancelled\n'))
+ if (not opts.get('apply_changes') and
+ state.ctxaffected and
+ ui.promptchoice("apply changes (yn)? $$ &Yes $$ &No", default=1)):
+ raise error.Abort(_('absorb cancelled\n'))
state.apply()
if state.commit():
--- a/hgext/amend.py Wed Jan 09 20:00:35 2019 -0800
+++ b/hgext/amend.py Fri Jan 18 13:28:22 2019 -0500
@@ -36,6 +36,8 @@
('e', 'edit', None, _('invoke editor on commit messages')),
('i', 'interactive', None, _('use interactive mode')),
('n', 'note', '', _('store a note on the amend')),
+ ('D', 'currentdate', None,
+ _('record the current date as commit date')),
] + cmdutil.walkopts + cmdutil.commitopts + cmdutil.commitopts2,
_('[OPTION]... [FILE]...'),
helpcategory=command.CATEGORY_COMMITTING,
--- a/hgext/beautifygraph.py Wed Jan 09 20:00:35 2019 -0800
+++ b/hgext/beautifygraph.py Fri Jan 18 13:28:22 2019 -0500
@@ -31,8 +31,6 @@
def prettyedge(before, edge, after):
if edge == '~':
return '\xE2\x95\xA7' # U+2567 ╧
- if edge == 'X':
- return '\xE2\x95\xB3' # U+2573 ╳
if edge == '/':
return '\xE2\x95\xB1' # U+2571 ╱
if edge == '-':
--- a/hgext/blackbox.py Wed Jan 09 20:00:35 2019 -0800
+++ b/hgext/blackbox.py Fri Jan 18 13:28:22 2019 -0500
@@ -33,11 +33,15 @@
# rotate up to N log files when the current one gets too big
maxfiles = 3
+ [blackbox]
+ # Include nanoseconds in log entries with %f (see Python function
+ # datetime.datetime.strftime)
+ date-format = '%Y-%m-%d @ %H:%M:%S.%f'
+
"""
from __future__ import absolute_import
-import errno
import re
from mercurial.i18n import _
@@ -45,10 +49,8 @@
from mercurial import (
encoding,
- pycompat,
+ loggingutil,
registrar,
- ui as uimod,
- util,
)
from mercurial.utils import (
dateutil,
@@ -82,131 +84,69 @@
configitem('blackbox', 'track',
default=lambda: ['*'],
)
+configitem('blackbox', 'date-format',
+ default='%Y/%m/%d %H:%M:%S',
+)
-lastui = None
+_lastlogger = loggingutil.proxylogger()
-def _openlogfile(ui, vfs):
- def rotate(oldpath, newpath):
- try:
- vfs.unlink(newpath)
- except OSError as err:
- if err.errno != errno.ENOENT:
- ui.debug("warning: cannot remove '%s': %s\n" %
- (newpath, err.strerror))
- try:
- if newpath:
- vfs.rename(oldpath, newpath)
- except OSError as err:
- if err.errno != errno.ENOENT:
- ui.debug("warning: cannot rename '%s' to '%s': %s\n" %
- (newpath, oldpath, err.strerror))
+class blackboxlogger(object):
+ def __init__(self, ui, repo):
+ self._repo = repo
+ self._trackedevents = set(ui.configlist('blackbox', 'track'))
+ self._maxfiles = ui.configint('blackbox', 'maxfiles')
+ self._maxsize = ui.configbytes('blackbox', 'maxsize')
+ self._inlog = False
- maxsize = ui.configbytes('blackbox', 'maxsize')
- name = 'blackbox.log'
- if maxsize > 0:
+ def tracked(self, event):
+ return b'*' in self._trackedevents or event in self._trackedevents
+
+ def log(self, ui, event, msg, opts):
+ # self._log() -> ctx.dirty() may create new subrepo instance, which
+ # ui is derived from baseui. So the recursion guard in ui.log()
+ # doesn't work as it's local to the ui instance.
+ if self._inlog:
+ return
+ self._inlog = True
try:
- st = vfs.stat(name)
- except OSError:
- pass
- else:
- if st.st_size >= maxsize:
- path = vfs.join(name)
- maxfiles = ui.configint('blackbox', 'maxfiles')
- for i in pycompat.xrange(maxfiles - 1, 1, -1):
- rotate(oldpath='%s.%d' % (path, i - 1),
- newpath='%s.%d' % (path, i))
- rotate(oldpath=path,
- newpath=maxfiles > 0 and path + '.1')
- return vfs(name, 'a')
-
-def wrapui(ui):
- class blackboxui(ui.__class__):
- @property
- def _bbvfs(self):
- vfs = None
- repo = getattr(self, '_bbrepo', None)
- if repo:
- vfs = repo.vfs
- if not vfs.isdir('.'):
- vfs = None
- return vfs
-
- @util.propertycache
- def track(self):
- return self.configlist('blackbox', 'track')
-
- def debug(self, *msg, **opts):
- super(blackboxui, self).debug(*msg, **opts)
- if self.debugflag:
- self.log('debug', '%s', ''.join(msg))
-
- def log(self, event, *msg, **opts):
- global lastui
- super(blackboxui, self).log(event, *msg, **opts)
+ self._log(ui, event, msg, opts)
+ finally:
+ self._inlog = False
- if not '*' in self.track and not event in self.track:
- return
-
- if self._bbvfs:
- ui = self
- else:
- # certain ui instances exist outside the context of
- # a repo, so just default to the last blackbox that
- # was seen.
- ui = lastui
-
- if not ui:
- return
- vfs = ui._bbvfs
- if not vfs:
- return
+ def _log(self, ui, event, msg, opts):
+ default = ui.configdate('devel', 'default-date')
+ date = dateutil.datestr(default, ui.config('blackbox', 'date-format'))
+ user = procutil.getuser()
+ pid = '%d' % procutil.getpid()
+ rev = '(unknown)'
+ changed = ''
+ ctx = self._repo[None]
+ parents = ctx.parents()
+ rev = ('+'.join([hex(p.node()) for p in parents]))
+ if (ui.configbool('blackbox', 'dirty') and
+ ctx.dirty(missing=True, merge=False, branch=False)):
+ changed = '+'
+ if ui.configbool('blackbox', 'logsource'):
+ src = ' [%s]' % event
+ else:
+ src = ''
+ try:
+ fmt = '%s %s @%s%s (%s)%s> %s'
+ args = (date, user, rev, changed, pid, src, msg)
+ with loggingutil.openlogfile(
+ ui, self._repo.vfs, name='blackbox.log',
+ maxfiles=self._maxfiles, maxsize=self._maxsize) as fp:
+ fp.write(fmt % args)
+ except (IOError, OSError) as err:
+ # deactivate this to avoid failed logging again
+ self._trackedevents.clear()
+ ui.debug('warning: cannot write to blackbox.log: %s\n' %
+ encoding.strtolocal(err.strerror))
+ return
+ _lastlogger.logger = self
- repo = getattr(ui, '_bbrepo', None)
- if not lastui or repo:
- lastui = ui
- if getattr(ui, '_bbinlog', False):
- # recursion and failure guard
- return
- ui._bbinlog = True
- default = self.configdate('devel', 'default-date')
- date = dateutil.datestr(default, '%Y/%m/%d %H:%M:%S')
- user = procutil.getuser()
- pid = '%d' % procutil.getpid()
- formattedmsg = msg[0] % msg[1:]
- rev = '(unknown)'
- changed = ''
- if repo:
- ctx = repo[None]
- parents = ctx.parents()
- rev = ('+'.join([hex(p.node()) for p in parents]))
- if (ui.configbool('blackbox', 'dirty') and
- ctx.dirty(missing=True, merge=False, branch=False)):
- changed = '+'
- if ui.configbool('blackbox', 'logsource'):
- src = ' [%s]' % event
- else:
- src = ''
- try:
- fmt = '%s %s @%s%s (%s)%s> %s'
- args = (date, user, rev, changed, pid, src, formattedmsg)
- with _openlogfile(ui, vfs) as fp:
- fp.write(fmt % args)
- except (IOError, OSError) as err:
- self.debug('warning: cannot write to blackbox.log: %s\n' %
- encoding.strtolocal(err.strerror))
- # do not restore _bbinlog intentionally to avoid failed
- # logging again
- else:
- ui._bbinlog = False
-
- def setrepo(self, repo):
- self._bbrepo = repo
-
- ui.__class__ = blackboxui
- uimod.ui = blackboxui
-
-def uisetup(ui):
- wrapui(ui)
+def uipopulate(ui):
+ ui.setlogger(b'blackbox', _lastlogger)
def reposetup(ui, repo):
# During 'hg pull' a httppeer repo is created to represent the remote repo.
@@ -215,14 +155,15 @@
if not repo.local():
return
- if util.safehasattr(ui, 'setrepo'):
- ui.setrepo(repo)
+ # Since blackbox.log is stored in the repo directory, the logger should be
+ # instantiated per repository.
+ logger = blackboxlogger(ui, repo)
+ ui.setlogger(b'blackbox', logger)
- # Set lastui even if ui.log is not called. This gives blackbox a
- # fallback place to log.
- global lastui
- if lastui is None:
- lastui = ui
+ # Set _lastlogger even if ui.log is not called. This gives blackbox a
+ # fallback place to log
+ if _lastlogger.logger is None:
+ _lastlogger.logger = logger
repo._wlockfreeprefix.add('blackbox.log')
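
The rewrite replaces ui-class monkeypatching with the logger protocol: any
object providing tracked(event) and log(ui, event, msg, opts) can be
registered through ui.setlogger(). A minimal conforming logger, invented here
for illustration (blackboxlogger above is the real one)::

    class printlogger(object):
        def __init__(self, events):
            self._events = set(events)

        def tracked(self, event):
            # same filtering rule as blackboxlogger.tracked() above
            return b'*' in self._events or event in self._events

        def log(self, ui, event, msg, opts):
            print('%s> %s' % (event.decode(), msg.decode()))

    logger = printlogger([b'*'])
    if logger.tracked(b'command'):
        logger.log(None, b'command', b'log -l 5', {})
    # with a repository ui: ui.setlogger(b'blackbox', logger)
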
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/hgext/bookflow.py Fri Jan 18 13:28:22 2019 -0500
@@ -0,0 +1,104 @@
+"""implements bookmark-based branching (EXPERIMENTAL)
+
+ - Disables creation of new branches (config: enable-branches=False).
+ - Requires an active bookmark on commit (config: require-bookmark=True).
+ - Doesn't move the active bookmark on update, only on commit.
+ - Requires '--rev' for moving an existing bookmark.
+ - Protects special bookmarks (config: protect=@).
+
+ flow related commands
+
+ :hg book NAME: create a new bookmark
+ :hg book NAME -r REV: move bookmark to revision (fast-forward)
+ :hg up|co NAME: switch to bookmark
+ :hg push -B .: push active bookmark
+"""
+from __future__ import absolute_import
+
+from mercurial.i18n import _
+from mercurial import (
+ bookmarks,
+ commands,
+ error,
+ extensions,
+ registrar,
+)
+
+MY_NAME = 'bookflow'
+
+configtable = {}
+configitem = registrar.configitem(configtable)
+
+configitem(MY_NAME, 'protect', ['@'])
+configitem(MY_NAME, 'require-bookmark', True)
+configitem(MY_NAME, 'enable-branches', False)
+
+cmdtable = {}
+command = registrar.command(cmdtable)
+
+def commit_hook(ui, repo, **kwargs):
+ active = repo._bookmarks.active
+ if active:
+ if active in ui.configlist(MY_NAME, 'protect'):
+ raise error.Abort(
+ _('cannot commit, bookmark %s is protected') % active)
+ if not cwd_at_bookmark(repo, active):
+ raise error.Abort(
+ _('cannot commit, working directory out of sync with active bookmark'),
+ hint=_("run 'hg up %s'") % active)
+ elif ui.configbool(MY_NAME, 'require-bookmark', True):
+ raise error.Abort(_('cannot commit without an active bookmark'))
+ return 0
+
+def bookmarks_update(orig, repo, parents, node):
+ if len(parents) == 2:
+ # called during commit
+ return orig(repo, parents, node)
+ else:
+ # called during update
+ return False
+
+def bookmarks_addbookmarks(
+ orig, repo, tr, names, rev=None, force=False, inactive=False):
+ if not rev:
+ marks = repo._bookmarks
+ for name in names:
+ if name in marks:
+ raise error.Abort(_(
+ "bookmark %s already exists, to move use the --rev option"
+ ) % name)
+ return orig(repo, tr, names, rev, force, inactive)
+
+def commands_commit(orig, ui, repo, *args, **opts):
+ commit_hook(ui, repo)
+ return orig(ui, repo, *args, **opts)
+
+def commands_pull(orig, ui, repo, *args, **opts):
+ rc = orig(ui, repo, *args, **opts)
+ active = repo._bookmarks.active
+ if active and not cwd_at_bookmark(repo, active):
+ ui.warn(_(
+ "working directory out of sync with active bookmark, run "
+ "'hg up %s'\n"
+ ) % active)
+ return rc
+
+def commands_branch(orig, ui, repo, label=None, **opts):
+ if label and not opts.get(r'clean') and not opts.get(r'rev'):
+ raise error.Abort(
+ _("creating named branches is disabled and you should use bookmarks"),
+ hint="see 'hg help bookflow'")
+ return orig(ui, repo, label, **opts)
+
+def cwd_at_bookmark(repo, mark):
+ mark_id = repo._bookmarks[mark]
+ cur_id = repo.lookup('.')
+ return cur_id == mark_id
+
+def uisetup(ui):
+ extensions.wrapfunction(bookmarks, 'update', bookmarks_update)
+ extensions.wrapfunction(bookmarks, 'addbookmarks', bookmarks_addbookmarks)
+ extensions.wrapcommand(commands.table, 'commit', commands_commit)
+ extensions.wrapcommand(commands.table, 'pull', commands_pull)
+ if not ui.configbool(MY_NAME, 'enable-branches'):
+ extensions.wrapcommand(commands.table, 'branch', commands_branch)
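
A hedged example of turning the extension on, using the config names
registered above (the protected bookmark list is a placeholder)::

    [extensions]
    bookflow =

    [bookflow]
    protect = @
    require-bookmark = True
    enable-branches = False
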
--- a/hgext/convert/filemap.py Wed Jan 09 20:00:35 2019 -0800
+++ b/hgext/convert/filemap.py Fri Jan 18 13:28:22 2019 -0500
@@ -270,6 +270,9 @@
self.children[p] = self.children.get(p, 0) + 1
return c
+ def numcommits(self):
+ return self.base.numcommits()
+
def _cachedcommit(self, rev):
if rev in self.commits:
return self.commits[rev]
@@ -302,7 +305,18 @@
for f in files:
if self.filemapper(f):
return True
- return False
+
+ # The include directive is documented to include nothing else (though
+ # valid branch closes are included).
+ if self.filemapper.include:
+ return False
+
+ # Allow empty commits in the source revision through. The getchanges()
+ # method doesn't even bother calling this if it determines that the
+ # close marker is significant (i.e. all of the branch ancestors weren't
+ # eliminated). Therefore if there *is* a close marker, getchanges()
+ # doesn't consider it significant, and this revision should be dropped.
+ return not files and 'close' not in self.commits[rev].extra
def mark_not_wanted(self, rev, p):
# Mark rev as not interesting and update data structures.
--- a/hgext/convert/hg.py Wed Jan 09 20:00:35 2019 -0800
+++ b/hgext/convert/hg.py Fri Jan 18 13:28:22 2019 -0500
@@ -597,6 +597,9 @@
saverev=self.saverev,
phase=ctx.phase())
+ def numcommits(self):
+ return len(self.repo)
+
def gettags(self):
# This will get written to .hgtags, filter non global tags out.
tags = [t for t in self.repo.tagslist()
--- a/hgext/extdiff.py Wed Jan 09 20:00:35 2019 -0800
+++ b/hgext/extdiff.py Fri Jan 18 13:28:22 2019 -0500
@@ -139,7 +139,7 @@
repo.ui.setconfig("ui", "archivemeta", False)
archival.archive(repo, base, node, 'files',
- matchfn=scmutil.matchfiles(repo, files),
+ match=scmutil.matchfiles(repo, files),
subrepos=listsubrepos)
for fn in sorted(files):
@@ -152,6 +152,29 @@
fnsandstat.append((dest, repo.wjoin(fn), os.lstat(dest)))
return dirname, fnsandstat
+def formatcmdline(cmdline, repo_root, do3way,
+ parent1, plabel1, parent2, plabel2, child, clabel):
+ # Function to quote file/dir names in the argument string.
+ # When not operating in 3-way mode, an empty string is
+ # returned for parent2
+ replace = {'parent': parent1, 'parent1': parent1, 'parent2': parent2,
+ 'plabel1': plabel1, 'plabel2': plabel2,
+ 'child': child, 'clabel': clabel,
+ 'root': repo_root}
+ def quote(match):
+ pre = match.group(2)
+ key = match.group(3)
+ if not do3way and key == 'parent2':
+ return pre
+ return pre + procutil.shellquote(replace[key])
+
+ # Match parent2 first, so 'parent1?' will match both parent1 and parent
+ regex = (br'''(['"]?)([^\s'"$]*)'''
+ br'\$(parent2|parent1?|child|plabel1|plabel2|clabel|root)\1')
+ if not do3way and not re.search(regex, cmdline):
+ cmdline += ' $parent1 $child'
+ return re.sub(regex, quote, cmdline)
+
def dodiff(ui, repo, cmdline, pats, opts):
'''Do the actual diff:
@@ -281,28 +304,14 @@
label1b = None
fnsandstat = []
- # Function to quote file/dir names in the argument string.
- # When not operating in 3-way mode, an empty string is
- # returned for parent2
- replace = {'parent': dir1a, 'parent1': dir1a, 'parent2': dir1b,
- 'plabel1': label1a, 'plabel2': label1b,
- 'clabel': label2, 'child': dir2,
- 'root': repo.root}
- def quote(match):
- pre = match.group(2)
- key = match.group(3)
- if not do3way and key == 'parent2':
- return pre
- return pre + procutil.shellquote(replace[key])
-
- # Match parent2 first, so 'parent1?' will match both parent1 and parent
- regex = (br'''(['"]?)([^\s'"$]*)'''
- br'\$(parent2|parent1?|child|plabel1|plabel2|clabel|root)\1')
- if not do3way and not re.search(regex, cmdline):
- cmdline += ' $parent1 $child'
- cmdline = re.sub(regex, quote, cmdline)
-
- ui.debug('running %r in %s\n' % (pycompat.bytestr(cmdline), tmproot))
+ # Run the external tool on the 2 temp directories or the patches
+ cmdline = formatcmdline(
+ cmdline, repo.root, do3way=do3way,
+ parent1=dir1a, plabel1=label1a,
+ parent2=dir1b, plabel2=label1b,
+ child=dir2, clabel=label2)
+ ui.debug('running %r in %s\n' % (pycompat.bytestr(cmdline),
+ tmproot))
ui.system(cmdline, cwd=tmproot, blockedtag='extdiff')
for copy_fn, working_fn, st in fnsandstat:
@@ -383,8 +392,9 @@
def __init__(self, path, cmdline):
# We can't pass non-ASCII through docstrings (and path is
- # in an unknown encoding anyway)
- docpath = stringutil.escapestr(path)
+ # in an unknown encoding anyway), but avoid double separators on
+ # Windows
+ docpath = stringutil.escapestr(path).replace(b'\\\\', b'\\')
self.__doc__ %= {r'path': pycompat.sysstr(stringutil.uirepr(docpath))}
self._cmdline = cmdline
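
Hoisting the template expansion into formatcmdline() makes it callable
without temporary directories on disk. A sketch of an invocation, assuming a
Mercurial checkout on sys.path; the paths and labels are made up::

    from hgext.extdiff import formatcmdline

    cmd = formatcmdline(
        b'kdiff3 $parent $child', b'/repo', do3way=False,
        parent1=b'/tmp/a', plabel1=b'a@1234',
        parent2=b'', plabel2=None,
        child=b'/tmp/b', clabel=b'b@5678')
    print(cmd)  # POSIX quoting: kdiff3 '/tmp/a' '/tmp/b'
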
--- a/hgext/fastannotate/commands.py Wed Jan 09 20:00:35 2019 -0800
+++ b/hgext/fastannotate/commands.py Fri Jan 18 13:28:22 2019 -0500
@@ -261,8 +261,9 @@
repo.prefetchfastannotate(paths)
else:
# server, or full repo
+ progress = ui.makeprogress(_('building'), total=len(paths))
for i, path in enumerate(paths):
- ui.progress(_('building'), i, total=len(paths))
+ progress.update(i)
with facontext.annotatecontext(repo, path) as actx:
try:
if actx.isuptodate(rev):
@@ -281,5 +282,4 @@
# cache for other files.
ui.warn(_('fastannotate: %s: failed to '
'build cache: %r\n') % (path, ex))
- # clear the progress bar
- ui.write()
+ progress.complete()
--- a/hgext/fastannotate/context.py Wed Jan 09 20:00:35 2019 -0800
+++ b/hgext/fastannotate/context.py Fri Jan 18 13:28:22 2019 -0500
@@ -138,7 +138,7 @@
(k, getattr(diffopts, k))
for k in mdiff.diffopts.defaults
))
- return hashlib.sha1(diffoptstr).hexdigest()[:6]
+ return node.hex(hashlib.sha1(diffoptstr).digest())[:6]
_defaultdiffopthash = hashdiffopts(mdiff.defaultopts)
@@ -156,6 +156,7 @@
}
def __init__(self, **opts):
+ opts = pycompat.byteskwargs(opts)
for k, v in self.defaults.iteritems():
setattr(self, k, opts.get(k, v))
@@ -397,7 +398,8 @@
# 3rd DFS does the actual annotate
visit = initvisit[:]
- progress = 0
+ progress = self.ui.makeprogress(_('building cache'),
+ total=len(newmainbranch))
while visit:
f = visit[-1]
if f in hist:
@@ -436,10 +438,7 @@
del pcache[f]
if ismainbranch: # need to write to linelog
- if not self.ui.quiet:
- progress += 1
- self.ui.progress(_('building cache'), progress,
- total=len(newmainbranch))
+ progress.increment()
bannotated = None
if len(pl) == 2 and self.opts.followmerge: # merge
bannotated = curr[0]
@@ -449,8 +448,7 @@
elif showpath: # not append linelog, but we need to record path
self._node2path[f.node()] = f.path()
- if progress: # clean progress bar
- self.ui.write()
+ progress.complete()
result = [
((self.revmap.rev2hsh(fr) if isinstance(fr, int) else fr.node()), l)
@@ -604,7 +602,7 @@
the best case, the user provides a node and we don't need to read the
filelog or construct any filecontext.
"""
- if isinstance(f, str):
+ if isinstance(f, bytes):
hsh = f
else:
hsh = f.node()
@@ -627,7 +625,7 @@
if showpath:
result = self._addpathtoresult(result)
if showlines:
- if isinstance(f, str): # f: node or fctx
+ if isinstance(f, bytes): # f: node or fctx
llrev = self.revmap.hsh2rev(f)
fctx = self._resolvefctx(f, self.revmap.rev2path(llrev))
else:
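
The hexdigest() to node.hex(...digest()) switch above keeps the cache key a
byte string: on Python 3 hexdigest() returns str, while node.hex() returns
bytes. The same distinction with only the standard library::

    import binascii
    import hashlib

    digest = hashlib.sha1(b'diffopts').digest()
    print(type(hashlib.sha1(b'diffopts').hexdigest()))  # <class 'str'> on py3
    print(binascii.hexlify(digest)[:6])  # bytes, like node.hex(digest)[:6]
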
--- a/hgext/fastannotate/formatter.py Wed Jan 09 20:00:35 2019 -0800
+++ b/hgext/fastannotate/formatter.py Fri Jan 18 13:28:22 2019 -0500
@@ -39,23 +39,26 @@
orig = hexfunc
hexfunc = lambda x: None if x is None else orig(x)
wnode = hexfunc(repo[None].p1().node()) + '+'
- wrev = str(repo[None].p1().rev())
+ wrev = '%d' % repo[None].p1().rev()
wrevpad = ''
if not opts.get('changeset'): # only show + if changeset is hidden
wrev += '+'
wrevpad = ' '
- revenc = lambda x: wrev if x is None else str(x) + wrevpad
- csetenc = lambda x: wnode if x is None else str(x) + ' '
+ revenc = lambda x: wrev if x is None else ('%d' % x) + wrevpad
+ def csetenc(x):
+ if x is None:
+ return wnode
+ return pycompat.bytestr(x) + ' '
else:
- revenc = csetenc = str
+ revenc = csetenc = pycompat.bytestr
# opt name, separator, raw value (for json/plain), encoder (for plain)
opmap = [('user', ' ', lambda x: getctx(x).user(), ui.shortuser),
('number', ' ', lambda x: getctx(x).rev(), revenc),
('changeset', ' ', lambda x: hexfunc(x[0]), csetenc),
('date', ' ', lambda x: getctx(x).date(), datefunc),
- ('file', ' ', lambda x: x[2], str),
- ('line_number', ':', lambda x: x[1] + 1, str)]
+ ('file', ' ', lambda x: x[2], pycompat.bytestr),
+ ('line_number', ':', lambda x: x[1] + 1, pycompat.bytestr)]
fieldnamemap = {'number': 'rev', 'changeset': 'node'}
funcmap = [(get, sep, fieldnamemap.get(op, op), enc)
for op, sep, get, enc in opmap
@@ -100,7 +103,7 @@
result += ': ' + self.ui.label('-' + lines[i],
'diff.deleted')
- if result[-1] != '\n':
+ if result[-1:] != '\n':
result += '\n'
self.ui.write(result)
@@ -125,7 +128,7 @@
if annotatedresult:
self._writecomma()
- pieces = [(name, map(f, annotatedresult))
+ pieces = [(name, pycompat.maplist(f, annotatedresult))
for f, sep, name, enc in self.funcmap]
if lines is not None:
pieces.append(('line', lines))
--- a/hgext/fastannotate/protocol.py Wed Jan 09 20:00:35 2019 -0800
+++ b/hgext/fastannotate/protocol.py Fri Jan 18 13:28:22 2019 -0500
@@ -98,10 +98,10 @@
state = 0 # 0: vfspath, 1: size
vfspath = size = ''
while i < l:
- ch = payload[i]
+ ch = payload[i:i + 1]
if ch == '\0':
if state == 1:
- result[vfspath] = buffer(payload, i + 1, int(size))
+ result[vfspath] = payload[i + 1:i + 1 + int(size)]
i += int(size)
state = 0
vfspath = size = ''
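
Replacing payload[i] with payload[i:i + 1] is the standard Python 3 bytes
fix: indexing a bytes object yields an int, while slicing yields bytes that
can still be compared against one-byte literals::

    payload = b'path\x001234\x00'
    print(payload[0])     # Python 3: 112, an int -- never equal to b'\0'
    print(payload[0:1])   # b'p', comparable against b'\0'
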
--- a/hgext/fastannotate/revmap.py Wed Jan 09 20:00:35 2019 -0800
+++ b/hgext/fastannotate/revmap.py Fri Jan 18 13:28:22 2019 -0500
@@ -207,7 +207,7 @@
path = self.rev2path(rev)
if path is None:
raise error.CorruptedFileError('cannot find path for %s' % rev)
- f.write(path + '\0')
+ f.write(path + b'\0')
f.write(hsh)
@staticmethod
--- a/hgext/fix.py Wed Jan 09 20:00:35 2019 -0800
+++ b/hgext/fix.py Fri Jan 18 13:28:22 2019 -0500
@@ -15,13 +15,15 @@
[fix]
clang-format:command=clang-format --assume-filename={rootpath}
clang-format:linerange=--lines={first}:{last}
- clang-format:fileset=set:**.cpp or **.hpp
+ clang-format:pattern=set:**.cpp or **.hpp
The :command suboption forms the first part of the shell command that will be
used to fix a file. The content of the file is passed on standard input, and the
-fixed file content is expected on standard output. If there is any output on
-standard error, the file will not be affected. Some values may be substituted
-into the command::
+fixed file content is expected on standard output. Any output on standard error
+will be displayed as a warning. If the exit status is not zero, the file will
+not be affected. A placeholder warning is displayed if there is a non-zero exit
+status but no standard error output. Some values may be substituted into the
+command::
{rootpath} The path of the file being fixed, relative to the repo root
{basename} The name of the file being fixed, without the directory path
@@ -34,16 +36,42 @@
{first} The 1-based line number of the first line in the modified range
{last} The 1-based line number of the last line in the modified range
-The :fileset suboption determines which files will be passed through each
-configured tool. See :hg:`help fileset` for possible values. If there are file
-arguments to :hg:`fix`, the intersection of these filesets is used.
+The :pattern suboption determines which files will be passed through each
+configured tool. See :hg:`help patterns` for possible values. If there are file
+arguments to :hg:`fix`, the intersection of these patterns is used.
There is also a configurable limit for the maximum size of file that will be
processed by :hg:`fix`::
[fix]
- maxfilesize=2MB
+ maxfilesize = 2MB
+
+Normally, execution of configured tools will continue after a failure (indicated
+by a non-zero exit status). It can also be configured to abort after the first
+such failure, so that no files will be affected if any tool fails. This abort
+will also cause :hg:`fix` to exit with a non-zero status::
+
+ [fix]
+ failure = abort
+When multiple tools are configured to affect a file, they execute in an order
+defined by the :priority suboption. The priority suboption has a default value
+of zero for each tool. Tools are executed in order of descending priority. The
+execution order of tools with equal priority is unspecified. For example, you
+could use the 'sort' and 'head' utilities to keep only the 10 smallest numbers
+in a text file by ensuring that 'sort' runs before 'head'::
+
+ [fix]
+ sort:command = sort -n
+ head:command = head -n 10
+ sort:pattern = numbers.txt
+ head:pattern = numbers.txt
+ sort:priority = 2
+ head:priority = 1
+
+To account for changes made by each tool, the line numbers used for incremental
+formatting are recomputed before executing the next tool. So, each tool may see
+different values for the arguments added by the :linerange suboption.
"""
from __future__ import absolute_import
@@ -90,16 +118,36 @@
configitem = registrar.configitem(configtable)
# Register the suboptions allowed for each configured fixer.
-FIXER_ATTRS = ('command', 'linerange', 'fileset')
+FIXER_ATTRS = {
+ 'command': None,
+ 'linerange': None,
+ 'fileset': None,
+ 'pattern': None,
+ 'priority': 0,
+}
-for key in FIXER_ATTRS:
- configitem('fix', '.*(:%s)?' % key, default=None, generic=True)
+for key, default in FIXER_ATTRS.items():
+ configitem('fix', '.*(:%s)?' % key, default=default, generic=True)
# A good default size allows most source code files to be fixed, but avoids
# letting fixer tools choke on huge inputs, which could be surprising to the
# user.
configitem('fix', 'maxfilesize', default='2MB')
+# Allow fix commands to exit non-zero if an executed fixer tool exits non-zero.
+# This helps users do shell scripts that stop when a fixer tool signals a
+# problem.
+configitem('fix', 'failure', default='continue')
+
+def checktoolfailureaction(ui, message, hint=None):
+ """Abort with 'message' if fix.failure=abort"""
+ action = ui.config('fix', 'failure')
+ if action not in ('continue', 'abort'):
+ raise error.Abort(_('unknown fix.failure action: %s') % (action,),
+ hint=_('use "continue" or "abort"'))
+ if action == 'abort':
+ raise error.Abort(message, hint=hint)
+
allopt = ('', 'all', False, _('fix all non-public non-obsolete revisions'))
baseopt = ('', 'base', [], _('revisions to diff against (overrides automatic '
'selection, and applies to every revision being '
@@ -465,9 +513,14 @@
showstderr(ui, fixctx.rev(), fixername, stderr)
if proc.returncode == 0:
newdata = newerdata
- elif not stderr:
- showstderr(ui, fixctx.rev(), fixername,
- _('exited with status %d\n') % (proc.returncode,))
+ else:
+ if not stderr:
+ message = _('exited with status %d\n') % (proc.returncode,)
+ showstderr(ui, fixctx.rev(), fixername, message)
+ checktoolfailureaction(
+ ui, _('no fixes will be applied'),
+ hint=_('use --config fix.failure=continue to apply any '
+ 'successful fixes anyway'))
return newdata
def showstderr(ui, rev, fixername, stderr):
@@ -533,6 +586,17 @@
newp1node = replacements.get(p1ctx.node(), p1ctx.node())
newp2node = replacements.get(p2ctx.node(), p2ctx.node())
+ # We don't want to create a revision that has no changes from the original,
+ # but we should if the original revision's parent has been replaced.
+ # Otherwise, we would produce an orphan that needs no actual human
+ # intervention to evolve. We can't rely on commit() to avoid creating the
+ # un-needed revision because the extra field added below produces a new hash
+ # regardless of file content changes.
+ if (not filedata and
+ p1ctx.node() not in replacements and
+ p2ctx.node() not in replacements):
+ return
+
def filectxfn(repo, memctx, path):
if path not in ctx:
return None
@@ -549,6 +613,9 @@
isexec=fctx.isexec(),
copied=copied)
+ extra = ctx.extra().copy()
+ extra['fix_source'] = ctx.hex()
+
memctx = context.memctx(
repo,
parents=(newp1node, newp2node),
@@ -557,7 +624,7 @@
filectxfn=filectxfn,
user=ctx.user(),
date=ctx.date(),
- extra=ctx.extra(),
+ extra=extra,
branch=ctx.branch(),
editor=None)
sucnode = memctx.commit()
@@ -573,14 +640,21 @@
Each value is a Fixer object with methods that implement the behavior of the
fixer's config suboptions. Does not validate the config values.
"""
- result = {}
+ fixers = {}
for name in fixernames(ui):
- result[name] = Fixer()
+ fixers[name] = Fixer()
attrs = ui.configsuboptions('fix', name)[1]
- for key in FIXER_ATTRS:
- setattr(result[name], pycompat.sysstr('_' + key),
- attrs.get(key, ''))
- return result
+ if 'fileset' in attrs and 'pattern' not in attrs:
+ ui.warn(_('the fix.tool:fileset config name is deprecated; '
+ 'please rename it to fix.tool:pattern\n'))
+ attrs['pattern'] = attrs['fileset']
+ for key, default in FIXER_ATTRS.items():
+ setattr(fixers[name], pycompat.sysstr('_' + key),
+ attrs.get(key, default))
+ fixers[name]._priority = int(fixers[name]._priority)
+ return collections.OrderedDict(
+ sorted(fixers.items(), key=lambda item: item[1]._priority,
+ reverse=True))
def fixernames(ui):
"""Returns the names of [fix] config options that have suboptions"""
@@ -595,7 +669,7 @@
def affects(self, opts, fixctx, path):
"""Should this fixer run on the file at the given path and context?"""
- return scmutil.match(fixctx, [self._fileset], opts)(path)
+ return scmutil.match(fixctx, [self._pattern], opts)(path)
def command(self, ui, path, rangesfn):
"""A shell command to use to invoke this fixer on the given file/lines
--- a/hgext/highlight/__init__.py Wed Jan 09 20:00:35 2019 -0800
+++ b/hgext/highlight/__init__.py Fri Jan 18 13:28:22 2019 -0500
@@ -87,7 +87,7 @@
]))
return web.res.sendresponse()
-def extsetup():
+def extsetup(ui):
# monkeypatch in the new version
extensions.wrapfunction(webcommands, '_filerevision',
filerevision_highlight)
--- a/hgext/histedit.py Wed Jan 09 20:00:35 2019 -0800
+++ b/hgext/histedit.py Fri Jan 18 13:28:22 2019 -0500
@@ -183,7 +183,17 @@
from __future__ import absolute_import
+# chistedit dependencies that are not available everywhere
+try:
+ import fcntl
+ import termios
+except ImportError:
+ fcntl = None
+ termios = None
+
+import functools
import os
+import struct
from mercurial.i18n import _
from mercurial import (
@@ -197,7 +207,7 @@
exchange,
extensions,
hg,
- lock,
+ logcmdutil,
merge as mergemod,
mergeutil,
node,
@@ -210,11 +220,11 @@
util,
)
from mercurial.utils import (
+ dateutil,
stringutil,
)
pickle = util.pickle
-release = lock.release
cmdtable = {}
command = registrar.command(cmdtable)
@@ -235,6 +245,9 @@
configitem('histedit', 'singletransaction',
default=False,
)
+configitem('ui', 'interface.histedit',
+ default=None,
+)
# Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for
# extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
@@ -294,21 +307,17 @@
return ''.join(['# %s\n' % l if l else '#\n' for l in lines])
class histeditstate(object):
- def __init__(self, repo, parentctxnode=None, actions=None, keep=None,
- topmost=None, replacements=None, lock=None, wlock=None):
+ def __init__(self, repo):
self.repo = repo
- self.actions = actions
- self.keep = keep
- self.topmost = topmost
- self.parentctxnode = parentctxnode
- self.lock = lock
- self.wlock = wlock
+ self.actions = None
+ self.keep = None
+ self.topmost = None
+ self.parentctxnode = None
+ self.lock = None
+ self.wlock = None
self.backupfile = None
self.stateobj = statemod.cmdstate(repo, 'histedit-state')
- if replacements is None:
- self.replacements = []
- else:
- self.replacements = replacements
+ self.replacements = []
def read(self):
"""Load histedit state from disk and set fields appropriately."""
@@ -519,9 +528,12 @@
editor = self.commiteditor()
commit = commitfuncfor(repo, rulectx)
-
+ if repo.ui.configbool('rewrite', 'update-timestamp'):
+ date = dateutil.makedate()
+ else:
+ date = rulectx.date()
commit(text=rulectx.description(), user=rulectx.user(),
- date=rulectx.date(), extra=rulectx.extra(), editor=editor)
+ date=date, extra=rulectx.extra(), editor=editor)
def commiteditor(self):
"""The editor to be used to edit the commit message."""
@@ -802,6 +814,10 @@
commitopts['date'] = ctx.date()
else:
commitopts['date'] = max(ctx.date(), oldctx.date())
+ # if date is to be updated to current
+ if ui.configbool('rewrite', 'update-timestamp'):
+ commitopts['date'] = dateutil.makedate()
+
extra = ctx.extra().copy()
# histedit_source
# note: ctx is likely a temporary commit but that the best we can do
@@ -915,6 +931,562 @@
raise error.Abort(msg, hint=hint)
return repo[roots[0]].node()
+# Curses Support
+try:
+ import curses
+except ImportError:
+ curses = None
+
+KEY_LIST = ['pick', 'edit', 'fold', 'drop', 'mess', 'roll']
+ACTION_LABELS = {
+ 'fold': '^fold',
+ 'roll': '^roll',
+}
+
+COLOR_HELP, COLOR_SELECTED, COLOR_OK, COLOR_WARN = 1, 2, 3, 4
+
+E_QUIT, E_HISTEDIT = 1, 2
+E_PAGEDOWN, E_PAGEUP, E_LINEUP, E_LINEDOWN, E_RESIZE = 3, 4, 5, 6, 7
+MODE_INIT, MODE_PATCH, MODE_RULES, MODE_HELP = 0, 1, 2, 3
+
+KEYTABLE = {
+ 'global': {
+ 'h': 'next-action',
+ 'KEY_RIGHT': 'next-action',
+ 'l': 'prev-action',
+ 'KEY_LEFT': 'prev-action',
+ 'q': 'quit',
+ 'c': 'histedit',
+ 'C': 'histedit',
+ 'v': 'showpatch',
+ '?': 'help',
+ },
+ MODE_RULES: {
+ 'd': 'action-drop',
+ 'e': 'action-edit',
+ 'f': 'action-fold',
+ 'm': 'action-mess',
+ 'p': 'action-pick',
+ 'r': 'action-roll',
+ ' ': 'select',
+ 'j': 'down',
+ 'k': 'up',
+ 'KEY_DOWN': 'down',
+ 'KEY_UP': 'up',
+ 'J': 'move-down',
+ 'K': 'move-up',
+ 'KEY_NPAGE': 'move-down',
+ 'KEY_PPAGE': 'move-up',
+ '0': 'goto', # Used for 0..9
+ },
+ MODE_PATCH: {
+ ' ': 'page-down',
+ 'KEY_NPAGE': 'page-down',
+ 'KEY_PPAGE': 'page-up',
+ 'j': 'line-down',
+ 'k': 'line-up',
+ 'KEY_DOWN': 'line-down',
+ 'KEY_UP': 'line-up',
+ 'J': 'down',
+ 'K': 'up',
+ },
+ MODE_HELP: {
+ },
+}
+
+def screen_size():
+ return struct.unpack('hh', fcntl.ioctl(1, termios.TIOCGWINSZ, '    '))
+
+class histeditrule(object):
+ def __init__(self, ctx, pos, action='pick'):
+ self.ctx = ctx
+ self.action = action
+ self.origpos = pos
+ self.pos = pos
+ self.conflicts = []
+
+ def __str__(self):
+ # Some actions ('fold' and 'roll') combine a patch with a previous one.
+ # Add a marker showing which patch they apply to, and also omit the
+ # description for 'roll' (since it will get discarded). Example display:
+ #
+ # #10 pick 316392:06a16c25c053 add option to skip tests
+ # #11 ^roll 316393:71313c964cc5
+ # #12 pick 316394:ab31f3973b0d include mfbt for mozilla-config.h
+ # #13 ^fold 316395:14ce5803f4c3 fix warnings
+ #
+ # The carets point to the changeset being folded into ("roll this
+ # changeset into the changeset above").
+ action = ACTION_LABELS.get(self.action, self.action)
+ h = self.ctx.hex()[0:12]
+ r = self.ctx.rev()
+ desc = self.ctx.description().splitlines()[0].strip()
+ if self.action == 'roll':
+ desc = ''
+ return "#{0:<2} {1:<6} {2}:{3} {4}".format(
+ self.origpos, action, r, h, desc)
+
+ def checkconflicts(self, other):
+ if other.pos > self.pos and other.origpos <= self.origpos:
+ if set(other.ctx.files()) & set(self.ctx.files()) != set():
+ self.conflicts.append(other)
+ return self.conflicts
+
+ if other in self.conflicts:
+ self.conflicts.remove(other)
+ return self.conflicts
+
+# ============ EVENTS ===============
+def movecursor(state, oldpos, newpos):
+ '''Change the rule/changeset that the cursor is pointing to, regardless of
+ current mode (you can switch between patches from the view patch window).'''
+ state['pos'] = newpos
+
+ mode, _ = state['mode']
+ if mode == MODE_RULES:
+ # Scroll through the list by updating the view for MODE_RULES, so that
+ # even if we are not currently viewing the rules, switching back will
+ # result in the cursor's rule being visible.
+ modestate = state['modes'][MODE_RULES]
+ if newpos < modestate['line_offset']:
+ modestate['line_offset'] = newpos
+ elif newpos > modestate['line_offset'] + state['page_height'] - 1:
+ modestate['line_offset'] = newpos - state['page_height'] + 1
+
+ # Reset the patch view region to the top of the new patch.
+ state['modes'][MODE_PATCH]['line_offset'] = 0
+
+def changemode(state, mode):
+ curmode, _ = state['mode']
+ state['mode'] = (mode, curmode)
+
+def makeselection(state, pos):
+ state['selected'] = pos
+
+def swap(state, oldpos, newpos):
+ """Swap two positions and calculate necessary conflicts in
+ O(|newpos-oldpos|) time"""
+
+ rules = state['rules']
+ assert 0 <= oldpos < len(rules) and 0 <= newpos < len(rules)
+
+ rules[oldpos], rules[newpos] = rules[newpos], rules[oldpos]
+
+ # TODO: swap should not know about histeditrule's internals
+ rules[newpos].pos = newpos
+ rules[oldpos].pos = oldpos
+
+ start = min(oldpos, newpos)
+ end = max(oldpos, newpos)
+ for r in pycompat.xrange(start, end + 1):
+ rules[newpos].checkconflicts(rules[r])
+ rules[oldpos].checkconflicts(rules[r])
+
+ if state['selected']:
+ makeselection(state, newpos)
+
+def changeaction(state, pos, action):
+ """Change the action state on the given position to the new action"""
+ rules = state['rules']
+ assert 0 <= pos < len(rules)
+ rules[pos].action = action
+
+def cycleaction(state, pos, next=False):
+ """Changes the action state the next or the previous action from
+ the action list"""
+ rules = state['rules']
+ assert 0 <= pos < len(rules)
+ current = rules[pos].action
+
+ assert current in KEY_LIST
+
+ index = KEY_LIST.index(current)
+ if next:
+ index += 1
+ else:
+ index -= 1
+ changeaction(state, pos, KEY_LIST[index % len(KEY_LIST)])
+
+def changeview(state, delta, unit):
+ '''Change the region of whatever is being viewed (a patch or the list of
+ changesets). 'delta' is an amount (+/- 1) and 'unit' is 'page' or 'line'.'''
+ mode, _ = state['mode']
+ if mode != MODE_PATCH:
+ return
+ mode_state = state['modes'][mode]
+ num_lines = len(patchcontents(state))
+ page_height = state['page_height']
+ unit = page_height if unit == 'page' else 1
+ num_pages = 1 + (num_lines - 1) // page_height
+ max_offset = (num_pages - 1) * page_height
+ newline = mode_state['line_offset'] + delta * unit
+ mode_state['line_offset'] = max(0, min(max_offset, newline))
+
+def event(state, ch):
+ """Change state based on the current character input
+
+ This takes the current state and based on the current character input from
+ the user we change the state.
+ """
+ selected = state['selected']
+ oldpos = state['pos']
+ rules = state['rules']
+
+ if ch in (curses.KEY_RESIZE, "KEY_RESIZE"):
+ return E_RESIZE
+
+ lookup_ch = ch
+ if '0' <= ch <= '9':
+ lookup_ch = '0'
+
+ curmode, prevmode = state['mode']
+ action = KEYTABLE[curmode].get(lookup_ch, KEYTABLE['global'].get(lookup_ch))
+ if action is None:
+ return
+ if action in ('down', 'move-down'):
+ newpos = min(oldpos + 1, len(rules) - 1)
+ movecursor(state, oldpos, newpos)
+ if selected is not None or action == 'move-down':
+ swap(state, oldpos, newpos)
+ elif action in ('up', 'move-up'):
+ newpos = max(0, oldpos - 1)
+ movecursor(state, oldpos, newpos)
+ if selected is not None or action == 'move-up':
+ swap(state, oldpos, newpos)
+ elif action == 'next-action':
+ cycleaction(state, oldpos, next=True)
+ elif action == 'prev-action':
+ cycleaction(state, oldpos, next=False)
+ elif action == 'select':
+ selected = oldpos if selected is None else None
+ makeselection(state, selected)
+ elif action == 'goto' and int(ch) < len(rules) and len(rules) <= 10:
+ newrule = next((r for r in rules if r.origpos == int(ch)))
+ movecursor(state, oldpos, newrule.pos)
+ if selected is not None:
+ swap(state, oldpos, newrule.pos)
+ elif action.startswith('action-'):
+ changeaction(state, oldpos, action[7:])
+ elif action == 'showpatch':
+ changemode(state, MODE_PATCH if curmode != MODE_PATCH else prevmode)
+ elif action == 'help':
+ changemode(state, MODE_HELP if curmode != MODE_HELP else prevmode)
+ elif action == 'quit':
+ return E_QUIT
+ elif action == 'histedit':
+ return E_HISTEDIT
+ elif action == 'page-down':
+ return E_PAGEDOWN
+ elif action == 'page-up':
+ return E_PAGEUP
+ elif action == 'line-down':
+ return E_LINEDOWN
+ elif action == 'line-up':
+ return E_LINEUP
+
+def makecommands(rules):
+ """Returns a list of commands consumable by histedit --commands based on
+ our list of rules"""
+ commands = []
+ for rule in rules:
+ commands.append("{0} {1}\n".format(rule.action, rule.ctx))
+ return commands
+
+def addln(win, y, x, line, color=None):
+ """Add a line to the given window left padding but 100% filled with
+ whitespace characters, so that the color appears on the whole line"""
+ maxy, maxx = win.getmaxyx()
+ length = maxx - 1 - x
+ line = ("{0:<%d}" % length).format(str(line).strip())[:length]
+ if y < 0:
+ y = maxy + y
+ if x < 0:
+ x = maxx + x
+ if color:
+ win.addstr(y, x, line, color)
+ else:
+ win.addstr(y, x, line)
+
+def patchcontents(state):
+ repo = state['repo']
+ rule = state['rules'][state['pos']]
+ displayer = logcmdutil.changesetdisplayer(repo.ui, repo, {
+ 'patch': True, 'verbose': True
+ }, buffered=True)
+ displayer.show(rule.ctx)
+ displayer.close()
+ return displayer.hunk[rule.ctx.rev()].splitlines()
+
+def _chisteditmain(repo, rules, stdscr):
+ # initialize color pattern
+ curses.init_pair(COLOR_HELP, curses.COLOR_WHITE, curses.COLOR_BLUE)
+ curses.init_pair(COLOR_SELECTED, curses.COLOR_BLACK, curses.COLOR_WHITE)
+ curses.init_pair(COLOR_WARN, curses.COLOR_BLACK, curses.COLOR_YELLOW)
+ curses.init_pair(COLOR_OK, curses.COLOR_BLACK, curses.COLOR_GREEN)
+
+ # don't display the cursor
+ try:
+ curses.curs_set(0)
+ except curses.error:
+ pass
+
+ def rendercommit(win, state):
+ """Renders the commit window that shows the log of the current selected
+ commit"""
+ pos = state['pos']
+ rules = state['rules']
+ rule = rules[pos]
+
+ ctx = rule.ctx
+ win.box()
+
+ maxy, maxx = win.getmaxyx()
+ length = maxx - 3
+
+ line = "changeset: {0}:{1:<12}".format(ctx.rev(), ctx)
+ win.addstr(1, 1, line[:length])
+
+ line = "user: {0}".format(stringutil.shortuser(ctx.user()))
+ win.addstr(2, 1, line[:length])
+
+ bms = repo.nodebookmarks(ctx.node())
+ line = "bookmark: {0}".format(' '.join(bms))
+ win.addstr(3, 1, line[:length])
+
+ line = "files: {0}".format(','.join(ctx.files()))
+ win.addstr(4, 1, line[:length])
+
+ line = "summary: {0}".format(ctx.description().splitlines()[0])
+ win.addstr(5, 1, line[:length])
+
+ conflicts = rule.conflicts
+ if len(conflicts) > 0:
+ conflictstr = ','.join(map(lambda r: str(r.ctx), conflicts))
+ conflictstr = "changed files overlap with {0}".format(conflictstr)
+ else:
+ conflictstr = 'no overlap'
+
+ win.addstr(6, 1, conflictstr[:length])
+ win.noutrefresh()
+
+ def helplines(mode):
+ if mode == MODE_PATCH:
+ help = """\
+?: help, k/up: line up, j/down: line down, v: stop viewing patch
+pgup: prev page, space/pgdn: next page, c: commit, q: abort
+"""
+ else:
+ help = """\
+?: help, k/up: move up, j/down: move down, space: select, v: view patch
+d: drop, e: edit, f: fold, m: mess, p: pick, r: roll
+pgup/K: move patch up, pgdn/J: move patch down, c: commit, q: abort
+"""
+ return help.splitlines()
+
+ def renderhelp(win, state):
+ maxy, maxx = win.getmaxyx()
+ mode, _ = state['mode']
+ for y, line in enumerate(helplines(mode)):
+ if y >= maxy:
+ break
+ addln(win, y, 0, line, curses.color_pair(COLOR_HELP))
+ win.noutrefresh()
+
+ def renderrules(rulesscr, state):
+ rules = state['rules']
+ pos = state['pos']
+ selected = state['selected']
+ start = state['modes'][MODE_RULES]['line_offset']
+
+ conflicts = [r.ctx for r in rules if r.conflicts]
+ if len(conflicts) > 0:
+ line = "potential conflict in %s" % ','.join(map(str, conflicts))
+ addln(rulesscr, -1, 0, line, curses.color_pair(COLOR_WARN))
+
+ for y, rule in enumerate(rules[start:]):
+ if y >= state['page_height']:
+ break
+ if len(rule.conflicts) > 0:
+ rulesscr.addstr(y, 0, " ", curses.color_pair(COLOR_WARN))
+ else:
+ rulesscr.addstr(y, 0, " ", curses.COLOR_BLACK)
+ if y + start == selected:
+ addln(rulesscr, y, 2, rule, curses.color_pair(COLOR_SELECTED))
+ elif y + start == pos:
+ addln(rulesscr, y, 2, rule, curses.A_BOLD)
+ else:
+ addln(rulesscr, y, 2, rule)
+ rulesscr.noutrefresh()
+
+ def renderstring(win, state, output):
+ maxy, maxx = win.getmaxyx()
+ length = min(maxy - 1, len(output))
+ for y in range(0, length):
+ win.addstr(y, 0, output[y])
+ win.noutrefresh()
+
+ def renderpatch(win, state):
+ start = state['modes'][MODE_PATCH]['line_offset']
+ renderstring(win, state, patchcontents(state)[start:])
+
+ def layout(mode):
+ maxy, maxx = stdscr.getmaxyx()
+ helplen = len(helplines(mode))
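+ # e.g. an 80x24 terminal showing the 3-line rules help leaves
+ # 24 - 3 - 8 = 13 rows for the main pane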
+ return {
+ 'commit': (8, maxx),
+ 'help': (helplen, maxx),
+ 'main': (maxy - helplen - 8, maxx),
+ }
+
+ def drawvertwin(size, y, x):
+ win = curses.newwin(size[0], size[1], y, x)
+ y += size[0]
+ return win, y, x
+
+ state = {
+ 'pos': 0,
+ 'rules': rules,
+ 'selected': None,
+ 'mode': (MODE_INIT, MODE_INIT),
+ 'page_height': None,
+ 'modes': {
+ MODE_RULES: {
+ 'line_offset': 0,
+ },
+ MODE_PATCH: {
+ 'line_offset': 0,
+ }
+ },
+ 'repo': repo,
+ }
+
+ # eventloop
+ ch = None
+ stdscr.clear()
+ stdscr.refresh()
+ while True:
+ try:
+ oldmode, _ = state['mode']
+ if oldmode == MODE_INIT:
+ changemode(state, MODE_RULES)
+ e = event(state, ch)
+
+ if e == E_QUIT:
+ return False
+ if e == E_HISTEDIT:
+ return state['rules']
+ else:
+ if e == E_RESIZE:
+ size = screen_size()
+ if size != stdscr.getmaxyx():
+ curses.resizeterm(*size)
+
+ curmode, _ = state['mode']
+ sizes = layout(curmode)
+ if curmode != oldmode:
+ state['page_height'] = sizes['main'][0]
+ # Adjust the view to fit the current screen size.
+ movecursor(state, state['pos'], state['pos'])
+
+ # Pack the windows against the top, each pane spread across the
+ # full width of the screen.
+ y, x = (0, 0)
+ helpwin, y, x = drawvertwin(sizes['help'], y, x)
+ mainwin, y, x = drawvertwin(sizes['main'], y, x)
+ commitwin, y, x = drawvertwin(sizes['commit'], y, x)
+
+ if e in (E_PAGEDOWN, E_PAGEUP, E_LINEDOWN, E_LINEUP):
+ if e == E_PAGEDOWN:
+ changeview(state, +1, 'page')
+ elif e == E_PAGEUP:
+ changeview(state, -1, 'page')
+ elif e == E_LINEDOWN:
+ changeview(state, +1, 'line')
+ elif e == E_LINEUP:
+ changeview(state, -1, 'line')
+
+ # start rendering
+ commitwin.erase()
+ helpwin.erase()
+ mainwin.erase()
+ if curmode == MODE_PATCH:
+ renderpatch(mainwin, state)
+ elif curmode == MODE_HELP:
+ renderstring(mainwin, state, __doc__.strip().splitlines())
+ else:
+ renderrules(mainwin, state)
+ rendercommit(commitwin, state)
+ renderhelp(helpwin, state)
+ curses.doupdate()
+ # done rendering
+ ch = stdscr.getkey()
+ except curses.error:
+ pass
+
+def _chistedit(ui, repo, *freeargs, **opts):
+ """interactively edit changeset history via a curses interface
+
+ Provides an ncurses interface to histedit. Press ? in chistedit mode
+ to see extensive help. Requires python-curses to be installed."""
+
+ if curses is None:
+ raise error.Abort(_("Python curses library required"))
+
+ # disable color
+ ui._colormode = None
+
+ try:
+ keep = opts.get('keep')
+ revs = opts.get('rev', [])[:]
+ cmdutil.checkunfinished(repo)
+ cmdutil.bailifchanged(repo)
+
+ if os.path.exists(os.path.join(repo.path, 'histedit-state')):
+ raise error.Abort(_('history edit already in progress, try '
+ '--continue or --abort'))
+ revs.extend(freeargs)
+ if not revs:
+ defaultrev = destutil.desthistedit(ui, repo)
+ if defaultrev is not None:
+ revs.append(defaultrev)
+ if len(revs) != 1:
+ raise error.Abort(
+ _('histedit requires exactly one ancestor revision'))
+
+ rr = list(repo.set('roots(%ld)', scmutil.revrange(repo, revs)))
+ if len(rr) != 1:
+ raise error.Abort(_('The specified revisions must have '
+ 'exactly one common root'))
+ root = rr[0].node()
+
+ topmost, empty = repo.dirstate.parents()
+ revs = between(repo, root, topmost, keep)
+ if not revs:
+ raise error.Abort(_('%s is not an ancestor of working directory') %
+ node.short(root))
+
+ ctxs = []
+ for i, r in enumerate(revs):
+ ctxs.append(histeditrule(repo[r], i))
+ rc = curses.wrapper(functools.partial(_chisteditmain, repo, ctxs))
+ curses.echo()
+ curses.endwin()
+ if rc is False:
+ ui.write(_("chistedit aborted\n"))
+ return 0
+ if type(rc) is list:
+ ui.status(_("running histedit\n"))
+ rules = makecommands(rc)
+ filename = repo.vfs.join('chistedit')
+ with open(filename, 'w+') as fp:
+ for r in rules:
+ fp.write(r)
+ opts['commands'] = filename
+ return _texthistedit(ui, repo, *freeargs, **opts)
+ except KeyboardInterrupt:
+ pass
+ return -1
+
@command('histedit',
[('', 'commands', '',
_('read history edits from the specified file'), _('FILE')),
@@ -1029,13 +1601,20 @@
for intentional "edit" command, but also for resolving unexpected
conflicts).
"""
+ # kludge: _chistedit only works for starting an edit, not aborting
+ # or continuing, so fall back to regular _texthistedit for those
+ # operations.
+ if ui.interface('histedit') == 'curses' and _getgoal(
+ pycompat.byteskwargs(opts)) == goalnew:
+ return _chistedit(ui, repo, *freeargs, **opts)
+ return _texthistedit(ui, repo, *freeargs, **opts)
+
+def _texthistedit(ui, repo, *freeargs, **opts):
state = histeditstate(repo)
- try:
- state.wlock = repo.wlock()
- state.lock = repo.lock()
+ with repo.wlock() as wlock, repo.lock() as lock:
+ state.wlock = wlock
+ state.lock = lock
_histedit(ui, repo, state, *freeargs, **opts)
- finally:
- release(state.lock, state.wlock)
goalcontinue = 'continue'
goalabort = 'abort'
@@ -1043,11 +1622,11 @@
goalnew = 'new'
def _getgoal(opts):
- if opts.get('continue'):
+ if opts.get(b'continue'):
return goalcontinue
- if opts.get('abort'):
+ if opts.get(b'abort'):
return goalabort
- if opts.get('edit_plan'):
+ if opts.get(b'edit_plan'):
return goaleditplan
return goalnew
@@ -1110,13 +1689,26 @@
fm.startitem()
goal = _getgoal(opts)
revs = opts.get('rev', [])
- # experimental config: ui.history-editing-backup
- nobackup = not ui.configbool('ui', 'history-editing-backup')
+ nobackup = not ui.configbool('rewrite', 'backup-bundle')
rules = opts.get('commands', '')
state.keep = opts.get('keep', False)
_validateargs(ui, repo, state, freeargs, opts, goal, rules, revs)
+ hastags = False
+ if revs:
+ revs = scmutil.revrange(repo, revs)
+ ctxs = [repo[rev] for rev in revs]
+ for ctx in ctxs:
+ tags = [tag for tag in ctx.tags() if tag != 'tip']
+ if not hastags:
+ hastags = len(tags)
+ if hastags:
+ if ui.promptchoice(_('warning: tags associated with the given'
+ ' changeset will be lost after histedit.\n'
+ 'do you want to continue (yN)? $$ &Yes $$ &No'),
+ default=1):
+ raise error.Abort(_('histedit cancelled'))
# rebuild state
if goal == goalcontinue:
state.read()
@@ -1317,7 +1909,7 @@
state.topmost = topmost
state.replacements = []
- ui.log("histedit", "%d actions to histedit", len(actions),
+ ui.log("histedit", "%d actions to histedit\n", len(actions),
histedit_num_actions=len(actions))
# Create a backup so we can always abort completely.
--- a/hgext/largefiles/__init__.py Wed Jan 09 20:00:35 2019 -0800
+++ b/hgext/largefiles/__init__.py Fri Jan 18 13:28:22 2019 -0500
@@ -107,9 +107,14 @@
from __future__ import absolute_import
from mercurial import (
+ cmdutil,
+ extensions,
+ exthelper,
hg,
+ httppeer,
localrepo,
- registrar,
+ sshpeer,
+ wireprotov1server,
)
from . import (
@@ -117,7 +122,6 @@
overrides,
proto,
reposetup,
- uisetup as uisetupmod,
)
# Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for
@@ -126,29 +130,65 @@
# leave the attribute unspecified.
testedwith = 'ships-with-hg-core'
-configtable = {}
-configitem = registrar.configitem(configtable)
+eh = exthelper.exthelper()
+eh.merge(lfcommands.eh)
+eh.merge(overrides.eh)
+eh.merge(proto.eh)
-configitem('largefiles', 'minsize',
- default=configitem.dynamicdefault,
+eh.configitem('largefiles', 'minsize',
+ default=eh.configitem.dynamicdefault,
)
-configitem('largefiles', 'patterns',
+eh.configitem('largefiles', 'patterns',
default=list,
)
-configitem('largefiles', 'usercache',
+eh.configitem('largefiles', 'usercache',
default=None,
)
+cmdtable = eh.cmdtable
+configtable = eh.configtable
+extsetup = eh.finalextsetup
reposetup = reposetup.reposetup
+uisetup = eh.finaluisetup
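+# the eh aliases above hand exthelper's aggregated registrations (commands,
+# config items, and the uisetup/extsetup callbacks collected via the eh.*
+# decorators here and in the merged submodules) to the standard extension
+# loading entry points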
def featuresetup(ui, supported):
# don't die on seeing a repo with the largefiles requirement
supported |= {'largefiles'}
-def uisetup(ui):
+@eh.uisetup
+def _uisetup(ui):
localrepo.featuresetupfuncs.add(featuresetup)
hg.wirepeersetupfuncs.append(proto.wirereposetup)
- uisetupmod.uisetup(ui)
+
+ cmdutil.outgoinghooks.add('largefiles', overrides.outgoinghook)
+ cmdutil.summaryremotehooks.add('largefiles', overrides.summaryremotehook)
+
+ # create the new wireproto commands ...
+ wireprotov1server.wireprotocommand('putlfile', 'sha', permission='push')(
+ proto.putlfile)
+ wireprotov1server.wireprotocommand('getlfile', 'sha', permission='pull')(
+ proto.getlfile)
+ wireprotov1server.wireprotocommand('statlfile', 'sha', permission='pull')(
+ proto.statlfile)
+ wireprotov1server.wireprotocommand('lheads', '', permission='pull')(
+ wireprotov1server.heads)
-cmdtable = lfcommands.cmdtable
-revsetpredicate = overrides.revsetpredicate
+ extensions.wrapfunction(wireprotov1server.commands['heads'], 'func',
+ proto.heads)
+ # TODO also wrap wireproto.commandsv2 once heads is implemented there.
+
+ # can't do this in reposetup because it needs to have happened before
+ # wirerepo.__init__ is called
+ proto.ssholdcallstream = sshpeer.sshv1peer._callstream
+ proto.httpoldcallstream = httppeer.httppeer._callstream
+ sshpeer.sshv1peer._callstream = proto.sshrepocallstream
+ httppeer.httppeer._callstream = proto.httprepocallstream
+
+ # override some extensions' stuff as well
+ for name, module in extensions.extensions():
+ if name == 'rebase':
+ # TODO: teach exthelper to handle this
+ extensions.wrapfunction(module, 'rebase',
+ overrides.overriderebase)
+
+revsetpredicate = eh.revsetpredicate
--- a/hgext/largefiles/lfcommands.py Wed Jan 09 20:00:35 2019 -0800
+++ b/hgext/largefiles/lfcommands.py Fri Jan 18 13:28:22 2019 -0500
@@ -20,12 +20,12 @@
cmdutil,
context,
error,
+ exthelper,
hg,
lock,
match as matchmod,
node,
pycompat,
- registrar,
scmutil,
util,
)
@@ -44,10 +44,9 @@
# -- Commands ----------------------------------------------------------
-cmdtable = {}
-command = registrar.command(cmdtable)
+eh = exthelper.exthelper()
-@command('lfconvert',
+@eh.command('lfconvert',
[('s', 'size', '',
_('minimum size (MB) for files to be converted as largefiles'), 'SIZE'),
('', 'to-normal', False,
@@ -240,7 +239,7 @@
# largefile was modified, update standins
m = hashlib.sha1('')
m.update(ctx[f].data())
- hash = m.hexdigest()
+ hash = node.hex(m.digest())
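+ # node.hex() of the raw digest stays bytes on Python 3, whereas
+ # hexdigest() would return a str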
if f not in lfiletohash or lfiletohash[f] != hash:
rdst.wwrite(f, ctx[f].data(), ctx[f].flags())
executable = 'x' in ctx[f].flags()
@@ -560,7 +559,7 @@
statuswriter(_('%d largefiles updated, %d removed\n') % (updated,
removed))
-@command('lfpull',
+@eh.command('lfpull',
[('r', 'rev', [], _('pull largefiles for these revisions'))
] + cmdutil.remoteopts,
_('-r REV... [-e CMD] [--remotecmd CMD] [SOURCE]'))
@@ -599,7 +598,7 @@
numcached += len(cached)
ui.status(_("%d largefiles cached\n") % numcached)
-@command('debuglfput',
+@eh.command('debuglfput',
[] + cmdutil.remoteopts,
_('FILE'))
def debuglfput(ui, repo, filepath, **kwargs):
--- a/hgext/largefiles/overrides.py Wed Jan 09 20:00:35 2019 -0800
+++ b/hgext/largefiles/overrides.py Fri Jan 18 13:28:22 2019 -0500
@@ -14,18 +14,29 @@
from mercurial.i18n import _
+from mercurial.hgweb import (
+ webcommands,
+)
+
from mercurial import (
archival,
cmdutil,
+ copies as copiesmod,
error,
+ exchange,
+ exthelper,
+ filemerge,
hg,
logcmdutil,
match as matchmod,
+ merge,
pathutil,
pycompat,
- registrar,
scmutil,
smartset,
+ subrepo,
+ upgrade,
+ url as urlmod,
util,
)
@@ -35,6 +46,8 @@
storefactory,
)
+eh = exthelper.exthelper()
+
# -- Utility functions: commonly/repeatedly needed functionality ---------------
def composelargefilematcher(match, manifest):
@@ -248,16 +261,23 @@
# For overriding mercurial.hgweb.webcommands so that largefiles will
# appear at their right place in the manifests.
+@eh.wrapfunction(webcommands, 'decodepath')
def decodepath(orig, path):
return lfutil.splitstandin(path) or path
# -- Wrappers: modify existing commands --------------------------------
+@eh.wrapcommand('add',
+ opts=[('', 'large', None, _('add as largefile')),
+ ('', 'normal', None, _('add as normal file')),
+ ('', 'lfsize', '', _('add all files above this size (in megabytes) '
+ 'as largefiles (default: 10)'))])
def overrideadd(orig, ui, repo, *pats, **opts):
if opts.get(r'normal') and opts.get(r'large'):
raise error.Abort(_('--normal cannot be used with --large'))
return orig(ui, repo, *pats, **opts)
+@eh.wrapfunction(cmdutil, 'add')
def cmdutiladd(orig, ui, repo, matcher, prefix, explicitonly, **opts):
# The --normal flag short circuits this override
if opts.get(r'normal'):
@@ -271,6 +291,7 @@
bad.extend(f for f in lbad)
return bad
+@eh.wrapfunction(cmdutil, 'remove')
def cmdutilremove(orig, ui, repo, matcher, prefix, after, force, subrepos,
dryrun):
normalmatcher = composenormalfilematcher(matcher, repo[None].manifest())
@@ -279,6 +300,7 @@
return removelargefiles(ui, repo, False, matcher, dryrun, after=after,
force=force) or result
+@eh.wrapfunction(subrepo.hgsubrepo, 'status')
def overridestatusfn(orig, repo, rev2, **opts):
try:
repo._repo.lfstatus = True
@@ -286,6 +308,7 @@
finally:
repo._repo.lfstatus = False
+@eh.wrapcommand('status')
def overridestatus(orig, ui, repo, *pats, **opts):
try:
repo.lfstatus = True
@@ -293,6 +316,7 @@
finally:
repo.lfstatus = False
+@eh.wrapfunction(subrepo.hgsubrepo, 'dirty')
def overridedirty(orig, repo, ignoreupdate=False, missing=False):
try:
repo._repo.lfstatus = True
@@ -300,6 +324,7 @@
finally:
repo._repo.lfstatus = False
+@eh.wrapcommand('log')
def overridelog(orig, ui, repo, *pats, **opts):
def overridematchandpats(ctx, pats=(), opts=None, globbed=False,
default='relpath', badfn=None):
@@ -406,6 +431,13 @@
restorematchandpatsfn()
setattr(logcmdutil, '_makenofollowfilematcher', oldmakefilematcher)
+@eh.wrapcommand('verify',
+ opts=[('', 'large', None,
+ _('verify that all largefiles in the current revision exist')),
+ ('', 'lfa', None,
+ _('verify largefiles in all revisions, not just current')),
+ ('', 'lfc', None,
+ _('verify local largefile contents, not just existence'))])
def overrideverify(orig, ui, repo, *pats, **opts):
large = opts.pop(r'large', False)
all = opts.pop(r'lfa', False)
@@ -416,6 +448,8 @@
result = result or lfcommands.verifylfiles(ui, repo, all, contents)
return result
+@eh.wrapcommand('debugstate',
+ opts=[('', 'large', None, _('display largefiles dirstate'))])
def overridedebugstate(orig, ui, repo, *pats, **opts):
large = opts.pop(r'large', False)
if large:
@@ -435,6 +469,7 @@
# The overridden function filters the unknown files by removing any
# largefiles. This makes the merge proceed and we can then handle this
# case further in the overridden calculateupdates function below.
+@eh.wrapfunction(merge, '_checkunknownfile')
def overridecheckunknownfile(origfn, repo, wctx, mctx, f, f2=None):
if lfutil.standin(repo.dirstate.normalize(f)) in wctx:
return False
@@ -466,6 +501,7 @@
# Finally, the merge.applyupdates function will then take care of
# writing the files into the working copy and lfcommands.updatelfiles
# will update the largefiles.
+@eh.wrapfunction(merge, 'calculateupdates')
def overridecalculateupdates(origfn, repo, p1, p2, pas, branchmerge, force,
acceptremote, *args, **kwargs):
overwrite = force and not branchmerge
@@ -534,6 +570,7 @@
return actions, diverge, renamedelete
+@eh.wrapfunction(merge, 'recordupdates')
def mergerecordupdates(orig, repo, actions, branchmerge):
if 'lfmr' in actions:
lfdirstate = lfutil.openlfdirstate(repo.ui, repo)
@@ -549,6 +586,7 @@
# Override filemerge to prompt the user about how they wish to merge
# largefiles. This will handle identical edits without prompting the user.
+@eh.wrapfunction(filemerge, '_filemerge')
def overridefilemerge(origfn, premerge, repo, wctx, mynode, orig, fcd, fco, fca,
labels=None):
if not lfutil.isstandin(orig) or fcd.isabsent() or fco.isabsent():
@@ -570,6 +608,7 @@
repo.wwrite(fcd.path(), fco.data(), fco.flags())
return True, 0, False
+@eh.wrapfunction(copiesmod, 'pathcopies')
def copiespathcopies(orig, ctx1, ctx2, match=None):
copies = orig(ctx1, ctx2, match=match)
updated = {}
@@ -584,6 +623,7 @@
# checks if the destination largefile already exists. It also keeps a
# list of copied files so that the largefiles can be copied and the
# dirstate updated.
+@eh.wrapfunction(cmdutil, 'copy')
def overridecopy(orig, ui, repo, pats, opts, rename=False):
# doesn't remove largefile on rename
if len(pats) < 2:
@@ -729,6 +769,7 @@
# commits. Update the standins then run the original revert, changing
# the matcher to hit standins instead of largefiles. Based on the
# resulting standins update the largefiles.
+@eh.wrapfunction(cmdutil, 'revert')
def overriderevert(orig, ui, repo, ctx, parents, *pats, **opts):
# Because we put the standins in a bad state (by updating them)
# and then return them to a correct state we need to lock to
@@ -799,6 +840,11 @@
# after pulling changesets, we need to take some extra care to get
# largefiles updated remotely
+@eh.wrapcommand('pull',
+ opts=[('', 'all-largefiles', None,
+ _('download all pulled versions of largefiles (DEPRECATED)')),
+ ('', 'lfrev', [],
+ _('download largefiles for these revisions'), _('REV'))])
def overridepull(orig, ui, repo, source=None, **opts):
revsprepull = len(repo)
if not source:
@@ -822,6 +868,9 @@
ui.status(_("%d largefiles cached\n") % numcached)
return result
+@eh.wrapcommand('push',
+ opts=[('', 'lfrev', [],
+ _('upload largefiles for these revisions'), _('REV'))])
def overridepush(orig, ui, repo, *args, **kwargs):
"""Override push command and store --lfrev parameters in opargs"""
lfrevs = kwargs.pop(r'lfrev', None)
@@ -830,6 +879,7 @@
opargs['lfrevs'] = scmutil.revrange(repo, lfrevs)
return orig(ui, repo, *args, **kwargs)
+@eh.wrapfunction(exchange, 'pushoperation')
def exchangepushoperation(orig, *args, **kwargs):
"""Override pushoperation constructor and store lfrevs parameter"""
lfrevs = kwargs.pop(r'lfrevs', None)
@@ -837,9 +887,7 @@
pushop.lfrevs = lfrevs
return pushop
-revsetpredicate = registrar.revsetpredicate()
-
-@revsetpredicate('pulled()')
+@eh.revsetpredicate('pulled()')
def pulledrevsetsymbol(repo, subset, x):
"""Changesets that just has been pulled.
@@ -865,6 +913,9 @@
raise error.Abort(_("pulled() only available in --lfrev"))
return smartset.baseset([r for r in subset if r >= firstpulled])
+@eh.wrapcommand('clone',
+ opts=[('', 'all-largefiles', None,
+ _('download all versions of all largefiles'))])
def overrideclone(orig, ui, source, dest=None, **opts):
d = dest
if d is None:
@@ -876,6 +927,7 @@
return orig(ui, source, dest, **opts)
+@eh.wrapfunction(hg, 'clone')
def hgclone(orig, ui, opts, *args, **kwargs):
result = orig(ui, opts, *args, **kwargs)
@@ -900,6 +952,7 @@
return result
+@eh.wrapcommand('rebase', extension='rebase')
def overriderebase(orig, ui, repo, **opts):
if not util.safehasattr(repo, '_largefilesenabled'):
return orig(ui, repo, **opts)
@@ -913,6 +966,7 @@
repo._lfstatuswriters.pop()
repo._lfcommithooks.pop()
+@eh.wrapcommand('archive')
def overridearchivecmd(orig, ui, repo, dest, **opts):
repo.unfiltered().lfstatus = True
@@ -921,6 +975,7 @@
finally:
repo.unfiltered().lfstatus = False
+@eh.wrapfunction(webcommands, 'archive')
def hgwebarchive(orig, web):
web.repo.lfstatus = True
@@ -929,12 +984,13 @@
finally:
web.repo.lfstatus = False
-def overridearchive(orig, repo, dest, node, kind, decode=True, matchfn=None,
+@eh.wrapfunction(archival, 'archive')
+def overridearchive(orig, repo, dest, node, kind, decode=True, match=None,
prefix='', mtime=None, subrepos=None):
# For some reason setting repo.lfstatus in hgwebarchive only changes the
# unfiltered repo's attr, so check that as well.
if not repo.lfstatus and not repo.unfiltered().lfstatus:
- return orig(repo, dest, node, kind, decode, matchfn, prefix, mtime,
+ return orig(repo, dest, node, kind, decode, match, prefix, mtime,
subrepos)
# No need to lock because we are only reading history and
@@ -955,7 +1011,7 @@
prefix = archival.tidyprefix(dest, kind, prefix)
def write(name, mode, islink, getdata):
- if matchfn and not matchfn(name):
+ if match and not match(name):
return
data = getdata()
if decode:
@@ -991,12 +1047,13 @@
if subrepos:
for subpath in sorted(ctx.substate):
sub = ctx.workingsub(subpath)
- submatch = matchmod.subdirmatcher(subpath, matchfn)
+ submatch = matchmod.subdirmatcher(subpath, match)
sub._repo.lfstatus = True
sub.archive(archiver, prefix, submatch)
archiver.done()
+@eh.wrapfunction(subrepo.hgsubrepo, 'archive')
def hgsubrepoarchive(orig, repo, archiver, prefix, match=None, decode=True):
lfenabled = util.safehasattr(repo._repo, '_largefilesenabled')
if not lfenabled or not repo._repo.lfstatus:
@@ -1051,6 +1108,7 @@
# standin until a commit. cmdutil.bailifchanged() raises an exception
# if the repo has uncommitted changes. Wrap it to also check if
# largefiles were changed. This is used by bisect, backout and fetch.
+@eh.wrapfunction(cmdutil, 'bailifchanged')
def overridebailifchanged(orig, repo, *args, **kwargs):
orig(repo, *args, **kwargs)
repo.lfstatus = True
@@ -1059,6 +1117,7 @@
if s.modified or s.added or s.removed or s.deleted:
raise error.Abort(_('uncommitted changes'))
+@eh.wrapfunction(cmdutil, 'postcommitstatus')
def postcommitstatus(orig, repo, *args, **kwargs):
repo.lfstatus = True
try:
@@ -1066,6 +1125,7 @@
finally:
repo.lfstatus = False
+@eh.wrapfunction(cmdutil, 'forget')
def cmdutilforget(orig, ui, repo, match, prefix, explicitonly, dryrun,
interactive):
normalmatcher = composenormalfilematcher(match, repo[None].manifest())
@@ -1167,6 +1227,13 @@
showhashes(file)
ui.status('\n')
+@eh.wrapcommand('outgoing',
+ opts=[('', 'large', None, _('display outgoing largefiles'))])
+def _outgoingcmd(orig, *args, **kwargs):
+ # Nothing to do here other than add the extra help option; the hook above
+ # processes it.
+ return orig(*args, **kwargs)
+
def summaryremotehook(ui, repo, opts, changes):
largeopt = opts.get('large', False)
if changes is None:
@@ -1196,6 +1263,8 @@
ui.status(_('largefiles: %d entities for %d files to upload\n')
% (len(lfhashes), len(toupload)))
+@eh.wrapcommand('summary',
+ opts=[('', 'large', None, _('display outgoing largefiles'))])
def overridesummary(orig, ui, repo, *pats, **opts):
try:
repo.lfstatus = True
@@ -1203,6 +1272,7 @@
finally:
repo.lfstatus = False
+@eh.wrapfunction(scmutil, 'addremove')
def scmutiladdremove(orig, repo, matcher, prefix, opts=None):
if opts is None:
opts = {}
@@ -1242,6 +1312,7 @@
# Calling purge with --all will cause the largefiles to be deleted.
# Override repo.status to prevent this from happening.
+@eh.wrapcommand('purge', extension='purge')
def overridepurge(orig, ui, repo, *dirs, **opts):
# XXX Monkey patching a repoview will not work. The assigned attribute will
# be set on the unfiltered repo, but we will only lookup attributes in the
@@ -1267,6 +1338,7 @@
orig(ui, repo, *dirs, **opts)
repo.status = oldstatus
+@eh.wrapcommand('rollback')
def overriderollback(orig, ui, repo, **opts):
with repo.wlock():
before = repo.dirstate.parents()
@@ -1304,6 +1376,7 @@
lfdirstate.write()
return result
+@eh.wrapcommand('transplant', extension='transplant')
def overridetransplant(orig, ui, repo, *revs, **opts):
resuming = opts.get(r'continue')
repo._lfcommithooks.append(lfutil.automatedcommithook(resuming))
@@ -1315,6 +1388,7 @@
repo._lfcommithooks.pop()
return result
+@eh.wrapcommand('cat')
def overridecat(orig, ui, repo, file1, *pats, **opts):
opts = pycompat.byteskwargs(opts)
ctx = scmutil.revsingle(repo, opts.get('rev'))
@@ -1375,6 +1449,7 @@
err = 0
return err
+@eh.wrapfunction(merge, 'update')
def mergeupdate(orig, repo, node, branchmerge, force,
*args, **kwargs):
matcher = kwargs.get(r'matcher', None)
@@ -1452,6 +1527,7 @@
return result
+@eh.wrapfunction(scmutil, 'marktouched')
def scmutilmarktouched(orig, repo, files, *args, **kwargs):
result = orig(repo, files, *args, **kwargs)
@@ -1466,6 +1542,8 @@
return result
+@eh.wrapfunction(upgrade, 'preservedrequirements')
+@eh.wrapfunction(upgrade, 'supporteddestrequirements')
def upgraderequirements(orig, repo):
reqs = orig(repo)
if 'largefiles' in repo.requirements:
@@ -1473,6 +1551,8 @@
return reqs
_lfscheme = 'largefile://'
+
+@eh.wrapfunction(urlmod, 'open')
def openlargefile(orig, ui, url_, data=None):
if url_.startswith(_lfscheme):
if data:
--- a/hgext/largefiles/proto.py Wed Jan 09 20:00:35 2019 -0800
+++ b/hgext/largefiles/proto.py Fri Jan 18 13:28:22 2019 -0500
@@ -11,10 +11,12 @@
from mercurial import (
error,
+ exthelper,
httppeer,
util,
wireprototypes,
wireprotov1peer,
+ wireprotov1server,
)
from . import (
@@ -28,6 +30,8 @@
'\n\nPlease enable it in your Mercurial config '
'file.\n')
+eh = exthelper.exthelper()
+
# these will all be replaced by largefiles.uisetup
ssholdcallstream = None
httpoldcallstream = None
@@ -162,6 +166,7 @@
repo.__class__ = lfileswirerepository
# advertise the largefiles=serve capability
+@eh.wrapfunction(wireprotov1server, '_capabilities')
def _capabilities(orig, repo, proto):
'''announce largefile server capability'''
caps = orig(repo, proto)
--- a/hgext/largefiles/uisetup.py Wed Jan 09 20:00:35 2019 -0800
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,204 +0,0 @@
-# Copyright 2009-2010 Gregory P. Ward
-# Copyright 2009-2010 Intelerad Medical Systems Incorporated
-# Copyright 2010-2011 Fog Creek Software
-# Copyright 2010-2011 Unity Technologies
-#
-# This software may be used and distributed according to the terms of the
-# GNU General Public License version 2 or any later version.
-
-'''setup for largefiles extension: uisetup'''
-from __future__ import absolute_import
-
-from mercurial.i18n import _
-
-from mercurial.hgweb import (
- webcommands,
-)
-
-from mercurial import (
- archival,
- cmdutil,
- commands,
- copies,
- exchange,
- extensions,
- filemerge,
- hg,
- httppeer,
- merge,
- scmutil,
- sshpeer,
- subrepo,
- upgrade,
- url,
- wireprotov1server,
-)
-
-from . import (
- overrides,
- proto,
-)
-
-def uisetup(ui):
- # Disable auto-status for some commands which assume that all
- # files in the result are under Mercurial's control
-
- entry = extensions.wrapcommand(commands.table, 'add',
- overrides.overrideadd)
- addopt = [('', 'large', None, _('add as largefile')),
- ('', 'normal', None, _('add as normal file')),
- ('', 'lfsize', '', _('add all files above this size '
- '(in megabytes) as largefiles '
- '(default: 10)'))]
- entry[1].extend(addopt)
-
- # The scmutil function is called both by the (trivial) addremove command,
- # and in the process of handling commit -A (issue3542)
- extensions.wrapfunction(scmutil, 'addremove', overrides.scmutiladdremove)
- extensions.wrapfunction(cmdutil, 'add', overrides.cmdutiladd)
- extensions.wrapfunction(cmdutil, 'remove', overrides.cmdutilremove)
- extensions.wrapfunction(cmdutil, 'forget', overrides.cmdutilforget)
-
- extensions.wrapfunction(copies, 'pathcopies', overrides.copiespathcopies)
-
- extensions.wrapfunction(upgrade, 'preservedrequirements',
- overrides.upgraderequirements)
-
- extensions.wrapfunction(upgrade, 'supporteddestrequirements',
- overrides.upgraderequirements)
-
- # Subrepos call status function
- entry = extensions.wrapcommand(commands.table, 'status',
- overrides.overridestatus)
- extensions.wrapfunction(subrepo.hgsubrepo, 'status',
- overrides.overridestatusfn)
-
- entry = extensions.wrapcommand(commands.table, 'log',
- overrides.overridelog)
- entry = extensions.wrapcommand(commands.table, 'rollback',
- overrides.overriderollback)
- entry = extensions.wrapcommand(commands.table, 'verify',
- overrides.overrideverify)
-
- verifyopt = [('', 'large', None,
- _('verify that all largefiles in current revision exists')),
- ('', 'lfa', None,
- _('verify largefiles in all revisions, not just current')),
- ('', 'lfc', None,
- _('verify local largefile contents, not just existence'))]
- entry[1].extend(verifyopt)
-
- entry = extensions.wrapcommand(commands.table, 'debugstate',
- overrides.overridedebugstate)
- debugstateopt = [('', 'large', None, _('display largefiles dirstate'))]
- entry[1].extend(debugstateopt)
-
- outgoing = lambda orgfunc, *arg, **kwargs: orgfunc(*arg, **kwargs)
- entry = extensions.wrapcommand(commands.table, 'outgoing', outgoing)
- outgoingopt = [('', 'large', None, _('display outgoing largefiles'))]
- entry[1].extend(outgoingopt)
- cmdutil.outgoinghooks.add('largefiles', overrides.outgoinghook)
- entry = extensions.wrapcommand(commands.table, 'summary',
- overrides.overridesummary)
- summaryopt = [('', 'large', None, _('display outgoing largefiles'))]
- entry[1].extend(summaryopt)
- cmdutil.summaryremotehooks.add('largefiles', overrides.summaryremotehook)
-
- entry = extensions.wrapcommand(commands.table, 'pull',
- overrides.overridepull)
- pullopt = [('', 'all-largefiles', None,
- _('download all pulled versions of largefiles (DEPRECATED)')),
- ('', 'lfrev', [],
- _('download largefiles for these revisions'), _('REV'))]
- entry[1].extend(pullopt)
-
- entry = extensions.wrapcommand(commands.table, 'push',
- overrides.overridepush)
- pushopt = [('', 'lfrev', [],
- _('upload largefiles for these revisions'), _('REV'))]
- entry[1].extend(pushopt)
- extensions.wrapfunction(exchange, 'pushoperation',
- overrides.exchangepushoperation)
-
- entry = extensions.wrapcommand(commands.table, 'clone',
- overrides.overrideclone)
- cloneopt = [('', 'all-largefiles', None,
- _('download all versions of all largefiles'))]
- entry[1].extend(cloneopt)
- extensions.wrapfunction(hg, 'clone', overrides.hgclone)
-
- entry = extensions.wrapcommand(commands.table, 'cat',
- overrides.overridecat)
- extensions.wrapfunction(merge, '_checkunknownfile',
- overrides.overridecheckunknownfile)
- extensions.wrapfunction(merge, 'calculateupdates',
- overrides.overridecalculateupdates)
- extensions.wrapfunction(merge, 'recordupdates',
- overrides.mergerecordupdates)
- extensions.wrapfunction(merge, 'update', overrides.mergeupdate)
- extensions.wrapfunction(filemerge, '_filemerge',
- overrides.overridefilemerge)
- extensions.wrapfunction(cmdutil, 'copy', overrides.overridecopy)
-
- # Summary calls dirty on the subrepos
- extensions.wrapfunction(subrepo.hgsubrepo, 'dirty', overrides.overridedirty)
-
- extensions.wrapfunction(cmdutil, 'revert', overrides.overriderevert)
-
- extensions.wrapcommand(commands.table, 'archive',
- overrides.overridearchivecmd)
- extensions.wrapfunction(archival, 'archive', overrides.overridearchive)
- extensions.wrapfunction(subrepo.hgsubrepo, 'archive',
- overrides.hgsubrepoarchive)
- extensions.wrapfunction(webcommands, 'archive', overrides.hgwebarchive)
- extensions.wrapfunction(cmdutil, 'bailifchanged',
- overrides.overridebailifchanged)
-
- extensions.wrapfunction(cmdutil, 'postcommitstatus',
- overrides.postcommitstatus)
- extensions.wrapfunction(scmutil, 'marktouched',
- overrides.scmutilmarktouched)
-
- extensions.wrapfunction(url, 'open',
- overrides.openlargefile)
-
- # create the new wireproto commands ...
- wireprotov1server.wireprotocommand('putlfile', 'sha', permission='push')(
- proto.putlfile)
- wireprotov1server.wireprotocommand('getlfile', 'sha', permission='pull')(
- proto.getlfile)
- wireprotov1server.wireprotocommand('statlfile', 'sha', permission='pull')(
- proto.statlfile)
- wireprotov1server.wireprotocommand('lheads', '', permission='pull')(
- wireprotov1server.heads)
-
- # ... and wrap some existing ones
- extensions.wrapfunction(wireprotov1server.commands['heads'], 'func',
- proto.heads)
- # TODO also wrap wireproto.commandsv2 once heads is implemented there.
-
- extensions.wrapfunction(webcommands, 'decodepath', overrides.decodepath)
-
- extensions.wrapfunction(wireprotov1server, '_capabilities',
- proto._capabilities)
-
- # can't do this in reposetup because it needs to have happened before
- # wirerepo.__init__ is called
- proto.ssholdcallstream = sshpeer.sshv1peer._callstream
- proto.httpoldcallstream = httppeer.httppeer._callstream
- sshpeer.sshv1peer._callstream = proto.sshrepocallstream
- httppeer.httppeer._callstream = proto.httprepocallstream
-
- # override some extensions' stuff as well
- for name, module in extensions.extensions():
- if name == 'purge':
- extensions.wrapcommand(getattr(module, 'cmdtable'), 'purge',
- overrides.overridepurge)
- if name == 'rebase':
- extensions.wrapcommand(getattr(module, 'cmdtable'), 'rebase',
- overrides.overriderebase)
- extensions.wrapfunction(module, 'rebase',
- overrides.overriderebase)
- if name == 'transplant':
- extensions.wrapcommand(getattr(module, 'cmdtable'), 'transplant',
- overrides.overridetransplant)
--- a/hgext/lfs/__init__.py Wed Jan 09 20:00:35 2019 -0800
+++ b/hgext/lfs/__init__.py Fri Jan 18 13:28:22 2019 -0500
@@ -129,30 +129,23 @@
from mercurial.i18n import _
from mercurial import (
- bundle2,
- changegroup,
- cmdutil,
config,
context,
error,
exchange,
extensions,
+ exthelper,
filelog,
filesetlang,
localrepo,
minifileset,
node,
pycompat,
- registrar,
repository,
revlog,
scmutil,
templateutil,
- upgrade,
util,
- vfs as vfsmod,
- wireprotoserver,
- wireprotov1server,
)
from . import (
@@ -167,45 +160,48 @@
# leave the attribute unspecified.
testedwith = 'ships-with-hg-core'
-configtable = {}
-configitem = registrar.configitem(configtable)
+eh = exthelper.exthelper()
+eh.merge(wrapper.eh)
+eh.merge(wireprotolfsserver.eh)
-configitem('experimental', 'lfs.serve',
+cmdtable = eh.cmdtable
+configtable = eh.configtable
+extsetup = eh.finalextsetup
+uisetup = eh.finaluisetup
+filesetpredicate = eh.filesetpredicate
+reposetup = eh.finalreposetup
+templatekeyword = eh.templatekeyword
+
+eh.configitem('experimental', 'lfs.serve',
default=True,
)
-configitem('experimental', 'lfs.user-agent',
+eh.configitem('experimental', 'lfs.user-agent',
default=None,
)
-configitem('experimental', 'lfs.disableusercache',
+eh.configitem('experimental', 'lfs.disableusercache',
default=False,
)
-configitem('experimental', 'lfs.worker-enable',
+eh.configitem('experimental', 'lfs.worker-enable',
default=False,
)
-configitem('lfs', 'url',
+eh.configitem('lfs', 'url',
default=None,
)
-configitem('lfs', 'usercache',
+eh.configitem('lfs', 'usercache',
default=None,
)
# Deprecated
-configitem('lfs', 'threshold',
+eh.configitem('lfs', 'threshold',
default=None,
)
-configitem('lfs', 'track',
+eh.configitem('lfs', 'track',
default='none()',
)
-configitem('lfs', 'retry',
+eh.configitem('lfs', 'retry',
default=5,
)
-cmdtable = {}
-command = registrar.command(cmdtable)
-
-templatekeyword = registrar.templatekeyword()
-filesetpredicate = registrar.filesetpredicate()
-
lfsprocessor = (
wrapper.readfromstore,
wrapper.writetostore,
@@ -216,10 +212,12 @@
# don't die on seeing a repo with the lfs requirement
supported |= {'lfs'}
-def uisetup(ui):
+@eh.uisetup
+def _uisetup(ui):
localrepo.featuresetupfuncs.add(featuresetup)
-def reposetup(ui, repo):
+@eh.reposetup
+def _reposetup(ui, repo):
# Nothing to do with a remote repo
if not repo.local():
return
@@ -246,7 +244,7 @@
s = repo.set('%n:%n', _bin(kwargs[r'node']), _bin(last))
else:
s = repo.set('%n', _bin(kwargs[r'node']))
- match = repo.narrowmatch()
+ match = repo._storenarrowmatch
for ctx in s:
# TODO: is there a way to just walk the files in the commit?
if any(ctx[f].islfs() for f in ctx.files()
@@ -305,6 +303,7 @@
return _match
+# Called by remotefilelog
def wrapfilelog(filelog):
wrapfunction = extensions.wrapfunction
@@ -312,6 +311,7 @@
wrapfunction(filelog, 'renamed', wrapper.filelogrenamed)
wrapfunction(filelog, 'size', wrapper.filelogsize)
+@eh.wrapfunction(localrepo, 'resolverevlogstorevfsoptions')
def _resolverevlogstorevfsoptions(orig, ui, requirements, features):
opts = orig(ui, requirements, features)
for name, module in extensions.extensions(ui):
@@ -326,38 +326,10 @@
return opts
-def extsetup(ui):
+@eh.extsetup
+def _extsetup(ui):
wrapfilelog(filelog.filelog)
- wrapfunction = extensions.wrapfunction
-
- wrapfunction(localrepo, 'makefilestorage', wrapper.localrepomakefilestorage)
- wrapfunction(localrepo, 'resolverevlogstorevfsoptions',
- _resolverevlogstorevfsoptions)
-
- wrapfunction(cmdutil, '_updatecatformatter', wrapper._updatecatformatter)
- wrapfunction(scmutil, 'wrapconvertsink', wrapper.convertsink)
-
- wrapfunction(upgrade, '_finishdatamigration',
- wrapper.upgradefinishdatamigration)
-
- wrapfunction(upgrade, 'preservedrequirements',
- wrapper.upgraderequirements)
-
- wrapfunction(upgrade, 'supporteddestrequirements',
- wrapper.upgraderequirements)
-
- wrapfunction(changegroup,
- 'allsupportedversions',
- wrapper.allsupportedversions)
-
- wrapfunction(exchange, 'push', wrapper.push)
- wrapfunction(wireprotov1server, '_capabilities', wrapper._capabilities)
- wrapfunction(wireprotoserver, 'handlewsgirequest',
- wireprotolfsserver.handlewsgirequest)
-
- wrapfunction(context.basefilectx, 'cmp', wrapper.filectxcmp)
- wrapfunction(context.basefilectx, 'isbinary', wrapper.filectxisbinary)
context.basefilectx.islfs = wrapper.filectxislfs
scmutil.fileprefetchhooks.add('lfs', wrapper._prefetchfiles)
@@ -367,14 +339,7 @@
# "packed1". Using "packed1" with lfs will likely cause trouble.
exchange._bundlespeccontentopts["v2"]["cg.version"] = "03"
- # bundlerepo uses "vfsmod.readonlyvfs(othervfs)", we need to make sure lfs
- # options and blob stores are passed from othervfs to the new readonlyvfs.
- wrapfunction(vfsmod.readonlyvfs, '__init__', wrapper.vfsinit)
-
- # when writing a bundle via "hg bundle" command, upload related LFS blobs
- wrapfunction(bundle2, 'writenewbundle', wrapper.writenewbundle)
-
-@filesetpredicate('lfs()')
+@eh.filesetpredicate('lfs()')
def lfsfileset(mctx, x):
"""File that uses LFS storage."""
# i18n: "lfs" is a keyword
@@ -384,7 +349,7 @@
return wrapper.pointerfromctx(ctx, f, removed=True) is not None
return mctx.predicate(lfsfilep, predrepr='<lfs>')
-@templatekeyword('lfs_files', requires={'ctx'})
+@eh.templatekeyword('lfs_files', requires={'ctx'})
def lfsfiles(context, mapping):
"""List of strings. All files modified, added, or removed by this
changeset."""
@@ -409,8 +374,8 @@
f = templateutil._showcompatlist(context, mapping, 'lfs_file', files)
return templateutil.hybrid(f, files, makemap, pycompat.identity)
-@command('debuglfsupload',
- [('r', 'rev', [], _('upload large files introduced by REV'))])
+@eh.command('debuglfsupload',
+ [('r', 'rev', [], _('upload large files introduced by REV'))])
def debuglfsupload(ui, repo, **opts):
"""upload lfs blobs added by the working copy parent or given revisions"""
revs = opts.get(r'rev', [])
--- a/hgext/lfs/blobstore.py Wed Jan 09 20:00:35 2019 -0800
+++ b/hgext/lfs/blobstore.py Fri Jan 18 13:28:22 2019 -0500
@@ -7,6 +7,7 @@
from __future__ import absolute_import
+import contextlib
import errno
import hashlib
import json
@@ -17,7 +18,9 @@
from mercurial.i18n import _
from mercurial import (
+ encoding,
error,
+ node,
pathutil,
pycompat,
url as urlmod,
@@ -26,6 +29,10 @@
worker,
)
+from mercurial.utils import (
+ stringutil,
+)
+
from ..largefiles import lfutil
# 64 bytes for SHA256
@@ -150,7 +157,7 @@
fp.write(chunk)
sha256.update(chunk)
- realoid = sha256.hexdigest()
+ realoid = node.hex(sha256.digest())
if realoid != oid:
raise LfsCorruptionError(_('corrupt remote lfs object: %s')
% oid)
@@ -200,7 +207,7 @@
# Don't abort if corruption is detected, because `hg verify` will
# give more useful info about the corruption- simply don't add the
# hardlink.
- if verify or hashlib.sha256(blob).hexdigest() == oid:
+ if verify or node.hex(hashlib.sha256(blob).digest()) == oid:
self.ui.note(_('lfs: found %s in the usercache\n') % oid)
lfutil.link(self.cachevfs.join(oid), self.vfs.join(oid))
else:
@@ -224,13 +231,37 @@
for chunk in util.filechunkiter(fp, size=1048576):
sha256.update(chunk)
- return oid == sha256.hexdigest()
+ return oid == node.hex(sha256.digest())
def has(self, oid):
"""Returns True if the local blobstore contains the requested blob,
False otherwise."""
return self.cachevfs.exists(oid) or self.vfs.exists(oid)
+def _urlerrorreason(urlerror):
+ '''Create a friendly message for the given URLError to be used in an
+ LfsRemoteError message.
+ '''
+ inst = urlerror
+
+ if isinstance(urlerror.reason, Exception):
+ inst = urlerror.reason
+
+ if util.safehasattr(inst, 'reason'):
+ try: # usually it is in the form (errno, strerror)
+ reason = inst.reason.args[1]
+ except (AttributeError, IndexError):
+ # it might be anything, for example a string
+ reason = inst.reason
+ if isinstance(reason, pycompat.unicode):
+ # SSLError of Python 2.7.9 contains a unicode
+ reason = encoding.unitolocal(reason)
+ return reason
+ elif getattr(inst, "strerror", None):
+ return encoding.strtolocal(inst.strerror)
+ else:
+ return stringutil.forcebytestr(urlerror)
+
class _gitlfsremote(object):
def __init__(self, repo, url):
@@ -263,16 +294,27 @@
'objects': objects,
'operation': action,
})
- batchreq = util.urlreq.request('%s/objects/batch' % self.baseurl,
- data=requestdata)
+ url = '%s/objects/batch' % self.baseurl
+ batchreq = util.urlreq.request(url, data=requestdata)
batchreq.add_header('Accept', 'application/vnd.git-lfs+json')
batchreq.add_header('Content-Type', 'application/vnd.git-lfs+json')
try:
- rsp = self.urlopener.open(batchreq)
- rawjson = rsp.read()
+ with contextlib.closing(self.urlopener.open(batchreq)) as rsp:
+ rawjson = rsp.read()
except util.urlerr.httperror as ex:
- raise LfsRemoteError(_('LFS HTTP error: %s (action=%s)')
- % (ex, action))
+ hints = {
+ 400: _('check that lfs serving is enabled on %s and "%s" is '
+ 'supported') % (self.baseurl, action),
+ 404: _('the "lfs.url" config may be used to override %s')
+ % self.baseurl,
+ }
+ hint = hints.get(ex.code, _('api=%s, action=%s') % (url, action))
+ raise LfsRemoteError(_('LFS HTTP error: %s') % ex, hint=hint)
+ except util.urlerr.urlerror as ex:
+ hint = (_('the "lfs.url" config may be used to override %s')
+ % self.baseurl)
+ raise LfsRemoteError(_('LFS error: %s') % _urlerrorreason(ex),
+ hint=hint)
try:
response = json.loads(rawjson)
except ValueError:
@@ -379,30 +421,37 @@
response = b''
try:
- req = self.urlopener.open(request)
-
- if self.ui.debugflag:
- self.ui.debug('Status: %d\n' % req.status)
- # lfs-test-server and hg serve return headers in different order
- self.ui.debug('%s\n'
- % '\n'.join(sorted(str(req.info()).splitlines())))
+ with contextlib.closing(self.urlopener.open(request)) as req:
+ ui = self.ui # Shorten debug lines
+ if self.ui.debugflag:
+ ui.debug('Status: %d\n' % req.status)
+ # lfs-test-server and hg serve return headers in different
+ # order
+ ui.debug('%s\n'
+ % '\n'.join(sorted(str(req.info()).splitlines())))
- if action == 'download':
- # If downloading blobs, store downloaded data to local blobstore
- localstore.download(oid, req)
- else:
- while True:
- data = req.read(1048576)
- if not data:
- break
- response += data
- if response:
- self.ui.debug('lfs %s response: %s' % (action, response))
+ if action == 'download':
+ # If downloading blobs, store downloaded data to local
+ # blobstore
+ localstore.download(oid, req)
+ else:
+ while True:
+ data = req.read(1048576)
+ if not data:
+ break
+ response += data
+ if response:
+ ui.debug('lfs %s response: %s' % (action, response))
except util.urlerr.httperror as ex:
if self.ui.debugflag:
self.ui.debug('%s: %s\n' % (oid, ex.read()))
- raise LfsRemoteError(_('HTTP error: %s (oid=%s, action=%s)')
+ raise LfsRemoteError(_('LFS HTTP error: %s (oid=%s, action=%s)')
% (ex, oid, action))
+ except util.urlerr.urlerror as ex:
+ hint = (_('attempted connection to %s')
+ % util.urllibcompat.getfullurl(request))
+ raise LfsRemoteError(_('LFS error: %s') % _urlerrorreason(ex),
+ hint=hint)
def _batch(self, pointers, localstore, action):
if action not in ['upload', 'download']:
@@ -539,7 +588,7 @@
return reduced.values()
def _verify(oid, content):
- realoid = hashlib.sha256(content).hexdigest()
+ realoid = node.hex(hashlib.sha256(content).digest())
if realoid != oid:
raise LfsCorruptionError(_('detected corrupt lfs object: %s') % oid,
hint=_('run hg verify'))
--- a/hgext/lfs/wireprotolfsserver.py Wed Jan 09 20:00:35 2019 -0800
+++ b/hgext/lfs/wireprotolfsserver.py Fri Jan 18 13:28:22 2019 -0500
@@ -17,8 +17,10 @@
)
from mercurial import (
+ exthelper,
pycompat,
util,
+ wireprotoserver,
)
from . import blobstore
@@ -31,6 +33,9 @@
HTTP_NOT_ACCEPTABLE = hgwebcommon.HTTP_NOT_ACCEPTABLE
HTTP_UNSUPPORTED_MEDIA_TYPE = hgwebcommon.HTTP_UNSUPPORTED_MEDIA_TYPE
+eh = exthelper.exthelper()
+
+@eh.wrapfunction(wireprotoserver, 'handlewsgirequest')
def handlewsgirequest(orig, rctx, req, res, checkperm):
"""Wrap wireprotoserver.handlewsgirequest() to possibly process an LFS
request if it is left unprocessed by the wrapped method.
--- a/hgext/lfs/wrapper.py Wed Jan 09 20:00:35 2019 -0800
+++ b/hgext/lfs/wrapper.py Fri Jan 18 13:28:22 2019 -0500
@@ -13,10 +13,21 @@
from mercurial.node import bin, hex, nullid, short
from mercurial import (
+ bundle2,
+ changegroup,
+ cmdutil,
+ context,
error,
+ exchange,
+ exthelper,
+ localrepo,
repository,
revlog,
+ scmutil,
+ upgrade,
util,
+ vfs as vfsmod,
+ wireprotov1server,
)
from mercurial.utils import (
@@ -31,17 +42,22 @@
pointer,
)
+eh = exthelper.exthelper()
+
+@eh.wrapfunction(localrepo, 'makefilestorage')
def localrepomakefilestorage(orig, requirements, features, **kwargs):
if b'lfs' in requirements:
features.add(repository.REPO_FEATURE_LFS)
return orig(requirements=requirements, features=features, **kwargs)
+@eh.wrapfunction(changegroup, 'allsupportedversions')
def allsupportedversions(orig, ui):
versions = orig(ui)
versions.add('03')
return versions
+@eh.wrapfunction(wireprotov1server, '_capabilities')
def _capabilities(orig, repo, proto):
'''Wrap server command to announce lfs server capability'''
caps = orig(repo, proto)
@@ -130,6 +146,7 @@
flags = rlog._revlog.flags(rev)
return bool(flags & revlog.REVIDX_EXTSTORED)
+# Wrapping may also be applied by remotefilelog
def filelogaddrevision(orig, self, text, transaction, link, p1, p2,
cachedelta=None, node=None,
flags=revlog.REVIDX_DEFAULT_FLAGS, **kwds):
@@ -149,6 +166,7 @@
return orig(self, text, transaction, link, p1, p2, cachedelta=cachedelta,
node=node, flags=flags, **kwds)
+# Wrapping may also be applied by remotefilelog
def filelogrenamed(orig, self, node):
if _islfs(self, node):
rawtext = self._revlog.revision(node, raw=True)
@@ -161,6 +179,7 @@
return False
return orig(self, node)
+# Wrapping may also be applied by remotefilelog
def filelogsize(orig, self, rev):
if _islfs(self, rev=rev):
# fast path: use lfs metadata to answer size
@@ -169,6 +188,7 @@
return int(metadata['size'])
return orig(self, rev)
+@eh.wrapfunction(context.basefilectx, 'cmp')
def filectxcmp(orig, self, fctx):
"""returns True if text is different than fctx"""
# some fctx (ex. hg-git) is not based on basefilectx and do not have islfs
@@ -179,6 +199,7 @@
return p1.oid() != p2.oid()
return orig(self, fctx)
+@eh.wrapfunction(context.basefilectx, 'isbinary')
def filectxisbinary(orig, self):
if self.islfs():
# fast path: use lfs metadata to answer isbinary
@@ -190,10 +211,12 @@
def filectxislfs(self):
return _islfs(self.filelog(), self.filenode())
+@eh.wrapfunction(cmdutil, '_updatecatformatter')
def _updatecatformatter(orig, fm, ctx, matcher, path, decode):
orig(fm, ctx, matcher, path, decode)
fm.data(rawdata=ctx[path].rawdata())
+@eh.wrapfunction(scmutil, 'wrapconvertsink')
def convertsink(orig, sink):
sink = orig(sink)
if sink.repotype == 'hg':
@@ -219,6 +242,9 @@
return sink
+# bundlerepo uses "vfsmod.readonlyvfs(othervfs)", we need to make sure lfs
+# options and blob stores are passed from othervfs to the new readonlyvfs.
+@eh.wrapfunction(vfsmod.readonlyvfs, '__init__')
def vfsinit(orig, self, othervfs):
orig(self, othervfs)
# copy lfs related options
@@ -290,6 +316,7 @@
"""
return uploadblobsfromrevs(pushop.repo, pushop.outgoing.missing)
+@eh.wrapfunction(exchange, 'push')
def push(orig, repo, remote, *args, **kwargs):
"""bail on push if the extension isn't enabled on remote when needed, and
update the remote store based on the destination path."""
@@ -316,6 +343,8 @@
else:
return orig(repo, remote, *args, **kwargs)
+# when writing a bundle via "hg bundle" command, upload related LFS blobs
+@eh.wrapfunction(bundle2, 'writenewbundle')
def writenewbundle(orig, ui, repo, source, filename, bundletype, outgoing,
*args, **kwargs):
"""upload LFS blobs added by outgoing revisions on 'hg bundle'"""
@@ -393,6 +422,7 @@
remoteblob = repo.svfs.lfsremoteblobstore
remoteblob.writebatch(pointers, repo.svfs.lfslocalblobstore)
+@eh.wrapfunction(upgrade, '_finishdatamigration')
def upgradefinishdatamigration(orig, ui, srcrepo, dstrepo, requirements):
orig(ui, srcrepo, dstrepo, requirements)
@@ -407,6 +437,8 @@
ui.write(_('copying lfs blob %s\n') % oid)
lfutil.link(srclfsvfs.join(oid), dstlfsvfs.join(oid))
+@eh.wrapfunction(upgrade, 'preservedrequirements')
+@eh.wrapfunction(upgrade, 'supporteddestrequirements')
def upgraderequirements(orig, repo):
reqs = orig(repo)
if 'lfs' in repo.requirements:
--- a/hgext/logtoprocess.py Wed Jan 09 20:00:35 2019 -0800
+++ b/hgext/logtoprocess.py Fri Jan 18 13:28:22 2019 -0500
@@ -9,21 +9,21 @@
This extension lets you specify a shell command per ui.log() event,
sending all remaining arguments to that command as environment variables.
-Each positional argument to the method results in a `MSG[N]` key in the
-environment, starting at 1 (so `MSG1`, `MSG2`, etc.). Each keyword argument
-is set as a `OPT_UPPERCASE_KEY` variable (so the key is uppercased, and
-prefixed with `OPT_`). The original event name is passed in the `EVENT`
-environment variable, and the process ID of mercurial is given in `HGPID`.
+Positional arguments construct a log message, which is passed in the `MSG1`
+environment variable. Each keyword argument is set as an `OPT_UPPERCASE_KEY`
+variable (so the key is uppercased, and prefixed with `OPT_`). The original
+event name is passed in the `EVENT` environment variable, and the process ID
+of mercurial is given in `HGPID`.
-So given a call `ui.log('foo', 'bar', 'baz', spam='eggs'), a script configured
-for the `foo` event can expect an environment with `MSG1=bar`, `MSG2=baz`, and
-`OPT_SPAM=eggs`.
+So given a call `ui.log('foo', 'bar %s\n', 'baz', spam='eggs')`, a script
+configured for the `foo` event can expect an environment with `MSG1=bar baz`
+and `OPT_SPAM=eggs`.
Scripts are configured in the `[logtoprocess]` section, each key an event name.
For example::
[logtoprocess]
- commandexception = echo "$MSG2$MSG3" > /var/log/mercurial_exceptions.log
+ commandexception = echo "$MSG1" > /var/log/mercurial_exceptions.log
would log the warning message and traceback of any failed command dispatch.
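+
+Another example, assuming the `commandfinish` event that command dispatch
+logs when a command completes::
+
+ [logtoprocess]
+ commandfinish = echo "$EVENT pid=$HGPID: $MSG1" >> /tmp/hg-events.log
+
+would append a line for every completed command.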
@@ -34,14 +34,7 @@
from __future__ import absolute_import
-import itertools
import os
-import subprocess
-import sys
-
-from mercurial import (
- pycompat,
-)
from mercurial.utils import (
procutil,
@@ -53,88 +46,30 @@
# leave the attribute unspecified.
testedwith = 'ships-with-hg-core'
-def uisetup(ui):
- if pycompat.iswindows:
- # no fork on Windows, but we can create a detached process
- # https://msdn.microsoft.com/en-us/library/windows/desktop/ms684863.aspx
- # No stdlib constant exists for this value
- DETACHED_PROCESS = 0x00000008
- _creationflags = DETACHED_PROCESS | subprocess.CREATE_NEW_PROCESS_GROUP
+class processlogger(object):
+ """Map log events to external commands
+
+ Arguments are passed on as environment variables.
+ """
+
+ def __init__(self, ui):
+ self._scripts = dict(ui.configitems(b'logtoprocess'))
+
+ def tracked(self, event):
+ return bool(self._scripts.get(event))
- def runshellcommand(script, env):
- # we can't use close_fds *and* redirect stdin. I'm not sure that we
- # need to because the detached process has no console connection.
- subprocess.Popen(
- procutil.tonativestr(script),
- shell=True, env=procutil.tonativeenv(env), close_fds=True,
- creationflags=_creationflags)
- else:
- def runshellcommand(script, env):
- # double-fork to completely detach from the parent process
- # based on http://code.activestate.com/recipes/278731
- pid = os.fork()
- if pid:
- # parent
- return
- # subprocess.Popen() forks again, all we need to add is
- # flag the new process as a new session.
- if sys.version_info < (3, 2):
- newsession = {'preexec_fn': os.setsid}
- else:
- newsession = {'start_new_session': True}
- try:
- # connect std* to devnull to make sure the subprocess can't
- # muck up these stream for mercurial.
- # Connect all the streams to be more close to Windows behavior
- # and pager will wait for scripts to end if we don't do that
- nullrfd = open(os.devnull, 'r')
- nullwfd = open(os.devnull, 'w')
- subprocess.Popen(
- procutil.tonativestr(script),
- shell=True, stdin=nullrfd,
- stdout=nullwfd, stderr=nullwfd,
- env=procutil.tonativeenv(env),
- close_fds=True, **newsession)
- finally:
- # mission accomplished, this child needs to exit and not
- # continue the hg process here.
- os._exit(0)
+ def log(self, ui, event, msg, opts):
+ script = self._scripts[event]
+ env = {
+ b'EVENT': event,
+ b'HGPID': os.getpid(),
+ b'MSG1': msg,
+ }
+ # keyword arguments get prefixed with OPT_ and uppercased
+ env.update((b'OPT_%s' % key.upper(), value)
+ for key, value in opts.items())
+ fullenv = procutil.shellenviron(env)
+ procutil.runbgcommand(script, fullenv, shell=True)
- class logtoprocessui(ui.__class__):
- def log(self, event, *msg, **opts):
- """Map log events to external commands
-
- Arguments are passed on as environment variables.
-
- """
- script = self.config('logtoprocess', event)
- if script:
- if msg:
- # try to format the log message given the remaining
- # arguments
- try:
- # Format the message as blackbox does
- formatted = msg[0] % msg[1:]
- except (TypeError, KeyError):
- # Failed to apply the arguments, ignore
- formatted = msg[0]
- messages = (formatted,) + msg[1:]
- else:
- messages = msg
- # positional arguments are listed as MSG[N] keys in the
- # environment
- msgpairs = (
- ('MSG{0:d}'.format(i), str(m))
- for i, m in enumerate(messages, 1))
- # keyword arguments get prefixed with OPT_ and uppercased
- optpairs = (
- ('OPT_{0}'.format(key.upper()), str(value))
- for key, value in opts.iteritems())
- env = dict(itertools.chain(procutil.shellenviron().items(),
- msgpairs, optpairs),
- EVENT=event, HGPID=str(os.getpid()))
- runshellcommand(script, env)
- return super(logtoprocessui, self).log(event, *msg, **opts)
-
- # Replace the class for this instance and all clones created from it:
- ui.__class__ = logtoprocessui
+def uipopulate(ui):
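+ # ui.log() first asks each registered logger whether it tracks the event,
+ # so events without a configured script only cost a dictionary lookup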
+ ui.setlogger(b'logtoprocess', processlogger(ui))
--- a/hgext/mq.py Wed Jan 09 20:00:35 2019 -0800
+++ b/hgext/mq.py Fri Jan 18 13:28:22 2019 -0500
@@ -139,6 +139,8 @@
class dummyui(object):
def debug(self, msg):
pass
+ def log(self, event, msgfmt, *msgargs, **opts):
+ pass
stripext = extensions.load(dummyui(), 'strip', '')
strip = stripext.strip
--- a/hgext/narrow/TODO.rst Wed Jan 09 20:00:35 2019 -0800
+++ b/hgext/narrow/TODO.rst Fri Jan 18 13:28:22 2019 -0500
@@ -1,6 +1,3 @@
-Integration with the share extension needs improvement. Right now
-we've seen some odd bugs.
-
Address commentary in manifest.excludedmanifestrevlog.add -
specifically we should improve the collaboration with core so that
add() never gets called on an excluded directory and we can improve
--- a/hgext/narrow/narrowbundle2.py Wed Jan 09 20:00:35 2019 -0800
+++ b/hgext/narrow/narrowbundle2.py Fri Jan 18 13:28:22 2019 -0500
@@ -20,7 +20,7 @@
changegroup,
error,
exchange,
- extensions,
+ localrepo,
narrowspec,
repair,
repository,
@@ -31,10 +31,9 @@
stringutil,
)
-NARROWCAP = 'narrow'
_NARROWACL_SECTION = 'narrowhgacl'
-_CHANGESPECPART = NARROWCAP + ':changespec'
-_SPECPART = NARROWCAP + ':spec'
+_CHANGESPECPART = 'narrow:changespec'
+_SPECPART = 'narrow:spec'
_SPECPART_INCLUDE = 'include'
_SPECPART_EXCLUDE = 'exclude'
_KILLNODESIGNAL = 'KILL'
@@ -44,12 +43,6 @@
_CSHEADERSIZE = struct.calcsize(_ELIDEDCSHEADER)
_MFHEADERSIZE = struct.calcsize(_ELIDEDMFHEADER)
-# When advertising capabilities, always include narrow clone support.
-def getrepocaps_narrow(orig, repo, **kwargs):
- caps = orig(repo, **kwargs)
- caps[NARROWCAP] = ['v0']
- return caps
-
# Serve a changegroup for a client with a narrow clone.
def getbundlechangegrouppart_narrow(bundler, repo, source,
bundlecaps=None, b2caps=None, heads=None,
@@ -158,6 +151,7 @@
op.repo.requirements.add(repository.NARROW_REQUIREMENT)
op.repo._writerequirements()
op.repo.setnarrowpats(includepats, excludepats)
+ narrowspec.copytoworkingcopy(op.repo)
@bundle2.parthandler(_CHANGESPECPART)
def _handlechangespec(op, inpart):
@@ -187,18 +181,15 @@
if clkills:
# preserve bookmarks that repair.strip() would otherwise strip
- bmstore = repo._bookmarks
+ op._bookmarksbackup = repo._bookmarks
class dummybmstore(dict):
def applychanges(self, repo, tr, changes):
pass
- def recordchange(self, tr): # legacy version
- pass
- repo._bookmarks = dummybmstore()
+ localrepo.localrepository._bookmarks.set(repo, dummybmstore())
chgrpfile = repair.strip(op.ui, repo, list(clkills), backup=True,
topic='widen')
- repo._bookmarks = bmstore
if chgrpfile:
- op._widen_uninterr = repo.ui.uninterruptable()
+ op._widen_uninterr = repo.ui.uninterruptible()
op._widen_uninterr.__enter__()
# presence of _widen_bundle attribute activates widen handler later
op._widen_bundle = chgrpfile
@@ -252,16 +243,12 @@
def setup():
"""Enable narrow repo support in bundle2-related extension points."""
- extensions.wrapfunction(bundle2, 'getrepocaps', getrepocaps_narrow)
-
getbundleargs = wireprototypes.GETBUNDLE_ARGUMENTS
getbundleargs['narrow'] = 'boolean'
getbundleargs['depth'] = 'plain'
getbundleargs['oldincludepats'] = 'csv'
getbundleargs['oldexcludepats'] = 'csv'
- getbundleargs['includepats'] = 'csv'
- getbundleargs['excludepats'] = 'csv'
getbundleargs['known'] = 'csv'
# Extend changegroup serving to handle requests from narrow clients.
@@ -284,5 +271,10 @@
origcghandler(op, inpart)
if util.safehasattr(op, '_widen_bundle'):
handlechangegroup_widen(op, inpart)
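+ # restore the bookmarks that _handlechangespec stashed away before
+ # stripping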
+ if util.safehasattr(op, '_bookmarksbackup'):
+ localrepo.localrepository._bookmarks.set(op.repo,
+ op._bookmarksbackup)
+ del op._bookmarksbackup
+
wrappedcghandler.params = origcghandler.params
bundle2.parthandlermapping['changegroup'] = wrappedcghandler
--- a/hgext/narrow/narrowcommands.py Wed Jan 09 20:00:35 2019 -0800
+++ b/hgext/narrow/narrowcommands.py Fri Jan 18 13:28:22 2019 -0500
@@ -20,7 +20,6 @@
exchange,
extensions,
hg,
- merge,
narrowspec,
node,
pycompat,
@@ -141,8 +140,10 @@
include, exclude = repo.narrowpats
kwargs['oldincludepats'] = include
kwargs['oldexcludepats'] = exclude
- kwargs['includepats'] = include
- kwargs['excludepats'] = exclude
+ if include:
+ kwargs['includepats'] = include
+ if exclude:
+ kwargs['excludepats'] = exclude
# calculate known nodes only in ellipses cases because in non-ellipses cases
# we have all the nodes
if wireprototypes.ELLIPSESCAP in pullop.remote.capabilities():
@@ -158,16 +159,6 @@
extensions.wrapfunction(exchange,'_pullbundle2extraprepare',
pullbundle2extraprepare)
-# This is an extension point for filesystems that need to do something other
-# than just blindly unlink the files. It's not clear what arguments would be
-# useful, so we're passing in a fair number of them, some of them redundant.
-def _narrowcleanupwdir(repo, oldincludes, oldexcludes, newincludes, newexcludes,
- oldmatch, newmatch):
- for f in repo.dirstate:
- if not newmatch(f):
- repo.dirstate.drop(f)
- repo.wvfs.unlinkpath(f)
-
def _narrow(ui, repo, remote, commoninc, oldincludes, oldexcludes,
newincludes, newexcludes, force):
oldmatch = narrowspec.match(repo.root, oldincludes, oldexcludes)
@@ -205,7 +196,7 @@
hint=_('use --force-delete-local-changes to '
'ignore'))
- with ui.uninterruptable():
+ with ui.uninterruptible():
if revstostrip:
tostrip = [unfi.changelog.node(r) for r in revstostrip]
if repo['.'].node() in tostrip:
@@ -213,7 +204,9 @@
urev = max(repo.revs('(::%n) - %ln + null',
repo['.'].node(), visibletostrip))
hg.clean(repo, urev)
- repair.strip(ui, unfi, tostrip, topic='narrow')
+ overrides = {('devel', 'strip-obsmarkers'): False}
+ with ui.configoverride(overrides, 'narrow'):
+ repair.strip(ui, unfi, tostrip, topic='narrow')
todelete = []
for f, f2, size in repo.store.datafiles():
@@ -237,22 +230,23 @@
repo.destroying()
- with repo.transaction("narrowing"):
+ with repo.transaction('narrowing'):
+ # Update narrowspec before removing revlogs, so repo won't be
+ # corrupt in case of crash
+ repo.setnarrowpats(newincludes, newexcludes)
+
for f in todelete:
ui.status(_('deleting %s\n') % f)
util.unlinkpath(repo.svfs.join(f))
repo.store.markremoved(f)
- _narrowcleanupwdir(repo, oldincludes, oldexcludes, newincludes,
- newexcludes, oldmatch, newmatch)
- repo.setnarrowpats(newincludes, newexcludes)
+ narrowspec.updateworkingcopy(repo, assumeclean=True)
+ narrowspec.copytoworkingcopy(repo)
repo.destroyed()
def _widen(ui, repo, remote, commoninc, oldincludes, oldexcludes,
newincludes, newexcludes):
- newmatch = narrowspec.match(repo.root, newincludes, newexcludes)
-
# for now we assume that if a server has ellipses enabled, we will be
# exchanging ellipses nodes. In future we should add ellipses as a client
# side requirement (maybe) to distinguish a client is shallow or not and
@@ -277,7 +271,7 @@
# silence the devel-warning of applying an empty changegroup
overrides = {('devel', 'all-warnings'): False}
- with ui.uninterruptable():
+ with ui.uninterruptible():
common = commoninc[0]
if ellipsesremote:
ds = repo.dirstate
@@ -308,19 +302,10 @@
bundle2.processbundle(repo, bundle,
transactiongetter=tgetter)
- repo.setnewnarrowpats()
- actions = {k: [] for k in 'a am f g cd dc r dm dg m e k p pr'.split()}
- addgaction = actions['g'].append
-
- mf = repo['.'].manifest().matches(newmatch)
- for f, fn in mf.iteritems():
- if f not in repo.dirstate:
- addgaction((f, (mf.flags(f), False),
- "add from widened narrow clone"))
-
- merge.applyupdates(repo, actions, wctx=repo[None],
- mctx=repo['.'], overwrite=False)
- merge.recordupdates(repo, actions, branchmerge=False)
+ with repo.transaction('widening'):
+ repo.setnewnarrowpats()
+ narrowspec.updateworkingcopy(repo)
+ narrowspec.copytoworkingcopy(repo)
# TODO(rdamazio): Make new matcher format and update description
@command('tracked',
@@ -332,6 +317,8 @@
('', 'clear', False, _('whether to replace the existing narrowspec')),
('', 'force-delete-local-changes', False,
_('forces deletion of local changes when narrowing')),
+ ('', 'update-working-copy', False,
+ _('update working copy when the store has changed')),
] + commands.remoteopts,
_('[OPTIONS]... [REMOTE]'),
inferrepo=True)
@@ -361,15 +348,13 @@
"""
opts = pycompat.byteskwargs(opts)
if repository.NARROW_REQUIREMENT not in repo.requirements:
- ui.warn(_('The narrow command is only supported on respositories cloned'
- ' with --narrow.\n'))
- return 1
+ raise error.Abort(_('the narrow command is only supported on '
+ 'repositories cloned with --narrow'))
# Before supporting, decide whether "hg tracked --clear" should mean
# tracking no paths or all paths.
if opts['clear']:
- ui.warn(_('The --clear option is not yet supported.\n'))
- return 1
+ raise error.Abort(_('the --clear option is not yet supported'))
# import rules from a file
newrules = opts.get('import_rules')
@@ -392,27 +377,48 @@
removedincludes = narrowspec.parsepatterns(opts['removeinclude'])
addedexcludes = narrowspec.parsepatterns(opts['addexclude'])
removedexcludes = narrowspec.parsepatterns(opts['removeexclude'])
+
+ update_working_copy = opts['update_working_copy']
+ only_show = not (addedincludes or removedincludes or addedexcludes or
+ removedexcludes or newrules or update_working_copy)
+
+ oldincludes, oldexcludes = repo.narrowpats
+
+ # filter the user passed additions and deletions into actual additions and
+ # deletions of excludes and includes
+ addedincludes -= oldincludes
+ removedincludes &= oldincludes
+ addedexcludes -= oldexcludes
+ removedexcludes &= oldexcludes
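+ # e.g. an --addinclude pattern that is already in the narrowspec is
+ # discarded here, so it no longer counts as a widening below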
+
widening = addedincludes or removedexcludes
narrowing = removedincludes or addedexcludes
- only_show = not widening and not narrowing
# Only print the current narrowspec.
if only_show:
- include, exclude = repo.narrowpats
-
ui.pager('tracked')
fm = ui.formatter('narrow', opts)
- for i in sorted(include):
+ for i in sorted(oldincludes):
fm.startitem()
fm.write('status', '%s ', 'I', label='narrow.included')
fm.write('pat', '%s\n', i, label='narrow.included')
- for i in sorted(exclude):
+ for i in sorted(oldexcludes):
fm.startitem()
fm.write('status', '%s ', 'X', label='narrow.excluded')
fm.write('pat', '%s\n', i, label='narrow.excluded')
fm.end()
return 0
+ if update_working_copy:
+ with repo.wlock(), repo.lock(), repo.transaction('narrow-wc'):
+ narrowspec.updateworkingcopy(repo)
+ narrowspec.copytoworkingcopy(repo)
+ return 0
+
+ if not widening and not narrowing:
+ ui.status(_("nothing to widen or narrow\n"))
+ return 0
+
with repo.wlock(), repo.lock():
cmdutil.bailifchanged(repo)
@@ -432,7 +438,6 @@
commoninc = discovery.findcommonincoming(repo, remote)
- oldincludes, oldexcludes = repo.narrowpats
if narrowing:
newincludes = oldincludes - removedincludes
newexcludes = oldexcludes | addedexcludes
--- a/hgext/phabricator.py Wed Jan 09 20:00:35 2019 -0800
+++ b/hgext/phabricator.py Fri Jan 18 13:28:22 2019 -0500
@@ -41,6 +41,7 @@
from __future__ import absolute_import
+import contextlib
import itertools
import json
import operator
@@ -58,6 +59,7 @@
obsutil,
parser,
patch,
+ phases,
registrar,
scmutil,
smartset,
@@ -121,7 +123,7 @@
)),
]
-def vcrcommand(name, flags, spec):
+def vcrcommand(name, flags, spec, helpcategory=None):
fullflags = flags + _VCR_FLAGS
def decorate(fn):
def inner(*args, **kwargs):
@@ -143,7 +145,7 @@
return fn(*args, **kwargs)
inner.__name__ = fn.__name__
inner.__doc__ = fn.__doc__
- return command(name, fullflags, spec)(inner)
+ return command(name, fullflags, spec, helpcategory=helpcategory)(inner)
return decorate
def urlencodenested(params):
@@ -214,7 +216,8 @@
else:
urlopener = urlmod.opener(repo.ui, authinfo)
request = util.urlreq.request(url, data=data)
- body = urlopener.open(request).read()
+ with contextlib.closing(urlopener.open(request)) as rsp:
+ body = rsp.read()
repo.ui.debug(b'Conduit Response: %s\n' % body)
parsed = json.loads(body)
if parsed.get(r'error_code'):
@@ -465,7 +468,8 @@
(b'', b'amend', True, _(b'update commit messages')),
(b'', b'reviewer', [], _(b'specify reviewers')),
(b'', b'confirm', None, _(b'ask for confirmation before sending'))],
- _(b'REV [OPTIONS]'))
+ _(b'REV [OPTIONS]'),
+ helpcategory=command.CATEGORY_IMPORT_EXPORT)
def phabsend(ui, repo, *revs, **opts):
"""upload changesets to Phabricator
@@ -581,6 +585,10 @@
newdesc = encoding.unitolocal(newdesc)
# Make sure commit message contain "Differential Revision"
if old.description() != newdesc:
+ if old.phase() == phases.public:
+ ui.warn(_("warning: not updating public commit %s\n")
+ % scmutil.formatchangeid(old))
+ continue
parents = [
mapping.get(old.p1().node(), (old.p1(),))[0],
mapping.get(old.p2().node(), (old.p2(),))[0],
@@ -919,7 +927,8 @@
@vcrcommand(b'phabread',
[(b'', b'stack', False, _(b'read dependencies'))],
- _(b'DREVSPEC [OPTIONS]'))
+ _(b'DREVSPEC [OPTIONS]'),
+ helpcategory=command.CATEGORY_IMPORT_EXPORT)
def phabread(ui, repo, spec, **opts):
"""print patches from Phabricator suitable for importing
@@ -950,7 +959,8 @@
(b'', b'abandon', False, _(b'abandon revisions')),
(b'', b'reclaim', False, _(b'reclaim revisions')),
(b'm', b'comment', b'', _(b'comment on the last revision')),
- ], _(b'DREVSPEC [OPTIONS]'))
+ ], _(b'DREVSPEC [OPTIONS]'),
+ helpcategory=command.CATEGORY_IMPORT_EXPORT)
def phabupdate(ui, repo, spec, **opts):
"""update Differential Revision in batch
@@ -987,3 +997,17 @@
b'url': m.group(b'url'),
b'id': b"D{}".format(m.group(b'id')),
})
+ else:
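+ # the commit message carries no Differential Revision URL; fall
+ # back to a local tag of the form "D123", if present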
+ tags = ctx.repo().nodetags(ctx.node())
+ for t in tags:
+ if _differentialrevisiontagre.match(t):
+ url = ctx.repo().ui.config(b'phabricator', b'url')
+ if not url.endswith(b'/'):
+ url += b'/'
+ url += t
+
+ return templateutil.hybriddict({
+ b'url': url,
+ b'id': t,
+ })
+ return None
--- a/hgext/rebase.py Wed Jan 09 20:00:35 2019 -0800
+++ b/hgext/rebase.py Fri Jan 18 13:28:22 2019 -0500
@@ -177,7 +177,7 @@
if e:
self.extrafns = [e]
- self.backupf = ui.configbool('ui', 'history-editing-backup')
+ self.backupf = ui.configbool('rewrite', 'backup-bundle')
self.keepf = opts.get('keep', False)
self.keepbranchesf = opts.get('keepbranches', False)
self.obsoletenotrebased = {}
@@ -347,9 +347,7 @@
if isabort:
backup = backup and self.backupf
- return abort(self.repo, self.originalwd, self.destmap, self.state,
- activebookmark=self.activebookmark, backup=backup,
- suppwarns=suppwarns)
+ return self._abort(backup=backup, suppwarns=suppwarns)
def _preparenewrebase(self, destmap):
if not destmap:
@@ -404,7 +402,9 @@
else:
self.wctx = self.repo[None]
self.repo.ui.debug("rebasing on disk\n")
- self.repo.ui.log("rebase", "", rebase_imm_used=self.inmemory)
+ self.repo.ui.log("rebase",
+ "using in-memory rebase: %r\n", self.inmemory,
+ rebase_imm_used=self.inmemory)
def _performrebase(self, tr):
self._assignworkingcopy()
@@ -573,8 +573,8 @@
ui.debug('rebased as %s\n' % short(newnode))
else:
if not self.collapsef:
- ui.warn(_('note: rebase of %d:%s created no changes '
- 'to commit\n') % (rev, ctx))
+ ui.warn(_('note: not rebasing %s, its destination already '
+ 'has all its changes\n') % desc)
self.skipped.add(rev)
self.state[rev] = p1
ui.debug('next revision set to %d\n' % p1)
@@ -651,6 +651,63 @@
repo['.'].node() == repo._bookmarks[self.activebookmark]):
bookmarks.activate(repo, self.activebookmark)
+ def _abort(self, backup=True, suppwarns=False):
+ '''Restore the repository to its original state.'''
+
+ repo = self.repo
+ try:
+ # If the first commits in the rebased set get skipped during the
+ # rebase, their values within the state mapping will be the dest
+ # rev id. The rebased list must must not contain the dest rev
+ # (issue4896)
+ rebased = [s for r, s in self.state.items()
+ if s >= 0 and s != r and s != self.destmap[r]]
+ immutable = [d for d in rebased if not repo[d].mutable()]
+ cleanup = True
+ if immutable:
+ repo.ui.warn(_("warning: can't clean up public changesets %s\n")
+ % ', '.join(bytes(repo[r]) for r in immutable),
+ hint=_("see 'hg help phases' for details"))
+ cleanup = False
+
+ descendants = set()
+ if rebased:
+ descendants = set(repo.changelog.descendants(rebased))
+ if descendants - set(rebased):
+ repo.ui.warn(_("warning: new changesets detected on "
+ "destination branch, can't strip\n"))
+ cleanup = False
+
+ if cleanup:
+ shouldupdate = False
+ if rebased:
+ strippoints = [
+ c.node() for c in repo.set('roots(%ld)', rebased)]
+
+ updateifonnodes = set(rebased)
+ updateifonnodes.update(self.destmap.values())
+ updateifonnodes.add(self.originalwd)
+ shouldupdate = repo['.'].rev() in updateifonnodes
+
+ # Update away from the rebase if necessary
+ if shouldupdate or needupdate(repo, self.state):
+ mergemod.update(repo, self.originalwd, branchmerge=False,
+ force=True)
+
+ # Strip from the first rebased revision
+ if rebased:
+ repair.strip(repo.ui, repo, strippoints, backup=backup)
+
+ if self.activebookmark and self.activebookmark in repo._bookmarks:
+ bookmarks.activate(repo, self.activebookmark)
+
+ finally:
+ clearstatus(repo)
+ clearcollapsemsg(repo)
+ if not suppwarns:
+ repo.ui.warn(_('rebase aborted\n'))
+ return 0
+
@command('rebase',
[('s', 'source', '',
_('rebase the specified changeset and descendants'), _('REV')),
@@ -1080,7 +1137,8 @@
return None
rebasingwcp = repo['.'].rev() in rebaseset
- ui.log("rebase", "", rebase_rebasing_wcp=rebasingwcp)
+ ui.log("rebase", "rebasing working copy parent: %r\n", rebasingwcp,
+ rebase_rebasing_wcp=rebasingwcp)
if inmemory and rebasingwcp:
# Check these since we did not before.
cmdutil.checkunfinished(repo)
@@ -1606,64 +1664,6 @@
return False
-def abort(repo, originalwd, destmap, state, activebookmark=None, backup=True,
- suppwarns=False):
- '''Restore the repository to its original state. Additional args:
-
- activebookmark: the name of the bookmark that should be active after the
- restore'''
-
- try:
- # If the first commits in the rebased set get skipped during the rebase,
- # their values within the state mapping will be the dest rev id. The
- # rebased list must must not contain the dest rev (issue4896)
- rebased = [s for r, s in state.items()
- if s >= 0 and s != r and s != destmap[r]]
- immutable = [d for d in rebased if not repo[d].mutable()]
- cleanup = True
- if immutable:
- repo.ui.warn(_("warning: can't clean up public changesets %s\n")
- % ', '.join(bytes(repo[r]) for r in immutable),
- hint=_("see 'hg help phases' for details"))
- cleanup = False
-
- descendants = set()
- if rebased:
- descendants = set(repo.changelog.descendants(rebased))
- if descendants - set(rebased):
- repo.ui.warn(_("warning: new changesets detected on destination "
- "branch, can't strip\n"))
- cleanup = False
-
- if cleanup:
- shouldupdate = False
- if rebased:
- strippoints = [
- c.node() for c in repo.set('roots(%ld)', rebased)]
-
- updateifonnodes = set(rebased)
- updateifonnodes.update(destmap.values())
- updateifonnodes.add(originalwd)
- shouldupdate = repo['.'].rev() in updateifonnodes
-
- # Update away from the rebase if necessary
- if shouldupdate or needupdate(repo, state):
- mergemod.update(repo, originalwd, branchmerge=False, force=True)
-
- # Strip from the first rebased revision
- if rebased:
- repair.strip(repo.ui, repo, strippoints, backup=backup)
-
- if activebookmark and activebookmark in repo._bookmarks:
- bookmarks.activate(repo, activebookmark)
-
- finally:
- clearstatus(repo)
- clearcollapsemsg(repo)
- if not suppwarns:
- repo.ui.warn(_('rebase aborted\n'))
- return 0
-
def sortsource(destmap):
"""yield source revisions in an order that we only rebase things once
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/hgext/remotefilelog/README.md Fri Jan 18 13:28:22 2019 -0500
@@ -0,0 +1,109 @@
+remotefilelog
+=============
+
+The remotefilelog extension allows Mercurial to clone shallow copies of a repository such that all file contents are left on the server and only downloaded on demand by the client. This greatly speeds up clone and pull performance for repositories that have long histories or that are growing quickly.
+
+In addition, the extension allows using a caching layer (such as memcache) to serve the file contents, thus providing better scalability and reducing server load.
+
+Installing
+==========
+
+**NOTE:** See the limitations section below to check if remotefilelog will work for your use case.
+
+remotefilelog can be installed like any other Mercurial extension. Download the source code and add the remotefilelog subdirectory to your `hgrc`:
+
+ :::ini
+ [extensions]
+ remotefilelog=path/to/remotefilelog/remotefilelog
+
+Configuring
+-----------
+
+**Server**
+
+* `server` (required) - Set to 'True' to indicate that the server can serve shallow clones.
+* `serverexpiration` - The server keeps a local cache of recently requested file revision blobs in .hg/remotefilelogcache. This setting specifies how many days they should be kept locally. Defaults to 30.
+
+An example server configuration:
+
+ :::ini
+ [remotefilelog]
+ server = True
+ serverexpiration = 14
+
+**Client**
+
+* `cachepath` (required) - the location to store locally cached file revisions
+* `cachelimit` - the maximum size of the cachepath. By default it's 1000 GB.
+* `cachegroup` - the default unix group for the cachepath. Useful on shared systems so multiple users can read and write to the same cache.
+* `cacheprocess` - the external process that will handle the remote caching layer. If not set, all requests will go to the Mercurial server.
+* `fallbackpath` - the Mercurial repo path to fetch file revisions from. By default it uses the paths.default repo. This setting is useful for cloning from shallow clones and still talking to the central server for file revisions.
+* `includepattern` - a list of regex patterns matching files that should be kept remotely. Defaults to all files.
+* `excludepattern` - a list of regex patterns matching files that should not be kept remotely and should always be downloaded.
+* `pullprefetch` - a revset of commits whose file content should be prefetched after every pull. The most common value for this will be '(bookmark() + head()) & public()'. This is useful in environments where offline work is common, since it will enable offline updating to, rebasing to, and committing on every head and bookmark.
+
+An example client configuration:
+
+ :::ini
+ [remotefilelog]
+ cachepath = /dev/shm/hgcache
+ cachelimit = 2 GB
+
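+A sketch of a client configuration that additionally prefetches public heads and bookmarks after every pull (the revset is the common suggestion from above, not a requirement):
+
+    :::ini
+    [remotefilelog]
+    cachepath = /dev/shm/hgcache
+    cachelimit = 2 GB
+    pullprefetch = (bookmark() + head()) & public()
+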
+Using as a largefiles replacement
+---------------------------------
+
+remotefilelog can theoretically be used as a replacement for the largefiles extension. You can use the `includepattern` setting to specify which directories or file types are considered large and they will be left on the server. Unlike the largefiles extension, this can be done without converting the server repository. Only the client configuration needs to specify the patterns.
+
+The include/exclude settings haven't been extensively tested, so this feature is still considered experimental.
+
+An example largefiles style client configuration:
+
+ :::ini
+ [remotefilelog]
+ cachepath = /dev/shm/hgcache
+ cachelimit = 2 GB
+ includepattern = *.sql3
+ bin/*
+
+Usage
+=====
+
+Once you have configured the server, you can get a shallow clone by doing:
+
+ :::bash
+ hg clone --shallow ssh://server//path/repo
+
+After that, all normal mercurial commands should work.
+
+Occasionally the client or server caches may grow too big. Run `hg gc` to clean up the cache. It removes cached files that appear to no longer be necessary, or any files that exceed the configured maximum size. This does not improve performance; it just frees up space.
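+
+For example (the repository path is illustrative):
+
+    :::bash
+    hg gc                # collect the caches of repos found via $PWD
+    hg gc /path/to/repo  # also consider the given repository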
+
+Limitations
+===========
+
+1. The extension must be used with Mercurial 3.3 (commit d7d08337b3f6) or higher (earlier versions of the extension work with earlier versions of Mercurial though, up to Mercurial 2.7).
+
+2. remotefilelog has only been tested on linux with case-sensitive filesystems. It should work on other unix systems but may have problems on case-insensitive filesystems.
+
+3. remotefilelog only works with ssh-based Mercurial repos. http-based repos are currently not supported, though it shouldn't be too difficult for some motivated individual to implement.
+
+4. Tags are not supported in completely shallow repos. If you use tags in your repo you will have to specify `excludepattern=.hgtags` in your client configuration to ensure that file is downloaded. The include/excludepattern settings are experimental at the moment and have yet to be deployed in a production environment.
+
+5. A few commands will be slower. `hg log <filename>` will be much slower since it has to walk the entire commit history instead of just the filelog. Use `hg log -f <filename>` instead, which remains very fast.
+
+Contributing
+============
+
+Patches are welcome as pull requests, though they will be collapsed and rebased to maintain a linear history. Tests can be run via:
+
+ :::bash
+ cd tests
+ ./run-tests --with-hg=path/to/hgrepo/hg
+
+We (Facebook) have to ask for a "Contributor License Agreement" from someone who sends in a patch or code that we want to include in the codebase. This is a legal requirement; a similar situation applies to Apache and other ASF projects.
+
+If we ask you to fill out a CLA we'll direct you to our [online CLA page](https://developers.facebook.com/opensource/cla) where you can complete it easily. We use the same form as the Apache CLA so that friction is minimal.
+
+License
+=======
+
+remotefilelog is made available under the terms of the GNU General Public License version 2, or any later version. See the COPYING file that accompanies this distribution for the full text of the license.
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/hgext/remotefilelog/__init__.py Fri Jan 18 13:28:22 2019 -0500
@@ -0,0 +1,1143 @@
+# __init__.py - remotefilelog extension
+#
+# Copyright 2013 Facebook, Inc.
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2 or any later version.
+"""remotefilelog causes Mercurial to lazilly fetch file contents (EXPERIMENTAL)
+
+This extension is HIGHLY EXPERIMENTAL. There are NO BACKWARDS COMPATIBILITY
+GUARANTEES. This means that repositories created with this extension may
+only be usable with the exact version of this extension/Mercurial that was
+used. The extension attempts to enforce this in order to prevent repository
+corruption.
+
+remotefilelog works by fetching file contents lazily and storing them
+in a cache on the client rather than in revlogs. This allows enormous
+histories to be transferred only partially, making them easier to
+operate on.
+
+Configs:
+
+ ``packs.maxchainlen`` specifies the maximum delta chain length in pack files
+
+ ``packs.maxpacksize`` specifies the maximum pack file size
+
+ ``packs.maxpackfilecount`` specifies the maximum number of packs in the
+ shared cache (trees only for now)
+
+ ``remotefilelog.backgroundprefetch`` runs prefetch in background when True
+
+ ``remotefilelog.bgprefetchrevs`` specifies revisions to fetch on commit and
+ update, and on other commands that use them. Different from pullprefetch.
+
+ ``remotefilelog.gcrepack`` does garbage collection during repack when True
+
+ ``remotefilelog.nodettl`` specifies maximum TTL of a node in seconds before
+ it is garbage collected
+
+ ``remotefilelog.repackonhggc`` runs repack on hg gc when True
+
+ ``remotefilelog.prefetchdays`` specifies the maximum age of a commit in
+ days after which it is no longer prefetched.
+
+ ``remotefilelog.prefetchdelay`` specifies delay between background
+ prefetches in seconds after operations that change the working copy parent
+
+ ``remotefilelog.data.gencountlimit`` constrains the minimum number of data
+ pack files required to be considered part of a generation. In particular,
+ the minimum number of pack files is > gencountlimit.
+
+ ``remotefilelog.data.generations`` list for specifying the lower bound of
+ each generation of the data pack files. For example, list ['100MB', '1MB']
+ or ['1MB', '100MB'] will lead to three generations: [0, 1MB),
+ [1MB, 100MB) and [100MB, infinity).
+
+ ``remotefilelog.data.maxrepackpacks`` the maximum number of pack files to
+ include in an incremental data repack.
+
+ ``remotefilelog.data.repackmaxpacksize`` the maximum size of a pack file for
+ it to be considered for an incremental data repack.
+
+ ``remotefilelog.data.repacksizelimit`` the maximum total size of pack files
+ to include in an incremental data repack.
+
+ ``remotefilelog.history.gencountlimit`` constrains the minimum number of
+ history pack files required to be considered part of a generation. In
+ particular, the minimum number of pack files is > gencountlimit.
+
+ ``remotefilelog.history.generations`` list for specifying the lower bound
+ of each generation of the history pack files. For example, list
+ ['100MB', '1MB'] or ['1MB', '100MB'] will lead to three generations:
+ [0, 1MB), [1MB, 100MB) and [100MB, infinity).
+
+ ``remotefilelog.history.maxrepackpacks`` the maximum number of pack files to
+ include in an incremental history repack.
+
+ ``remotefilelog.history.repackmaxpacksize`` the maximum size of a pack file
+ for it to be considered for an incremental history repack.
+
+ ``remotefilelog.history.repacksizelimit`` the maximum total size of pack
+ files to include in an incremental history repack.
+
+ ``remotefilelog.backgroundrepack`` automatically consolidate packs in the
+ background
+
+ ``remotefilelog.cachepath`` path to cache
+
+ ``remotefilelog.cachegroup`` if set, make cache directory sgid to this
+ group
+
+ ``remotefilelog.cacheprocess`` binary to invoke for fetching file data
+
+ ``remotefilelog.debug`` turn on remotefilelog-specific debug output
+
+ ``remotefilelog.excludepattern`` pattern of files to exclude from pulls
+
+ ``remotefilelog.includepattern`` pattern of files to include in pulls
+
+ ``remotefilelog.fetchwarning`` message to print when too many
+ single-file fetches occur
+
+ ``remotefilelog.getfilesstep`` number of files to request in a single RPC
+
+ ``remotefilelog.getfilestype`` if set to 'threaded' use threads to fetch
+ files, otherwise use optimistic fetching
+
+ ``remotefilelog.pullprefetch`` revset for selecting files that should be
+ eagerly downloaded rather than lazily
+
+ ``remotefilelog.reponame`` name of the repo. If set, used to partition
+ data from other repos in a shared store.
+
+ ``remotefilelog.server`` if true, enable server-side functionality
+
+ ``remotefilelog.servercachepath`` path for caching blobs on the server
+
+ ``remotefilelog.serverexpiration`` number of days to keep cached server
+ blobs
+
+ ``remotefilelog.validatecache`` if set, check cache entries for corruption
+ before returning blobs
+
+ ``remotefilelog.validatecachelog`` if set, check cache entries for
+ corruption before returning metadata
+
+"""
+from __future__ import absolute_import
+
+import os
+import time
+import traceback
+
+from mercurial.node import hex
+from mercurial.i18n import _
+from mercurial import (
+ changegroup,
+ changelog,
+ cmdutil,
+ commands,
+ configitems,
+ context,
+ copies,
+ debugcommands as hgdebugcommands,
+ dispatch,
+ error,
+ exchange,
+ extensions,
+ hg,
+ localrepo,
+ match,
+ merge,
+ node as nodemod,
+ patch,
+ pycompat,
+ registrar,
+ repair,
+ repoview,
+ revset,
+ scmutil,
+ smartset,
+ streamclone,
+ templatekw,
+ util,
+)
+from . import (
+ constants,
+ debugcommands,
+ fileserverclient,
+ remotefilectx,
+ remotefilelog,
+ remotefilelogserver,
+ repack as repackmod,
+ shallowbundle,
+ shallowrepo,
+ shallowstore,
+ shallowutil,
+ shallowverifier,
+)
+
+# ensures debug commands are registered
+hgdebugcommands.command
+
+cmdtable = {}
+command = registrar.command(cmdtable)
+
+configtable = {}
+configitem = registrar.configitem(configtable)
+
+configitem('remotefilelog', 'debug', default=False)
+
+configitem('remotefilelog', 'reponame', default='')
+configitem('remotefilelog', 'cachepath', default=None)
+configitem('remotefilelog', 'cachegroup', default=None)
+configitem('remotefilelog', 'cacheprocess', default=None)
+configitem('remotefilelog', 'cacheprocess.includepath', default=None)
+configitem("remotefilelog", "cachelimit", default="1000 GB")
+
+configitem('remotefilelog', 'fallbackpath', default=configitems.dynamicdefault,
+ alias=[('remotefilelog', 'fallbackrepo')])
+
+configitem('remotefilelog', 'validatecachelog', default=None)
+configitem('remotefilelog', 'validatecache', default='on')
+configitem('remotefilelog', 'server', default=None)
+configitem('remotefilelog', 'servercachepath', default=None)
+configitem("remotefilelog", "serverexpiration", default=30)
+configitem('remotefilelog', 'backgroundrepack', default=False)
+configitem('remotefilelog', 'bgprefetchrevs', default=None)
+configitem('remotefilelog', 'pullprefetch', default=None)
+configitem('remotefilelog', 'backgroundprefetch', default=False)
+configitem('remotefilelog', 'prefetchdelay', default=120)
+configitem('remotefilelog', 'prefetchdays', default=14)
+
+configitem('remotefilelog', 'getfilesstep', default=10000)
+configitem('remotefilelog', 'getfilestype', default='optimistic')
+configitem('remotefilelog', 'batchsize', configitems.dynamicdefault)
+configitem('remotefilelog', 'fetchwarning', default='')
+
+configitem('remotefilelog', 'includepattern', default=None)
+configitem('remotefilelog', 'excludepattern', default=None)
+
+configitem('remotefilelog', 'gcrepack', default=False)
+configitem('remotefilelog', 'repackonhggc', default=False)
+configitem('repack', 'chainorphansbysize', default=True)
+
+configitem('packs', 'maxpacksize', default=0)
+configitem('packs', 'maxchainlen', default=1000)
+
+# default TTL limit is 30 days
+_defaultlimit = 60 * 60 * 24 * 30
+configitem('remotefilelog', 'nodettl', default=_defaultlimit)
+
+configitem('remotefilelog', 'data.gencountlimit', default=2)
+configitem('remotefilelog', 'data.generations',
+ default=['1GB', '100MB', '1MB'])
+configitem('remotefilelog', 'data.maxrepackpacks', default=50)
+configitem('remotefilelog', 'data.repackmaxpacksize', default='4GB')
+configitem('remotefilelog', 'data.repacksizelimit', default='100MB')
+
+configitem('remotefilelog', 'history.gencountlimit', default=2)
+configitem('remotefilelog', 'history.generations', default=['100MB'])
+configitem('remotefilelog', 'history.maxrepackpacks', default=50)
+configitem('remotefilelog', 'history.repackmaxpacksize', default='400MB')
+configitem('remotefilelog', 'history.repacksizelimit', default='100MB')
+
+# Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for
+# extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
+# be specifying the version(s) of Mercurial they are tested with, or
+# leave the attribute unspecified.
+testedwith = 'ships-with-hg-core'
+
+repoclass = localrepo.localrepository
+repoclass._basesupported.add(constants.SHALLOWREPO_REQUIREMENT)
+
+isenabled = shallowutil.isenabled
+
+def uisetup(ui):
+ """Wraps user facing Mercurial commands to swap them out with shallow
+ versions.
+ """
+ hg.wirepeersetupfuncs.append(fileserverclient.peersetup)
+
+ entry = extensions.wrapcommand(commands.table, 'clone', cloneshallow)
+ entry[1].append(('', 'shallow', None,
+ _("create a shallow clone which uses remote file "
+ "history")))
+
+ extensions.wrapcommand(commands.table, 'debugindex',
+ debugcommands.debugindex)
+ extensions.wrapcommand(commands.table, 'debugindexdot',
+ debugcommands.debugindexdot)
+ extensions.wrapcommand(commands.table, 'log', log)
+ extensions.wrapcommand(commands.table, 'pull', pull)
+
+ # Prevent 'hg manifest --all'
+ def _manifest(orig, ui, repo, *args, **opts):
+ if (isenabled(repo) and opts.get(r'all')):
+ raise error.Abort(_("--all is not supported in a shallow repo"))
+
+ return orig(ui, repo, *args, **opts)
+ extensions.wrapcommand(commands.table, "manifest", _manifest)
+
+ # Wrap remotefilelog with lfs code
+ def _lfsloaded(loaded=False):
+ lfsmod = None
+ try:
+ lfsmod = extensions.find('lfs')
+ except KeyError:
+ pass
+ if lfsmod:
+ lfsmod.wrapfilelog(remotefilelog.remotefilelog)
+ fileserverclient._lfsmod = lfsmod
+ extensions.afterloaded('lfs', _lfsloaded)
+
+ # debugdata needs remotefilelog.len to work
+ extensions.wrapcommand(commands.table, 'debugdata', debugdatashallow)
+
+def cloneshallow(orig, ui, repo, *args, **opts):
+ if opts.get(r'shallow'):
+ repos = []
+ def pull_shallow(orig, self, *args, **kwargs):
+ if not isenabled(self):
+ repos.append(self.unfiltered())
+ # set up the client hooks so the post-clone update works
+ setupclient(self.ui, self.unfiltered())
+
+ # setupclient fixed the class on the repo itself
+ # but we also need to fix it on the repoview
+ if isinstance(self, repoview.repoview):
+ self.__class__.__bases__ = (self.__class__.__bases__[0],
+ self.unfiltered().__class__)
+ self.requirements.add(constants.SHALLOWREPO_REQUIREMENT)
+ self._writerequirements()
+
+ # Since setupclient hadn't been called, exchange.pull was not
+ # wrapped. So we need to manually invoke our version of it.
+ return exchangepull(orig, self, *args, **kwargs)
+ else:
+ return orig(self, *args, **kwargs)
+ extensions.wrapfunction(exchange, 'pull', pull_shallow)
+
+ # Wrap the stream logic to add requirements and to pass include/exclude
+ # patterns around.
+ def setup_streamout(repo, remote):
+ # Replace remote.stream_out with a version that sends file
+ # patterns.
+ def stream_out_shallow(orig):
+ caps = remote.capabilities()
+ if constants.NETWORK_CAP_LEGACY_SSH_GETFILES in caps:
+ opts = {}
+ if repo.includepattern:
+ opts[r'includepattern'] = '\0'.join(repo.includepattern)
+ if repo.excludepattern:
+ opts[r'excludepattern'] = '\0'.join(repo.excludepattern)
+ return remote._callstream('stream_out_shallow', **opts)
+ else:
+ return orig()
+ extensions.wrapfunction(remote, 'stream_out', stream_out_shallow)
+ def stream_wrap(orig, op):
+ setup_streamout(op.repo, op.remote)
+ return orig(op)
+ extensions.wrapfunction(
+ streamclone, 'maybeperformlegacystreamclone', stream_wrap)
+
+ def canperformstreamclone(orig, pullop, bundle2=False):
+ # remotefilelog is currently incompatible with the
+ # bundle2 flavor of streamclones, so force us to use
+ # v1 instead.
+ if 'v2' in pullop.remotebundle2caps.get('stream', []):
+ pullop.remotebundle2caps['stream'] = [
+ c for c in pullop.remotebundle2caps['stream']
+ if c != 'v2']
+ if bundle2:
+ return False, None
+ supported, requirements = orig(pullop, bundle2=bundle2)
+ if requirements is not None:
+ requirements.add(constants.SHALLOWREPO_REQUIREMENT)
+ return supported, requirements
+ extensions.wrapfunction(
+ streamclone, 'canperformstreamclone', canperformstreamclone)
+
+ try:
+ orig(ui, repo, *args, **opts)
+ finally:
+ if opts.get(r'shallow'):
+ for r in repos:
+ if util.safehasattr(r, 'fileservice'):
+ r.fileservice.close()
+
+def debugdatashallow(orig, *args, **kwds):
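+ # remotefilelog revlogs do not know their full length locally, so
+ # temporarily report a length of 1 to keep debugdata working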
+ oldlen = remotefilelog.remotefilelog.__len__
+ try:
+ remotefilelog.remotefilelog.__len__ = lambda x: 1
+ return orig(*args, **kwds)
+ finally:
+ remotefilelog.remotefilelog.__len__ = oldlen
+
+def reposetup(ui, repo):
+ if not repo.local():
+ return
+
+ # put here intentionally because it doesn't work in uisetup
+ ui.setconfig('hooks', 'update.prefetch', wcpprefetch)
+ ui.setconfig('hooks', 'commit.prefetch', wcpprefetch)
+
+ isserverenabled = ui.configbool('remotefilelog', 'server')
+ isshallowclient = isenabled(repo)
+
+ if isserverenabled and isshallowclient:
+ raise RuntimeError("Cannot be both a server and shallow client.")
+
+ if isshallowclient:
+ setupclient(ui, repo)
+
+ if isserverenabled:
+ remotefilelogserver.setupserver(ui, repo)
+
+def setupclient(ui, repo):
+ if not isinstance(repo, localrepo.localrepository):
+ return
+
+ # Even clients get the server setup since they need to have the
+ # wireprotocol endpoints registered.
+ remotefilelogserver.onetimesetup(ui)
+ onetimeclientsetup(ui)
+
+ shallowrepo.wraprepo(repo)
+ repo.store = shallowstore.wrapstore(repo.store)
+
+clientonetime = False
+def onetimeclientsetup(ui):
+ global clientonetime
+ if clientonetime:
+ return
+ clientonetime = True
+
+ changegroup.cgpacker = shallowbundle.shallowcg1packer
+
+ extensions.wrapfunction(changegroup, '_addchangegroupfiles',
+ shallowbundle.addchangegroupfiles)
+ extensions.wrapfunction(
+ changegroup, 'makechangegroup', shallowbundle.makechangegroup)
+
+ def storewrapper(orig, requirements, path, vfstype):
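+ # swap in the shallow-aware store class for shallow repos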
+ s = orig(requirements, path, vfstype)
+ if constants.SHALLOWREPO_REQUIREMENT in requirements:
+ s = shallowstore.wrapstore(s)
+
+ return s
+ extensions.wrapfunction(localrepo, 'makestore', storewrapper)
+
+ extensions.wrapfunction(exchange, 'pull', exchangepull)
+
+ # prefetch files before update
+ def applyupdates(orig, repo, actions, wctx, mctx, overwrite, labels=None):
+ if isenabled(repo):
+ manifest = mctx.manifest()
+ files = []
+ for f, args, msg in actions['g']:
+ files.append((f, hex(manifest[f])))
+ # batch fetch the needed files from the server
+ repo.fileservice.prefetch(files)
+ return orig(repo, actions, wctx, mctx, overwrite, labels=labels)
+ extensions.wrapfunction(merge, 'applyupdates', applyupdates)
+
+ # Prefetch merge checkunknownfiles
+ def checkunknownfiles(orig, repo, wctx, mctx, force, actions,
+ *args, **kwargs):
+ if isenabled(repo):
+ files = []
+ sparsematch = repo.maybesparsematch(mctx.rev())
+ for f, (m, actionargs, msg) in actions.iteritems():
+ if sparsematch and not sparsematch(f):
+ continue
+ if m in ('c', 'dc', 'cm'):
+ files.append((f, hex(mctx.filenode(f))))
+ elif m == 'dg':
+ f2 = actionargs[0]
+ files.append((f2, hex(mctx.filenode(f2))))
+ # batch fetch the needed files from the server
+ repo.fileservice.prefetch(files)
+ return orig(repo, wctx, mctx, force, actions, *args, **kwargs)
+ extensions.wrapfunction(merge, '_checkunknownfiles', checkunknownfiles)
+
+ # Prefetch files before status attempts to look at their size and contents
+ def checklookup(orig, self, files):
+ repo = self._repo
+ if isenabled(repo):
+ prefetchfiles = []
+ for parent in self._parents:
+ for f in files:
+ if f in parent:
+ prefetchfiles.append((f, hex(parent.filenode(f))))
+ # batch fetch the needed files from the server
+ repo.fileservice.prefetch(prefetchfiles)
+ return orig(self, files)
+ extensions.wrapfunction(context.workingctx, '_checklookup', checklookup)
+
+ # Prefetch the logic that compares added and removed files for renames
+ def findrenames(orig, repo, matcher, added, removed, *args, **kwargs):
+ if isenabled(repo):
+ files = []
+ parentctx = repo['.']
+ for f in removed:
+ files.append((f, hex(parentctx.filenode(f))))
+ # batch fetch the needed files from the server
+ repo.fileservice.prefetch(files)
+ return orig(repo, matcher, added, removed, *args, **kwargs)
+ extensions.wrapfunction(scmutil, '_findrenames', findrenames)
+
+ # prefetch files before mergecopies check
+ def computenonoverlap(orig, repo, c1, c2, *args, **kwargs):
+ u1, u2 = orig(repo, c1, c2, *args, **kwargs)
+ if isenabled(repo):
+ m1 = c1.manifest()
+ m2 = c2.manifest()
+ files = []
+
+ sparsematch1 = repo.maybesparsematch(c1.rev())
+ if sparsematch1:
+ sparseu1 = []
+ for f in u1:
+ if sparsematch1(f):
+ files.append((f, hex(m1[f])))
+ sparseu1.append(f)
+ u1 = sparseu1
+
+ sparsematch2 = repo.maybesparsematch(c2.rev())
+ if sparsematch2:
+ sparseu2 = []
+ for f in u2:
+ if sparsematch2(f):
+ files.append((f, hex(m2[f])))
+ sparseu2.append(f)
+ u2 = sparseu2
+
+ # batch fetch the needed files from the server
+ repo.fileservice.prefetch(files)
+ return u1, u2
+ extensions.wrapfunction(copies, '_computenonoverlap', computenonoverlap)
+
+ # prefetch files before pathcopies check
+ def computeforwardmissing(orig, a, b, match=None):
+ missing = list(orig(a, b, match=match))
+ repo = a._repo
+ if isenabled(repo):
+ mb = b.manifest()
+
+ files = []
+ sparsematch = repo.maybesparsematch(b.rev())
+ if sparsematch:
+ sparsemissing = []
+ for f in missing:
+ if sparsematch(f):
+ files.append((f, hex(mb[f])))
+ sparsemissing.append(f)
+ missing = sparsemissing
+
+ # batch fetch the needed files from the server
+ repo.fileservice.prefetch(files)
+ return missing
+ extensions.wrapfunction(copies, '_computeforwardmissing',
+ computeforwardmissing)
+
+ # close cache miss server connection after the command has finished
+ def runcommand(orig, lui, repo, *args, **kwargs):
+ fileservice = None
+ # repo can be None when running in chg:
+ # - at startup, reposetup was called because serve is not norepo
+ # - a norepo command like "help" is called
+ if repo and isenabled(repo):
+ fileservice = repo.fileservice
+ try:
+ return orig(lui, repo, *args, **kwargs)
+ finally:
+ if fileservice:
+ fileservice.close()
+ extensions.wrapfunction(dispatch, 'runcommand', runcommand)
+
+ # disappointing hacks below
+ templatekw.getrenamedfn = getrenamedfn
+ extensions.wrapfunction(revset, 'filelog', filelogrevset)
+ revset.symbols['filelog'] = revset.filelog
+ extensions.wrapfunction(cmdutil, 'walkfilerevs', walkfilerevs)
+
+ # prevent strip from stripping remotefilelogs
+ def _collectbrokencsets(orig, repo, files, striprev):
+ if isenabled(repo):
+ files = [f for f in files if not repo.shallowmatch(f)]
+ return orig(repo, files, striprev)
+ extensions.wrapfunction(repair, '_collectbrokencsets', _collectbrokencsets)
+
+ # Don't commit filelogs until we know the commit hash, since the hash
+ # is present in the filelog blob.
+ # This violates Mercurial's filelog->manifest->changelog write order,
+ # but is generally fine for client repos.
+ pendingfilecommits = []
+ def addrawrevision(orig, self, rawtext, transaction, link, p1, p2, node,
+ flags, cachedelta=None, _metatuple=None):
+ if isinstance(link, int):
+ pendingfilecommits.append(
+ (self, rawtext, transaction, link, p1, p2, node, flags,
+ cachedelta, _metatuple))
+ return node
+ else:
+ return orig(self, rawtext, transaction, link, p1, p2, node, flags,
+ cachedelta, _metatuple=_metatuple)
+ extensions.wrapfunction(
+ remotefilelog.remotefilelog, 'addrawrevision', addrawrevision)
+
+ def changelogadd(orig, self, *args):
+ oldlen = len(self)
+ node = orig(self, *args)
+ newlen = len(self)
+ if oldlen != newlen:
+ for oldargs in pendingfilecommits:
+ log, rt, tr, link, p1, p2, n, fl, c, m = oldargs
+ linknode = self.node(link)
+ if linknode == node:
+ log.addrawrevision(rt, tr, linknode, p1, p2, n, fl, c, m)
+ else:
+ raise error.ProgrammingError(
+ 'pending multiple integer revisions are not supported')
+ else:
+ # "link" is actually wrong here (it is set to len(changelog))
+ # if changelog remains unchanged, skip writing file revisions
+ # but still do a sanity check about pending multiple revisions
+ if len(set(x[3] for x in pendingfilecommits)) > 1:
+ raise error.ProgrammingError(
+ 'pending multiple integer revisions are not supported')
+ del pendingfilecommits[:]
+ return node
+ extensions.wrapfunction(changelog.changelog, 'add', changelogadd)
+
+ # changectx wrappers
+ def filectx(orig, self, path, fileid=None, filelog=None):
+ if fileid is None:
+ fileid = self.filenode(path)
+ if (isenabled(self._repo) and self._repo.shallowmatch(path)):
+ return remotefilectx.remotefilectx(self._repo, path,
+ fileid=fileid, changectx=self, filelog=filelog)
+ return orig(self, path, fileid=fileid, filelog=filelog)
+ extensions.wrapfunction(context.changectx, 'filectx', filectx)
+
+ def workingfilectx(orig, self, path, filelog=None):
+ if (isenabled(self._repo) and self._repo.shallowmatch(path)):
+ return remotefilectx.remoteworkingfilectx(self._repo,
+ path, workingctx=self, filelog=filelog)
+ return orig(self, path, filelog=filelog)
+ extensions.wrapfunction(context.workingctx, 'filectx', workingfilectx)
+
+ # prefetch required revisions before a diff
+ def trydiff(orig, repo, revs, ctx1, ctx2, modified, added, removed,
+ copy, getfilectx, *args, **kwargs):
+ if isenabled(repo):
+ prefetch = []
+ mf1 = ctx1.manifest()
+ for fname in modified + added + removed:
+ if fname in mf1:
+ fnode = getfilectx(fname, ctx1).filenode()
+ # fnode can be None if it's an edited working ctx file
+ if fnode:
+ prefetch.append((fname, hex(fnode)))
+ if fname not in removed:
+ fnode = getfilectx(fname, ctx2).filenode()
+ if fnode:
+ prefetch.append((fname, hex(fnode)))
+
+ repo.fileservice.prefetch(prefetch)
+
+ return orig(repo, revs, ctx1, ctx2, modified, added, removed,
+ copy, getfilectx, *args, **kwargs)
+ extensions.wrapfunction(patch, 'trydiff', trydiff)
+
+ # Prevent verify from processing files
+ # a stub for mercurial.hg.verify()
+ def _verify(orig, repo):
+ lock = repo.lock()
+ try:
+ return shallowverifier.shallowverifier(repo).verify()
+ finally:
+ lock.release()
+
+ extensions.wrapfunction(hg, 'verify', _verify)
+
+ scmutil.fileprefetchhooks.add('remotefilelog', _fileprefetchhook)
+
+def getrenamedfn(repo, endrev=None):
+ rcache = {}
+
+ def getrenamed(fn, rev):
+ '''looks up all renames for a file (up to endrev) the first
+ time the file is given. It indexes on the changerev and only
+ parses the manifest if linkrev != changerev.
+ Returns rename info for fn at changerev rev.'''
+ if rev in rcache.setdefault(fn, {}):
+ return rcache[fn][rev]
+
+ try:
+ fctx = repo[rev].filectx(fn)
+ for ancestor in fctx.ancestors():
+ if ancestor.path() == fn:
+ renamed = ancestor.renamed()
+ rcache[fn][ancestor.rev()] = renamed and renamed[0]
+
+ renamed = fctx.renamed()
+ return renamed and renamed[0]
+ except error.LookupError:
+ return None
+
+ return getrenamed
+
+def walkfilerevs(orig, repo, match, follow, revs, fncache):
+ if not isenabled(repo):
+ return orig(repo, match, follow, revs, fncache)
+
+ # remotefilelogs can't be walked in rev order, so throw.
+ # The caller will see the exception and walk the commit tree instead.
+ if not follow:
+ raise cmdutil.FileWalkError("Cannot walk via filelog")
+
+ wanted = set()
+ minrev, maxrev = min(revs), max(revs)
+
+ pctx = repo['.']
+ for filename in match.files():
+ if filename not in pctx:
+ raise error.Abort(_('cannot follow file not in parent '
+ 'revision: "%s"') % filename)
+ fctx = pctx[filename]
+
+ linkrev = fctx.linkrev()
+ if linkrev >= minrev and linkrev <= maxrev:
+ fncache.setdefault(linkrev, []).append(filename)
+ wanted.add(linkrev)
+
+ for ancestor in fctx.ancestors():
+ linkrev = ancestor.linkrev()
+ if linkrev >= minrev and linkrev <= maxrev:
+ fncache.setdefault(linkrev, []).append(ancestor.path())
+ wanted.add(linkrev)
+
+ return wanted
+
+def filelogrevset(orig, repo, subset, x):
+ """``filelog(pattern)``
+ Changesets connected to the specified filelog.
+
+ For performance reasons, ``filelog()`` does not show every changeset
+ that affects the requested file(s). See :hg:`help log` for details. For
+ a slower, more accurate result, use ``file()``.
+ """
+
+ if not isenabled(repo):
+ return orig(repo, subset, x)
+
+ # i18n: "filelog" is a keyword
+ pat = revset.getstring(x, _("filelog requires a pattern"))
+ m = match.match(repo.root, repo.getcwd(), [pat], default='relpath',
+ ctx=repo[None])
+ s = set()
+
+ if not match.patkind(pat):
+ # slow
+ for r in subset:
+ ctx = repo[r]
+ cfiles = ctx.files()
+ for f in m.files():
+ if f in cfiles:
+ s.add(ctx.rev())
+ break
+ else:
+ # partial
+ files = (f for f in repo[None] if m(f))
+ for f in files:
+ fctx = repo[None].filectx(f)
+ s.add(fctx.linkrev())
+ for actx in fctx.ancestors():
+ s.add(actx.linkrev())
+
+ return smartset.baseset([r for r in subset if r in s])
+
+@command('gc', [], _('hg gc [REPO...]'), norepo=True)
+def gc(ui, *args, **opts):
+ '''garbage collect the client and server filelog caches
+ '''
+ cachepaths = set()
+
+ # get the system client cache
+ systemcache = shallowutil.getcachepath(ui, allowempty=True)
+ if systemcache:
+ cachepaths.add(systemcache)
+
+ # get repo client and server cache
+ repopaths = []
+ pwd = ui.environ.get('PWD')
+ if pwd:
+ repopaths.append(pwd)
+
+ repopaths.extend(args)
+ repos = []
+ for repopath in repopaths:
+ try:
+ repo = hg.peer(ui, {}, repopath)
+ repos.append(repo)
+
+ repocache = shallowutil.getcachepath(repo.ui, allowempty=True)
+ if repocache:
+ cachepaths.add(repocache)
+ except error.RepoError:
+ pass
+
+ # gc client cache
+ for cachepath in cachepaths:
+ gcclient(ui, cachepath)
+
+ # gc server cache
+ for repo in repos:
+ remotefilelogserver.gcserver(ui, repo._repo)
+
+def gcclient(ui, cachepath):
+ # get list of repos that use this cache
+ repospath = os.path.join(cachepath, 'repos')
+ if not os.path.exists(repospath):
+ ui.warn(_("no known cache at %s\n") % cachepath)
+ return
+
+ reposfile = open(repospath, 'rb')
+ repos = set([r[:-1] for r in reposfile.readlines()])
+ reposfile.close()
+
+ # build list of useful files
+ validrepos = []
+ keepkeys = set()
+
+ sharedcache = None
+ filesrepacked = False
+
+ count = 0
+ progress = ui.makeprogress(_("analyzing repositories"), unit="repos",
+ total=len(repos))
+ for path in repos:
+ progress.update(count)
+ count += 1
+ try:
+ path = ui.expandpath(os.path.normpath(path))
+ except TypeError as e:
+ ui.warn(_("warning: malformed path: %r:%s\n") % (path, e))
+ traceback.print_exc()
+ continue
+ try:
+ peer = hg.peer(ui, {}, path)
+ repo = peer._repo
+ except error.RepoError:
+ continue
+
+ validrepos.append(path)
+
+ # Protect against any repo or config changes that have happened since
+ # this repo was added to the repos file. We'd rather this loop succeed
+ # and delete too much than fail and delete nothing.
+ if not isenabled(repo):
+ continue
+
+ if not util.safehasattr(repo, 'name'):
+ ui.warn(_("repo %s is a misconfigured remotefilelog repo\n") % path)
+ continue
+
+ # If garbage collection on repack and repack on hg gc are enabled
+ # then loose files are repacked and garbage collected.
+ # Otherwise regular garbage collection is performed.
+ repackonhggc = repo.ui.configbool('remotefilelog', 'repackonhggc')
+ gcrepack = repo.ui.configbool('remotefilelog', 'gcrepack')
+ if repackonhggc and gcrepack:
+ try:
+ repackmod.incrementalrepack(repo)
+ filesrepacked = True
+ continue
+ except (IOError, repackmod.RepackAlreadyRunning):
+ # If repack cannot be performed due to not enough disk space
+ # continue doing garbage collection of loose files w/o repack
+ pass
+
+ reponame = repo.name
+ if not sharedcache:
+ sharedcache = repo.sharedstore
+
+ # Compute a keepset which is not garbage collected
+ def keyfn(fname, fnode):
+ return fileserverclient.getcachekey(reponame, fname, hex(fnode))
+ keepkeys = repackmod.keepset(repo, keyfn=keyfn, lastkeepkeys=keepkeys)
+
+ progress.complete()
+
+ # write list of valid repos back
+ oldumask = os.umask(0o002)
+ try:
+ reposfile = open(repospath, 'wb')
+ reposfile.writelines([("%s\n" % r) for r in validrepos])
+ reposfile.close()
+ finally:
+ os.umask(oldumask)
+
+ # prune cache
+ if sharedcache is not None:
+ sharedcache.gc(keepkeys)
+ elif not filesrepacked:
+ ui.warn(_("warning: no valid repos in repofile\n"))
+
+def log(orig, ui, repo, *pats, **opts):
+ if not isenabled(repo):
+ return orig(ui, repo, *pats, **opts)
+
+ follow = opts.get(r'follow')
+ revs = opts.get(r'rev')
+ if pats:
+ # Force slowpath for non-follow patterns and follows that start from
+ # non-working-copy-parent revs.
+ if not follow or revs:
+ # This forces the slowpath
+ opts[r'removed'] = True
+
+ # If this is a non-follow log without any revs specified, recommend that
+ # the user add -f to speed it up.
+ if not follow and not revs:
+ match, pats = scmutil.matchandpats(repo['.'], pats,
+ pycompat.byteskwargs(opts))
+ isfile = not match.anypats()
+ if isfile:
+ for file in match.files():
+ if not os.path.isfile(repo.wjoin(file)):
+ isfile = False
+ break
+
+ if isfile:
+ ui.warn(_("warning: file log can be slow on large repos - " +
+ "use -f to speed it up\n"))
+
+ return orig(ui, repo, *pats, **opts)
+
+def revdatelimit(ui, revset):
+ """Update revset so that only changesets no older than 'prefetchdays' days
+ are included. The default value is set to 14 days. If 'prefetchdays' is set
+ to zero or a negative value, the date restriction is not applied.
+ """
+ days = ui.configint('remotefilelog', 'prefetchdays')
+ if days > 0:
+ revset = '(%s) & date(-%s)' % (revset, days)
+ return revset
+
+def readytofetch(repo):
+ """Check that enough time has passed since the last background prefetch.
+ This only relates to prefetches after operations that change the working
+ copy parent. Default delay between background prefetches is 2 minutes.
+ """
+ timeout = repo.ui.configint('remotefilelog', 'prefetchdelay')
+ fname = repo.vfs.join('lastprefetch')
+
+ ready = False
+ with open(fname, 'a'):
+ # the with construct above is used to avoid race conditions
+ modtime = os.path.getmtime(fname)
+ if (time.time() - modtime) > timeout:
+ os.utime(fname, None)
+ ready = True
+
+ return ready
+
+def wcpprefetch(ui, repo, **kwargs):
+ """Prefetches in background revisions specified by bgprefetchrevs revset.
+ Does background repack if backgroundrepack flag is set in config.
+ """
+ shallow = isenabled(repo)
+ bgprefetchrevs = ui.config('remotefilelog', 'bgprefetchrevs')
+ isready = readytofetch(repo)
+
+ if not (shallow and bgprefetchrevs and isready):
+ return
+
+ bgrepack = repo.ui.configbool('remotefilelog', 'backgroundrepack')
+ # update a revset with a date limit
+ bgprefetchrevs = revdatelimit(ui, bgprefetchrevs)
+
+ def anon():
+ if util.safehasattr(repo, 'ranprefetch') and repo.ranprefetch:
+ return
+ repo.ranprefetch = True
+ repo.backgroundprefetch(bgprefetchrevs, repack=bgrepack)
+
+ repo._afterlock(anon)
+
+def pull(orig, ui, repo, *pats, **opts):
+ result = orig(ui, repo, *pats, **opts)
+
+ if isenabled(repo):
+ # prefetch if it's configured
+ prefetchrevset = ui.config('remotefilelog', 'pullprefetch')
+ bgrepack = repo.ui.configbool('remotefilelog', 'backgroundrepack')
+ bgprefetch = repo.ui.configbool('remotefilelog', 'backgroundprefetch')
+
+ if prefetchrevset:
+ ui.status(_("prefetching file contents\n"))
+ revs = scmutil.revrange(repo, [prefetchrevset])
+ base = repo['.'].rev()
+ if bgprefetch:
+ repo.backgroundprefetch(prefetchrevset, repack=bgrepack)
+ else:
+ repo.prefetch(revs, base=base)
+ if bgrepack:
+ repackmod.backgroundrepack(repo, incremental=True)
+ elif bgrepack:
+ repackmod.backgroundrepack(repo, incremental=True)
+
+ return result
+
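+# Illustrative hgrc for the pull-time prefetch handled above (a sketch; the
+# option names are the ones read in pull(), and the revset value is only an
+# example):
+#
+#   [remotefilelog]
+#   pullprefetch = bookmark() + parents()
+#   backgroundprefetch = True
+#   backgroundrepack = True
+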
+def exchangepull(orig, repo, remote, *args, **kwargs):
+ # Hook into the callstream/getbundle to insert bundle capabilities
+ # during a pull.
+ def localgetbundle(orig, source, heads=None, common=None, bundlecaps=None,
+ **kwargs):
+ if not bundlecaps:
+ bundlecaps = set()
+ bundlecaps.add(constants.BUNDLE2_CAPABLITY)
+ return orig(source, heads=heads, common=common, bundlecaps=bundlecaps,
+ **kwargs)
+
+ if util.safehasattr(remote, '_callstream'):
+ remote._localrepo = repo
+ elif util.safehasattr(remote, 'getbundle'):
+ extensions.wrapfunction(remote, 'getbundle', localgetbundle)
+
+ return orig(repo, remote, *args, **kwargs)
+
+def _fileprefetchhook(repo, revs, match):
+ if isenabled(repo):
+ allfiles = []
+ for rev in revs:
+ if rev == nodemod.wdirrev or rev is None:
+ continue
+ ctx = repo[rev]
+ mf = ctx.manifest()
+ sparsematch = repo.maybesparsematch(ctx.rev())
+ for path in ctx.walk(match):
+ if path.endswith('/'):
+ # Tree manifest that's being excluded as part of narrow
+ continue
+ if (not sparsematch or sparsematch(path)) and path in mf:
+ allfiles.append((path, hex(mf[path])))
+ repo.fileservice.prefetch(allfiles)
+
+@command('debugremotefilelog', [
+ ('d', 'decompress', None, _('decompress the filelog first')),
+ ], _('hg debugremotefilelog <path>'), norepo=True)
+def debugremotefilelog(ui, path, **opts):
+ return debugcommands.debugremotefilelog(ui, path, **opts)
+
+@command('verifyremotefilelog', [
+ ('d', 'decompress', None, _('decompress the filelogs first')),
+    ], _('hg verifyremotefilelog <directory>'), norepo=True)
+def verifyremotefilelog(ui, path, **opts):
+ return debugcommands.verifyremotefilelog(ui, path, **opts)
+
+@command('debugdatapack', [
+ ('', 'long', None, _('print the long hashes')),
+ ('', 'node', '', _('dump the contents of node'), 'NODE'),
+ ], _('hg debugdatapack <paths>'), norepo=True)
+def debugdatapack(ui, *paths, **opts):
+ return debugcommands.debugdatapack(ui, *paths, **opts)
+
+@command('debughistorypack', [
+ ], _('hg debughistorypack <path>'), norepo=True)
+def debughistorypack(ui, path, **opts):
+ return debugcommands.debughistorypack(ui, path)
+
+@command('debugkeepset', [
+ ], _('hg debugkeepset'))
+def debugkeepset(ui, repo, **opts):
+ # The command is used to measure keepset computation time
+ def keyfn(fname, fnode):
+ return fileserverclient.getcachekey(repo.name, fname, hex(fnode))
+ repackmod.keepset(repo, keyfn)
+ return
+
+@command('debugwaitonrepack', [
+ ], _('hg debugwaitonrepack'))
+def debugwaitonrepack(ui, repo, **opts):
+ return debugcommands.debugwaitonrepack(repo)
+
+@command('debugwaitonprefetch', [
+ ], _('hg debugwaitonprefetch'))
+def debugwaitonprefetch(ui, repo, **opts):
+ return debugcommands.debugwaitonprefetch(repo)
+
+def resolveprefetchopts(ui, opts):
+ if not opts.get('rev'):
+ revset = ['.', 'draft()']
+
+ prefetchrevset = ui.config('remotefilelog', 'pullprefetch', None)
+ if prefetchrevset:
+ revset.append('(%s)' % prefetchrevset)
+ bgprefetchrevs = ui.config('remotefilelog', 'bgprefetchrevs', None)
+ if bgprefetchrevs:
+ revset.append('(%s)' % bgprefetchrevs)
+ revset = '+'.join(revset)
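+        # For example (illustrative values), ['.', 'draft()', '(bookmark())']
+        # joins to '.+draft()+(bookmark())'.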
+
+ # update a revset with a date limit
+ revset = revdatelimit(ui, revset)
+
+ opts['rev'] = [revset]
+
+ if not opts.get('base'):
+ opts['base'] = None
+
+ return opts
+
+@command('prefetch', [
+ ('r', 'rev', [], _('prefetch the specified revisions'), _('REV')),
+ ('', 'repack', False, _('run repack after prefetch')),
+ ('b', 'base', '', _("rev that is assumed to already be local")),
+ ] + commands.walkopts, _('hg prefetch [OPTIONS] [FILE...]'))
+def prefetch(ui, repo, *pats, **opts):
+ """prefetch file revisions from the server
+
+    Prefetches file revisions for the specified revs and stores them in the
+    local remotefilelog cache. If no rev is specified, the default rev is
+    used, which is the union of dot, draft, pullprefetch and bgprefetchrevs.
+ File names or patterns can be used to limit which files are downloaded.
+
+ Return 0 on success.
+ """
+ opts = pycompat.byteskwargs(opts)
+ if not isenabled(repo):
+ raise error.Abort(_("repo is not shallow"))
+
+ opts = resolveprefetchopts(ui, opts)
+ revs = scmutil.revrange(repo, opts.get('rev'))
+ repo.prefetch(revs, opts.get('base'), pats, opts)
+
+ # Run repack in background
+ if opts.get('repack'):
+ repackmod.backgroundrepack(repo, incremental=True)
+
+@command('repack', [
+ ('', 'background', None, _('run in a background process'), None),
+ ('', 'incremental', None, _('do an incremental repack'), None),
+ ('', 'packsonly', None, _('only repack packs (skip loose objects)'), None),
+ ], _('hg repack [OPTIONS]'))
+def repack_(ui, repo, *pats, **opts):
+ if opts.get(r'background'):
+ repackmod.backgroundrepack(repo, incremental=opts.get(r'incremental'),
+ packsonly=opts.get(r'packsonly', False))
+ return
+
+ options = {'packsonly': opts.get(r'packsonly')}
+
+ try:
+ if opts.get(r'incremental'):
+ repackmod.incrementalrepack(repo, options=options)
+ else:
+ repackmod.fullrepack(repo, options=options)
+ except repackmod.RepackAlreadyRunning as ex:
+        # Don't propagate the exception if the repack is already in
+ # progress, since we want the command to exit 0.
+ repo.ui.warn('%s\n' % ex)
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/hgext/remotefilelog/basepack.py Fri Jan 18 13:28:22 2019 -0500
@@ -0,0 +1,541 @@
+from __future__ import absolute_import
+
+import collections
+import errno
+import hashlib
+import mmap
+import os
+import struct
+import time
+
+from mercurial.i18n import _
+from mercurial import (
+ node as nodemod,
+ policy,
+ pycompat,
+ util,
+ vfs as vfsmod,
+)
+from . import shallowutil
+
+osutil = policy.importmod(r'osutil')
+
+# The pack version supported by this implementation. This will need to be
+# rev'd whenever the byte format changes. Ex: changing the fanout prefix,
+# changing any of the int sizes, changing the delta algorithm, etc.
+PACKVERSIONSIZE = 1
+INDEXVERSIONSIZE = 2
+
+FANOUTSTART = INDEXVERSIONSIZE
+
+# Constant that indicates a fanout table entry hasn't been filled in. (This does
+# not get serialized)
+EMPTYFANOUT = -1
+
+# The fanout prefix is the number of bytes that can be addressed by the fanout
+# table. Example: a fanout prefix of 1 means we use the first byte of a hash to
+# look in the fanout table (which will be 2^8 entries long).
+SMALLFANOUTPREFIX = 1
+LARGEFANOUTPREFIX = 2
+
+# The number of entries in the index at which point we switch to a large fanout.
+# It is chosen to balance the linear scan through a sparse fanout, with the
+# size of the bisect in actual index.
+# 2^16 / 8 was chosen because it trades off (1 step fanout scan + 5 step
+# bisect) with (8 step fanout scan + 1 step bisect)
+# 5 step bisect = log(2^16 / 8 / 255) # fanout
+# 8 step fanout scan = 2^16 / (2^16 / 8) # fanout space divided by entries
+SMALLFANOUTCUTOFF = 2**16 // 8
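+# (That is 2**16 // 8 == 8192 entries: packs with more than 8192 entries use
+# the large fanout.)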
+
+# The amount of time to wait between checking for new packs. This prevents an
+# exception when data is moved to a new pack after the process has already
+# loaded the pack list.
+REFRESHRATE = 0.1
+
+if pycompat.isposix and not pycompat.ispy3:
+ # With glibc 2.7+ the 'e' flag uses O_CLOEXEC when opening.
+ # The 'e' flag will be ignored on older versions of glibc.
+ # Python 3 can't handle the 'e' flag.
+ PACKOPENMODE = 'rbe'
+else:
+ PACKOPENMODE = 'rb'
+
+class _cachebackedpacks(object):
+ def __init__(self, packs, cachesize):
+ self._packs = set(packs)
+ self._lrucache = util.lrucachedict(cachesize)
+ self._lastpack = None
+
+ # Avoid cold start of the cache by populating the most recent packs
+ # in the cache.
+ for i in reversed(range(min(cachesize, len(packs)))):
+ self._movetofront(packs[i])
+
+ def _movetofront(self, pack):
+ # This effectively makes pack the first entry in the cache.
+ self._lrucache[pack] = True
+
+ def _registerlastpackusage(self):
+ if self._lastpack is not None:
+ self._movetofront(self._lastpack)
+ self._lastpack = None
+
+ def add(self, pack):
+ self._registerlastpackusage()
+
+        # This method will mostly be called when packs are not in the cache,
+        # so add the pack to the cache here.
+ self._movetofront(pack)
+ self._packs.add(pack)
+
+ def __iter__(self):
+ self._registerlastpackusage()
+
+ # Cache iteration is based on LRU.
+ for pack in self._lrucache:
+ self._lastpack = pack
+ yield pack
+
+ cachedpacks = set(pack for pack in self._lrucache)
+ # Yield for paths not in the cache.
+ for pack in self._packs - cachedpacks:
+ self._lastpack = pack
+ yield pack
+
+ # Data not found in any pack.
+ self._lastpack = None
+
+class basepackstore(object):
+ # Default cache size limit for the pack files.
+ DEFAULTCACHESIZE = 100
+
+ def __init__(self, ui, path):
+ self.ui = ui
+ self.path = path
+
+        # lastrefresh is 0 so we'll immediately check for new packs on the
+ # failure.
+ self.lastrefresh = 0
+
+ packs = []
+ for filepath, __, __ in self._getavailablepackfilessorted():
+ try:
+ pack = self.getpack(filepath)
+ except Exception as ex:
+ # An exception may be thrown if the pack file is corrupted
+ # somehow. Log a warning but keep going in this case, just
+ # skipping this pack file.
+ #
+ # If this is an ENOENT error then don't even bother logging.
+ # Someone could have removed the file since we retrieved the
+ # list of paths.
+ if getattr(ex, 'errno', None) != errno.ENOENT:
+ ui.warn(_('unable to load pack %s: %s\n') % (filepath, ex))
+ continue
+ packs.append(pack)
+
+ self.packs = _cachebackedpacks(packs, self.DEFAULTCACHESIZE)
+
+ def _getavailablepackfiles(self):
+        """For each pack file (an index/data file combo), yields:
+ (full path without extension, mtime, size)
+
+ mtime will be the mtime of the index/data file (whichever is newer)
+ size is the combined size of index/data file
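+
+        For example (an illustration using the datapack suffixes), the pair
+        'abc.dataidx' and 'abc.datapack' yields a single
+        ('<path>/abc', <newer mtime>, <combined size>) tuple.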
+ """
+ indexsuffixlen = len(self.INDEXSUFFIX)
+ packsuffixlen = len(self.PACKSUFFIX)
+
+ ids = set()
+ sizes = collections.defaultdict(lambda: 0)
+ mtimes = collections.defaultdict(lambda: [])
+ try:
+ for filename, type, stat in osutil.listdir(self.path, stat=True):
+ id = None
+ if filename[-indexsuffixlen:] == self.INDEXSUFFIX:
+ id = filename[:-indexsuffixlen]
+ elif filename[-packsuffixlen:] == self.PACKSUFFIX:
+ id = filename[:-packsuffixlen]
+
+ # Since we expect to have two files corresponding to each ID
+ # (the index file and the pack file), we can yield once we see
+ # it twice.
+ if id:
+ sizes[id] += stat.st_size # Sum both files' sizes together
+ mtimes[id].append(stat.st_mtime)
+ if id in ids:
+ yield (os.path.join(self.path, id), max(mtimes[id]),
+ sizes[id])
+ else:
+ ids.add(id)
+ except OSError as ex:
+ if ex.errno != errno.ENOENT:
+ raise
+
+ def _getavailablepackfilessorted(self):
+ """Like `_getavailablepackfiles`, but also sorts the files by mtime,
+ yielding newest files first.
+
+        This is desirable, since newer packfiles are more likely to contain
+        the data we want.
+ """
+ files = []
+ for path, mtime, size in self._getavailablepackfiles():
+ files.append((mtime, size, path))
+ files = sorted(files, reverse=True)
+ for mtime, size, path in files:
+ yield path, mtime, size
+
+ def gettotalsizeandcount(self):
+ """Returns the total disk size (in bytes) of all the pack files in
+ this store, and the count of pack files.
+
+ (This might be smaller than the total size of the ``self.path``
+        directory, since this only considers fully-written pack files, and not
+        temporary files or other detritus in the directory.)
+ """
+ totalsize = 0
+ count = 0
+ for __, __, size in self._getavailablepackfiles():
+ totalsize += size
+ count += 1
+ return totalsize, count
+
+ def getmetrics(self):
+ """Returns metrics on the state of this store."""
+ size, count = self.gettotalsizeandcount()
+ return {
+ 'numpacks': count,
+ 'totalpacksize': size,
+ }
+
+ def getpack(self, path):
+ raise NotImplementedError()
+
+ def getmissing(self, keys):
+ missing = keys
+ for pack in self.packs:
+ missing = pack.getmissing(missing)
+
+ # Ensures better performance of the cache by keeping the most
+ # recently accessed pack at the beginning in subsequent iterations.
+ if not missing:
+ return missing
+
+        for pack in self.refresh():
+            missing = pack.getmissing(missing)
+
+ return missing
+
+ def markledger(self, ledger, options=None):
+ for pack in self.packs:
+ pack.markledger(ledger)
+
+ def markforrefresh(self):
+ """Tells the store that there may be new pack files, so the next time it
+ has a lookup miss it should check for new files."""
+ self.lastrefresh = 0
+
+ def refresh(self):
+ """Checks for any new packs on disk, adds them to the main pack list,
+ and returns a list of just the new packs."""
+ now = time.time()
+
+ # If we experience a lot of misses (like in the case of getmissing() on
+ # new objects), let's only actually check disk for new stuff every once
+ # in a while. Generally this code path should only ever matter when a
+        # repack is going on in the background, and it should be pretty rare
+        # for that to happen twice in quick succession.
+ newpacks = []
+ if now > self.lastrefresh + REFRESHRATE:
+ self.lastrefresh = now
+ previous = set(p.path for p in self.packs)
+ for filepath, __, __ in self._getavailablepackfilessorted():
+ if filepath not in previous:
+ newpack = self.getpack(filepath)
+ newpacks.append(newpack)
+ self.packs.add(newpack)
+
+ return newpacks
+
+class versionmixin(object):
+ # Mix-in for classes with multiple supported versions
+ VERSION = None
+ SUPPORTED_VERSIONS = [2]
+
+ def _checkversion(self, version):
+ if version in self.SUPPORTED_VERSIONS:
+ if self.VERSION is None:
+ # only affect this instance
+ self.VERSION = version
+ elif self.VERSION != version:
+ raise RuntimeError('inconsistent version: %s' % version)
+ else:
+ raise RuntimeError('unsupported version: %s' % version)
+
+class basepack(versionmixin):
+    # The maximum amount we should read via mmap before remapping so the old
+ # pages can be released (100MB)
+ MAXPAGEDIN = 100 * 1024**2
+
+ SUPPORTED_VERSIONS = [2]
+
+ def __init__(self, path):
+ self.path = path
+ self.packpath = path + self.PACKSUFFIX
+ self.indexpath = path + self.INDEXSUFFIX
+
+ self.indexsize = os.stat(self.indexpath).st_size
+ self.datasize = os.stat(self.packpath).st_size
+
+ self._index = None
+ self._data = None
+ self.freememory() # initialize the mmap
+
+ version = struct.unpack('!B', self._data[:PACKVERSIONSIZE])[0]
+ self._checkversion(version)
+
+ version, config = struct.unpack('!BB', self._index[:INDEXVERSIONSIZE])
+ self._checkversion(version)
+
+ if 0b10000000 & config:
+ self.params = indexparams(LARGEFANOUTPREFIX, version)
+ else:
+ self.params = indexparams(SMALLFANOUTPREFIX, version)
+
+ @util.propertycache
+ def _fanouttable(self):
+ params = self.params
+ rawfanout = self._index[FANOUTSTART:FANOUTSTART + params.fanoutsize]
+ fanouttable = []
+ for i in pycompat.xrange(0, params.fanoutcount):
+ loc = i * 4
+ fanoutentry = struct.unpack('!I', rawfanout[loc:loc + 4])[0]
+ fanouttable.append(fanoutentry)
+ return fanouttable
+
+ @util.propertycache
+ def _indexend(self):
+ nodecount = struct.unpack_from('!Q', self._index,
+ self.params.indexstart - 8)[0]
+ return self.params.indexstart + nodecount * self.INDEXENTRYLENGTH
+
+ def freememory(self):
+ """Unmap and remap the memory to free it up after known expensive
+ operations. Return True if self._data and self._index were reloaded.
+ """
+ if self._index:
+ if self._pagedin < self.MAXPAGEDIN:
+ return False
+
+ self._index.close()
+ self._data.close()
+
+ # TODO: use an opener/vfs to access these paths
+ with open(self.indexpath, PACKOPENMODE) as indexfp:
+ # memory-map the file, size 0 means whole file
+ self._index = mmap.mmap(indexfp.fileno(), 0,
+ access=mmap.ACCESS_READ)
+ with open(self.packpath, PACKOPENMODE) as datafp:
+ self._data = mmap.mmap(datafp.fileno(), 0, access=mmap.ACCESS_READ)
+
+ self._pagedin = 0
+ return True
+
+ def getmissing(self, keys):
+ raise NotImplementedError()
+
+ def markledger(self, ledger, options=None):
+ raise NotImplementedError()
+
+ def cleanup(self, ledger):
+ raise NotImplementedError()
+
+ def __iter__(self):
+ raise NotImplementedError()
+
+ def iterentries(self):
+ raise NotImplementedError()
+
+class mutablebasepack(versionmixin):
+
+ def __init__(self, ui, packdir, version=2):
+ self._checkversion(version)
+ # TODO(augie): make this configurable
+ self._compressor = 'GZ'
+ opener = vfsmod.vfs(packdir)
+ opener.createmode = 0o444
+ self.opener = opener
+
+ self.entries = {}
+
+ shallowutil.mkstickygroupdir(ui, packdir)
+ self.packfp, self.packpath = opener.mkstemp(
+ suffix=self.PACKSUFFIX + '-tmp')
+ self.idxfp, self.idxpath = opener.mkstemp(
+ suffix=self.INDEXSUFFIX + '-tmp')
+ self.packfp = os.fdopen(self.packfp, r'wb+')
+ self.idxfp = os.fdopen(self.idxfp, r'wb+')
+ self.sha = hashlib.sha1()
+ self._closed = False
+
+ # The opener provides no way of doing permission fixup on files created
+ # via mkstemp, so we must fix it ourselves. We can probably fix this
+ # upstream in vfs.mkstemp so we don't need to use the private method.
+ opener._fixfilemode(opener.join(self.packpath))
+ opener._fixfilemode(opener.join(self.idxpath))
+
+ # Write header
+ # TODO: make it extensible (ex: allow specifying compression algorithm,
+ # a flexible key/value header, delta algorithm, fanout size, etc)
+ versionbuf = struct.pack('!B', self.VERSION) # unsigned 1 byte int
+ self.writeraw(versionbuf)
+
+ def __enter__(self):
+ return self
+
+ def __exit__(self, exc_type, exc_value, traceback):
+ if exc_type is None:
+ self.close()
+ else:
+ self.abort()
+
+ def abort(self):
+ # Unclean exit
+ self._cleantemppacks()
+
+ def writeraw(self, data):
+ self.packfp.write(data)
+ self.sha.update(data)
+
+ def close(self, ledger=None):
+ if self._closed:
+ return
+
+ try:
+ sha = nodemod.hex(self.sha.digest())
+ self.packfp.close()
+ self.writeindex()
+
+ if len(self.entries) == 0:
+ # Empty pack
+ self._cleantemppacks()
+ self._closed = True
+ return None
+
+ self.opener.rename(self.packpath, sha + self.PACKSUFFIX)
+ try:
+ self.opener.rename(self.idxpath, sha + self.INDEXSUFFIX)
+ except Exception as ex:
+ try:
+ self.opener.unlink(sha + self.PACKSUFFIX)
+ except Exception:
+ pass
+ # Throw exception 'ex' explicitly since a normal 'raise' would
+ # potentially throw an exception from the unlink cleanup.
+ raise ex
+ except Exception:
+ # Clean up temp packs in all exception cases
+ self._cleantemppacks()
+ raise
+
+ self._closed = True
+ result = self.opener.join(sha)
+ if ledger:
+ ledger.addcreated(result)
+ return result
+
+ def _cleantemppacks(self):
+ try:
+ self.opener.unlink(self.packpath)
+ except Exception:
+ pass
+ try:
+ self.opener.unlink(self.idxpath)
+ except Exception:
+ pass
+
+ def writeindex(self):
+ rawindex = ''
+
+ largefanout = len(self.entries) > SMALLFANOUTCUTOFF
+ if largefanout:
+ params = indexparams(LARGEFANOUTPREFIX, self.VERSION)
+ else:
+ params = indexparams(SMALLFANOUTPREFIX, self.VERSION)
+
+ fanouttable = [EMPTYFANOUT] * params.fanoutcount
+
+ # Precompute the location of each entry
+ locations = {}
+ count = 0
+ for node in sorted(self.entries):
+ location = count * self.INDEXENTRYLENGTH
+ locations[node] = location
+ count += 1
+
+ # Must use [0] on the unpack result since it's always a tuple.
+ fanoutkey = struct.unpack(params.fanoutstruct,
+ node[:params.fanoutprefix])[0]
+ if fanouttable[fanoutkey] == EMPTYFANOUT:
+ fanouttable[fanoutkey] = location
+
+ rawfanouttable = ''
+ last = 0
+ for offset in fanouttable:
+ offset = offset if offset != EMPTYFANOUT else last
+ last = offset
+ rawfanouttable += struct.pack('!I', offset)
+
+ rawentrieslength = struct.pack('!Q', len(self.entries))
+
+        # The index offset is its location in the file, i.e. after the 2 byte
+        # header and the fanouttable.
+ rawindex = self.createindex(locations, 2 + len(rawfanouttable))
+
+ self._writeheader(params)
+ self.idxfp.write(rawfanouttable)
+ self.idxfp.write(rawentrieslength)
+ self.idxfp.write(rawindex)
+ self.idxfp.close()
+
+    def createindex(self, nodelocations, indexoffset):
+ raise NotImplementedError()
+
+ def _writeheader(self, indexparams):
+ # Index header
+ # <version: 1 byte>
+ # <large fanout: 1 bit> # 1 means 2^16, 0 means 2^8
+ # <unused: 7 bit> # future use (compression, delta format, etc)
+ config = 0
+ if indexparams.fanoutprefix == LARGEFANOUTPREFIX:
+ config = 0b10000000
+ self.idxfp.write(struct.pack('!BB', self.VERSION, config))
+
+class indexparams(object):
+ __slots__ = (r'fanoutprefix', r'fanoutstruct', r'fanoutcount',
+ r'fanoutsize', r'indexstart')
+
+ def __init__(self, prefixsize, version):
+ self.fanoutprefix = prefixsize
+
+ # The struct pack format for fanout table location (i.e. the format that
+ # converts the node prefix into an integer location in the fanout
+ # table).
+ if prefixsize == SMALLFANOUTPREFIX:
+ self.fanoutstruct = '!B'
+ elif prefixsize == LARGEFANOUTPREFIX:
+ self.fanoutstruct = '!H'
+ else:
+ raise ValueError("invalid fanout prefix size: %s" % prefixsize)
+
+ # The number of fanout table entries
+ self.fanoutcount = 2**(prefixsize * 8)
+
+ # The total bytes used by the fanout table
+ self.fanoutsize = self.fanoutcount * 4
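+        # Example: with LARGEFANOUTPREFIX (2), fanoutcount is 2**16 = 65536
+        # entries and fanoutsize is 65536 * 4 bytes = 256KiB.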
+
+ self.indexstart = FANOUTSTART + self.fanoutsize
+ # Skip the index length
+ self.indexstart += 8
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/hgext/remotefilelog/basestore.py Fri Jan 18 13:28:22 2019 -0500
@@ -0,0 +1,425 @@
+from __future__ import absolute_import
+
+import errno
+import hashlib
+import os
+import shutil
+import stat
+import time
+
+from mercurial.i18n import _
+from mercurial.node import bin, hex
+from mercurial import (
+ error,
+ pycompat,
+ util,
+)
+from . import (
+ constants,
+ shallowutil,
+)
+
+class basestore(object):
+ def __init__(self, repo, path, reponame, shared=False):
+ """Creates a remotefilelog store object for the given repo name.
+
+ `path` - The file path where this store keeps its data
+ `reponame` - The name of the repo. This is used to partition data from
+ many repos.
+ `shared` - True if this store is a shared cache of data from the central
+ server, for many repos on this machine. False means this store is for
+ the local data for one repo.
+ """
+ self.repo = repo
+ self.ui = repo.ui
+ self._path = path
+ self._reponame = reponame
+ self._shared = shared
+ self._uid = os.getuid() if not pycompat.iswindows else None
+
+ self._validatecachelog = self.ui.config("remotefilelog",
+ "validatecachelog")
+ self._validatecache = self.ui.config("remotefilelog", "validatecache",
+ 'on')
+ if self._validatecache not in ('on', 'strict', 'off'):
+ self._validatecache = 'on'
+ if self._validatecache == 'off':
+ self._validatecache = False
+
+ if shared:
+ shallowutil.mkstickygroupdir(self.ui, path)
+
+ def getmissing(self, keys):
+ missing = []
+ for name, node in keys:
+ filepath = self._getfilepath(name, node)
+ exists = os.path.exists(filepath)
+ if (exists and self._validatecache == 'strict' and
+ not self._validatekey(filepath, 'contains')):
+ exists = False
+ if not exists:
+ missing.append((name, node))
+
+ return missing
+
+ # BELOW THIS ARE IMPLEMENTATIONS OF REPACK SOURCE
+
+ def markledger(self, ledger, options=None):
+ if options and options.get(constants.OPTION_PACKSONLY):
+ return
+ if self._shared:
+ for filename, nodes in self._getfiles():
+ for node in nodes:
+ ledger.markdataentry(self, filename, node)
+ ledger.markhistoryentry(self, filename, node)
+
+ def cleanup(self, ledger):
+ ui = self.ui
+ entries = ledger.sources.get(self, [])
+ count = 0
+ progress = ui.makeprogress(_("cleaning up"), unit="files",
+ total=len(entries))
+ for entry in entries:
+ if entry.gced or (entry.datarepacked and entry.historyrepacked):
+ progress.update(count)
+ path = self._getfilepath(entry.filename, entry.node)
+ util.tryunlink(path)
+ count += 1
+ progress.complete()
+
+ # Clean up the repo cache directory.
+ self._cleanupdirectory(self._getrepocachepath())
+
+ # BELOW THIS ARE NON-STANDARD APIS
+
+ def _cleanupdirectory(self, rootdir):
+ """Removes the empty directories and unnecessary files within the root
+ directory recursively. Note that this method does not remove the root
+ directory itself. """
+
+ oldfiles = set()
+ otherfiles = set()
+ # osutil.listdir returns stat information which saves some rmdir/listdir
+ # syscalls.
+ for name, mode in util.osutil.listdir(rootdir):
+ if stat.S_ISDIR(mode):
+ dirpath = os.path.join(rootdir, name)
+ self._cleanupdirectory(dirpath)
+
+ # Now that the directory specified by dirpath is potentially
+ # empty, try and remove it.
+ try:
+ os.rmdir(dirpath)
+ except OSError:
+ pass
+
+ elif stat.S_ISREG(mode):
+ if name.endswith('_old'):
+ oldfiles.add(name[:-4])
+ else:
+ otherfiles.add(name)
+
+ # Remove the files which end with suffix '_old' and have no
+ # corresponding file without the suffix '_old'. See addremotefilelognode
+ # method for the generation/purpose of files with '_old' suffix.
+ for filename in oldfiles - otherfiles:
+ filepath = os.path.join(rootdir, filename + '_old')
+ util.tryunlink(filepath)
+
+ def _getfiles(self):
+ """Return a list of (filename, [node,...]) for all the revisions that
+ exist in the store.
+
+ This is useful for obtaining a list of all the contents of the store
+ when performing a repack to another store, since the store API requires
+ name+node keys and not namehash+node keys.
+ """
+ existing = {}
+ for filenamehash, node in self._listkeys():
+ existing.setdefault(filenamehash, []).append(node)
+
+ filenamemap = self._resolvefilenames(existing.keys())
+
+ for filename, sha in filenamemap.iteritems():
+ yield (filename, existing[sha])
+
+ def _resolvefilenames(self, hashes):
+ """Given a list of filename hashes that are present in the
+ remotefilelog store, return a mapping from filename->hash.
+
+ This is useful when converting remotefilelog blobs into other storage
+ formats.
+ """
+ if not hashes:
+ return {}
+
+ filenames = {}
+ missingfilename = set(hashes)
+
+ # Start with a full manifest, since it'll cover the majority of files
+ for filename in self.repo['tip'].manifest():
+ sha = hashlib.sha1(filename).digest()
+ if sha in missingfilename:
+ filenames[filename] = sha
+ missingfilename.discard(sha)
+
+ # Scan the changelog until we've found every file name
+ cl = self.repo.unfiltered().changelog
+ for rev in pycompat.xrange(len(cl) - 1, -1, -1):
+ if not missingfilename:
+ break
+ files = cl.readfiles(cl.node(rev))
+ for filename in files:
+ sha = hashlib.sha1(filename).digest()
+ if sha in missingfilename:
+ filenames[filename] = sha
+ missingfilename.discard(sha)
+
+ return filenames
+
+ def _getrepocachepath(self):
+ return os.path.join(
+ self._path, self._reponame) if self._shared else self._path
+
+ def _listkeys(self):
+ """List all the remotefilelog keys that exist in the store.
+
+        Returns an iterator of (filename hash, filecontent hash) tuples.
+ """
+
+ for root, dirs, files in os.walk(self._getrepocachepath()):
+ for filename in files:
+ if len(filename) != 40:
+ continue
+ node = filename
+ if self._shared:
+ # .../1a/85ffda..be21
+ filenamehash = root[-41:-39] + root[-38:]
+ else:
+ filenamehash = root[-40:]
+ yield (bin(filenamehash), bin(node))
+
+ def _getfilepath(self, name, node):
+ node = hex(node)
+ if self._shared:
+ key = shallowutil.getcachekey(self._reponame, name, node)
+ else:
+ key = shallowutil.getlocalkey(name, node)
+
+ return os.path.join(self._path, key)
+
+ def _getdata(self, name, node):
+ filepath = self._getfilepath(name, node)
+ try:
+ data = shallowutil.readfile(filepath)
+ if self._validatecache and not self._validatedata(data, filepath):
+ if self._validatecachelog:
+ with open(self._validatecachelog, 'a+') as f:
+ f.write("corrupt %s during read\n" % filepath)
+ os.rename(filepath, filepath + ".corrupt")
+ raise KeyError("corrupt local cache file %s" % filepath)
+ except IOError:
+ raise KeyError("no file found at %s for %s:%s" % (filepath, name,
+ hex(node)))
+
+ return data
+
+ def addremotefilelognode(self, name, node, data):
+ filepath = self._getfilepath(name, node)
+
+ oldumask = os.umask(0o002)
+ try:
+ # if this node already exists, save the old version for
+ # recovery/debugging purposes.
+ if os.path.exists(filepath):
+ newfilename = filepath + '_old'
+                # newfilename can be read-only, in which case shutil.copy
+                # would fail. Delete newfilename first to avoid that.
+ if os.path.exists(newfilename):
+ shallowutil.unlinkfile(newfilename)
+ shutil.copy(filepath, newfilename)
+
+ shallowutil.mkstickygroupdir(self.ui, os.path.dirname(filepath))
+ shallowutil.writefile(filepath, data, readonly=True)
+
+ if self._validatecache:
+ if not self._validatekey(filepath, 'write'):
+ raise error.Abort(_("local cache write was corrupted %s") %
+ filepath)
+ finally:
+ os.umask(oldumask)
+
+ def markrepo(self, path):
+ """Call this to add the given repo path to the store's list of
+ repositories that are using it. This is useful later when doing garbage
+        collection, since it allows us to inspect the repos to see what nodes
+ they want to be kept alive in the store.
+ """
+ repospath = os.path.join(self._path, "repos")
+ with open(repospath, 'ab') as reposfile:
+ reposfile.write(os.path.dirname(path) + "\n")
+
+ repospathstat = os.stat(repospath)
+ if repospathstat.st_uid == self._uid:
+ os.chmod(repospath, 0o0664)
+
+ def _validatekey(self, path, action):
+ with open(path, 'rb') as f:
+ data = f.read()
+
+ if self._validatedata(data, path):
+ return True
+
+ if self._validatecachelog:
+ with open(self._validatecachelog, 'ab+') as f:
+ f.write("corrupt %s during %s\n" % (path, action))
+
+ os.rename(path, path + ".corrupt")
+ return False
+
+ def _validatedata(self, data, path):
+ try:
+ if len(data) > 0:
+ # see remotefilelogserver.createfileblob for the format
+ offset, size, flags = shallowutil.parsesizeflags(data)
+ if len(data) <= size:
+ # it is truncated
+ return False
+
+ # extract the node from the metadata
+ offset += size
+ datanode = data[offset:offset + 20]
+
+ # and compare against the path
+ if os.path.basename(path) == hex(datanode):
+ # Content matches the intended path
+ return True
+ return False
+ except (ValueError, RuntimeError):
+ pass
+
+ return False
+
+ def gc(self, keepkeys):
+ ui = self.ui
+ cachepath = self._path
+
+ # prune cache
+ queue = pycompat.queue.PriorityQueue()
+ originalsize = 0
+ size = 0
+ count = 0
+ removed = 0
+
+ # keep files newer than a day even if they aren't needed
+ limit = time.time() - (60 * 60 * 24)
+
+ progress = ui.makeprogress(_("removing unnecessary files"),
+ unit="files")
+ progress.update(0)
+ for root, dirs, files in os.walk(cachepath):
+ for file in files:
+ if file == 'repos':
+ continue
+
+ # Don't delete pack files
+ if '/packs/' in root:
+ continue
+
+ progress.update(count)
+ path = os.path.join(root, file)
+ key = os.path.relpath(path, cachepath)
+ count += 1
+ try:
+ pathstat = os.stat(path)
+ except OSError as e:
+ # errno.ENOENT = no such file or directory
+ if e.errno != errno.ENOENT:
+ raise
+ msg = _("warning: file %s was removed by another process\n")
+ ui.warn(msg % path)
+ continue
+
+ originalsize += pathstat.st_size
+
+ if key in keepkeys or pathstat.st_atime > limit:
+ queue.put((pathstat.st_atime, path, pathstat))
+ size += pathstat.st_size
+ else:
+ try:
+ shallowutil.unlinkfile(path)
+ except OSError as e:
+ # errno.ENOENT = no such file or directory
+ if e.errno != errno.ENOENT:
+ raise
+ msg = _("warning: file %s was removed by another "
+ "process\n")
+ ui.warn(msg % path)
+ continue
+ removed += 1
+ progress.complete()
+
+ # remove oldest files until under limit
+ limit = ui.configbytes("remotefilelog", "cachelimit")
+ if size > limit:
+ excess = size - limit
+ progress = ui.makeprogress(_("enforcing cache limit"), unit="bytes",
+ total=excess)
+ removedexcess = 0
+ while queue and size > limit and size > 0:
+ progress.update(removedexcess)
+ atime, oldpath, oldpathstat = queue.get()
+ try:
+ shallowutil.unlinkfile(oldpath)
+ except OSError as e:
+ # errno.ENOENT = no such file or directory
+ if e.errno != errno.ENOENT:
+ raise
+ msg = _("warning: file %s was removed by another process\n")
+ ui.warn(msg % oldpath)
+ size -= oldpathstat.st_size
+ removed += 1
+ removedexcess += oldpathstat.st_size
+ progress.complete()
+
+ ui.status(_("finished: removed %d of %d files (%0.2f GB to %0.2f GB)\n")
+ % (removed, count,
+ float(originalsize) / 1024.0 / 1024.0 / 1024.0,
+ float(size) / 1024.0 / 1024.0 / 1024.0))
+
+class baseunionstore(object):
+ def __init__(self, *args, **kwargs):
+ # If one of the functions that iterates all of the stores is about to
+ # throw a KeyError, try this many times with a full refresh between
+ # attempts. A repack operation may have moved data from one store to
+ # another while we were running.
+ self.numattempts = kwargs.get(r'numretries', 0) + 1
+ # If not-None, call this function on every retry and if the attempts are
+ # exhausted.
+ self.retrylog = kwargs.get(r'retrylog', None)
+
+ def markforrefresh(self):
+ for store in self.stores:
+ if util.safehasattr(store, 'markforrefresh'):
+ store.markforrefresh()
+
+ @staticmethod
+ def retriable(fn):
+ def noop(*args):
+ pass
+ def wrapped(self, *args, **kwargs):
+ retrylog = self.retrylog or noop
+ funcname = fn.__name__
+ for i in pycompat.xrange(self.numattempts):
+ if i > 0:
+ retrylog('re-attempting (n=%d) %s\n' % (i, funcname))
+ self.markforrefresh()
+ try:
+ return fn(self, *args, **kwargs)
+ except KeyError:
+ pass
+ # retries exhausted
+ retrylog('retries exhausted in %s, raising KeyError\n' %
+ pycompat.sysbytes(funcname))
+ raise
+ return wrapped
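+
+# Illustrative use of the decorator above (a sketch; the union stores in this
+# series decorate their lookup methods like this):
+#
+#   @baseunionstore.retriable
+#   def getmeta(self, name, node):
+#       ...
+#
+# A KeyError then triggers markforrefresh() and up to 'numretries' retries.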
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/hgext/remotefilelog/connectionpool.py Fri Jan 18 13:28:22 2019 -0500
@@ -0,0 +1,84 @@
+# connectionpool.py - class for pooling peer connections for reuse
+#
+# Copyright 2017 Facebook, Inc.
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2 or any later version.
+
+from __future__ import absolute_import
+
+from mercurial import (
+ extensions,
+ hg,
+ sshpeer,
+ util,
+)
+
+_sshv1peer = sshpeer.sshv1peer
+
+class connectionpool(object):
+ def __init__(self, repo):
+ self._repo = repo
+ self._pool = dict()
+
+ def get(self, path):
+ pathpool = self._pool.get(path)
+ if pathpool is None:
+ pathpool = list()
+ self._pool[path] = pathpool
+
+ conn = None
+ if len(pathpool) > 0:
+ try:
+ conn = pathpool.pop()
+ peer = conn.peer
+ # If the connection has died, drop it
+ if isinstance(peer, _sshv1peer):
+ if peer._subprocess.poll() is not None:
+ conn = None
+ except IndexError:
+ pass
+
+ if conn is None:
+ def _cleanup(orig):
+ # close pipee first so peer.cleanup reading it won't deadlock,
+ # if there are other processes with pipeo open (i.e. us).
+ peer = orig.im_self
+ if util.safehasattr(peer, 'pipee'):
+ peer.pipee.close()
+ return orig()
+
+ peer = hg.peer(self._repo.ui, {}, path)
+ if util.safehasattr(peer, 'cleanup'):
+ extensions.wrapfunction(peer, 'cleanup', _cleanup)
+
+ conn = connection(pathpool, peer)
+
+ return conn
+
+ def close(self):
+ for pathpool in self._pool.itervalues():
+ for conn in pathpool:
+ conn.close()
+ del pathpool[:]
+
+class connection(object):
+ def __init__(self, pool, peer):
+ self._pool = pool
+ self.peer = peer
+
+ def __enter__(self):
+ return self
+
+ def __exit__(self, type, value, traceback):
+ # Only add the connection back to the pool if there was no exception,
+ # since an exception could mean the connection is not in a reusable
+ # state.
+ if type is None:
+ self._pool.append(self)
+ else:
+ self.close()
+
+ def close(self):
+ if util.safehasattr(self.peer, 'cleanup'):
+ self.peer.cleanup()
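+
+# Illustrative use (a sketch; 'path' stands for whatever peer path the caller
+# has in hand):
+#
+#   pool = connectionpool(repo)
+#   with pool.get(path) as conn:
+#       conn.peer.lookup('tip')
+#
+# A clean exit from the 'with' block returns the connection to the pool for
+# reuse; an exception closes it instead.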
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/hgext/remotefilelog/constants.py Fri Jan 18 13:28:22 2019 -0500
@@ -0,0 +1,41 @@
+from __future__ import absolute_import
+
+import struct
+
+from mercurial.i18n import _
+
+NETWORK_CAP_LEGACY_SSH_GETFILES = 'exp-remotefilelog-ssh-getfiles-1'
+
+SHALLOWREPO_REQUIREMENT = "exp-remotefilelog-repo-req-1"
+
+BUNDLE2_CAPABLITY = "exp-remotefilelog-b2cap-1"
+
+FILENAMESTRUCT = '!H'
+FILENAMESIZE = struct.calcsize(FILENAMESTRUCT)
+
+NODESIZE = 20
+PACKREQUESTCOUNTSTRUCT = '!I'
+
+NODECOUNTSTRUCT = '!I'
+NODECOUNTSIZE = struct.calcsize(NODECOUNTSTRUCT)
+
+PATHCOUNTSTRUCT = '!I'
+PATHCOUNTSIZE = struct.calcsize(PATHCOUNTSTRUCT)
+
+FILEPACK_CATEGORY = ""
+TREEPACK_CATEGORY = "manifests"
+
+ALL_CATEGORIES = [FILEPACK_CATEGORY, TREEPACK_CATEGORY]
+
+# Revision metadata keys. Each must be a single character.
+METAKEYFLAG = 'f' # revlog flag
+METAKEYSIZE = 's' # full rawtext size
+
+def getunits(category):
+ if category == FILEPACK_CATEGORY:
+ return _("files")
+ if category == TREEPACK_CATEGORY:
+ return _("trees")
+
+# Repack options passed to ``markledger``.
+OPTION_PACKSONLY = 'packsonly'
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/hgext/remotefilelog/contentstore.py Fri Jan 18 13:28:22 2019 -0500
@@ -0,0 +1,376 @@
+from __future__ import absolute_import
+
+import threading
+
+from mercurial.node import hex, nullid
+from mercurial import (
+ mdiff,
+ pycompat,
+ revlog,
+)
+from . import (
+ basestore,
+ constants,
+ shallowutil,
+)
+
+class ChainIndicies(object):
+    """A static class for easy reference to the delta chain indices.
+ """
+ # The filename of this revision delta
+ NAME = 0
+ # The mercurial file node for this revision delta
+ NODE = 1
+    # The filename of the delta base's revision. This is useful when the
+    # delta is computed between different files (e.g. for a move or copy,
+    # we can delta against the original file's content).
+ BASENAME = 2
+ # The mercurial file node for the delta base revision. This is the nullid if
+ # this delta is a full text.
+ BASENODE = 3
+ # The actual delta or full text data.
+ DATA = 4
+
+class unioncontentstore(basestore.baseunionstore):
+ def __init__(self, *args, **kwargs):
+ super(unioncontentstore, self).__init__(*args, **kwargs)
+
+ self.stores = args
+ self.writestore = kwargs.get(r'writestore')
+
+ # If allowincomplete==True then the union store can return partial
+ # delta chains, otherwise it will throw a KeyError if a full
+ # deltachain can't be found.
+ self.allowincomplete = kwargs.get(r'allowincomplete', False)
+
+ def get(self, name, node):
+ """Fetches the full text revision contents of the given name+node pair.
+ If the full text doesn't exist, throws a KeyError.
+
+ Under the hood, this uses getdeltachain() across all the stores to build
+ up a full chain to produce the full text.
+ """
+ chain = self.getdeltachain(name, node)
+
+ if chain[-1][ChainIndicies.BASENODE] != nullid:
+ # If we didn't receive a full chain, throw
+ raise KeyError((name, hex(node)))
+
+ # The last entry in the chain is a full text, so we start our delta
+ # applies with that.
+ fulltext = chain.pop()[ChainIndicies.DATA]
+
+ text = fulltext
+ while chain:
+ delta = chain.pop()[ChainIndicies.DATA]
+ text = mdiff.patches(text, [delta])
+
+ return text
+
+ @basestore.baseunionstore.retriable
+ def getdelta(self, name, node):
+ """Return the single delta entry for the given name/node pair.
+ """
+ for store in self.stores:
+ try:
+ return store.getdelta(name, node)
+ except KeyError:
+ pass
+
+ raise KeyError((name, hex(node)))
+
+ def getdeltachain(self, name, node):
+ """Returns the deltachain for the given name/node pair.
+
+ Returns an ordered list of:
+
+ [(name, node, deltabasename, deltabasenode, deltacontent),...]
+
+ where the chain is terminated by a full text entry with a nullid
+ deltabasenode.
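+
+        For instance (an illustration), a two-entry chain could look like:
+
+            [(name, node1, name, node2, <delta of node1 against node2>),
+             (name, node2, None, nullid, <fulltext of node2>)]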
+ """
+ chain = self._getpartialchain(name, node)
+ while chain[-1][ChainIndicies.BASENODE] != nullid:
+ x, x, deltabasename, deltabasenode, x = chain[-1]
+ try:
+ morechain = self._getpartialchain(deltabasename, deltabasenode)
+ chain.extend(morechain)
+ except KeyError:
+ # If we allow incomplete chains, don't throw.
+ if not self.allowincomplete:
+ raise
+ break
+
+ return chain
+
+ @basestore.baseunionstore.retriable
+ def getmeta(self, name, node):
+ """Returns the metadata dict for given node."""
+ for store in self.stores:
+ try:
+ return store.getmeta(name, node)
+ except KeyError:
+ pass
+ raise KeyError((name, hex(node)))
+
+ def getmetrics(self):
+ metrics = [s.getmetrics() for s in self.stores]
+ return shallowutil.sumdicts(*metrics)
+
+ @basestore.baseunionstore.retriable
+ def _getpartialchain(self, name, node):
+ """Returns a partial delta chain for the given name/node pair.
+
+ A partial chain is a chain that may not be terminated in a full-text.
+ """
+ for store in self.stores:
+ try:
+ return store.getdeltachain(name, node)
+ except KeyError:
+ pass
+
+ raise KeyError((name, hex(node)))
+
+ def add(self, name, node, data):
+ raise RuntimeError("cannot add content only to remotefilelog "
+ "contentstore")
+
+ def getmissing(self, keys):
+ missing = keys
+ for store in self.stores:
+ if missing:
+ missing = store.getmissing(missing)
+ return missing
+
+ def addremotefilelognode(self, name, node, data):
+ if self.writestore:
+ self.writestore.addremotefilelognode(name, node, data)
+ else:
+ raise RuntimeError("no writable store configured")
+
+ def markledger(self, ledger, options=None):
+ for store in self.stores:
+ store.markledger(ledger, options)
+
+class remotefilelogcontentstore(basestore.basestore):
+ def __init__(self, *args, **kwargs):
+ super(remotefilelogcontentstore, self).__init__(*args, **kwargs)
+ self._threaddata = threading.local()
+
+ def get(self, name, node):
+ # return raw revision text
+ data = self._getdata(name, node)
+
+ offset, size, flags = shallowutil.parsesizeflags(data)
+ content = data[offset:offset + size]
+
+ ancestormap = shallowutil.ancestormap(data)
+ p1, p2, linknode, copyfrom = ancestormap[node]
+ copyrev = None
+ if copyfrom:
+ copyrev = hex(p1)
+
+ self._updatemetacache(node, size, flags)
+
+ # lfs tracks renames in its own metadata, remove hg copy metadata,
+ # because copy metadata will be re-added by lfs flag processor.
+ if flags & revlog.REVIDX_EXTSTORED:
+ copyrev = copyfrom = None
+ revision = shallowutil.createrevlogtext(content, copyfrom, copyrev)
+ return revision
+
+ def getdelta(self, name, node):
+ # Since remotefilelog content stores only contain full texts, just
+ # return that.
+ revision = self.get(name, node)
+ return revision, name, nullid, self.getmeta(name, node)
+
+ def getdeltachain(self, name, node):
+ # Since remotefilelog content stores just contain full texts, we return
+ # a fake delta chain that just consists of a single full text revision.
+ # The nullid in the deltabasenode slot indicates that the revision is a
+ # fulltext.
+ revision = self.get(name, node)
+ return [(name, node, None, nullid, revision)]
+
+ def getmeta(self, name, node):
+ self._sanitizemetacache()
+ if node != self._threaddata.metacache[0]:
+ data = self._getdata(name, node)
+ offset, size, flags = shallowutil.parsesizeflags(data)
+ self._updatemetacache(node, size, flags)
+ return self._threaddata.metacache[1]
+
+ def add(self, name, node, data):
+ raise RuntimeError("cannot add content only to remotefilelog "
+ "contentstore")
+
+ def _sanitizemetacache(self):
+ metacache = getattr(self._threaddata, 'metacache', None)
+ if metacache is None:
+ self._threaddata.metacache = (None, None) # (node, meta)
+
+ def _updatemetacache(self, node, size, flags):
+ self._sanitizemetacache()
+ if node == self._threaddata.metacache[0]:
+ return
+ meta = {constants.METAKEYFLAG: flags,
+ constants.METAKEYSIZE: size}
+ self._threaddata.metacache = (node, meta)
+
+class remotecontentstore(object):
+ def __init__(self, ui, fileservice, shared):
+ self._fileservice = fileservice
+ # type(shared) is usually remotefilelogcontentstore
+ self._shared = shared
+
+ def get(self, name, node):
+ self._fileservice.prefetch([(name, hex(node))], force=True,
+ fetchdata=True)
+ return self._shared.get(name, node)
+
+ def getdelta(self, name, node):
+ revision = self.get(name, node)
+ return revision, name, nullid, self._shared.getmeta(name, node)
+
+ def getdeltachain(self, name, node):
+ # Since our remote content stores just contain full texts, we return a
+ # fake delta chain that just consists of a single full text revision.
+ # The nullid in the deltabasenode slot indicates that the revision is a
+ # fulltext.
+ revision = self.get(name, node)
+ return [(name, node, None, nullid, revision)]
+
+ def getmeta(self, name, node):
+ self._fileservice.prefetch([(name, hex(node))], force=True,
+ fetchdata=True)
+ return self._shared.getmeta(name, node)
+
+ def add(self, name, node, data):
+ raise RuntimeError("cannot add to a remote store")
+
+ def getmissing(self, keys):
+ return keys
+
+ def markledger(self, ledger, options=None):
+ pass
+
+class manifestrevlogstore(object):
+ def __init__(self, repo):
+ self._store = repo.store
+ self._svfs = repo.svfs
+ self._revlogs = dict()
+ self._cl = revlog.revlog(self._svfs, '00changelog.i')
+ self._repackstartlinkrev = 0
+
+ def get(self, name, node):
+ return self._revlog(name).revision(node, raw=True)
+
+ def getdelta(self, name, node):
+ revision = self.get(name, node)
+ return revision, name, nullid, self.getmeta(name, node)
+
+ def getdeltachain(self, name, node):
+ revision = self.get(name, node)
+ return [(name, node, None, nullid, revision)]
+
+ def getmeta(self, name, node):
+ rl = self._revlog(name)
+ rev = rl.rev(node)
+ return {constants.METAKEYFLAG: rl.flags(rev),
+ constants.METAKEYSIZE: rl.rawsize(rev)}
+
+ def getancestors(self, name, node, known=None):
+ if known is None:
+ known = set()
+ if node in known:
+ return []
+
+ rl = self._revlog(name)
+ ancestors = {}
+ missing = set((node,))
+ for ancrev in rl.ancestors([rl.rev(node)], inclusive=True):
+ ancnode = rl.node(ancrev)
+ missing.discard(ancnode)
+
+ p1, p2 = rl.parents(ancnode)
+ if p1 != nullid and p1 not in known:
+ missing.add(p1)
+ if p2 != nullid and p2 not in known:
+ missing.add(p2)
+
+ linknode = self._cl.node(rl.linkrev(ancrev))
+ ancestors[rl.node(ancrev)] = (p1, p2, linknode, '')
+ if not missing:
+ break
+ return ancestors
+
+ def getnodeinfo(self, name, node):
+ cl = self._cl
+ rl = self._revlog(name)
+ parents = rl.parents(node)
+ linkrev = rl.linkrev(rl.rev(node))
+ return (parents[0], parents[1], cl.node(linkrev), None)
+
+ def add(self, *args):
+ raise RuntimeError("cannot add to a revlog store")
+
+ def _revlog(self, name):
+ rl = self._revlogs.get(name)
+ if rl is None:
+ revlogname = '00manifesttree.i'
+ if name != '':
+ revlogname = 'meta/%s/00manifest.i' % name
+ rl = revlog.revlog(self._svfs, revlogname)
+ self._revlogs[name] = rl
+ return rl
+
+ def getmissing(self, keys):
+ missing = []
+ for name, node in keys:
+ mfrevlog = self._revlog(name)
+ if node not in mfrevlog.nodemap:
+ missing.append((name, node))
+
+ return missing
+
+ def setrepacklinkrevrange(self, startrev, endrev):
+ self._repackstartlinkrev = startrev
+ self._repackendlinkrev = endrev
+
+ def markledger(self, ledger, options=None):
+ if options and options.get(constants.OPTION_PACKSONLY):
+ return
+ treename = ''
+ rl = revlog.revlog(self._svfs, '00manifesttree.i')
+ startlinkrev = self._repackstartlinkrev
+ endlinkrev = self._repackendlinkrev
+ for rev in pycompat.xrange(len(rl) - 1, -1, -1):
+ linkrev = rl.linkrev(rev)
+ if linkrev < startlinkrev:
+ break
+ if linkrev > endlinkrev:
+ continue
+ node = rl.node(rev)
+ ledger.markdataentry(self, treename, node)
+ ledger.markhistoryentry(self, treename, node)
+
+ for path, encoded, size in self._store.datafiles():
+ if path[:5] != 'meta/' or path[-2:] != '.i':
+ continue
+
+ treename = path[5:-len('/00manifest.i')]
+
+ rl = revlog.revlog(self._svfs, path)
+ for rev in pycompat.xrange(len(rl) - 1, -1, -1):
+ linkrev = rl.linkrev(rev)
+ if linkrev < startlinkrev:
+ break
+ if linkrev > endlinkrev:
+ continue
+ node = rl.node(rev)
+ ledger.markdataentry(self, treename, node)
+ ledger.markhistoryentry(self, treename, node)
+
+ def cleanup(self, ledger):
+ pass
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/hgext/remotefilelog/datapack.py Fri Jan 18 13:28:22 2019 -0500
@@ -0,0 +1,460 @@
+from __future__ import absolute_import
+
+import struct
+import zlib
+
+from mercurial.node import hex, nullid
+from mercurial.i18n import _
+from mercurial import (
+ pycompat,
+ util,
+)
+from . import (
+ basepack,
+ constants,
+ shallowutil,
+)
+
+NODELENGTH = 20
+
+# The indicator value in the index for a fulltext entry.
+FULLTEXTINDEXMARK = -1
+NOBASEINDEXMARK = -2
+
+INDEXSUFFIX = '.dataidx'
+PACKSUFFIX = '.datapack'
+
+class datapackstore(basepack.basepackstore):
+ INDEXSUFFIX = INDEXSUFFIX
+ PACKSUFFIX = PACKSUFFIX
+
+ def __init__(self, ui, path):
+ super(datapackstore, self).__init__(ui, path)
+
+ def getpack(self, path):
+ return datapack(path)
+
+ def get(self, name, node):
+ raise RuntimeError("must use getdeltachain with datapackstore")
+
+ def getmeta(self, name, node):
+ for pack in self.packs:
+ try:
+ return pack.getmeta(name, node)
+ except KeyError:
+ pass
+
+ for pack in self.refresh():
+ try:
+ return pack.getmeta(name, node)
+ except KeyError:
+ pass
+
+ raise KeyError((name, hex(node)))
+
+ def getdelta(self, name, node):
+ for pack in self.packs:
+ try:
+ return pack.getdelta(name, node)
+ except KeyError:
+ pass
+
+ for pack in self.refresh():
+ try:
+ return pack.getdelta(name, node)
+ except KeyError:
+ pass
+
+ raise KeyError((name, hex(node)))
+
+ def getdeltachain(self, name, node):
+ for pack in self.packs:
+ try:
+ return pack.getdeltachain(name, node)
+ except KeyError:
+ pass
+
+ for pack in self.refresh():
+ try:
+ return pack.getdeltachain(name, node)
+ except KeyError:
+ pass
+
+ raise KeyError((name, hex(node)))
+
+ def add(self, name, node, data):
+ raise RuntimeError("cannot add to datapackstore")
+
+class datapack(basepack.basepack):
+ INDEXSUFFIX = INDEXSUFFIX
+ PACKSUFFIX = PACKSUFFIX
+
+ # Format is <node><delta offset><pack data offset><pack data size>
+ # See the mutabledatapack doccomment for more details.
+ INDEXFORMAT = '!20siQQ'
+ INDEXENTRYLENGTH = 40
+
+ SUPPORTED_VERSIONS = [2]
+
+ def getmissing(self, keys):
+ missing = []
+ for name, node in keys:
+ value = self._find(node)
+ if not value:
+ missing.append((name, node))
+
+ return missing
+
+ def get(self, name, node):
+ raise RuntimeError("must use getdeltachain with datapack (%s:%s)"
+ % (name, hex(node)))
+
+ def getmeta(self, name, node):
+ value = self._find(node)
+ if value is None:
+ raise KeyError((name, hex(node)))
+
+ node, deltabaseoffset, offset, size = value
+ rawentry = self._data[offset:offset + size]
+
+ # see docstring of mutabledatapack for the format
+ offset = 0
+ offset += struct.unpack_from('!H', rawentry, offset)[0] + 2 # filename
+ offset += 40 # node, deltabase node
+ offset += struct.unpack_from('!Q', rawentry, offset)[0] + 8 # delta
+
+ metalen = struct.unpack_from('!I', rawentry, offset)[0]
+ offset += 4
+
+ meta = shallowutil.parsepackmeta(rawentry[offset:offset + metalen])
+
+ return meta
+
+ def getdelta(self, name, node):
+ value = self._find(node)
+ if value is None:
+ raise KeyError((name, hex(node)))
+
+ node, deltabaseoffset, offset, size = value
+ entry = self._readentry(offset, size, getmeta=True)
+ filename, node, deltabasenode, delta, meta = entry
+
+ # If we've read a lot of data from the mmap, free some memory.
+ self.freememory()
+
+ return delta, filename, deltabasenode, meta
+
+ def getdeltachain(self, name, node):
+ value = self._find(node)
+ if value is None:
+ raise KeyError((name, hex(node)))
+
+ params = self.params
+
+ # Precompute chains
+ chain = [value]
+ deltabaseoffset = value[1]
+ entrylen = self.INDEXENTRYLENGTH
+ while (deltabaseoffset != FULLTEXTINDEXMARK
+ and deltabaseoffset != NOBASEINDEXMARK):
+ loc = params.indexstart + deltabaseoffset
+ value = struct.unpack(self.INDEXFORMAT,
+ self._index[loc:loc + entrylen])
+ deltabaseoffset = value[1]
+ chain.append(value)
+
+ # Read chain data
+ deltachain = []
+ for node, deltabaseoffset, offset, size in chain:
+ filename, node, deltabasenode, delta = self._readentry(offset, size)
+ deltachain.append((filename, node, filename, deltabasenode, delta))
+
+ # If we've read a lot of data from the mmap, free some memory.
+ self.freememory()
+
+ return deltachain
+
+ def _readentry(self, offset, size, getmeta=False):
+ rawentry = self._data[offset:offset + size]
+ self._pagedin += len(rawentry)
+
+ # <2 byte len> + <filename>
+ lengthsize = 2
+ filenamelen = struct.unpack('!H', rawentry[:2])[0]
+ filename = rawentry[lengthsize:lengthsize + filenamelen]
+
+ # <20 byte node> + <20 byte deltabase>
+ nodestart = lengthsize + filenamelen
+ deltabasestart = nodestart + NODELENGTH
+ node = rawentry[nodestart:deltabasestart]
+ deltabasenode = rawentry[deltabasestart:deltabasestart + NODELENGTH]
+
+ # <8 byte len> + <delta>
+ deltastart = deltabasestart + NODELENGTH
+ rawdeltalen = rawentry[deltastart:deltastart + 8]
+ deltalen = struct.unpack('!Q', rawdeltalen)[0]
+
+ delta = rawentry[deltastart + 8:deltastart + 8 + deltalen]
+ delta = self._decompress(delta)
+
+ if getmeta:
+ metastart = deltastart + 8 + deltalen
+ metalen = struct.unpack_from('!I', rawentry, metastart)[0]
+
+ rawmeta = rawentry[metastart + 4:metastart + 4 + metalen]
+ meta = shallowutil.parsepackmeta(rawmeta)
+ return filename, node, deltabasenode, delta, meta
+ else:
+ return filename, node, deltabasenode, delta
+
+ def _decompress(self, data):
+ return zlib.decompress(data)
+
+ def add(self, name, node, data):
+ raise RuntimeError("cannot add to datapack (%s:%s)" % (name, node))
+
+ def _find(self, node):
+ params = self.params
+ fanoutkey = struct.unpack(params.fanoutstruct,
+ node[:params.fanoutprefix])[0]
+ fanout = self._fanouttable
+
+ start = fanout[fanoutkey] + params.indexstart
+ indexend = self._indexend
+
+ # Scan forward to find the first non-same entry, which is the upper
+ # bound.
+ for i in pycompat.xrange(fanoutkey + 1, params.fanoutcount):
+ end = fanout[i] + params.indexstart
+ if end != start:
+ break
+ else:
+ end = indexend
+
+ # Bisect between start and end to find node
+ index = self._index
+ startnode = index[start:start + NODELENGTH]
+ endnode = index[end:end + NODELENGTH]
+ entrylen = self.INDEXENTRYLENGTH
+ if startnode == node:
+ entry = index[start:start + entrylen]
+ elif endnode == node:
+ entry = index[end:end + entrylen]
+ else:
+ while start < end - entrylen:
+                mid = start + (end - start) // 2
+ mid = mid - ((mid - params.indexstart) % entrylen)
+ midnode = index[mid:mid + NODELENGTH]
+ if midnode == node:
+ entry = index[mid:mid + entrylen]
+ break
+ if node > midnode:
+ start = mid
+ startnode = midnode
+ elif node < midnode:
+ end = mid
+ endnode = midnode
+ else:
+ return None
+
+ return struct.unpack(self.INDEXFORMAT, entry)
+
+ def markledger(self, ledger, options=None):
+ for filename, node in self:
+ ledger.markdataentry(self, filename, node)
+
+ def cleanup(self, ledger):
+ entries = ledger.sources.get(self, [])
+ allkeys = set(self)
+ repackedkeys = set((e.filename, e.node) for e in entries if
+ e.datarepacked or e.gced)
+
+ if len(allkeys - repackedkeys) == 0:
+ if self.path not in ledger.created:
+ util.unlinkpath(self.indexpath, ignoremissing=True)
+ util.unlinkpath(self.packpath, ignoremissing=True)
+
+ def __iter__(self):
+ for f, n, deltabase, deltalen in self.iterentries():
+ yield f, n
+
+ def iterentries(self):
+ # Start at 1 to skip the header
+ offset = 1
+ data = self._data
+ while offset < self.datasize:
+ oldoffset = offset
+
+ # <2 byte len> + <filename>
+ filenamelen = struct.unpack('!H', data[offset:offset + 2])[0]
+ offset += 2
+ filename = data[offset:offset + filenamelen]
+ offset += filenamelen
+
+ # <20 byte node>
+ node = data[offset:offset + constants.NODESIZE]
+ offset += constants.NODESIZE
+ # <20 byte deltabase>
+ deltabase = data[offset:offset + constants.NODESIZE]
+ offset += constants.NODESIZE
+
+ # <8 byte len> + <delta>
+ rawdeltalen = data[offset:offset + 8]
+ deltalen = struct.unpack('!Q', rawdeltalen)[0]
+ offset += 8
+
+ # TODO(augie): we should store a header that is the
+ # uncompressed size.
+ uncompressedlen = len(self._decompress(
+ data[offset:offset + deltalen]))
+ offset += deltalen
+
+ # <4 byte len> + <metadata-list>
+ metalen = struct.unpack_from('!I', data, offset)[0]
+ offset += 4 + metalen
+
+ yield (filename, node, deltabase, uncompressedlen)
+
+ # If we've read a lot of data from the mmap, free some memory.
+ self._pagedin += offset - oldoffset
+ if self.freememory():
+ data = self._data
+
+class mutabledatapack(basepack.mutablebasepack):
+ """A class for constructing and serializing a datapack file and index.
+
+ A datapack is a pair of files that contain the revision contents for various
+ file revisions in Mercurial. It contains only revision contents (like file
+ contents), not any history information.
+
+ It consists of two files, with the following format. All bytes are in
+ network byte order (big endian).
+
+ .datapack
+ The pack itself is a series of revision deltas with some basic header
+ information on each. A revision delta may be a fulltext, represented by
+ a deltabasenode equal to the nullid.
+
+ datapack = <version: 1 byte>
+ [<revision>,...]
+ revision = <filename len: 2 byte unsigned int>
+ <filename>
+ <node: 20 byte>
+ <deltabasenode: 20 byte>
+ <delta len: 8 byte unsigned int>
+ <delta>
+ <metadata-list len: 4 byte unsigned int> [1]
+ <metadata-list> [1]
+ metadata-list = [<metadata-item>, ...]
+ metadata-item = <metadata-key: 1 byte>
+ <metadata-value len: 2 byte unsigned>
+ <metadata-value>
+
+ metadata-key is currently METAKEYFLAG or METAKEYSIZE; other single-byte
+ values may be added in the future.
+
+ .dataidx
+ The index file consists of two parts, the fanout and the index.
+
+ The index is a list of index entries, sorted by node (one per revision
+ in the pack). Each entry has:
+
+ - node (The 20 byte node of the entry; i.e. the commit hash, file node
+ hash, etc)
+ - deltabase index offset (The location in the index of the deltabase for
+ this entry. The deltabase is the next delta in
+ the chain, with the chain eventually
+ terminating in a full-text, represented by a
+ deltabase offset of -1. This lets us compute
+ delta chains from the index, then do
+ sequential reads from the pack if the revisions
+ are nearby on disk.)
+ - pack entry offset (The location of this entry in the datapack)
+ - pack content size (The on-disk length of this entry's pack data)
+
+ The fanout is a quick lookup table to reduce the number of steps for
+ bisecting the index. It is a series of 4 byte pointers to positions
+ within the index. It has 2^16 entries, which corresponds to hash
+ prefixes [0000, 0001,..., FFFE, FFFF]. Example: the pointer in slot
+ 4F0A points to the index position of the first revision whose node
+ starts with 4F0A. This saves log(2^16)=16 bisect steps.
+
+ dataidx = <fanouttable>
+ <index>
+ fanouttable = [<index offset: 4 byte unsigned int>,...] (2^16 entries)
+ index = [<index entry>,...]
+ indexentry = <node: 20 byte>
+ <deltabase location: 4 byte signed int>
+ <pack entry offset: 8 byte unsigned int>
+ <pack entry size: 8 byte unsigned int>
+
+ [1]: new in version 1.
+ """
+ INDEXSUFFIX = INDEXSUFFIX
+ PACKSUFFIX = PACKSUFFIX
+
+ # v[01] index format: <node><delta offset><pack data offset><pack data size>
+ INDEXFORMAT = datapack.INDEXFORMAT
+ INDEXENTRYLENGTH = datapack.INDEXENTRYLENGTH
+
+ # v1 has metadata support
+ SUPPORTED_VERSIONS = [2]
+
+ def _compress(self, data):
+ return zlib.compress(data)
+
+ def add(self, name, node, deltabasenode, delta, metadata=None):
+ # metadata is a dict, ex. {METAKEYFLAG: flag}
+ if len(name) >= 2**16:
+ raise RuntimeError(_("name too long %s") % name)
+ if len(node) != 20:
+ raise RuntimeError(_("node should be 20 bytes %s") % node)
+
+ if node in self.entries:
+ # The revision has already been added
+ return
+
+ # TODO: allow configurable compression
+ delta = self._compress(delta)
+
+ rawdata = ''.join((
+ struct.pack('!H', len(name)), # unsigned 2 byte int
+ name,
+ node,
+ deltabasenode,
+ struct.pack('!Q', len(delta)), # unsigned 8 byte int
+ delta,
+ ))
+
+ # v1 and later support metadata
+ rawmeta = shallowutil.buildpackmeta(metadata)
+ rawdata += struct.pack('!I', len(rawmeta)) # unsigned 4 byte
+ rawdata += rawmeta
+
+ offset = self.packfp.tell()
+
+ size = len(rawdata)
+
+ self.entries[node] = (deltabasenode, offset, size)
+
+ self.writeraw(rawdata)
+
+ def createindex(self, nodelocations, indexoffset):
+ entries = sorted((n, db, o, s) for n, (db, o, s)
+ in self.entries.iteritems())
+
+ rawindex = ''
+ fmt = self.INDEXFORMAT
+ for node, deltabase, offset, size in entries:
+ if deltabase == nullid:
+ deltabaselocation = FULLTEXTINDEXMARK
+ else:
+ # Instead of storing the deltabase node in the index, let's
+ # store a pointer directly to the index entry for the deltabase.
+ deltabaselocation = nodelocations.get(deltabase,
+ NOBASEINDEXMARK)
+
+ entry = struct.pack(fmt, node, deltabaselocation, offset, size)
+ rawindex += entry
+
+ return rawindex
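
As a quick illustration of the record layout documented in the
mutabledatapack docstring above, here is a standalone sketch (not part of
the patch; it assumes the v1 layout with zlib-compressed deltas and an
empty metadata list) that round-trips a single datapack revision record:

    import struct
    import zlib

    def packrecord(name, node, deltabasenode, delta):
        # <filename len: 2 byte><filename><node: 20 byte>
        # <deltabasenode: 20 byte><delta len: 8 byte><compressed delta>
        # <metadata-list len: 4 byte>
        cdelta = zlib.compress(delta)
        return b''.join((
            struct.pack('!H', len(name)), name,
            node, deltabasenode,
            struct.pack('!Q', len(cdelta)), cdelta,
            struct.pack('!I', 0),  # empty metadata-list
        ))

    def unpackrecord(data, offset=0):
        namelen = struct.unpack_from('!H', data, offset)[0]
        offset += 2
        name = data[offset:offset + namelen]
        offset += namelen
        node = data[offset:offset + 20]
        deltabasenode = data[offset + 20:offset + 40]
        offset += 40
        deltalen = struct.unpack_from('!Q', data, offset)[0]
        offset += 8
        delta = zlib.decompress(data[offset:offset + deltalen])
        offset += deltalen
        metalen = struct.unpack_from('!I', data, offset)[0]
        offset += 4 + metalen
        return (name, node, deltabasenode, delta), offset

    record = packrecord(b'foo.txt', b'\x11' * 20, b'\x00' * 20, b'fulltext')
    fields, end = unpackrecord(record)
    assert fields[3] == b'fulltext' and end == len(record)
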
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/hgext/remotefilelog/debugcommands.py Fri Jan 18 13:28:22 2019 -0500
@@ -0,0 +1,378 @@
+# debugcommands.py - debug logic for remotefilelog
+#
+# Copyright 2013 Facebook, Inc.
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2 or any later version.
+from __future__ import absolute_import
+
+import hashlib
+import os
+import zlib
+
+from mercurial.node import bin, hex, nullid, short
+from mercurial.i18n import _
+from mercurial import (
+ error,
+ filelog,
+ node as nodemod,
+ revlog,
+)
+from . import (
+ constants,
+ datapack,
+ extutil,
+ fileserverclient,
+ historypack,
+ repack,
+ shallowutil,
+)
+
+def debugremotefilelog(ui, path, **opts):
+ decompress = opts.get(r'decompress')
+
+ size, firstnode, mapping = parsefileblob(path, decompress)
+
+ ui.status(_("size: %d bytes\n") % (size))
+ ui.status(_("path: %s \n") % (path))
+ ui.status(_("key: %s \n") % (short(firstnode)))
+ ui.status(_("\n"))
+ ui.status(_("%12s => %12s %13s %13s %12s\n") %
+ ("node", "p1", "p2", "linknode", "copyfrom"))
+
+ queue = [firstnode]
+ while queue:
+ node = queue.pop(0)
+ p1, p2, linknode, copyfrom = mapping[node]
+ ui.status(_("%s => %s %s %s %s\n") %
+ (short(node), short(p1), short(p2), short(linknode), copyfrom))
+ if p1 != nullid:
+ queue.append(p1)
+ if p2 != nullid:
+ queue.append(p2)
+
+def buildtemprevlog(repo, file):
+ # get filename key
+ filekey = nodemod.hex(hashlib.sha1(file).digest())
+ filedir = os.path.join(repo.path, 'store/data', filekey)
+
+ # sort all entries based on linkrev
+ fctxs = []
+ for filenode in os.listdir(filedir):
+ if '_old' not in filenode:
+ fctxs.append(repo.filectx(file, fileid=bin(filenode)))
+
+ fctxs = sorted(fctxs, key=lambda x: x.linkrev())
+
+ # add to revlog
+ temppath = repo.sjoin('data/temprevlog.i')
+ if os.path.exists(temppath):
+ os.remove(temppath)
+ r = filelog.filelog(repo.svfs, 'temprevlog')
+
+ class faket(object):
+ def add(self, a, b, c):
+ pass
+ t = faket()
+ for fctx in fctxs:
+ if fctx.node() not in repo:
+ continue
+
+ p = fctx.filelog().parents(fctx.filenode())
+ meta = {}
+ if fctx.renamed():
+ meta['copy'] = fctx.renamed()[0]
+ meta['copyrev'] = hex(fctx.renamed()[1])
+
+ r.add(fctx.data(), meta, t, fctx.linkrev(), p[0], p[1])
+
+ return r
+
+def debugindex(orig, ui, repo, file_=None, **opts):
+ """dump the contents of an index file"""
+ if (opts.get(r'changelog') or
+ opts.get(r'manifest') or
+ opts.get(r'dir') or
+ not shallowutil.isenabled(repo) or
+ not repo.shallowmatch(file_)):
+ return orig(ui, repo, file_, **opts)
+
+ r = buildtemprevlog(repo, file_)
+
+ # debugindex like normal
+ format = opts.get('format', 0)
+ if format not in (0, 1):
+ raise error.Abort(_("unknown format %d") % format)
+
+ generaldelta = r.version & revlog.FLAG_GENERALDELTA
+ if generaldelta:
+ basehdr = ' delta'
+ else:
+ basehdr = ' base'
+
+ if format == 0:
+ ui.write((" rev offset length " + basehdr + " linkrev"
+ " nodeid p1 p2\n"))
+ elif format == 1:
+ ui.write((" rev flag offset length"
+ " size " + basehdr + " link p1 p2"
+ " nodeid\n"))
+
+ for i in r:
+ node = r.node(i)
+ if generaldelta:
+ base = r.deltaparent(i)
+ else:
+ base = r.chainbase(i)
+ if format == 0:
+ try:
+ pp = r.parents(node)
+ except Exception:
+ pp = [nullid, nullid]
+ ui.write("% 6d % 9d % 7d % 6d % 7d %s %s %s\n" % (
+ i, r.start(i), r.length(i), base, r.linkrev(i),
+ short(node), short(pp[0]), short(pp[1])))
+ elif format == 1:
+ pr = r.parentrevs(i)
+ ui.write("% 6d %04x % 8d % 8d % 8d % 6d % 6d % 6d % 6d %s\n" % (
+ i, r.flags(i), r.start(i), r.length(i), r.rawsize(i),
+ base, r.linkrev(i), pr[0], pr[1], short(node)))
+
+def debugindexdot(orig, ui, repo, file_):
+ """dump an index DAG as a graphviz dot file"""
+ if not shallowutil.isenabled(repo):
+ return orig(ui, repo, file_)
+
+ r = buildtemprevlog(repo, os.path.basename(file_)[:-2])
+
+ ui.write(("digraph G {\n"))
+ for i in r:
+ node = r.node(i)
+ pp = r.parents(node)
+ ui.write("\t%d -> %d\n" % (r.rev(pp[0]), i))
+ if pp[1] != nullid:
+ ui.write("\t%d -> %d\n" % (r.rev(pp[1]), i))
+ ui.write("}\n")
+
+def verifyremotefilelog(ui, path, **opts):
+ decompress = opts.get(r'decompress')
+
+ for root, dirs, files in os.walk(path):
+ for file in files:
+ if file == "repos":
+ continue
+ filepath = os.path.join(root, file)
+ size, firstnode, mapping = parsefileblob(filepath, decompress)
+ for p1, p2, linknode, copyfrom in mapping.itervalues():
+ if linknode == nullid:
+ actualpath = os.path.relpath(root, path)
+ key = fileserverclient.getcachekey("reponame", actualpath,
+ file)
+ ui.status("%s %s\n" % (key, os.path.relpath(filepath,
+ path)))
+
+def _decompressblob(raw):
+ return zlib.decompress(raw)
+
+def parsefileblob(path, decompress):
+ raw = None
+ f = open(path, "rb")
+ try:
+ raw = f.read()
+ finally:
+ f.close()
+
+ if decompress:
+ raw = _decompressblob(raw)
+
+ offset, size, flags = shallowutil.parsesizeflags(raw)
+ start = offset + size
+
+ firstnode = None
+
+ mapping = {}
+ while start < len(raw):
+ divider = raw.index('\0', start + 80)
+
+ currentnode = raw[start:(start + 20)]
+ if not firstnode:
+ firstnode = currentnode
+
+ p1 = raw[(start + 20):(start + 40)]
+ p2 = raw[(start + 40):(start + 60)]
+ linknode = raw[(start + 60):(start + 80)]
+ copyfrom = raw[(start + 80):divider]
+
+ mapping[currentnode] = (p1, p2, linknode, copyfrom)
+ start = divider + 1
+
+ return size, firstnode, mapping
+
+def debugdatapack(ui, *paths, **opts):
+ for path in paths:
+ if '.data' in path:
+ path = path[:path.index('.data')]
+ ui.write("%s:\n" % path)
+ dpack = datapack.datapack(path)
+ node = opts.get(r'node')
+ if node:
+ deltachain = dpack.getdeltachain('', bin(node))
+ dumpdeltachain(ui, deltachain, **opts)
+ return
+
+ if opts.get(r'long'):
+ hashformatter = hex
+ hashlen = 42
+ else:
+ hashformatter = short
+ hashlen = 14
+
+ lastfilename = None
+ totaldeltasize = 0
+ totalblobsize = 0
+ def printtotals():
+ if lastfilename is not None:
+ ui.write("\n")
+ if not totaldeltasize or not totalblobsize:
+ return
+ difference = totalblobsize - totaldeltasize
+ deltastr = "%0.1f%% %s" % (
+ (100.0 * abs(difference) / totalblobsize),
+ ("smaller" if difference > 0 else "bigger"))
+
+ ui.write(("Total:%s%s %s (%s)\n") % (
+ "".ljust(2 * hashlen - len("Total:")),
+ ('%d' % totaldeltasize).ljust(12),
+ ('%d' % totalblobsize).ljust(9),
+ deltastr
+ ))
+
+ bases = {}
+ nodes = set()
+ failures = 0
+ for filename, node, deltabase, deltalen in dpack.iterentries():
+ bases[node] = deltabase
+ if node in nodes:
+ ui.write(("Bad entry: %s appears twice\n" % short(node)))
+ failures += 1
+ nodes.add(node)
+ if filename != lastfilename:
+ printtotals()
+ name = '(empty name)' if filename == '' else filename
+ ui.write("%s:\n" % name)
+ ui.write("%s%s%s%s\n" % (
+ "Node".ljust(hashlen),
+ "Delta Base".ljust(hashlen),
+ "Delta Length".ljust(14),
+ "Blob Size".ljust(9)))
+ lastfilename = filename
+ totalblobsize = 0
+ totaldeltasize = 0
+
+ # Metadata could be missing, in which case it will be an empty dict.
+ meta = dpack.getmeta(filename, node)
+ if constants.METAKEYSIZE in meta:
+ blobsize = meta[constants.METAKEYSIZE]
+ totaldeltasize += deltalen
+ totalblobsize += blobsize
+ else:
+ blobsize = "(missing)"
+ ui.write("%s %s %s%d\n" % (
+ hashformatter(node),
+ hashformatter(deltabase),
+ ('%d' % deltalen).ljust(14),
+ blobsize))
+
+ if filename is not None:
+ printtotals()
+
+ failures += _sanitycheck(ui, set(nodes), bases)
+ if failures > 1:
+ ui.warn(("%d failures\n" % failures))
+ return 1
+
+def _sanitycheck(ui, nodes, bases):
+ """
+ Does some basic sanity checking on a packfile with ``nodes`` and
+ ``bases`` (a mapping of node->base):
+
+ - Each deltabase must itself be a node elsewhere in the pack
+ - There must be no cycles
+ """
+ failures = 0
+ for node in nodes:
+ seen = set()
+ current = node
+ deltabase = bases[current]
+
+ while deltabase != nullid:
+ if deltabase not in nodes:
+ ui.warn(("Bad entry: %s has an unknown deltabase (%s)\n" %
+ (short(node), short(deltabase))))
+ failures += 1
+ break
+
+ if deltabase in seen:
+ ui.warn(("Bad entry: %s has a cycle (at %s)\n" %
+ (short(node), short(deltabase))))
+ failures += 1
+ break
+
+ current = deltabase
+ seen.add(current)
+ deltabase = bases[current]
+ # Since ``node`` begins a valid chain, reset/memoize its base to nullid
+ # so we don't traverse it again.
+ bases[node] = nullid
+ return failures
+
+def dumpdeltachain(ui, deltachain, **opts):
+ hashformatter = hex
+ hashlen = 40
+
+ lastfilename = None
+ # Each chain entry carries the deltabase's filename in the third
+ # slot; unpack it under its own name so it does not shadow filename.
+ for filename, node, deltabasefilename, deltabasenode, delta in deltachain:
+ if filename != lastfilename:
+ ui.write("\n%s\n" % filename)
+ lastfilename = filename
+ ui.write("%s %s %s %s\n" % (
+ "Node".ljust(hashlen),
+ "Delta Base".ljust(hashlen),
+ "Delta SHA1".ljust(hashlen),
+ "Delta Length".ljust(6),
+ ))
+
+ ui.write("%s %s %s %d\n" % (
+ hashformatter(node),
+ hashformatter(deltabasenode),
+ nodemod.hex(hashlib.sha1(delta).digest()),
+ len(delta)))
+
+def debughistorypack(ui, path):
+ if '.hist' in path:
+ path = path[:path.index('.hist')]
+ hpack = historypack.historypack(path)
+
+ lastfilename = None
+ for entry in hpack.iterentries():
+ filename, node, p1node, p2node, linknode, copyfrom = entry
+ if filename != lastfilename:
+ ui.write("\n%s\n" % filename)
+ ui.write("%s%s%s%s%s\n" % (
+ "Node".ljust(14),
+ "P1 Node".ljust(14),
+ "P2 Node".ljust(14),
+ "Link Node".ljust(14),
+ "Copy From"))
+ lastfilename = filename
+ ui.write("%s %s %s %s %s\n" % (short(node), short(p1node),
+ short(p2node), short(linknode), copyfrom))
+
+def debugwaitonrepack(repo):
+ with extutil.flock(repack.repacklockvfs(repo).join('repacklock'), ''):
+ return
+
+def debugwaitonprefetch(repo):
+ with repo._lock(repo.svfs, "prefetchlock", True, None,
+ None, _('prefetching in %s') % repo.origroot):
+ pass
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/hgext/remotefilelog/extutil.py Fri Jan 18 13:28:22 2019 -0500
@@ -0,0 +1,66 @@
+# extutil.py - useful utility methods for extensions
+#
+# Copyright 2016 Facebook
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2 or any later version.
+
+from __future__ import absolute_import
+
+import contextlib
+import errno
+import os
+import time
+
+from mercurial import (
+ error,
+ lock as lockmod,
+ util,
+ vfs as vfsmod,
+)
+
+@contextlib.contextmanager
+def flock(lockpath, description, timeout=-1):
+ """A flock based lock object. Currently it is always non-blocking.
+
+ Note that since it is flock based, you can accidentally take it multiple
+ times within one process and the first one to be released will release all
+ of them. So the caller needs to be careful to not create more than one
+ instance per lock.
+ """
+
+ # best effort lightweight lock
+ try:
+ import fcntl
+ fcntl.flock
+ except ImportError:
+ # fallback to Mercurial lock
+ vfs = vfsmod.vfs(os.path.dirname(lockpath))
+ with lockmod.lock(vfs, os.path.basename(lockpath), timeout=timeout):
+ yield
+ return
+ # make sure lock file exists
+ util.makedirs(os.path.dirname(lockpath))
+ with open(lockpath, 'a'):
+ pass
+ lockfd = os.open(lockpath, os.O_RDONLY, 0o664)
+ start = time.time()
+ while True:
+ try:
+ fcntl.flock(lockfd, fcntl.LOCK_EX | fcntl.LOCK_NB)
+ break
+ except IOError as ex:
+ if ex.errno == errno.EAGAIN:
+ if timeout != -1 and time.time() - start > timeout:
+ raise error.LockHeld(errno.EAGAIN, lockpath, description,
+ '')
+ else:
+ time.sleep(0.05)
+ continue
+ raise
+
+ try:
+ yield
+ finally:
+ fcntl.flock(lockfd, fcntl.LOCK_UN)
+ os.close(lockfd)
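
A minimal usage sketch for the flock() helper above; the lock path and
description here are made up for illustration, and in the extension itself
the helper guards things like the repack lock:

    from mercurial import error
    from hgext.remotefilelog import extutil

    lockpath = '/tmp/myrepo/.hg/store/repacklock'  # hypothetical path
    try:
        with extutil.flock(lockpath, 'repacking', timeout=5):
            pass  # critical section: one process at a time runs this
    except error.LockHeld:
        pass  # another process held the lock past the 5s timeout
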
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/hgext/remotefilelog/fileserverclient.py Fri Jan 18 13:28:22 2019 -0500
@@ -0,0 +1,581 @@
+# fileserverclient.py - client for communicating with the cache process
+#
+# Copyright 2013 Facebook, Inc.
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2 or any later version.
+
+from __future__ import absolute_import
+
+import hashlib
+import io
+import os
+import threading
+import time
+import zlib
+
+from mercurial.i18n import _
+from mercurial.node import bin, hex, nullid
+from mercurial import (
+ error,
+ node,
+ pycompat,
+ revlog,
+ sshpeer,
+ util,
+ wireprotov1peer,
+)
+from mercurial.utils import procutil
+
+from . import (
+ constants,
+ contentstore,
+ metadatastore,
+)
+
+_sshv1peer = sshpeer.sshv1peer
+
+# Statistics for debugging
+fetchcost = 0
+fetches = 0
+fetched = 0
+fetchmisses = 0
+
+_lfsmod = None
+
+def getcachekey(reponame, file, id):
+ pathhash = node.hex(hashlib.sha1(file).digest())
+ return os.path.join(reponame, pathhash[:2], pathhash[2:], id)
+
+def getlocalkey(file, id):
+ pathhash = node.hex(hashlib.sha1(file).digest())
+ return os.path.join(pathhash, id)
+
+def peersetup(ui, peer):
+
+ class remotefilepeer(peer.__class__):
+ @wireprotov1peer.batchable
+ def x_rfl_getfile(self, file, node):
+ if not self.capable('x_rfl_getfile'):
+ raise error.Abort(
+ 'configured remotefile server does not support getfile')
+ f = wireprotov1peer.future()
+ yield {'file': file, 'node': node}, f
+ code, data = f.value.split('\0', 1)
+ if int(code):
+ raise error.LookupError(file, node, data)
+ yield data
+
+ @wireprotov1peer.batchable
+ def x_rfl_getflogheads(self, path):
+ if not self.capable('x_rfl_getflogheads'):
+ raise error.Abort('configured remotefile server does not '
+ 'support getflogheads')
+ f = wireprotov1peer.future()
+ yield {'path': path}, f
+ heads = f.value.split('\n') if f.value else []
+ yield heads
+
+ def _updatecallstreamopts(self, command, opts):
+ if command != 'getbundle':
+ return
+ if (constants.NETWORK_CAP_LEGACY_SSH_GETFILES
+ not in self.capabilities()):
+ return
+ if not util.safehasattr(self, '_localrepo'):
+ return
+ if (constants.SHALLOWREPO_REQUIREMENT
+ not in self._localrepo.requirements):
+ return
+
+ bundlecaps = opts.get('bundlecaps')
+ if bundlecaps:
+ bundlecaps = [bundlecaps]
+ else:
+ bundlecaps = []
+
+ # shallow, includepattern, and excludepattern are a hacky way of
+ # carrying over data from the local repo to this getbundle
+ # command. We need to do it this way because bundle1 getbundle
+ # doesn't provide any other place we can hook in to manipulate
+ # getbundle args before it goes across the wire. Once we get rid
+ # of bundle1, we can use bundle2's _pullbundle2extraprepare to
+ # do this more cleanly.
+ bundlecaps.append(constants.BUNDLE2_CAPABLITY)
+ if self._localrepo.includepattern:
+ patterns = '\0'.join(self._localrepo.includepattern)
+ includecap = "includepattern=" + patterns
+ bundlecaps.append(includecap)
+ if self._localrepo.excludepattern:
+ patterns = '\0'.join(self._localrepo.excludepattern)
+ excludecap = "excludepattern=" + patterns
+ bundlecaps.append(excludecap)
+ opts['bundlecaps'] = ','.join(bundlecaps)
+
+ def _sendrequest(self, command, args, **opts):
+ self._updatecallstreamopts(command, args)
+ return super(remotefilepeer, self)._sendrequest(command, args,
+ **opts)
+
+ def _callstream(self, command, **opts):
+ supertype = super(remotefilepeer, self)
+ if not util.safehasattr(supertype, '_sendrequest'):
+ self._updatecallstreamopts(command, pycompat.byteskwargs(opts))
+ return super(remotefilepeer, self)._callstream(command, **opts)
+
+ peer.__class__ = remotefilepeer
+
+class cacheconnection(object):
+ """The connection for communicating with the remote cache. Performs
+ gets and sets by communicating with an external process that has the
+ cache-specific implementation.
+ """
+ def __init__(self):
+ self.pipeo = self.pipei = self.pipee = None
+ self.subprocess = None
+ self.connected = False
+
+ def connect(self, cachecommand):
+ if self.pipeo:
+ raise error.Abort(_("cache connection already open"))
+ self.pipei, self.pipeo, self.pipee, self.subprocess = \
+ procutil.popen4(cachecommand)
+ self.connected = True
+
+ def close(self):
+ def tryclose(pipe):
+ try:
+ pipe.close()
+ except Exception:
+ pass
+ if self.connected:
+ try:
+ self.pipei.write("exit\n")
+ except Exception:
+ pass
+ tryclose(self.pipei)
+ self.pipei = None
+ tryclose(self.pipeo)
+ self.pipeo = None
+ tryclose(self.pipee)
+ self.pipee = None
+ try:
+ # Wait for process to terminate, making sure to avoid deadlock.
+ # See https://docs.python.org/2/library/subprocess.html for
+ # warnings about wait() and deadlocking.
+ self.subprocess.communicate()
+ except Exception:
+ pass
+ self.subprocess = None
+ self.connected = False
+
+ def request(self, request, flush=True):
+ if self.connected:
+ try:
+ self.pipei.write(request)
+ if flush:
+ self.pipei.flush()
+ except IOError:
+ self.close()
+
+ def receiveline(self):
+ if not self.connected:
+ return None
+ # Initialize result so the return below is well-defined even if
+ # readline() raises IOError.
+ result = None
+ try:
+ result = self.pipeo.readline()[:-1]
+ if not result:
+ self.close()
+ except IOError:
+ self.close()
+
+ return result
+
+def _getfilesbatch(
+ remote, receivemissing, progresstick, missed, idmap, batchsize):
+ # Over http(s), iterbatch is a streamy method and we can start
+ # looking at results early. This means we send one (potentially
+ # large) request, but then we show nice progress as we process
+ # file results, rather than showing chunks of $batchsize in
+ # progress.
+ #
+ # Over ssh, iterbatch isn't streamy because batch() wasn't
+ # explicitly designed as a streaming method. In the future we
+ # should probably introduce a streambatch() method upstream and
+ # use that for this.
+ with remote.commandexecutor() as e:
+ futures = []
+ for m in missed:
+ futures.append(e.callcommand('x_rfl_getfile', {
+ 'file': idmap[m],
+ 'node': m[-40:]
+ }))
+
+ for i, m in enumerate(missed):
+ r = futures[i].result()
+ futures[i] = None # release memory
+ file_ = idmap[m]
+ node = m[-40:]
+ receivemissing(io.BytesIO('%d\n%s' % (len(r), r)), file_, node)
+ progresstick()
+
+def _getfiles_optimistic(
+ remote, receivemissing, progresstick, missed, idmap, step):
+ remote._callstream("x_rfl_getfiles")
+ i = 0
+ pipeo = remote._pipeo
+ pipei = remote._pipei
+ while i < len(missed):
+ # issue a batch of requests
+ start = i
+ end = min(len(missed), start + step)
+ i = end
+ for missingid in missed[start:end]:
+ # issue new request
+ versionid = missingid[-40:]
+ file = idmap[missingid]
+ sshrequest = "%s%s\n" % (versionid, file)
+ pipeo.write(sshrequest)
+ pipeo.flush()
+
+ # receive batch results
+ for missingid in missed[start:end]:
+ versionid = missingid[-40:]
+ file = idmap[missingid]
+ receivemissing(pipei, file, versionid)
+ progresstick()
+
+ # End the command
+ pipeo.write('\n')
+ pipeo.flush()
+
+def _getfiles_threaded(
+ remote, receivemissing, progresstick, missed, idmap, step):
+ remote._callstream("getfiles")
+ pipeo = remote._pipeo
+ pipei = remote._pipei
+
+ def writer():
+ for missingid in missed:
+ versionid = missingid[-40:]
+ file = idmap[missingid]
+ sshrequest = "%s%s\n" % (versionid, file)
+ pipeo.write(sshrequest)
+ pipeo.flush()
+ writerthread = threading.Thread(target=writer)
+ writerthread.daemon = True
+ writerthread.start()
+
+ for missingid in missed:
+ versionid = missingid[-40:]
+ file = idmap[missingid]
+ receivemissing(pipei, file, versionid)
+ progresstick()
+
+ writerthread.join()
+ # End the command
+ pipeo.write('\n')
+ pipeo.flush()
+
+class fileserverclient(object):
+ """A client for requesting files from the remote file server.
+ """
+ def __init__(self, repo):
+ ui = repo.ui
+ self.repo = repo
+ self.ui = ui
+ self.cacheprocess = ui.config("remotefilelog", "cacheprocess")
+ if self.cacheprocess:
+ self.cacheprocess = util.expandpath(self.cacheprocess)
+
+ # This option causes remotefilelog to pass the full file path to the
+ # cacheprocess instead of a hashed key.
+ self.cacheprocesspasspath = ui.configbool(
+ "remotefilelog", "cacheprocess.includepath")
+
+ self.debugoutput = ui.configbool("remotefilelog", "debug")
+
+ self.remotecache = cacheconnection()
+
+ def setstore(self, datastore, historystore, writedata, writehistory):
+ self.datastore = datastore
+ self.historystore = historystore
+ self.writedata = writedata
+ self.writehistory = writehistory
+
+ def _connect(self):
+ return self.repo.connectionpool.get(self.repo.fallbackpath)
+
+ def request(self, fileids):
+ """Takes a list of filename/node pairs and fetches them from the
+ server. Files are stored in the local cache.
+ A list of nodes that the server couldn't find is returned.
+ If the connection fails, an exception is raised.
+ """
+ if not self.remotecache.connected:
+ self.connect()
+ cache = self.remotecache
+ writedata = self.writedata
+
+ repo = self.repo
+ total = len(fileids)
+ request = "get\n%d\n" % total
+ idmap = {}
+ reponame = repo.name
+ for file, id in fileids:
+ fullid = getcachekey(reponame, file, id)
+ if self.cacheprocesspasspath:
+ request += file + '\0'
+ request += fullid + "\n"
+ idmap[fullid] = file
+
+ cache.request(request)
+
+ progress = self.ui.makeprogress(_('downloading'), total=total)
+ progress.update(0)
+
+ missed = []
+ while True:
+ missingid = cache.receiveline()
+ if not missingid:
+ missedset = set(missed)
+ for missingid in idmap:
+ if missingid not in missedset:
+ missed.append(missingid)
+ self.ui.warn(_("warning: cache connection closed early - " +
+ "falling back to server\n"))
+ break
+ if missingid == "0":
+ break
+ if missingid.startswith("_hits_"):
+ # receive progress reports
+ parts = missingid.split("_")
+ progress.increment(int(parts[2]))
+ continue
+
+ missed.append(missingid)
+
+ global fetchmisses
+ fetchmisses += len(missed)
+
+ fromcache = total - len(missed)
+ progress.update(fromcache, total=total)
+ self.ui.log("remotefilelog", "remote cache hit rate is %r of %r\n",
+ fromcache, total, hit=fromcache, total=total)
+
+ oldumask = os.umask(0o002)
+ try:
+ # receive cache misses from master
+ if missed:
+ # When verbose is true, sshpeer prints 'running ssh...'
+ # to stdout, which can interfere with some command
+ # outputs
+ verbose = self.ui.verbose
+ self.ui.verbose = False
+ try:
+ with self._connect() as conn:
+ remote = conn.peer
+ if remote.capable(
+ constants.NETWORK_CAP_LEGACY_SSH_GETFILES):
+ if not isinstance(remote, _sshv1peer):
+ raise error.Abort('remotefilelog requires ssh '
+ 'servers')
+ step = self.ui.configint('remotefilelog',
+ 'getfilesstep')
+ getfilestype = self.ui.config('remotefilelog',
+ 'getfilestype')
+ if getfilestype == 'threaded':
+ _getfiles = _getfiles_threaded
+ else:
+ _getfiles = _getfiles_optimistic
+ _getfiles(remote, self.receivemissing,
+ progress.increment, missed, idmap, step)
+ elif remote.capable("x_rfl_getfile"):
+ if remote.capable('batch'):
+ batchdefault = 100
+ else:
+ batchdefault = 10
+ batchsize = self.ui.configint(
+ 'remotefilelog', 'batchsize', batchdefault)
+ _getfilesbatch(
+ remote, self.receivemissing, progress.increment,
+ missed, idmap, batchsize)
+ else:
+ raise error.Abort("configured remotefilelog server"
+ " does not support remotefilelog")
+
+ self.ui.log("remotefilefetchlog",
+ "Success\n",
+ fetched_files = progress.pos - fromcache,
+ total_to_fetch = total - fromcache)
+ except Exception:
+ self.ui.log("remotefilefetchlog",
+ "Fail\n",
+ fetched_files = progress.pos - fromcache,
+ total_to_fetch = total - fromcache)
+ raise
+ finally:
+ self.ui.verbose = verbose
+ # send to memcache
+ request = "set\n%d\n%s\n" % (len(missed), "\n".join(missed))
+ cache.request(request)
+
+ progress.complete()
+
+ # mark ourselves as a user of this cache
+ writedata.markrepo(self.repo.path)
+ finally:
+ os.umask(oldumask)
+
+ def receivemissing(self, pipe, filename, node):
+ line = pipe.readline()[:-1]
+ if not line:
+ raise error.ResponseError(_("error downloading file contents:"),
+ _("connection closed early"))
+ size = int(line)
+ data = pipe.read(size)
+ if len(data) != size:
+ raise error.ResponseError(_("error downloading file contents:"),
+ _("only received %s of %s bytes")
+ % (len(data), size))
+
+ self.writedata.addremotefilelognode(filename, bin(node),
+ zlib.decompress(data))
+
+ def connect(self):
+ if self.cacheprocess:
+ cmd = "%s %s" % (self.cacheprocess, self.writedata._path)
+ self.remotecache.connect(cmd)
+ else:
+ # If no cache process is specified, we fake one that always
+ # returns cache misses. This enables tests to run easily
+ # and may eventually allow us to be a drop in replacement
+ # for the largefiles extension.
+ class simplecache(object):
+ def __init__(self):
+ self.missingids = []
+ self.connected = True
+
+ def close(self):
+ pass
+
+ def request(self, value, flush=True):
+ lines = value.split("\n")
+ if lines[0] != "get":
+ return
+ self.missingids = lines[2:-1]
+ self.missingids.append('0')
+
+ def receiveline(self):
+ if len(self.missingids) > 0:
+ return self.missingids.pop(0)
+ return None
+
+ self.remotecache = simplecache()
+
+ def close(self):
+ if fetches:
+ msg = ("%d files fetched over %d fetches - " +
+ "(%d misses, %0.2f%% hit ratio) over %0.2fs\n") % (
+ fetched,
+ fetches,
+ fetchmisses,
+ float(fetched - fetchmisses) / float(fetched) * 100.0,
+ fetchcost)
+ if self.debugoutput:
+ self.ui.warn(msg)
+ self.ui.log("remotefilelog.prefetch", msg.replace("%", "%%"),
+ remotefilelogfetched=fetched,
+ remotefilelogfetches=fetches,
+ remotefilelogfetchmisses=fetchmisses,
+ remotefilelogfetchtime=fetchcost * 1000)
+
+ if self.remotecache.connected:
+ self.remotecache.close()
+
+ def prefetch(self, fileids, force=False, fetchdata=True,
+ fetchhistory=False):
+ """downloads the given file versions to the cache
+ """
+ repo = self.repo
+ idstocheck = []
+ for file, id in fileids:
+ # hack
+ # - we don't use .hgtags
+ # - workingctx produces ids with length 42,
+ # which we skip since they aren't in any cache
+ if (file == '.hgtags' or len(id) == 42
+ or not repo.shallowmatch(file)):
+ continue
+
+ idstocheck.append((file, bin(id)))
+
+ datastore = self.datastore
+ historystore = self.historystore
+ if force:
+ datastore = contentstore.unioncontentstore(*repo.shareddatastores)
+ historystore = metadatastore.unionmetadatastore(
+ *repo.sharedhistorystores)
+
+ missingids = set()
+ if fetchdata:
+ missingids.update(datastore.getmissing(idstocheck))
+ if fetchhistory:
+ missingids.update(historystore.getmissing(idstocheck))
+
+ # partition missing nodes into nullid and not-nullid so we can
+ # warn about this filtering potentially shadowing bugs.
+ nullids = len([None for unused, id in missingids if id == nullid])
+ if nullids:
+ missingids = [(f, id) for f, id in missingids if id != nullid]
+ repo.ui.develwarn(
+ ('remotefilelog not fetching %d null revs'
+ ' - this is likely hiding bugs' % nullids),
+ config='remotefilelog-ext')
+ if missingids:
+ global fetches, fetched, fetchcost
+ fetches += 1
+
+ # We want to be able to detect excess individual file downloads, so
+ # let's log that information for debugging.
+ if fetches >= 15 and fetches < 18:
+ if fetches == 15:
+ fetchwarning = self.ui.config('remotefilelog',
+ 'fetchwarning')
+ if fetchwarning:
+ self.ui.warn(fetchwarning + '\n')
+ self.logstacktrace()
+ missingids = [(file, hex(id)) for file, id in missingids]
+ fetched += len(missingids)
+ start = time.time()
+ missingids = self.request(missingids)
+ if missingids:
+ raise error.Abort(_("unable to download %d files") %
+ len(missingids))
+ fetchcost += time.time() - start
+ self._lfsprefetch(fileids)
+
+ def _lfsprefetch(self, fileids):
+ if not _lfsmod or not util.safehasattr(
+ self.repo.svfs, 'lfslocalblobstore'):
+ return
+ if not _lfsmod.wrapper.candownload(self.repo):
+ return
+ pointers = []
+ store = self.repo.svfs.lfslocalblobstore
+ for file, id in fileids:
+ node = bin(id)
+ rlog = self.repo.file(file)
+ if rlog.flags(node) & revlog.REVIDX_EXTSTORED:
+ text = rlog.revision(node, raw=True)
+ p = _lfsmod.pointer.deserialize(text)
+ oid = p.oid()
+ if not store.has(oid):
+ pointers.append(p)
+ if len(pointers) > 0:
+ self.repo.svfs.lfsremoteblobstore.readbatch(pointers, store)
+ assert all(store.has(p.oid()) for p in pointers)
+
+ def logstacktrace(self):
+ import traceback
+ self.ui.log('remotefilelog', 'excess remotefilelog fetching:\n%s\n',
+ ''.join(traceback.format_stack()))
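
The cache process that cacheconnection talks to speaks a small
line-oriented protocol on stdin/stdout: a request is "get" (or "set") on
one line, a key count on the next, then one key per line; the reply to a
"get" lists each missing key on its own line, may interleave "_hits_<n>_"
progress reports, and ends with a "0" line. A toy process that, like the
simplecache fallback above, treats every key as a miss could look like
this (a sketch under those protocol assumptions, not a real cacheprocess):

    import sys

    def serve(stdin=sys.stdin, stdout=sys.stdout):
        # Toy cacheprocess loop: every requested key is reported missing.
        while True:
            cmd = stdin.readline().strip()
            if not cmd or cmd == 'exit':
                return
            count = int(stdin.readline())
            keys = [stdin.readline()[:-1] for _ in range(count)]
            if cmd == 'get':
                for key in keys:
                    stdout.write(key + '\n')  # a miss: echo the key back
                stdout.write('_hits_0_\n')    # progress: 0 cache hits
                stdout.write('0\n')           # terminates the response
                stdout.flush()
            # A "set" request names keys that were just fetched from the
            # server; a real process would store them, this toy ignores
            # them.

    if __name__ == '__main__':
        serve()
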
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/hgext/remotefilelog/historypack.py Fri Jan 18 13:28:22 2019 -0500
@@ -0,0 +1,520 @@
+from __future__ import absolute_import
+
+import hashlib
+import struct
+
+from mercurial.node import hex, nullid
+from mercurial import (
+ pycompat,
+ util,
+)
+from . import (
+ basepack,
+ constants,
+ shallowutil,
+)
+
+# (filename hash, offset, size)
+INDEXFORMAT2 = '!20sQQII'
+INDEXENTRYLENGTH2 = struct.calcsize(INDEXFORMAT2)
+NODELENGTH = 20
+
+NODEINDEXFORMAT = '!20sQ'
+NODEINDEXENTRYLENGTH = struct.calcsize(NODEINDEXFORMAT)
+
+# (node, p1, p2, linknode)
+PACKFORMAT = "!20s20s20s20sH"
+PACKENTRYLENGTH = 82
+
+ENTRYCOUNTSIZE = 4
+
+INDEXSUFFIX = '.histidx'
+PACKSUFFIX = '.histpack'
+
+ANC_NODE = 0
+ANC_P1NODE = 1
+ANC_P2NODE = 2
+ANC_LINKNODE = 3
+ANC_COPYFROM = 4
+
+class historypackstore(basepack.basepackstore):
+ INDEXSUFFIX = INDEXSUFFIX
+ PACKSUFFIX = PACKSUFFIX
+
+ def getpack(self, path):
+ return historypack(path)
+
+ def getancestors(self, name, node, known=None):
+ for pack in self.packs:
+ try:
+ return pack.getancestors(name, node, known=known)
+ except KeyError:
+ pass
+
+ for pack in self.refresh():
+ try:
+ return pack.getancestors(name, node, known=known)
+ except KeyError:
+ pass
+
+ raise KeyError((name, node))
+
+ def getnodeinfo(self, name, node):
+ for pack in self.packs:
+ try:
+ return pack.getnodeinfo(name, node)
+ except KeyError:
+ pass
+
+ for pack in self.refresh():
+ try:
+ return pack.getnodeinfo(name, node)
+ except KeyError:
+ pass
+
+ raise KeyError((name, node))
+
+ def add(self, filename, node, p1, p2, linknode, copyfrom):
+ raise RuntimeError("cannot add to historypackstore (%s:%s)"
+ % (filename, hex(node)))
+
+class historypack(basepack.basepack):
+ INDEXSUFFIX = INDEXSUFFIX
+ PACKSUFFIX = PACKSUFFIX
+
+ SUPPORTED_VERSIONS = [2]
+
+ def __init__(self, path):
+ super(historypack, self).__init__(path)
+ self.INDEXFORMAT = INDEXFORMAT2
+ self.INDEXENTRYLENGTH = INDEXENTRYLENGTH2
+
+ def getmissing(self, keys):
+ missing = []
+ for name, node in keys:
+ try:
+ self._findnode(name, node)
+ except KeyError:
+ missing.append((name, node))
+
+ return missing
+
+ def getancestors(self, name, node, known=None):
+ """Returns as many ancestors as we're aware of.
+
+ return value: {
+ node: (p1, p2, linknode, copyfrom),
+ ...
+ }
+ """
+ if known and node in known:
+ return []
+
+ ancestors = self._getancestors(name, node, known=known)
+ results = {}
+ for ancnode, p1, p2, linknode, copyfrom in ancestors:
+ results[ancnode] = (p1, p2, linknode, copyfrom)
+
+ if not results:
+ raise KeyError((name, node))
+ return results
+
+ def getnodeinfo(self, name, node):
+ # Drop the node from the tuple before returning, since the result should
+ # just be (p1, p2, linknode, copyfrom)
+ return self._findnode(name, node)[1:]
+
+ def _getancestors(self, name, node, known=None):
+ if known is None:
+ known = set()
+ section = self._findsection(name)
+ filename, offset, size, nodeindexoffset, nodeindexsize = section
+ pending = set((node,))
+ o = 0
+ while o < size:
+ if not pending:
+ break
+ entry, copyfrom = self._readentry(offset + o)
+ o += PACKENTRYLENGTH
+ if copyfrom:
+ o += len(copyfrom)
+
+ ancnode = entry[ANC_NODE]
+ if ancnode in pending:
+ pending.remove(ancnode)
+ p1node = entry[ANC_P1NODE]
+ p2node = entry[ANC_P2NODE]
+ if p1node != nullid and p1node not in known:
+ pending.add(p1node)
+ if p2node != nullid and p2node not in known:
+ pending.add(p2node)
+
+ yield (ancnode, p1node, p2node, entry[ANC_LINKNODE], copyfrom)
+
+ def _readentry(self, offset):
+ data = self._data
+ entry = struct.unpack(PACKFORMAT, data[offset:offset + PACKENTRYLENGTH])
+ copyfrom = None
+ copyfromlen = entry[ANC_COPYFROM]
+ if copyfromlen != 0:
+ offset += PACKENTRYLENGTH
+ copyfrom = data[offset:offset + copyfromlen]
+ return entry, copyfrom
+
+ def add(self, filename, node, p1, p2, linknode, copyfrom):
+ raise RuntimeError("cannot add to historypack (%s:%s)" %
+ (filename, hex(node)))
+
+ def _findnode(self, name, node):
+ if self.VERSION == 0:
+ ancestors = self._getancestors(name, node)
+ for ancnode, p1node, p2node, linknode, copyfrom in ancestors:
+ if ancnode == node:
+ return (ancnode, p1node, p2node, linknode, copyfrom)
+ else:
+ section = self._findsection(name)
+ nodeindexoffset, nodeindexsize = section[3:]
+ entry = self._bisect(node, nodeindexoffset,
+ nodeindexoffset + nodeindexsize,
+ NODEINDEXENTRYLENGTH)
+ if entry is not None:
+ node, offset = struct.unpack(NODEINDEXFORMAT, entry)
+ entry, copyfrom = self._readentry(offset)
+ # Drop the copyfromlen from the end of entry, and replace it
+ # with the copyfrom string.
+ return entry[:4] + (copyfrom,)
+
+ raise KeyError("unable to find history for %s:%s" % (name, hex(node)))
+
+ def _findsection(self, name):
+ params = self.params
+ namehash = hashlib.sha1(name).digest()
+ fanoutkey = struct.unpack(params.fanoutstruct,
+ namehash[:params.fanoutprefix])[0]
+ fanout = self._fanouttable
+
+ start = fanout[fanoutkey] + params.indexstart
+ indexend = self._indexend
+
+ for i in pycompat.xrange(fanoutkey + 1, params.fanoutcount):
+ end = fanout[i] + params.indexstart
+ if end != start:
+ break
+ else:
+ end = indexend
+
+ entry = self._bisect(namehash, start, end, self.INDEXENTRYLENGTH)
+ if not entry:
+ raise KeyError(name)
+
+ rawentry = struct.unpack(self.INDEXFORMAT, entry)
+ x, offset, size, nodeindexoffset, nodeindexsize = rawentry
+ rawnamelen = self._index[nodeindexoffset:nodeindexoffset +
+ constants.FILENAMESIZE]
+ actualnamelen = struct.unpack('!H', rawnamelen)[0]
+ nodeindexoffset += constants.FILENAMESIZE
+ actualname = self._index[nodeindexoffset:nodeindexoffset +
+ actualnamelen]
+ if actualname != name:
+ raise KeyError("found file name %s when looking for %s" %
+ (actualname, name))
+ nodeindexoffset += actualnamelen
+
+ filenamelength = struct.unpack('!H', self._data[offset:offset +
+ constants.FILENAMESIZE])[0]
+ offset += constants.FILENAMESIZE
+
+ actualname = self._data[offset:offset + filenamelength]
+ offset += filenamelength
+
+ if name != actualname:
+ raise KeyError("found file name %s when looking for %s" %
+ (actualname, name))
+
+ # Skip entry list size
+ offset += ENTRYCOUNTSIZE
+
+ nodelistoffset = offset
+ nodelistsize = (size - constants.FILENAMESIZE - filenamelength -
+ ENTRYCOUNTSIZE)
+ return (name, nodelistoffset, nodelistsize,
+ nodeindexoffset, nodeindexsize)
+
+ def _bisect(self, node, start, end, entrylen):
+ # Bisect between start and end to find node
+ origstart = start
+ startnode = self._index[start:start + NODELENGTH]
+ endnode = self._index[end:end + NODELENGTH]
+
+ if startnode == node:
+ return self._index[start:start + entrylen]
+ elif endnode == node:
+ return self._index[end:end + entrylen]
+ else:
+ while start < end - entrylen:
+ mid = start + (end - start) // 2
+ mid = mid - ((mid - origstart) % entrylen)
+ midnode = self._index[mid:mid + NODELENGTH]
+ if midnode == node:
+ return self._index[mid:mid + entrylen]
+ if node > midnode:
+ start = mid
+ startnode = midnode
+ elif node < midnode:
+ end = mid
+ endnode = midnode
+ return None
+
+ def markledger(self, ledger, options=None):
+ for filename, node in self:
+ ledger.markhistoryentry(self, filename, node)
+
+ def cleanup(self, ledger):
+ entries = ledger.sources.get(self, [])
+ allkeys = set(self)
+ repackedkeys = set((e.filename, e.node) for e in entries if
+ e.historyrepacked)
+
+ if len(allkeys - repackedkeys) == 0:
+ if self.path not in ledger.created:
+ util.unlinkpath(self.indexpath, ignoremissing=True)
+ util.unlinkpath(self.packpath, ignoremissing=True)
+
+ def __iter__(self):
+ for f, n, x, x, x, x in self.iterentries():
+ yield f, n
+
+ def iterentries(self):
+ # Start at 1 to skip the header
+ offset = 1
+ while offset < self.datasize:
+ data = self._data
+ # <2 byte len> + <filename>
+ filenamelen = struct.unpack('!H', data[offset:offset +
+ constants.FILENAMESIZE])[0]
+ offset += constants.FILENAMESIZE
+ filename = data[offset:offset + filenamelen]
+ offset += filenamelen
+
+ revcount = struct.unpack('!I', data[offset:offset +
+ ENTRYCOUNTSIZE])[0]
+ offset += ENTRYCOUNTSIZE
+
+ for i in pycompat.xrange(revcount):
+ entry = struct.unpack(PACKFORMAT, data[offset:offset +
+ PACKENTRYLENGTH])
+ offset += PACKENTRYLENGTH
+
+ copyfrom = data[offset:offset + entry[ANC_COPYFROM]]
+ offset += entry[ANC_COPYFROM]
+
+ yield (filename, entry[ANC_NODE], entry[ANC_P1NODE],
+ entry[ANC_P2NODE], entry[ANC_LINKNODE], copyfrom)
+
+ self._pagedin += PACKENTRYLENGTH
+
+ # If we've read a lot of data from the mmap, free some memory.
+ self.freememory()
+
+class mutablehistorypack(basepack.mutablebasepack):
+ """A class for constructing and serializing a histpack file and index.
+
+ A history pack is a pair of files that contain the revision history for
+ various file revisions in Mercurial. It contains only revision history (like
+ parent pointers and linknodes), not any revision content information.
+
+ It consists of two files, with the following format:
+
+ .histpack
+ The pack itself is a series of file revisions with some basic header
+ information on each.
+
+ histpack = <version: 1 byte>
+ [<filesection>,...]
+ filesection = <filename len: 2 byte unsigned int>
+ <filename>
+ <revision count: 4 byte unsigned int>
+ [<revision>,...]
+ revision = <node: 20 byte>
+ <p1node: 20 byte>
+ <p2node: 20 byte>
+ <linknode: 20 byte>
+ <copyfromlen: 2 byte>
+ <copyfrom>
+
+ The revisions within each filesection are stored in topological order
+ (newest first). If a given entry has a parent from another file (a copy)
+ then p1node is the node from the other file, and copyfrom is the
+ filepath of the other file.
+
+ .histidx
+ The index file provides a mapping from filename to the file section in
+ the histpack. In V1 it also contains sub-indexes for specific nodes
+ within each file. It consists of three parts, the fanout, the file index
+ and the node indexes.
+
+ The file index is a list of index entries, sorted by filename hash (one
+ per file section in the pack). Each entry has:
+
+ - node (The 20 byte hash of the filename)
+ - pack entry offset (The location of this file section in the histpack)
+ - pack content size (The on-disk length of this file section's pack
+ data)
+ - node index offset (The location of the file's node index in the index
+ file) [1]
+ - node index size (the on-disk length of this file's node index) [1]
+
+ The fanout is a quick lookup table to reduce the number of steps for
+ bisecting the index. It is a series of 4 byte pointers to positions
+ within the index. It has 2^8 entries, which corresponds to hash
+ prefixes [00, 01, 02,..., FD, FE, FF]. Example: the pointer in slot 4F
+ points to the index position of the first revision whose node starts
+ with 4F. This saves log(2^8) = 8 bisect steps.
+
+ histidx = <fanouttable>
+ <file count: 8 byte unsigned> [1]
+ <fileindex>
+ <node count: 8 byte unsigned> [1]
+ [<nodeindex>,...] [1]
+ fanouttable = [<index offset: 4 byte unsigned int>,...] (2^16 entries)
+
+ fileindex = [<file index entry>,...]
+ fileindexentry = <node: 20 byte>
+ <pack file section offset: 8 byte unsigned int>
+ <pack file section size: 8 byte unsigned int>
+ <node index offset: 4 byte unsigned int> [1]
+ <node index size: 4 byte unsigned int> [1]
+ nodeindex = <filename>[<node index entry>,...] [1]
+ filename = <filename len : 2 byte unsigned int><filename value> [1]
+ nodeindexentry = <node: 20 byte> [1]
+ <pack file node offset: 8 byte unsigned int> [1]
+
+ [1]: new in version 1.
+ """
+ INDEXSUFFIX = INDEXSUFFIX
+ PACKSUFFIX = PACKSUFFIX
+
+ SUPPORTED_VERSIONS = [2]
+
+ def __init__(self, ui, packpath, version=2):
+ super(mutablehistorypack, self).__init__(ui, packpath, version=version)
+ self.files = {}
+ self.entrylocations = {}
+ self.fileentries = {}
+
+ self.INDEXFORMAT = INDEXFORMAT2
+ self.INDEXENTRYLENGTH = INDEXENTRYLENGTH2
+
+ self.NODEINDEXFORMAT = NODEINDEXFORMAT
+ self.NODEINDEXENTRYLENGTH = NODEINDEXENTRYLENGTH
+
+ def add(self, filename, node, p1, p2, linknode, copyfrom):
+ copyfrom = copyfrom or ''
+ copyfromlen = struct.pack('!H', len(copyfrom))
+ self.fileentries.setdefault(filename, []).append((node, p1, p2,
+ linknode,
+ copyfromlen,
+ copyfrom))
+
+ def _write(self):
+ for filename in sorted(self.fileentries):
+ entries = self.fileentries[filename]
+ sectionstart = self.packfp.tell()
+
+ # Write the file section content
+ entrymap = dict((e[0], e) for e in entries)
+ def parentfunc(node):
+ x, p1, p2, x, x, x = entrymap[node]
+ parents = []
+ if p1 != nullid:
+ parents.append(p1)
+ if p2 != nullid:
+ parents.append(p2)
+ return parents
+
+ sortednodes = list(reversed(shallowutil.sortnodes(
+ (e[0] for e in entries),
+ parentfunc)))
+
+ # Write the file section header
+ self.writeraw("%s%s%s" % (
+ struct.pack('!H', len(filename)),
+ filename,
+ struct.pack('!I', len(sortednodes)),
+ ))
+
+ sectionlen = constants.FILENAMESIZE + len(filename) + 4
+
+ rawstrings = []
+
+ # Record the node locations for the index
+ locations = self.entrylocations.setdefault(filename, {})
+ offset = sectionstart + sectionlen
+ for node in sortednodes:
+ locations[node] = offset
+ raw = '%s%s%s%s%s%s' % entrymap[node]
+ rawstrings.append(raw)
+ offset += len(raw)
+
+ rawdata = ''.join(rawstrings)
+ sectionlen += len(rawdata)
+
+ self.writeraw(rawdata)
+
+ # Record metadata for the index
+ self.files[filename] = (sectionstart, sectionlen)
+ node = hashlib.sha1(filename).digest()
+ self.entries[node] = node
+
+ def close(self, ledger=None):
+ if self._closed:
+ return
+
+ self._write()
+
+ return super(mutablehistorypack, self).close(ledger=ledger)
+
+ def createindex(self, nodelocations, indexoffset):
+ fileindexformat = self.INDEXFORMAT
+ fileindexlength = self.INDEXENTRYLENGTH
+ nodeindexformat = self.NODEINDEXFORMAT
+ nodeindexlength = self.NODEINDEXENTRYLENGTH
+
+ files = ((hashlib.sha1(filename).digest(), filename, offset, size)
+ for filename, (offset, size) in self.files.iteritems())
+ files = sorted(files)
+
+ # node index is after file index size, file index, and node index size
+ indexlensize = struct.calcsize('!Q')
+ nodeindexoffset = (indexoffset + indexlensize +
+ (len(files) * fileindexlength) + indexlensize)
+
+ fileindexentries = []
+ nodeindexentries = []
+ nodecount = 0
+ for namehash, filename, offset, size in files:
+ # File section index
+ nodelocations = self.entrylocations[filename]
+
+ nodeindexsize = len(nodelocations) * nodeindexlength
+
+ rawentry = struct.pack(fileindexformat, namehash, offset, size,
+ nodeindexoffset, nodeindexsize)
+ # Node index
+ nodeindexentries.append(struct.pack(constants.FILENAMESTRUCT,
+ len(filename)) + filename)
+ nodeindexoffset += constants.FILENAMESIZE + len(filename)
+
+ for node, location in sorted(nodelocations.iteritems()):
+ nodeindexentries.append(struct.pack(nodeindexformat, node,
+ location))
+ nodecount += 1
+
+ nodeindexoffset += len(nodelocations) * nodeindexlength
+
+ fileindexentries.append(rawentry)
+
+ nodecountraw = struct.pack('!Q', nodecount)
+ return (''.join(fileindexentries) + nodecountraw +
+ ''.join(nodeindexentries))
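
Both pack indexes rely on the fanout scheme described in the docstrings
above: a hash prefix selects a fanout slot, the slot holds the offset
where entries with that prefix begin, and the next slot with a different
offset bounds the bisect range. A standalone sketch of that lookup
(assuming the small 2^8-entry fanout; the names loosely mirror
_findsection):

    import struct

    FANOUTCOUNT = 2 ** 8  # small fanout: one-byte hash prefixes
    FANOUTSTRUCT = '!B'   # how a one-byte prefix is decoded

    def fanoutrange(fanout, namehash, indexstart, indexend):
        """Return the (start, end) byte range to bisect for namehash."""
        key = struct.unpack(FANOUTSTRUCT, namehash[:1])[0]
        start = fanout[key] + indexstart
        # Scan forward to the first slot with a different offset; that
        # is where entries with the next prefix begin.
        for i in range(key + 1, FANOUTCOUNT):
            end = fanout[i] + indexstart
            if end != start:
                break
        else:
            end = indexend
        return start, end

    # A fanout where only prefix 0x4f has entries: three 40-byte index
    # entries starting at offset 0 within the index.
    fanout = [0] * FANOUTCOUNT
    for i in range(0x50, FANOUTCOUNT):
        fanout[i] = 3 * 40
    start, end = fanoutrange(fanout, b'\x4f' + b'\x00' * 19, 1024, 4096)
    assert (start, end) == (1024, 1024 + 120)
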
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/hgext/remotefilelog/metadatastore.py Fri Jan 18 13:28:22 2019 -0500
@@ -0,0 +1,156 @@
+from __future__ import absolute_import
+
+from mercurial.node import hex, nullid
+from . import (
+ basestore,
+ shallowutil,
+)
+
+class unionmetadatastore(basestore.baseunionstore):
+ def __init__(self, *args, **kwargs):
+ super(unionmetadatastore, self).__init__(*args, **kwargs)
+
+ self.stores = args
+ self.writestore = kwargs.get(r'writestore')
+
+ # If allowincomplete==True then the union store can return partial
+ # ancestor lists, otherwise it will throw a KeyError if a full
+ # history can't be found.
+ self.allowincomplete = kwargs.get(r'allowincomplete', False)
+
+ def getancestors(self, name, node, known=None):
+ """Returns as many ancestors as we're aware of.
+
+ return value: {
+ node: (p1, p2, linknode, copyfrom),
+ ...
+ }
+ """
+ if known is None:
+ known = set()
+ if node in known:
+ return []
+
+ ancestors = {}
+ def traverse(curname, curnode):
+ # TODO: this algorithm has the potential to traverse parts of
+ # history twice. Ex: with A->B->C->F and A->B->D->F, both D and C
+ # may be queued as missing, then B and A are traversed for both.
+ queue = [(curname, curnode)]
+ missing = []
+ seen = set()
+ while queue:
+ name, node = queue.pop()
+ if (name, node) in seen:
+ continue
+ seen.add((name, node))
+ value = ancestors.get(node)
+ if not value:
+ missing.append((name, node))
+ continue
+ p1, p2, linknode, copyfrom = value
+ if p1 != nullid and p1 not in known:
+ queue.append((copyfrom or curname, p1))
+ if p2 != nullid and p2 not in known:
+ queue.append((curname, p2))
+ return missing
+
+ missing = [(name, node)]
+ while missing:
+ curname, curnode = missing.pop()
+ try:
+ ancestors.update(self._getpartialancestors(curname, curnode,
+ known=known))
+ newmissing = traverse(curname, curnode)
+ missing.extend(newmissing)
+ except KeyError:
+ # If we allow incomplete histories, don't throw.
+ if not self.allowincomplete:
+ raise
+ # If the requested name+node doesn't exist, always throw.
+ if (curname, curnode) == (name, node):
+ raise
+
+ # TODO: ancestors should probably be (name, node) -> (value)
+ return ancestors
+
+ @basestore.baseunionstore.retriable
+ def _getpartialancestors(self, name, node, known=None):
+ for store in self.stores:
+ try:
+ return store.getancestors(name, node, known=known)
+ except KeyError:
+ pass
+
+ raise KeyError((name, hex(node)))
+
+ @basestore.baseunionstore.retriable
+ def getnodeinfo(self, name, node):
+ for store in self.stores:
+ try:
+ return store.getnodeinfo(name, node)
+ except KeyError:
+ pass
+
+ raise KeyError((name, hex(node)))
+
+ def add(self, name, node, data):
+ raise RuntimeError("cannot add content only to remotefilelog "
+ "contentstore")
+
+ def getmissing(self, keys):
+ missing = keys
+ for store in self.stores:
+ if missing:
+ missing = store.getmissing(missing)
+ return missing
+
+ def markledger(self, ledger, options=None):
+ for store in self.stores:
+ store.markledger(ledger, options)
+
+ def getmetrics(self):
+ metrics = [s.getmetrics() for s in self.stores]
+ return shallowutil.sumdicts(*metrics)
+
+class remotefilelogmetadatastore(basestore.basestore):
+ def getancestors(self, name, node, known=None):
+ """Returns as many ancestors as we're aware of.
+
+ return value: {
+ node: (p1, p2, linknode, copyfrom),
+ ...
+ }
+ """
+ data = self._getdata(name, node)
+ ancestors = shallowutil.ancestormap(data)
+ return ancestors
+
+ def getnodeinfo(self, name, node):
+ return self.getancestors(name, node)[node]
+
+ def add(self, name, node, parents, linknode):
+ raise RuntimeError("cannot add metadata only to remotefilelog "
+ "metadatastore")
+
+class remotemetadatastore(object):
+ def __init__(self, ui, fileservice, shared):
+ self._fileservice = fileservice
+ self._shared = shared
+
+ def getancestors(self, name, node, known=None):
+ self._fileservice.prefetch([(name, hex(node))], force=True,
+ fetchdata=False, fetchhistory=True)
+ return self._shared.getancestors(name, node, known=known)
+
+ def getnodeinfo(self, name, node):
+ return self.getancestors(name, node)[node]
+
+ def add(self, name, node, data):
+ raise RuntimeError("cannot add to a remote store")
+
+ def getmissing(self, keys):
+ return keys
+
+ def markledger(self, ledger, options=None):
+ pass
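
The ancestor maps returned by these stores all share the shape
node -> (p1, p2, linknode, copyfrom), and the union store's traversal
keeps walking parents, hopping to the copy source's history whenever
copyfrom is set. A toy walk over a hand-built map (hypothetical nodes and
filenames, for illustration only):

    NULLID = b'\x00' * 20
    A, B, C = b'\xaa' * 20, b'\xbb' * 20, b'\xcc' * 20

    # node -> (p1, p2, linknode, copyfrom), as getancestors() returns it
    ancestors = {
        C: (B, NULLID, b'\x01' * 20, None),       # C's parent is B
        B: (A, NULLID, b'\x02' * 20, 'old.txt'),  # B was copied
        A: (NULLID, NULLID, b'\x03' * 20, None),  # A is a root
    }

    def walk(start):
        # Collect every reachable ancestor, following copyfrom renames.
        name, queue, seen = 'new.txt', [start], set()
        while queue:
            node = queue.pop()
            if node == NULLID or node in seen:
                continue
            seen.add(node)
            p1, p2, linknode, copyfrom = ancestors[node]
            if copyfrom:
                name = copyfrom  # p1 lives in the copy source's history
            queue.extend([p1, p2])
        return seen

    assert walk(C) == {A, B, C}
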
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/hgext/remotefilelog/remotefilectx.py Fri Jan 18 13:28:22 2019 -0500
@@ -0,0 +1,491 @@
+# remotefilectx.py - filectx/workingfilectx implementations for remotefilelog
+#
+# Copyright 2013 Facebook, Inc.
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2 or any later version.
+from __future__ import absolute_import
+
+import collections
+import time
+
+from mercurial.node import bin, hex, nullid, nullrev
+from mercurial import (
+ ancestor,
+ context,
+ error,
+ phases,
+ pycompat,
+ util,
+)
+from . import shallowutil
+
+propertycache = util.propertycache
+FASTLOG_TIMEOUT_IN_SECS = 0.5
+
+class remotefilectx(context.filectx):
+ def __init__(self, repo, path, changeid=None, fileid=None,
+ filelog=None, changectx=None, ancestormap=None):
+ if fileid == nullrev:
+ fileid = nullid
+ if fileid and len(fileid) == 40:
+ fileid = bin(fileid)
+ super(remotefilectx, self).__init__(repo, path, changeid,
+ fileid, filelog, changectx)
+ self._ancestormap = ancestormap
+
+ def size(self):
+ return self._filelog.size(self._filenode)
+
+ @propertycache
+ def _changeid(self):
+ if '_changeid' in self.__dict__:
+ return self._changeid
+ elif '_changectx' in self.__dict__:
+ return self._changectx.rev()
+ elif '_descendantrev' in self.__dict__:
+ # this file context was created from a revision with a known
+ # descendant, we can (lazily) correct for linkrev aliases
+ linknode = self._adjustlinknode(self._path, self._filelog,
+ self._filenode, self._descendantrev)
+ return self._repo.unfiltered().changelog.rev(linknode)
+ else:
+ return self.linkrev()
+
+ def filectx(self, fileid, changeid=None):
+ '''opens an arbitrary revision of the file without
+ opening a new filelog'''
+ return remotefilectx(self._repo, self._path, fileid=fileid,
+ filelog=self._filelog, changeid=changeid)
+
+ def linkrev(self):
+ return self._linkrev
+
+ @propertycache
+ def _linkrev(self):
+ if self._filenode == nullid:
+ return nullrev
+
+ ancestormap = self.ancestormap()
+ p1, p2, linknode, copyfrom = ancestormap[self._filenode]
+ rev = self._repo.changelog.nodemap.get(linknode)
+ if rev is not None:
+ return rev
+
+ # Search all commits for the appropriate linkrev (slow, but uncommon)
+ path = self._path
+ fileid = self._filenode
+ cl = self._repo.unfiltered().changelog
+ mfl = self._repo.manifestlog
+
+ for rev in range(len(cl) - 1, 0, -1):
+ node = cl.node(rev)
+ data = cl.read(node) # get changeset data (we avoid object creation)
+ if path in data[3]: # checking the 'files' field.
+ # The file has been touched, check if the hash is what we're
+ # looking for.
+ if fileid == mfl[data[0]].readfast().get(path):
+ return rev
+
+ # Couldn't find the linkrev. This should generally not happen, and will
+ # likely cause a crash.
+ return None
+
+ def introrev(self):
+ """return the rev of the changeset which introduced this file revision
+
+ This method is different from linkrev because it takes into account the
+ changeset the filectx was created from. It ensures the returned
+ revision is one of its ancestors. This prevents bugs from
+ 'linkrev-shadowing' when a file revision is used by multiple
+ changesets.
+ """
+ lkr = self.linkrev()
+ attrs = vars(self)
+ noctx = not ('_changeid' in attrs or '_changectx' in attrs)
+ if noctx or self.rev() == lkr:
+ return lkr
+ linknode = self._adjustlinknode(self._path, self._filelog,
+ self._filenode, self.rev(),
+ inclusive=True)
+ return self._repo.changelog.rev(linknode)
+
+ def renamed(self):
+ """check if file was actually renamed in this changeset revision
+
+ If a rename is logged in the file revision, we report the copy for the
+ changeset only if the file revision's linkrev points back to the
+ changeset in question or if both changeset parents contain different
+ file revisions.
+ """
+ ancestormap = self.ancestormap()
+
+ p1, p2, linknode, copyfrom = ancestormap[self._filenode]
+ if not copyfrom:
+ return None
+
+ renamed = (copyfrom, p1)
+ if self.rev() == self.linkrev():
+ return renamed
+
+ name = self.path()
+ fnode = self._filenode
+ for p in self._changectx.parents():
+ try:
+ if fnode == p.filenode(name):
+ return None
+ except error.LookupError:
+ pass
+ return renamed
+
+ def ancestormap(self):
+ if not self._ancestormap:
+ self._ancestormap = self.filelog().ancestormap(self._filenode)
+
+ return self._ancestormap
+
+ def parents(self):
+ repo = self._repo
+ ancestormap = self.ancestormap()
+
+ p1, p2, linknode, copyfrom = ancestormap[self._filenode]
+ results = []
+ if p1 != nullid:
+ path = copyfrom or self._path
+ flog = repo.file(path)
+ p1ctx = remotefilectx(repo, path, fileid=p1, filelog=flog,
+ ancestormap=ancestormap)
+ p1ctx._descendantrev = self.rev()
+ results.append(p1ctx)
+
+ if p2 != nullid:
+ path = self._path
+ flog = repo.file(path)
+ p2ctx = remotefilectx(repo, path, fileid=p2, filelog=flog,
+ ancestormap=ancestormap)
+ p2ctx._descendantrev = self.rev()
+ results.append(p2ctx)
+
+ return results
+
+ def _nodefromancrev(self, ancrev, cl, mfl, path, fnode):
+ """returns the node for <path> in <ancrev> if content matches <fnode>"""
+ ancctx = cl.read(ancrev) # This avoids object creation.
+ manifestnode, files = ancctx[0], ancctx[3]
+ # If the file was touched in this ancestor and its content matches
+ # the fnode we are searching for, return this ancestor's node.
+ if path in files and fnode == mfl[manifestnode].readfast().get(path):
+ return cl.node(ancrev)
+ return None
+
+ def _adjustlinknode(self, path, filelog, fnode, srcrev, inclusive=False):
+ """return the first ancestor of <srcrev> introducing <fnode>
+
+ If the linkrev of the file revision does not point to an ancestor of
+ srcrev, we'll walk down the ancestors until we find one introducing
+ this file revision.
+
+ :path: the file path
+ :fnode: the nodeid of the file revision
+ :filelog: the filelog of this path
+ :srcrev: the changeset revision we search ancestors from
+ :inclusive: if true, the src revision will also be checked
+
+ Note: This is based on adjustlinkrev in core, but it's quite different.
+
+ adjustlinkrev depends on the fact that the linkrev is the bottom-most
+ node, and uses that as a stopping point for the ancestor traversal. We
+ can't do that here because the linknode is not guaranteed to be the
+ bottom-most one.
+
+ In our code here, we actually know what a bunch of potential ancestor
+ linknodes are, so instead of stopping the cheap-ancestor-traversal when
+ we get to a linkrev, we stop when we see any of the known linknodes.
+ """
+ repo = self._repo
+ cl = repo.unfiltered().changelog
+ mfl = repo.manifestlog
+ ancestormap = self.ancestormap()
+ linknode = ancestormap[fnode][2]
+
+ if srcrev is None:
+ # wctx case, used by workingfilectx during mergecopy
+ revs = [p.rev() for p in self._repo[None].parents()]
+ inclusive = True # we skipped the real (revless) source
+ else:
+ revs = [srcrev]
+
+ if self._verifylinknode(revs, linknode):
+ return linknode
+
+ commonlogkwargs = {
+ r'revs': ' '.join([hex(cl.node(rev)) for rev in revs]),
+ r'fnode': hex(fnode),
+ r'filepath': path,
+ r'user': shallowutil.getusername(repo.ui),
+ r'reponame': shallowutil.getreponame(repo.ui),
+ }
+
+ repo.ui.log('linkrevfixup', 'adjusting linknode\n', **commonlogkwargs)
+
+ pc = repo._phasecache
+ seenpublic = False
+ iteranc = cl.ancestors(revs, inclusive=inclusive)
+ for ancrev in iteranc:
+ # First, check locally-available history.
+ lnode = self._nodefromancrev(ancrev, cl, mfl, path, fnode)
+ if lnode is not None:
+ return lnode
+
+ # adjusting linknode can be super-slow. To mitigate the issue
+ # we force a remotefilelog prefetch once we reach public history
+ # (a fastlog fast path used to live here too; see the TODO below)
+ if not seenpublic and pc.phase(repo, ancrev) == phases.public:
+ # TODO: there used to be a codepath to fetch linknodes
+ # from a server as a fast path, but it appeared to
+ # depend on an API FB added to their phabricator.
+ lnode = self._forceprefetch(repo, path, fnode, revs,
+ commonlogkwargs)
+ if lnode:
+ return lnode
+ seenpublic = True
+
+ return linknode
+
+ def _forceprefetch(self, repo, path, fnode, revs,
+ commonlogkwargs):
+ # This next part is super non-obvious, so big comment block time!
+ #
+ # It is possible to get extremely bad performance here when a fairly
+ # common set of circumstances occur when this extension is combined
+ # with a server-side commit rewriting extension like pushrebase.
+ #
+ # First, an engineer creates Commit A and pushes it to the server.
+ # While the server's data structure will have the correct linkrev
+ # for the files touched in Commit A, the client will have the
+ # linkrev of the local commit, which is "invalid" because it's not
+ # an ancestor of the main line of development.
+ #
+ # The client will never download the remotefilelog with the correct
+ # linkrev as long as nobody else touches that file, since the file
+ # data and history hasn't changed since Commit A.
+ #
+ # After a long time (or a short time in a heavily used repo), if the
+ # same engineer returns to change the same file, some commands --
+ # such as amends of commits with file moves, logs, diffs, etc --
+ # can trigger this _adjustlinknode code. In those cases, finding
+ # the correct rev can become quite expensive, as the correct
+ # revision is far back in history and we need to walk back through
+ # history to find it.
+ #
+ # In order to improve this situation, we force a prefetch of the
+ # remotefilelog data blob for the file we were called on. We do this
+ # at most once, when we first see a public commit in the history we
+ # are traversing.
+ #
+ # Forcing the prefetch means we will download the remote blob even
+ # if we have the "correct" blob in the local store. Since the union
+ # store checks the remote store first, this means we are much more
+ # likely to get the correct linkrev at this point.
+ #
+ # In rare circumstances (such as the server having a suboptimal
+ # linkrev for our use case), we will fall back to the old slow path.
+ #
+ # We may want to add additional heuristics here in the future if
+ # the slow path is used too much. One promising possibility is using
+ # obsolescence markers to find a more-likely-correct linkrev.
+
+ logmsg = ''
+ start = time.time()
+ try:
+ repo.fileservice.prefetch([(path, hex(fnode))], force=True)
+
+ # Now that we've downloaded a new blob from the server,
+ # we need to rebuild the ancestor map to recompute the
+ # linknodes.
+ self._ancestormap = None
+ linknode = self.ancestormap()[fnode][2] # 2 is linknode
+ if self._verifylinknode(revs, linknode):
+ logmsg = 'remotefilelog prefetching succeeded'
+ return linknode
+ logmsg = 'remotefilelog prefetching not found'
+ return None
+ except Exception as e:
+ logmsg = 'remotefilelog prefetching failed (%s)' % e
+ return None
+ finally:
+ elapsed = time.time() - start
+ repo.ui.log('linkrevfixup', logmsg + '\n', elapsed=elapsed * 1000,
+ **pycompat.strkwargs(commonlogkwargs))
+
+ def _verifylinknode(self, revs, linknode):
+ """
+ Check if a linknode is the correct one for the current history.
+
+ That is, return True if the linkrev is the ancestor of any of the
+ passed in revs, otherwise return False.
+
+ `revs` is a list that usually has one element -- the wdir parent
+ or the user-passed rev we're looking back from. It may contain two revs
+ when there is a merge going on, or zero revs when a root node with no
+ parents is being created.
+ """
+ if not revs:
+ return False
+ try:
+ # Use the C fastpath to check if the given linknode is correct.
+ cl = self._repo.unfiltered().changelog
+ return any(cl.isancestor(linknode, cl.node(r)) for r in revs)
+ except error.LookupError:
+ # The linknode read from the blob may have been stripped or
+ # otherwise not present in the repository anymore. Do not fail hard
+ # in this case. Instead, return false and continue the search for
+ # the correct linknode.
+ return False
+
+ def ancestors(self, followfirst=False):
+ ancestors = []
+ queue = collections.deque((self,))
+ seen = set()
+ while queue:
+ current = queue.pop()
+ if current.filenode() in seen:
+ continue
+ seen.add(current.filenode())
+
+ ancestors.append(current)
+
+ parents = current.parents()
+ first = True
+ for p in parents:
+ if first or not followfirst:
+ queue.append(p)
+ first = False
+
+ # Remove self
+ ancestors.pop(0)
+
+ # Sort by linkrev
+ # The copy tracing algorithm depends on these coming out in order
+ ancestors = sorted(ancestors, reverse=True, key=lambda x: x.linkrev())
+
+ for ancestor in ancestors:
+ yield ancestor
+
+ def ancestor(self, fc2, actx):
+ # the easy case: no (relevant) renames
+ if fc2.path() == self.path() and self.path() in actx:
+ return actx[self.path()]
+
+ # the next easiest cases: unambiguous predecessor (name trumps
+ # history)
+ if self.path() in actx and fc2.path() not in actx:
+ return actx[self.path()]
+ if fc2.path() in actx and self.path() not in actx:
+ return actx[fc2.path()]
+
+ # do a full traversal
+ amap = self.ancestormap()
+ bmap = fc2.ancestormap()
+
+ def parents(x):
+ f, n = x
+ p = amap.get(n) or bmap.get(n)
+ if not p:
+ return []
+
+ return [(p[3] or f, p[0]), (f, p[1])]
+
+ a = (self.path(), self.filenode())
+ b = (fc2.path(), fc2.filenode())
+ result = ancestor.genericancestor(a, b, parents)
+ if result:
+ f, n = result
+ r = remotefilectx(self._repo, f, fileid=n,
+ ancestormap=amap)
+ return r
+
+ return None
+
+ def annotate(self, *args, **kwargs):
+ introctx = self
+ prefetchskip = kwargs.pop(r'prefetchskip', None)
+ if prefetchskip:
+ # use introrev so prefetchskip can be accurately tested
+ introrev = self.introrev()
+ if self.rev() != introrev:
+ introctx = remotefilectx(self._repo, self._path,
+ changeid=introrev,
+ fileid=self._filenode,
+ filelog=self._filelog,
+ ancestormap=self._ancestormap)
+
+ # like self.ancestors, but append to "fetch" and skip visiting parents
+ # of nodes in "prefetchskip".
+ fetch = []
+ seen = set()
+ queue = collections.deque((introctx,))
+ seen.add(introctx.node())
+ while queue:
+ current = queue.pop()
+ if current.filenode() != self.filenode():
+ # this is a "joint point". fastannotate needs contents of
+ # "joint point"s to calculate diffs for side branches.
+ fetch.append((current.path(), hex(current.filenode())))
+ if prefetchskip and current in prefetchskip:
+ continue
+ for parent in current.parents():
+ if parent.node() not in seen:
+ seen.add(parent.node())
+ queue.append(parent)
+
+ self._repo.ui.debug('remotefilelog: prefetching %d files '
+ 'for annotate\n' % len(fetch))
+ if fetch:
+ self._repo.fileservice.prefetch(fetch)
+ return super(remotefilectx, self).annotate(*args, **kwargs)
+
+ # Return an empty list so that hg serve and thg don't stack trace
+ def children(self):
+ return []
+
+class remoteworkingfilectx(context.workingfilectx, remotefilectx):
+ def __init__(self, repo, path, filelog=None, workingctx=None):
+ self._ancestormap = None
+ return super(remoteworkingfilectx, self).__init__(repo, path,
+ filelog, workingctx)
+
+ def parents(self):
+ return remotefilectx.parents(self)
+
+ def ancestormap(self):
+ if not self._ancestormap:
+ path = self._path
+ pcl = self._changectx._parents
+ renamed = self.renamed()
+
+ if renamed:
+ p1 = renamed
+ else:
+ p1 = (path, pcl[0]._manifest.get(path, nullid))
+
+ p2 = (path, nullid)
+ if len(pcl) > 1:
+ p2 = (path, pcl[1]._manifest.get(path, nullid))
+
+ m = {}
+ if p1[1] != nullid:
+ p1ctx = self._repo.filectx(p1[0], fileid=p1[1])
+ m.update(p1ctx.filelog().ancestormap(p1[1]))
+
+ if p2[1] != nullid:
+ p2ctx = self._repo.filectx(p2[0], fileid=p2[1])
+ m.update(p2ctx.filelog().ancestormap(p2[1]))
+
+ copyfrom = ''
+ if renamed:
+ copyfrom = renamed[0]
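+ # the working copy file has no node yet, so its entry is keyed on None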
+ m[None] = (p1[1], p2[1], nullid, copyfrom)
+ self._ancestormap = m
+
+ return self._ancestormap
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/hgext/remotefilelog/remotefilelog.py Fri Jan 18 13:28:22 2019 -0500
@@ -0,0 +1,454 @@
+# remotefilelog.py - filelog implementation where filelog history is stored
+# remotely
+#
+# Copyright 2013 Facebook, Inc.
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2 or any later version.
+from __future__ import absolute_import
+
+import collections
+import os
+
+from mercurial.node import bin, nullid
+from mercurial.i18n import _
+from mercurial import (
+ ancestor,
+ error,
+ mdiff,
+ revlog,
+)
+from mercurial.utils import storageutil
+
+from . import (
+ constants,
+ fileserverclient,
+ shallowutil,
+)
+
+class remotefilelognodemap(object):
+ def __init__(self, filename, store):
+ self._filename = filename
+ self._store = store
+
+ def __contains__(self, node):
+ missing = self._store.getmissing([(self._filename, node)])
+ return not bool(missing)
+
+ def __get__(self, node):
+ if node not in self:
+ raise KeyError(node)
+ return node
+
+class remotefilelog(object):
+
+ _generaldelta = True
+
+ def __init__(self, opener, path, repo):
+ self.opener = opener
+ self.filename = path
+ self.repo = repo
+ self.nodemap = remotefilelognodemap(self.filename, repo.contentstore)
+
+ self.version = 1
+
+ def read(self, node):
+ """returns the file contents at this node"""
+ t = self.revision(node)
+ if not t.startswith('\1\n'):
+ return t
+ s = t.index('\1\n', 2)
+ return t[s + 2:]
+
+ def add(self, text, meta, transaction, linknode, p1=None, p2=None):
+ hashtext = text
+
+ # hash with the metadata, like in vanilla filelogs
+ hashtext = shallowutil.createrevlogtext(text, meta.get('copy'),
+ meta.get('copyrev'))
+ node = storageutil.hashrevisionsha1(hashtext, p1, p2)
+ return self.addrevision(hashtext, transaction, linknode, p1, p2,
+ node=node)
+
+ def _createfileblob(self, text, meta, flags, p1, p2, node, linknode):
+ # text passed to "_createfileblob" does not include filelog metadata
+ header = shallowutil.buildfileblobheader(len(text), flags)
+ data = "%s\0%s" % (header, text)
+
+ realp1 = p1
+ copyfrom = ""
+ if meta and 'copy' in meta:
+ copyfrom = meta['copy']
+ realp1 = bin(meta['copyrev'])
+
+ data += "%s%s%s%s%s\0" % (node, realp1, p2, linknode, copyfrom)
+
+ visited = set()
+
+ pancestors = {}
+ queue = []
+ if realp1 != nullid:
+ p1flog = self
+ if copyfrom:
+ p1flog = remotefilelog(self.opener, copyfrom, self.repo)
+
+ pancestors.update(p1flog.ancestormap(realp1))
+ queue.append(realp1)
+ visited.add(realp1)
+ if p2 != nullid:
+ pancestors.update(self.ancestormap(p2))
+ queue.append(p2)
+ visited.add(p2)
+
+ ancestortext = ""
+
+ # add the ancestors in topological order
+ while queue:
+ c = queue.pop(0)
+ pa1, pa2, ancestorlinknode, pacopyfrom = pancestors[c]
+
+ pacopyfrom = pacopyfrom or ''
+ ancestortext += "%s%s%s%s%s\0" % (
+ c, pa1, pa2, ancestorlinknode, pacopyfrom)
+
+ if pa1 != nullid and pa1 not in visited:
+ queue.append(pa1)
+ visited.add(pa1)
+ if pa2 != nullid and pa2 not in visited:
+ queue.append(pa2)
+ visited.add(pa2)
+
+ data += ancestortext
+
+ return data
+
+ def addrevision(self, text, transaction, linknode, p1, p2, cachedelta=None,
+ node=None, flags=revlog.REVIDX_DEFAULT_FLAGS):
+ # text passed to "addrevision" includes hg filelog metadata header
+ if node is None:
+ node = storageutil.hashrevisionsha1(text, p1, p2)
+
+ meta, metaoffset = storageutil.parsemeta(text)
+ rawtext, validatehash = self._processflags(text, flags, 'write')
+ return self.addrawrevision(rawtext, transaction, linknode, p1, p2,
+ node, flags, cachedelta,
+ _metatuple=(meta, metaoffset))
+
+ def addrawrevision(self, rawtext, transaction, linknode, p1, p2, node,
+ flags, cachedelta=None, _metatuple=None):
+ if _metatuple:
+ # _metatuple: used by "addrevision" internally by remotefilelog
+ # meta was parsed confidently
+ meta, metaoffset = _metatuple
+ else:
+ # not from self.addrevision, but something else (repo._filecommit)
+ # calls addrawrevision directly. remotefilelog needs to get and
+ # strip filelog metadata.
+ # we don't have confidence about whether rawtext contains filelog
+ # metadata or not (flag processor could replace it), so we just
+ # parse it as best-effort.
+ # in LFS (flags != 0)'s case, the best way is to call LFS code to
+ # get the meta information, instead of storageutil.parsemeta.
+ meta, metaoffset = storageutil.parsemeta(rawtext)
+ if flags != 0:
+ # when flags != 0, be conservative and do not mangle rawtext, since
+ # a read flag processor expects the text not being mangled at all.
+ metaoffset = 0
+ if metaoffset:
+ # remotefilelog fileblob stores copy metadata in its ancestortext,
+ # not its main blob. so we need to remove filelog metadata
+ # (containing copy information) from text.
+ blobtext = rawtext[metaoffset:]
+ else:
+ blobtext = rawtext
+ data = self._createfileblob(blobtext, meta, flags, p1, p2, node,
+ linknode)
+ self.repo.contentstore.addremotefilelognode(self.filename, node, data)
+
+ return node
+
+ def renamed(self, node):
+ ancestors = self.repo.metadatastore.getancestors(self.filename, node)
+ p1, p2, linknode, copyfrom = ancestors[node]
+ if copyfrom:
+ return (copyfrom, p1)
+
+ return False
+
+ def size(self, node):
+ """return the size of a given revision"""
+ return len(self.read(node))
+
+ rawsize = size
+
+ def cmp(self, node, text):
+ """compare text with a given file revision
+
+ returns True if text is different from what is stored.
+ """
+
+ if node == nullid:
+ return True
+
+ nodetext = self.read(node)
+ return nodetext != text
+
+ def __nonzero__(self):
+ return True
+
+ __bool__ = __nonzero__
+
+ def __len__(self):
+ if self.filename == '.hgtags':
+ # The length of .hgtags is used to fast path tag checking.
+ # remotefilelog doesn't support .hgtags since the entire .hgtags
+ # history is needed. Use the excludepattern setting to make
+ # .hgtags a normal filelog.
+ return 0
+
+ raise RuntimeError("len not supported")
+
+ def empty(self):
+ return False
+
+ def flags(self, node):
+ if isinstance(node, int):
+ raise error.ProgrammingError(
+ 'remotefilelog does not accept integer rev for flags')
+ store = self.repo.contentstore
+ return store.getmeta(self.filename, node).get(constants.METAKEYFLAG, 0)
+
+ def parents(self, node):
+ if node == nullid:
+ return nullid, nullid
+
+ ancestormap = self.repo.metadatastore.getancestors(self.filename, node)
+ p1, p2, linknode, copyfrom = ancestormap[node]
+ if copyfrom:
+ p1 = nullid
+
+ return p1, p2
+
+ def parentrevs(self, rev):
+ # TODO(augie): this is a node and should be a rev, but for now
+ # nothing in core seems to actually break.
+ return self.parents(rev)
+
+ def linknode(self, node):
+ ancestormap = self.repo.metadatastore.getancestors(self.filename, node)
+ p1, p2, linknode, copyfrom = ancestormap[node]
+ return linknode
+
+ def linkrev(self, node):
+ return self.repo.unfiltered().changelog.rev(self.linknode(node))
+
+ def emitrevisions(self, nodes, nodesorder=None, revisiondata=False,
+ assumehaveparentrevisions=False, deltaprevious=False,
+ deltamode=None):
+ # we don't use any of these parameters here
+ del nodesorder, revisiondata, assumehaveparentrevisions, deltaprevious
+ del deltamode
+ prevnode = None
+ for node in nodes:
+ p1, p2 = self.parents(node)
+ if prevnode is None:
+ basenode = prevnode = p1
+ if basenode == node:
+ basenode = nullid
+ if basenode != nullid:
+ revision = None
+ delta = self.revdiff(basenode, node)
+ else:
+ revision = self.revision(node, raw=True)
+ delta = None
+ yield revlog.revlogrevisiondelta(
+ node=node,
+ p1node=p1,
+ p2node=p2,
+ linknode=self.linknode(node),
+ basenode=basenode,
+ flags=self.flags(node),
+ baserevisionsize=None,
+ revision=revision,
+ delta=delta,
+ )
+
+ def revdiff(self, node1, node2):
+ return mdiff.textdiff(self.revision(node1, raw=True),
+ self.revision(node2, raw=True))
+
+ def lookup(self, node):
+ if len(node) == 40:
+ node = bin(node)
+ if len(node) != 20:
+ raise error.LookupError(node, self.filename,
+ _('invalid lookup input'))
+
+ return node
+
+ def rev(self, node):
+ # This is a hack to make TortoiseHG work.
+ return node
+
+ def node(self, rev):
+ # This is a hack.
+ if isinstance(rev, int):
+ raise error.ProgrammingError(
+ 'remotefilelog does not convert integer rev to node')
+ return rev
+
+ def revision(self, node, raw=False):
+ """returns the revlog contents at this node.
+ this includes the meta data traditionally included in file revlogs.
+ this is generally only used for bundling and communicating with vanilla
+ hg clients.
+ """
+ if node == nullid:
+ return ""
+ if len(node) != 20:
+ raise error.LookupError(node, self.filename,
+ _('invalid revision input'))
+
+ store = self.repo.contentstore
+ rawtext = store.get(self.filename, node)
+ if raw:
+ return rawtext
+ flags = store.getmeta(self.filename, node).get(constants.METAKEYFLAG, 0)
+ if flags == 0:
+ return rawtext
+ text, verifyhash = self._processflags(rawtext, flags, 'read')
+ return text
+
+ def _processflags(self, text, flags, operation, raw=False):
+ # mostly copied from hg/mercurial/revlog.py
+ validatehash = True
+ orderedflags = revlog.REVIDX_FLAGS_ORDER
+ if operation == 'write':
+ orderedflags = reversed(orderedflags)
+ for flag in orderedflags:
+ if flag & flags:
+ vhash = True
+ if flag not in revlog._flagprocessors:
+ message = _("missing processor for flag '%#x'") % (flag)
+ raise revlog.RevlogError(message)
+ readfunc, writefunc, rawfunc = revlog._flagprocessors[flag]
+ if raw:
+ vhash = rawfunc(self, text)
+ elif operation == 'read':
+ text, vhash = readfunc(self, text)
+ elif operation == 'write':
+ text, vhash = writefunc(self, text)
+ validatehash = validatehash and vhash
+ return text, validatehash
+
+ def _read(self, id):
+ """reads the raw file blob from disk, cache, or server"""
+ fileservice = self.repo.fileservice
+ localcache = fileservice.localcache
+ cachekey = fileserverclient.getcachekey(self.repo.name, self.filename,
+ id)
+ try:
+ return localcache.read(cachekey)
+ except KeyError:
+ pass
+
+ localkey = fileserverclient.getlocalkey(self.filename, id)
+ localpath = os.path.join(self.localpath, localkey)
+ try:
+ return shallowutil.readfile(localpath)
+ except IOError:
+ pass
+
+ fileservice.prefetch([(self.filename, id)])
+ try:
+ return localcache.read(cachekey)
+ except KeyError:
+ pass
+
+ raise error.LookupError(id, self.filename, _('no node'))
+
+ def ancestormap(self, node):
+ return self.repo.metadatastore.getancestors(self.filename, node)
+
+ def ancestor(self, a, b):
+ if a == nullid or b == nullid:
+ return nullid
+
+ revmap, parentfunc = self._buildrevgraph(a, b)
+ nodemap = dict(((v, k) for (k, v) in revmap.iteritems()))
+
+ ancs = ancestor.ancestors(parentfunc, revmap[a], revmap[b])
+ if ancs:
+ # choose a consistent winner when there's a tie
+ return min(map(nodemap.__getitem__, ancs))
+ return nullid
+
+ def commonancestorsheads(self, a, b):
+ """calculate all the heads of the common ancestors of nodes a and b"""
+
+ if a == nullid or b == nullid:
+ return nullid
+
+ revmap, parentfunc = self._buildrevgraph(a, b)
+ nodemap = dict(((v, k) for (k, v) in revmap.iteritems()))
+
+ ancs = ancestor.commonancestorsheads(parentfunc, revmap[a], revmap[b])
+ return map(nodemap.__getitem__, ancs)
+
+ def _buildrevgraph(self, a, b):
+ """Builds a numeric revision graph for the given two nodes.
+ Returns a node->rev map and a rev->[revs] parent function.
+ """
+ amap = self.ancestormap(a)
+ bmap = self.ancestormap(b)
+
+ # Union the two maps
+ parentsmap = collections.defaultdict(list)
+ allparents = set()
+ for mapping in (amap, bmap):
+ for node, pdata in mapping.iteritems():
+ parents = parentsmap[node]
+ p1, p2, linknode, copyfrom = pdata
+ # Don't follow renames (copyfrom).
+ # remotefilectx.ancestor does that.
+ if p1 != nullid and not copyfrom:
+ parents.append(p1)
+ allparents.add(p1)
+ if p2 != nullid:
+ parents.append(p2)
+ allparents.add(p2)
+
+ # Breadth first traversal to build linkrev graph
+ parentrevs = collections.defaultdict(list)
+ revmap = {}
+ queue = collections.deque(((None, n) for n in parentsmap
+ if n not in allparents))
+ while queue:
+ prevrev, current = queue.pop()
+ if current in revmap:
+ if prevrev:
+ parentrevs[prevrev].append(revmap[current])
+ continue
+
+ # Assign linkrevs in reverse order, so start at
+ # len(parentsmap) and work backwards.
+ currentrev = len(parentsmap) - len(revmap) - 1
+ revmap[current] = currentrev
+
+ if prevrev:
+ parentrevs[prevrev].append(currentrev)
+
+ for parent in parentsmap.get(current):
+ queue.appendleft((currentrev, parent))
+
+ return revmap, parentrevs.__getitem__
+
+ def strip(self, minlink, transaction):
+ pass
+
+ # misc unused things
+ def files(self):
+ return []
+
+ def checksize(self):
+ return 0, 0
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/hgext/remotefilelog/remotefilelogserver.py Fri Jan 18 13:28:22 2019 -0500
@@ -0,0 +1,404 @@
+# remotefilelogserver.py - server logic for a remotefilelog server
+#
+# Copyright 2013 Facebook, Inc.
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2 or any later version.
+from __future__ import absolute_import
+
+import errno
+import os
+import stat
+import time
+import zlib
+
+from mercurial.i18n import _
+from mercurial.node import bin, hex, nullid
+from mercurial import (
+ changegroup,
+ changelog,
+ context,
+ error,
+ extensions,
+ match,
+ store,
+ streamclone,
+ util,
+ wireprotoserver,
+ wireprototypes,
+ wireprotov1server,
+)
+from . import (
+ constants,
+ shallowutil,
+)
+
+_sshv1server = wireprotoserver.sshv1protocolhandler
+
+def setupserver(ui, repo):
+ """Sets up a normal Mercurial repo so it can serve files to shallow repos.
+ """
+ onetimesetup(ui)
+
+ # don't send files to shallow clients during pulls
+ def generatefiles(orig, self, changedfiles, linknodes, commonrevs, source,
+ *args, **kwargs):
+ caps = self._bundlecaps or []
+ if constants.BUNDLE2_CAPABLITY in caps:
+ # only send files that don't match the specified patterns
+ includepattern = None
+ excludepattern = None
+ for cap in (self._bundlecaps or []):
+ if cap.startswith("includepattern="):
+ includepattern = cap[len("includepattern="):].split('\0')
+ elif cap.startswith("excludepattern="):
+ excludepattern = cap[len("excludepattern="):].split('\0')
+
+ m = match.always(repo.root, '')
+ if includepattern or excludepattern:
+ m = match.match(repo.root, '', None,
+ includepattern, excludepattern)
+
+ changedfiles = [f for f in changedfiles if not m(f)]
+ return orig(self, changedfiles, linknodes, commonrevs, source,
+ *args, **kwargs)
+
+ extensions.wrapfunction(
+ changegroup.cgpacker, 'generatefiles', generatefiles)
+
+onetime = False
+def onetimesetup(ui):
+ """Configures the wireprotocol for both clients and servers.
+ """
+ global onetime
+ if onetime:
+ return
+ onetime = True
+
+ # support file content requests
+ wireprotov1server.wireprotocommand(
+ 'x_rfl_getflogheads', 'path', permission='pull')(getflogheads)
+ wireprotov1server.wireprotocommand(
+ 'x_rfl_getfiles', '', permission='pull')(getfiles)
+ wireprotov1server.wireprotocommand(
+ 'x_rfl_getfile', 'file node', permission='pull')(getfile)
+
+ class streamstate(object):
+ match = None
+ shallowremote = False
+ noflatmf = False
+ state = streamstate()
+
+ def stream_out_shallow(repo, proto, other):
+ includepattern = None
+ excludepattern = None
+ raw = other.get('includepattern')
+ if raw:
+ includepattern = raw.split('\0')
+ raw = other.get('excludepattern')
+ if raw:
+ excludepattern = raw.split('\0')
+
+ oldshallow = state.shallowremote
+ oldmatch = state.match
+ oldnoflatmf = state.noflatmf
+ try:
+ state.shallowremote = True
+ state.match = match.always(repo.root, '')
+ state.noflatmf = other.get('noflatmanifest') == 'True'
+ if includepattern or excludepattern:
+ state.match = match.match(repo.root, '', None,
+ includepattern, excludepattern)
+ streamres = wireprotov1server.stream(repo, proto)
+
+ # Force the first value to execute, so the file list is computed
+ # within the try/finally scope
+ first = next(streamres.gen)
+ second = next(streamres.gen)
+ def gen():
+ yield first
+ yield second
+ for value in streamres.gen:
+ yield value
+ return wireprototypes.streamres(gen())
+ finally:
+ state.shallowremote = oldshallow
+ state.match = oldmatch
+ state.noflatmf = oldnoflatmf
+
+ wireprotov1server.commands['stream_out_shallow'] = (stream_out_shallow, '*')
+
+ # don't clone filelogs to shallow clients
+ def _walkstreamfiles(orig, repo, matcher=None):
+ if state.shallowremote:
+ # if we are shallow ourselves, stream our local commits
+ if shallowutil.isenabled(repo):
+ striplen = len(repo.store.path) + 1
+ readdir = repo.store.rawvfs.readdir
+ visit = [os.path.join(repo.store.path, 'data')]
+ while visit:
+ p = visit.pop()
+ for f, kind, st in readdir(p, stat=True):
+ fp = p + '/' + f
+ if kind == stat.S_IFREG:
+ if not fp.endswith('.i') and not fp.endswith('.d'):
+ n = util.pconvert(fp[striplen:])
+ yield (store.decodedir(n), n, st.st_size)
+ if kind == stat.S_IFDIR:
+ visit.append(fp)
+
+ if 'treemanifest' in repo.requirements:
+ for (u, e, s) in repo.store.datafiles():
+ if (u.startswith('meta/') and
+ (u.endswith('.i') or u.endswith('.d'))):
+ yield (u, e, s)
+
+ # Return .d and .i files that do not match the shallow pattern
+ match = state.match
+ if match and not match.always():
+ for (u, e, s) in repo.store.datafiles():
+ f = u[5:-2] # trim data/... and .i/.d
+ if not state.match(f):
+ yield (u, e, s)
+
+ for x in repo.store.topfiles():
+ if state.noflatmf and x[0][:11] == '00manifest.':
+ continue
+ yield x
+
+ elif shallowutil.isenabled(repo):
+ # don't allow cloning from a shallow repo to a full repo
+ # since it would require fetching every version of every
+ # file in order to create the revlogs.
+ raise error.Abort(_("Cannot clone from a shallow repo "
+ "to a full repo."))
+ else:
+ for x in orig(repo, matcher):
+ yield x
+
+ extensions.wrapfunction(streamclone, '_walkstreamfiles', _walkstreamfiles)
+
+ # expose remotefilelog capabilities
+ def _capabilities(orig, repo, proto):
+ caps = orig(repo, proto)
+ if (shallowutil.isenabled(repo) or ui.configbool('remotefilelog',
+ 'server')):
+ if isinstance(proto, _sshv1server):
+ # legacy getfiles method which only works over ssh
+ caps.append(constants.NETWORK_CAP_LEGACY_SSH_GETFILES)
+ caps.append('x_rfl_getflogheads')
+ caps.append('x_rfl_getfile')
+ return caps
+ extensions.wrapfunction(wireprotov1server, '_capabilities', _capabilities)
+
+ def _adjustlinkrev(orig, self, *args, **kwargs):
+ # When generating file blobs, taking the real path is too slow on large
+ # repos, so force it to just return the linkrev directly.
+ repo = self._repo
+ if util.safehasattr(repo, 'forcelinkrev') and repo.forcelinkrev:
+ return self._filelog.linkrev(self._filelog.rev(self._filenode))
+ return orig(self, *args, **kwargs)
+
+ extensions.wrapfunction(
+ context.basefilectx, '_adjustlinkrev', _adjustlinkrev)
+
+ def _iscmd(orig, cmd):
+ if cmd == 'x_rfl_getfiles':
+ return False
+ return orig(cmd)
+
+ extensions.wrapfunction(wireprotoserver, 'iscmd', _iscmd)
+
+def _loadfileblob(repo, cachepath, path, node):
+ filecachepath = os.path.join(cachepath, path, hex(node))
+ if not os.path.exists(filecachepath) or os.path.getsize(filecachepath) == 0:
+ filectx = repo.filectx(path, fileid=node)
+ if filectx.node() == nullid:
+ repo.changelog = changelog.changelog(repo.svfs)
+ filectx = repo.filectx(path, fileid=node)
+
+ text = createfileblob(filectx)
+ # TODO configurable compression engines
+ text = zlib.compress(text)
+
+ # everything should be user & group read/writable
+ oldumask = os.umask(0o002)
+ try:
+ dirname = os.path.dirname(filecachepath)
+ if not os.path.exists(dirname):
+ try:
+ os.makedirs(dirname)
+ except OSError as ex:
+ if ex.errno != errno.EEXIST:
+ raise
+
+ f = None
+ try:
+ f = util.atomictempfile(filecachepath, "wb")
+ f.write(text)
+ except (IOError, OSError):
+ # Don't abort if the user only has permission to read,
+ # and not write.
+ pass
+ finally:
+ if f:
+ f.close()
+ finally:
+ os.umask(oldumask)
+ else:
+ with open(filecachepath, "rb") as f:
+ text = f.read()
+ return text
+
+def getflogheads(repo, proto, path):
+ """A server api for requesting a filelog's heads
+ """
+ flog = repo.file(path)
+ heads = flog.heads()
+ return '\n'.join((hex(head) for head in heads if head != nullid))
+
+def getfile(repo, proto, file, node):
+ """A server api for requesting a particular version of a file. Can be used
+ in batches to request many files at once. The return protocol is:
+ <errorcode>\0<data/errormsg> where <errorcode> is 0 for success or
+ non-zero for an error.
+
+ data is a compressed blob with revlog flag and ancestors information. See
+ createfileblob for its content.
+ """
+ if shallowutil.isenabled(repo):
+ return '1\0' + _('cannot fetch remote files from shallow repo')
+ cachepath = repo.ui.config("remotefilelog", "servercachepath")
+ if not cachepath:
+ cachepath = os.path.join(repo.path, "remotefilelogcache")
+ node = bin(node.strip())
+ if node == nullid:
+ return '0\0'
+ return '0\0' + _loadfileblob(repo, cachepath, file, node)
+
+def getfiles(repo, proto):
+ """A server api for requesting particular versions of particular files.
+ """
+ if shallowutil.isenabled(repo):
+ raise error.Abort(_('cannot fetch remote files from shallow repo'))
+ if not isinstance(proto, _sshv1server):
+ raise error.Abort(_('cannot fetch remote files over non-ssh protocol'))
+
+ def streamer():
+ fin = proto._fin
+
+ cachepath = repo.ui.config("remotefilelog", "servercachepath")
+ if not cachepath:
+ cachepath = os.path.join(repo.path, "remotefilelogcache")
+
+ while True:
+ request = fin.readline()[:-1]
+ if not request:
+ break
+
+ node = bin(request[:40])
+ if node == nullid:
+ yield '0\n'
+ continue
+
+ path = request[40:]
+
+ text = _loadfileblob(repo, cachepath, path, node)
+
+ yield '%d\n%s' % (len(text), text)
+
+ # it would be better to only flush after processing a whole batch
+ # but currently we don't know if there are more requests coming
+ proto._fout.flush()
+ return wireprototypes.streamres(streamer())
+
+def createfileblob(filectx):
+ """
+ format:
+ v0:
+ str(len(rawtext)) + '\0' + rawtext + ancestortext
+ v1:
+ 'v1' + '\n' + metalist + '\0' + rawtext + ancestortext
+ metalist := metalist + '\n' + meta | meta
+ meta := sizemeta | flagmeta
+ sizemeta := METAKEYSIZE + str(len(rawtext))
+ flagmeta := METAKEYFLAG + str(flag)
+
+ note: sizemeta must exist. METAKEYFLAG and METAKEYSIZE must have a
+ length of 1.
+ """
+ flog = filectx.filelog()
+ frev = filectx.filerev()
+ revlogflags = flog._revlog.flags(frev)
+ if revlogflags == 0:
+ # normal files
+ text = filectx.data()
+ else:
+ # lfs, read raw revision data
+ text = flog.revision(frev, raw=True)
+
+ repo = filectx._repo
+
+ ancestors = [filectx]
+
+ try:
+ repo.forcelinkrev = True
+ ancestors.extend([f for f in filectx.ancestors()])
+
+ ancestortext = ""
+ for ancestorctx in ancestors:
+ parents = ancestorctx.parents()
+ p1 = nullid
+ p2 = nullid
+ if len(parents) > 0:
+ p1 = parents[0].filenode()
+ if len(parents) > 1:
+ p2 = parents[1].filenode()
+
+ copyname = ""
+ rename = ancestorctx.renamed()
+ if rename:
+ copyname = rename[0]
+ linknode = ancestorctx.node()
+ ancestortext += "%s%s%s%s%s\0" % (
+ ancestorctx.filenode(), p1, p2, linknode,
+ copyname)
+ finally:
+ repo.forcelinkrev = False
+
+ header = shallowutil.buildfileblobheader(len(text), revlogflags)
+
+ return "%s\0%s%s" % (header, text, ancestortext)
+
+def gcserver(ui, repo):
+ if not repo.ui.configbool("remotefilelog", "server"):
+ return
+
+ neededfiles = set()
+ heads = repo.revs("heads(tip~25000:) - null")
+
+ cachepath = repo.vfs.join("remotefilelogcache")
+ for head in heads:
+ mf = repo[head].manifest()
+ for filename, filenode in mf.iteritems():
+ filecachepath = os.path.join(cachepath, filename, hex(filenode))
+ neededfiles.add(filecachepath)
+
+ # delete unneeded older files
+ days = repo.ui.configint("remotefilelog", "serverexpiration")
+ expiration = time.time() - (days * 24 * 60 * 60)
+
+ progress = ui.makeprogress(_("removing old server cache"), unit="files")
+ progress.update(0)
+ for root, dirs, files in os.walk(cachepath):
+ for file in files:
+ filepath = os.path.join(root, file)
+ progress.increment()
+ if filepath in neededfiles:
+ continue
+
+ stat = os.stat(filepath)
+ if stat.st_mtime < expiration:
+ os.remove(filepath)
+
+ progress.complete()
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/hgext/remotefilelog/repack.py Fri Jan 18 13:28:22 2019 -0500
@@ -0,0 +1,778 @@
+from __future__ import absolute_import
+
+import os
+import time
+
+from mercurial.i18n import _
+from mercurial.node import (
+ nullid,
+ short,
+)
+from mercurial import (
+ encoding,
+ error,
+ mdiff,
+ policy,
+ pycompat,
+ scmutil,
+ util,
+ vfs,
+)
+from mercurial.utils import procutil
+from . import (
+ constants,
+ contentstore,
+ datapack,
+ extutil,
+ historypack,
+ metadatastore,
+ shallowutil,
+)
+
+osutil = policy.importmod(r'osutil')
+
+class RepackAlreadyRunning(error.Abort):
+ pass
+
+def backgroundrepack(repo, incremental=True, packsonly=False):
+ cmd = [procutil.hgexecutable(), '-R', repo.origroot, 'repack']
+ msg = _("(running background repack)\n")
+ if incremental:
+ cmd.append('--incremental')
+ msg = _("(running background incremental repack)\n")
+ if packsonly:
+ cmd.append('--packsonly')
+ repo.ui.warn(msg)
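+ # e.g. with incremental=True this spawns, in the background, roughly:
+ # hg -R <repo.origroot> repack --incremental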
+ procutil.runbgcommand(cmd, encoding.environ)
+
+def fullrepack(repo, options=None):
+ """If ``packsonly`` is True, stores creating only loose objects are skipped.
+ """
+ if util.safehasattr(repo, 'shareddatastores'):
+ datasource = contentstore.unioncontentstore(
+ *repo.shareddatastores)
+ historysource = metadatastore.unionmetadatastore(
+ *repo.sharedhistorystores,
+ allowincomplete=True)
+
+ packpath = shallowutil.getcachepackpath(
+ repo,
+ constants.FILEPACK_CATEGORY)
+ _runrepack(repo, datasource, historysource, packpath,
+ constants.FILEPACK_CATEGORY, options=options)
+
+ if util.safehasattr(repo.manifestlog, 'datastore'):
+ localdata, shareddata = _getmanifeststores(repo)
+ lpackpath, ldstores, lhstores = localdata
+ spackpath, sdstores, shstores = shareddata
+
+ # Repack the shared manifest store
+ datasource = contentstore.unioncontentstore(*sdstores)
+ historysource = metadatastore.unionmetadatastore(
+ *shstores,
+ allowincomplete=True)
+ _runrepack(repo, datasource, historysource, spackpath,
+ constants.TREEPACK_CATEGORY, options=options)
+
+ # Repack the local manifest store
+ datasource = contentstore.unioncontentstore(
+ *ldstores,
+ allowincomplete=True)
+ historysource = metadatastore.unionmetadatastore(
+ *lhstores,
+ allowincomplete=True)
+ _runrepack(repo, datasource, historysource, lpackpath,
+ constants.TREEPACK_CATEGORY, options=options)
+
+def incrementalrepack(repo, options=None):
+ """This repacks the repo by looking at the distribution of pack files in the
+ repo and performing the smallest repack needed to keep the repo in good shape.
+ """
+ if util.safehasattr(repo, 'shareddatastores'):
+ packpath = shallowutil.getcachepackpath(
+ repo,
+ constants.FILEPACK_CATEGORY)
+ _incrementalrepack(repo,
+ repo.shareddatastores,
+ repo.sharedhistorystores,
+ packpath,
+ constants.FILEPACK_CATEGORY,
+ options=options)
+
+ if util.safehasattr(repo.manifestlog, 'datastore'):
+ localdata, shareddata = _getmanifeststores(repo)
+ lpackpath, ldstores, lhstores = localdata
+ spackpath, sdstores, shstores = shareddata
+
+ # Repack the shared manifest store
+ _incrementalrepack(repo,
+ sdstores,
+ shstores,
+ spackpath,
+ constants.TREEPACK_CATEGORY,
+ options=options)
+
+ # Repack the local manifest store
+ _incrementalrepack(repo,
+ ldstores,
+ lhstores,
+ lpackpath,
+ constants.TREEPACK_CATEGORY,
+ allowincompletedata=True,
+ options=options)
+
+def _getmanifeststores(repo):
+ shareddatastores = repo.manifestlog.shareddatastores
+ localdatastores = repo.manifestlog.localdatastores
+ sharedhistorystores = repo.manifestlog.sharedhistorystores
+ localhistorystores = repo.manifestlog.localhistorystores
+
+ sharedpackpath = shallowutil.getcachepackpath(repo,
+ constants.TREEPACK_CATEGORY)
+ localpackpath = shallowutil.getlocalpackpath(repo.svfs.vfs.base,
+ constants.TREEPACK_CATEGORY)
+
+ return ((localpackpath, localdatastores, localhistorystores),
+ (sharedpackpath, shareddatastores, sharedhistorystores))
+
+def _topacks(packpath, files, constructor):
+ paths = list(os.path.join(packpath, p) for p in files)
+ packs = list(constructor(p) for p in paths)
+ return packs
+
+def _deletebigpacks(repo, folder, files):
+ """Deletes packfiles that are bigger than ``packs.maxpacksize``.
+
+ Returns ``files`` with the removed files omitted."""
+ maxsize = repo.ui.configbytes("packs", "maxpacksize")
+ if maxsize <= 0:
+ return files
+
+ # This only considers datapacks today, but we could broaden it to include
+ # historypacks.
+ VALIDEXTS = [".datapack", ".dataidx"]
+
+ # Either an oversize index or datapack will trigger cleanup of the whole
+ # pack:
+ oversized = set([os.path.splitext(path)[0] for path, ftype, stat in files
+ if (stat.st_size > maxsize and (os.path.splitext(path)[1]
+ in VALIDEXTS))])
+
+ for rootfname in oversized:
+ rootpath = os.path.join(folder, rootfname)
+ for ext in VALIDEXTS:
+ path = rootpath + ext
+ repo.ui.debug('removing oversize packfile %s (%s)\n' %
+ (path, util.bytecount(os.stat(path).st_size)))
+ os.unlink(path)
+ return [row for row in files if os.path.basename(row[0]) not in oversized]
+
+def _incrementalrepack(repo, datastore, historystore, packpath, category,
+ allowincompletedata=False, options=None):
+ shallowutil.mkstickygroupdir(repo.ui, packpath)
+
+ files = osutil.listdir(packpath, stat=True)
+ files = _deletebigpacks(repo, packpath, files)
+ datapacks = _topacks(packpath,
+ _computeincrementaldatapack(repo.ui, files),
+ datapack.datapack)
+ datapacks.extend(s for s in datastore
+ if not isinstance(s, datapack.datapackstore))
+
+ historypacks = _topacks(packpath,
+ _computeincrementalhistorypack(repo.ui, files),
+ historypack.historypack)
+ historypacks.extend(s for s in historystore
+ if not isinstance(s, historypack.historypackstore))
+
+ # ``allhistory{files,packs}`` contains all known history packs, even ones we
+ # don't plan to repack. They are used during the datapack repack to ensure
+ # good ordering of nodes.
+ allhistoryfiles = _allpackfileswithsuffix(files, historypack.PACKSUFFIX,
+ historypack.INDEXSUFFIX)
+ allhistorypacks = _topacks(packpath,
+ (f for f, mode, stat in allhistoryfiles),
+ historypack.historypack)
+ allhistorypacks.extend(s for s in historystore
+ if not isinstance(s, historypack.historypackstore))
+ _runrepack(repo,
+ contentstore.unioncontentstore(
+ *datapacks,
+ allowincomplete=allowincompletedata),
+ metadatastore.unionmetadatastore(
+ *historypacks,
+ allowincomplete=True),
+ packpath, category,
+ fullhistory=metadatastore.unionmetadatastore(
+ *allhistorypacks,
+ allowincomplete=True),
+ options=options)
+
+def _computeincrementaldatapack(ui, files):
+ opts = {
+ 'gencountlimit' : ui.configint(
+ 'remotefilelog', 'data.gencountlimit'),
+ 'generations' : ui.configlist(
+ 'remotefilelog', 'data.generations'),
+ 'maxrepackpacks' : ui.configint(
+ 'remotefilelog', 'data.maxrepackpacks'),
+ 'repackmaxpacksize' : ui.configbytes(
+ 'remotefilelog', 'data.repackmaxpacksize'),
+ 'repacksizelimit' : ui.configbytes(
+ 'remotefilelog', 'data.repacksizelimit'),
+ }
+
+ packfiles = _allpackfileswithsuffix(
+ files, datapack.PACKSUFFIX, datapack.INDEXSUFFIX)
+ return _computeincrementalpack(packfiles, opts)
+
+def _computeincrementalhistorypack(ui, files):
+ opts = {
+ 'gencountlimit' : ui.configint(
+ 'remotefilelog', 'history.gencountlimit'),
+ 'generations' : ui.configlist(
+ 'remotefilelog', 'history.generations', ['100MB']),
+ 'maxrepackpacks' : ui.configint(
+ 'remotefilelog', 'history.maxrepackpacks'),
+ 'repackmaxpacksize' : ui.configbytes(
+ 'remotefilelog', 'history.repackmaxpacksize', '400MB'),
+ 'repacksizelimit' : ui.configbytes(
+ 'remotefilelog', 'history.repacksizelimit'),
+ }
+
+ packfiles = _allpackfileswithsuffix(
+ files, historypack.PACKSUFFIX, historypack.INDEXSUFFIX)
+ return _computeincrementalpack(packfiles, opts)
+
+def _allpackfileswithsuffix(files, packsuffix, indexsuffix):
+ result = []
+ fileset = set(fn for fn, mode, stat in files)
+ for filename, mode, stat in files:
+ if not filename.endswith(packsuffix):
+ continue
+
+ prefix = filename[:-len(packsuffix)]
+
+ # Don't process a pack if it doesn't have an index.
+ if (prefix + indexsuffix) not in fileset:
+ continue
+ result.append((prefix, mode, stat))
+
+ return result
+
+def _computeincrementalpack(files, opts):
+ """Given a set of pack files along with the configuration options, this
+ function computes the list of files that should be packed as part of an
+ incremental repack.
+
+ It tries to strike a balance between keeping incremental repacks cheap
+ (i.e. packing small things when possible) and rolling the packs up into
+ the big ones over time.
+ """
+
+ limits = list(sorted((util.sizetoint(s) for s in opts['generations']),
+ reverse=True))
+ limits.append(0)
+
+ # Group the packs by generation (i.e. by size)
+ generations = []
+ for i in pycompat.xrange(len(limits)):
+ generations.append([])
+
+ sizes = {}
+ for prefix, mode, stat in files:
+ size = stat.st_size
+ if size > opts['repackmaxpacksize']:
+ continue
+
+ sizes[prefix] = size
+ for i, limit in enumerate(limits):
+ if size > limit:
+ generations[i].append(prefix)
+ break
+
+ # Steps for picking what packs to repack:
+ # 1. Pick the largest generation with > gencountlimit pack files.
+ # 2. Take the smallest three packs.
+ # 3. While total-size-of-packs < repacksizelimit: add another pack
+
+ # Find the largest generation with more than gencountlimit packs
+ genpacks = []
+ for i, limit in enumerate(limits):
+ if len(generations[i]) > opts['gencountlimit']:
+ # Sort to be smallest last, for easy popping later
+ genpacks.extend(sorted(generations[i], reverse=True,
+ key=lambda x: sizes[x]))
+ break
+
+ # Take as many packs from the generation as we can
+ chosenpacks = genpacks[-3:]
+ genpacks = genpacks[:-3]
+ repacksize = sum(sizes[n] for n in chosenpacks)
+ while (repacksize < opts['repacksizelimit'] and genpacks and
+ len(chosenpacks) < opts['maxrepackpacks']):
+ chosenpacks.append(genpacks.pop())
+ repacksize += sizes[chosenpacks[-1]]
+
+ return chosenpacks
+
+def _runrepack(repo, data, history, packpath, category, fullhistory=None,
+ options=None):
+ shallowutil.mkstickygroupdir(repo.ui, packpath)
+
+ def isold(repo, filename, node):
+ """Check if the file node is older than a limit.
+ Unless a limit is specified in the config, the default limit is used.
+ """
+ filectx = repo.filectx(filename, fileid=node)
+ filetime = repo[filectx.linkrev()].date()
+
+ ttl = repo.ui.configint('remotefilelog', 'nodettl')
+
+ limit = time.time() - ttl
+ return filetime[0] < limit
+
+ garbagecollect = repo.ui.configbool('remotefilelog', 'gcrepack')
+ if not fullhistory:
+ fullhistory = history
+ packer = repacker(repo, data, history, fullhistory, category,
+ gc=garbagecollect, isold=isold, options=options)
+
+ with datapack.mutabledatapack(repo.ui, packpath, version=2) as dpack:
+ with historypack.mutablehistorypack(repo.ui, packpath) as hpack:
+ try:
+ packer.run(dpack, hpack)
+ except error.LockHeld:
+ raise RepackAlreadyRunning(_("skipping repack - another repack "
+ "is already running"))
+
+def keepset(repo, keyfn, lastkeepkeys=None):
+ """Computes a keepset which is not garbage collected.
+ 'keyfn' is a function that maps filename, node to a unique key.
+ 'lastkeepkeys' is optional; if provided, the function adds new keys to
+ it and returns the result.
+ """
+ if not lastkeepkeys:
+ keepkeys = set()
+ else:
+ keepkeys = lastkeepkeys
+
+ # We want to keep:
+ # 1. Working copy parent
+ # 2. Draft commits
+ # 3. Parents of draft commits
+ # 4. Pullprefetch and bgprefetchrevs revsets if specified
+ revs = ['.', 'draft()', 'parents(draft())']
+ prefetchrevs = repo.ui.config('remotefilelog', 'pullprefetch', None)
+ if prefetchrevs:
+ revs.append('(%s)' % prefetchrevs)
+ prefetchrevs = repo.ui.config('remotefilelog', 'bgprefetchrevs', None)
+ if prefetchrevs:
+ revs.append('(%s)' % prefetchrevs)
+ revs = '+'.join(revs)
+
+ revs = ['sort((%s), "topo")' % revs]
+ keep = scmutil.revrange(repo, revs)
+
+ processed = set()
+ lastmanifest = None
+
+ # process the commits in toposorted order starting from the oldest
+ for r in reversed(keep._list):
+ if repo[r].p1().rev() in processed:
+ # if the direct parent has already been processed
+ # then we only need to process the delta
+ m = repo[r].manifestctx().readdelta()
+ else:
+ # otherwise take the manifest and diff it
+ # with the previous manifest if one exists
+ if lastmanifest:
+ m = repo[r].manifest().diff(lastmanifest)
+ else:
+ m = repo[r].manifest()
+ lastmanifest = repo[r].manifest()
+ processed.add(r)
+
+ # populate keepkeys with keys from the current manifest
+ if type(m) is dict:
+ # m is a result of diff of two manifests and is a dictionary that
+ # maps filename to ((newnode, newflag), (oldnode, oldflag)) tuple
+ for filename, diff in m.iteritems():
+ if diff[0][0] is not None:
+ keepkeys.add(keyfn(filename, diff[0][0]))
+ else:
+ # m is a manifest object
+ for filename, filenode in m.iteritems():
+ keepkeys.add(keyfn(filename, filenode))
+
+ return keepkeys
+
+class repacker(object):
+ """Class for orchestrating the repack of data and history information into a
+ new format.
+ """
+ def __init__(self, repo, data, history, fullhistory, category, gc=False,
+ isold=None, options=None):
+ self.repo = repo
+ self.data = data
+ self.history = history
+ self.fullhistory = fullhistory
+ self.unit = constants.getunits(category)
+ self.garbagecollect = gc
+ self.options = options
+ if self.garbagecollect:
+ if not isold:
+ raise ValueError("Function 'isold' is not properly specified")
+ # use (filename, node) tuple as a keepset key
+ self.keepkeys = keepset(repo, lambda f, n : (f, n))
+ self.isold = isold
+
+ def run(self, targetdata, targethistory):
+ ledger = repackledger()
+
+ with extutil.flock(repacklockvfs(self.repo).join("repacklock"),
+ _('repacking %s') % self.repo.origroot, timeout=0):
+ self.repo.hook('prerepack')
+
+ # Populate ledger from source
+ self.data.markledger(ledger, options=self.options)
+ self.history.markledger(ledger, options=self.options)
+
+ # Run repack
+ self.repackdata(ledger, targetdata)
+ self.repackhistory(ledger, targethistory)
+
+ # Call cleanup on each source
+ for source in ledger.sources:
+ source.cleanup(ledger)
+
+ def _chainorphans(self, ui, filename, nodes, orphans, deltabases):
+ """Reorderes ``orphans`` into a single chain inside ``nodes`` and
+ ``deltabases``.
+
+ We often have orphan entries (nodes without a base that aren't
+ referenced by other nodes -- i.e., not part of a chain) due to gaps in
+ history. Rather than store them as individual fulltexts, we prefer to
+ insert them as one chain sorted by size.
+ """
+ if not orphans:
+ return nodes
+
+ def getsize(node, default=0):
+ meta = self.data.getmeta(filename, node)
+ if constants.METAKEYSIZE in meta:
+ return meta[constants.METAKEYSIZE]
+ else:
+ return default
+
+ # Sort orphans by size; biggest first is preferred, since it's more
+ # likely to be the newest version assuming files grow over time.
+ # (Sort by node first to ensure the sort is stable.)
+ orphans = sorted(orphans)
+ orphans = list(sorted(orphans, key=getsize, reverse=True))
+ if ui.debugflag:
+ ui.debug("%s: orphan chain: %s\n" % (filename,
+ ", ".join([short(s) for s in orphans])))
+
+ # Create one contiguous chain and reassign deltabases.
+ for i, node in enumerate(orphans):
+ if i == 0:
+ deltabases[node] = (nullid, 0)
+ else:
+ parent = orphans[i - 1]
+ deltabases[node] = (parent, deltabases[parent][1] + 1)
+ nodes = [n for n in nodes if n not in orphans]
+ nodes += orphans
+ return nodes
+
+ def repackdata(self, ledger, target):
+ ui = self.repo.ui
+ maxchainlen = ui.configint('packs', 'maxchainlen', 1000)
+
+ byfile = {}
+ for entry in ledger.entries.itervalues():
+ if entry.datasource:
+ byfile.setdefault(entry.filename, {})[entry.node] = entry
+
+ count = 0
+ repackprogress = ui.makeprogress(_("repacking data"), unit=self.unit,
+ total=len(byfile))
+ for filename, entries in sorted(byfile.iteritems()):
+ repackprogress.update(count)
+
+ ancestors = {}
+ nodes = list(node for node in entries)
+ nohistory = []
+ buildprogress = ui.makeprogress(_("building history"), unit='nodes',
+ total=len(nodes))
+ for i, node in enumerate(nodes):
+ if node in ancestors:
+ continue
+ buildprogress.update(i)
+ try:
+ ancestors.update(self.fullhistory.getancestors(filename,
+ node, known=ancestors))
+ except KeyError:
+ # Since we're packing data entries, we may not have the
+ # corresponding history entries for them. It's not a big
+ # deal, but the entries won't be delta'd perfectly.
+ nohistory.append(node)
+ buildprogress.complete()
+
+ # Order the nodes children first, so we can produce reverse deltas
+ orderednodes = list(reversed(self._toposort(ancestors)))
+ if len(nohistory) > 0:
+ ui.debug('repackdata: %d nodes without history\n' %
+ len(nohistory))
+ orderednodes.extend(sorted(nohistory))
+
+ # Filter orderednodes to just the nodes we want to serialize (it
+ # currently also has the edge nodes' ancestors).
+ orderednodes = list(filter(lambda node: node in nodes,
+ orderednodes))
+
+ # Garbage collect old nodes:
+ if self.garbagecollect:
+ neworderednodes = []
+ for node in orderednodes:
+ # If the node is old and is not in the keepset, we skip it,
+ # and mark it as garbage collected
+ if ((filename, node) not in self.keepkeys and
+ self.isold(self.repo, filename, node)):
+ entries[node].gced = True
+ continue
+ neworderednodes.append(node)
+ orderednodes = neworderednodes
+
+ # Compute delta bases for nodes:
+ deltabases = {}
+ nobase = set()
+ referenced = set()
+ nodes = set(nodes)
+ processprogress = ui.makeprogress(_("processing nodes"),
+ unit='nodes',
+ total=len(orderednodes))
+ for i, node in enumerate(orderednodes):
+ processprogress.update(i)
+ # Find delta base
+ # TODO: allow delta'ing against most recent descendant instead
+ # of immediate child
+ deltatuple = deltabases.get(node, None)
+ if deltatuple is None:
+ deltabase, chainlen = nullid, 0
+ deltabases[node] = (nullid, 0)
+ nobase.add(node)
+ else:
+ deltabase, chainlen = deltatuple
+ referenced.add(deltabase)
+
+ # Use available ancestor information to inform our delta choices
+ ancestorinfo = ancestors.get(node)
+ if ancestorinfo:
+ p1, p2, linknode, copyfrom = ancestorinfo
+
+ # The presence of copyfrom means we're at a point where the
+ # file was copied from elsewhere. So don't attempt to do any
+ # deltas with the other file.
+ if copyfrom:
+ p1 = nullid
+
+ if chainlen < maxchainlen:
+ # Record this child as the delta base for its parents.
+ # This may be non-optimal, since the parents may have
+ # many children, and this will only choose the last one.
+ # TODO: record all children and try all deltas to find
+ # best
+ if p1 != nullid:
+ deltabases[p1] = (node, chainlen + 1)
+ if p2 != nullid:
+ deltabases[p2] = (node, chainlen + 1)
+
+ # experimental config: repack.chainorphansbysize
+ if ui.configbool('repack', 'chainorphansbysize'):
+ orphans = nobase - referenced
+ orderednodes = self._chainorphans(ui, filename, orderednodes,
+ orphans, deltabases)
+
+ # Compute deltas and write to the pack
+ for i, node in enumerate(orderednodes):
+ deltabase, chainlen = deltabases[node]
+ # Compute delta
+ # TODO: Optimize the deltachain fetching. Since we're
+ # iterating over the different version of the file, we may
+ # be fetching the same deltachain over and over again.
+ meta = None
+ if deltabase != nullid:
+ deltaentry = self.data.getdelta(filename, node)
+ delta, deltabasename, origdeltabase, meta = deltaentry
+ size = meta.get(constants.METAKEYSIZE)
+ if (deltabasename != filename or origdeltabase != deltabase
+ or size is None):
+ deltabasetext = self.data.get(filename, deltabase)
+ original = self.data.get(filename, node)
+ size = len(original)
+ delta = mdiff.textdiff(deltabasetext, original)
+ else:
+ delta = self.data.get(filename, node)
+ size = len(delta)
+ meta = self.data.getmeta(filename, node)
+
+ # TODO: don't use the delta if it's larger than the fulltext
+ if constants.METAKEYSIZE not in meta:
+ meta[constants.METAKEYSIZE] = size
+ target.add(filename, node, deltabase, delta, meta)
+
+ entries[node].datarepacked = True
+
+ processprogress.complete()
+ count += 1
+
+ repackprogress.complete()
+ target.close(ledger=ledger)
+
+ def repackhistory(self, ledger, target):
+ ui = self.repo.ui
+
+ byfile = {}
+ for entry in ledger.entries.itervalues():
+ if entry.historysource:
+ byfile.setdefault(entry.filename, {})[entry.node] = entry
+
+ progress = ui.makeprogress(_("repacking history"), unit=self.unit,
+ total=len(byfile))
+ for filename, entries in sorted(byfile.iteritems()):
+ ancestors = {}
+ nodes = list(node for node in entries)
+
+ for node in nodes:
+ if node in ancestors:
+ continue
+ ancestors.update(self.history.getancestors(filename, node,
+ known=ancestors))
+
+ # Order the nodes children first
+ orderednodes = reversed(self._toposort(ancestors))
+
+ # Write to the pack
+ dontprocess = set()
+ for node in orderednodes:
+ p1, p2, linknode, copyfrom = ancestors[node]
+
+ # If the node is marked dontprocess, but it's also in the
+ # explicit entries set, that means the node exists both in this
+ # file and in another file that was copied to this file.
+ # Usually this happens if the file was copied to another file,
+ # then the copy was deleted, then reintroduced without copy
+ # metadata. The original add and the new add have the same hash
+ # since the content is identical and the parents are null.
+ if node in dontprocess and node not in entries:
+ # If copyfrom == filename, it means the copy history
+ # went to some other file, then came back to this one, so we
+ # should continue processing it.
+ if p1 != nullid and copyfrom != filename:
+ dontprocess.add(p1)
+ if p2 != nullid:
+ dontprocess.add(p2)
+ continue
+
+ if copyfrom:
+ dontprocess.add(p1)
+
+ target.add(filename, node, p1, p2, linknode, copyfrom)
+
+ if node in entries:
+ entries[node].historyrepacked = True
+
+ progress.increment()
+
+ progress.complete()
+ target.close(ledger=ledger)
+
+ def _toposort(self, ancestors):
+ def parentfunc(node):
+ p1, p2, linknode, copyfrom = ancestors[node]
+ parents = []
+ if p1 != nullid:
+ parents.append(p1)
+ if p2 != nullid:
+ parents.append(p2)
+ return parents
+
+ sortednodes = shallowutil.sortnodes(ancestors.keys(), parentfunc)
+ return sortednodes
+
+class repackledger(object):
+ """Storage for all the bookkeeping that happens during a repack. It contains
+ the list of revisions being repacked, what happened to each revision, and
+ which source store contained which revision originally (for later cleanup).
+ """
+ def __init__(self):
+ self.entries = {}
+ self.sources = {}
+ self.created = set()
+
+ def markdataentry(self, source, filename, node):
+ """Mark the given filename+node revision as having a data rev in the
+ given source.
+ """
+ entry = self._getorcreateentry(filename, node)
+ entry.datasource = True
+ self.sources.setdefault(source, set()).add(entry)
+
+ def markhistoryentry(self, source, filename, node):
+ """Mark the given filename+node revision as having a history rev in the
+ given source.
+ """
+ entry = self._getorcreateentry(filename, node)
+ entry.historysource = True
+ self.sources.setdefault(source, set()).add(entry)
+
+ def _getorcreateentry(self, filename, node):
+ key = (filename, node)
+ value = self.entries.get(key)
+ if not value:
+ value = repackentry(filename, node)
+ self.entries[key] = value
+
+ return value
+
+ def addcreated(self, value):
+ self.created.add(value)
+
+class repackentry(object):
+ """Simple class representing a single revision entry in the repackledger.
+ """
+ __slots__ = (r'filename', r'node', r'datasource', r'historysource',
+ r'datarepacked', r'historyrepacked', r'gced')
+ def __init__(self, filename, node):
+ self.filename = filename
+ self.node = node
+ # If the revision has a data entry in the source
+ self.datasource = False
+ # If the revision has a history entry in the source
+ self.historysource = False
+ # If the revision's data entry was repacked into the repack target
+ self.datarepacked = False
+ # If the revision's history entry was repacked into the repack target
+ self.historyrepacked = False
+ # If garbage collected
+ self.gced = False
+
+def repacklockvfs(repo):
+ if util.safehasattr(repo, 'name'):
+ # Lock in the shared cache so repacks across multiple copies of the same
+ # repo are coordinated.
+ sharedcachepath = shallowutil.getcachepackpath(
+ repo,
+ constants.FILEPACK_CATEGORY)
+ return vfs.vfs(sharedcachepath)
+ else:
+ return repo.svfs
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/hgext/remotefilelog/shallowbundle.py Fri Jan 18 13:28:22 2019 -0500
@@ -0,0 +1,293 @@
+# shallowbundle.py - bundle10 implementation for use with shallow repositories
+#
+# Copyright 2013 Facebook, Inc.
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2 or any later version.
+from __future__ import absolute_import
+
+from mercurial.i18n import _
+from mercurial.node import bin, hex, nullid
+from mercurial import (
+ bundlerepo,
+ changegroup,
+ error,
+ match,
+ mdiff,
+ pycompat,
+)
+from . import (
+ constants,
+ remotefilelog,
+ shallowutil,
+)
+
+NoFiles = 0
+LocalFiles = 1
+AllFiles = 2
+
+def shallowgroup(cls, self, nodelist, rlog, lookup, units=None, reorder=None):
+ if not isinstance(rlog, remotefilelog.remotefilelog):
+ for c in super(cls, self).group(nodelist, rlog, lookup,
+ units=units):
+ yield c
+ return
+
+ if not nodelist:
+ yield self.close()
+ return
+
+ nodelist = shallowutil.sortnodes(nodelist, rlog.parents)
+
+ # add the parent of the first rev
+ p = rlog.parents(nodelist[0])[0]
+ nodelist.insert(0, p)
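+ # The prepended parent only seeds the delta chain: the loop below
+ # emits chunks for nodelist[1:], so no chunk is sent for it and the
+ # receiver is expected to already have (or fetch) that revision.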
+
+ # build deltas
+ for i in pycompat.xrange(len(nodelist) - 1):
+ prev, curr = nodelist[i], nodelist[i + 1]
+ linknode = lookup(curr)
+ for c in self.nodechunk(rlog, curr, prev, linknode):
+ yield c
+
+ yield self.close()
+
+class shallowcg1packer(changegroup.cgpacker):
+ def generate(self, commonrevs, clnodes, fastpathlinkrev, source):
+ if shallowutil.isenabled(self._repo):
+ fastpathlinkrev = False
+
+ return super(shallowcg1packer, self).generate(commonrevs, clnodes,
+ fastpathlinkrev, source)
+
+ def group(self, nodelist, rlog, lookup, units=None, reorder=None):
+ return shallowgroup(shallowcg1packer, self, nodelist, rlog, lookup,
+ units=units)
+
+ def generatefiles(self, changedfiles, *args):
+ try:
+ linknodes, commonrevs, source = args
+ except ValueError:
+ commonrevs, source, mfdicts, fastpathlinkrev, fnodes, clrevs = args
+ if shallowutil.isenabled(self._repo):
+ repo = self._repo
+ if isinstance(repo, bundlerepo.bundlerepository):
+ # If the bundle contains filelogs, we can't pull from it, since
+ # bundlerepo is heavily tied to revlogs. Require that the
+ # user use `hg unbundle` instead.
+ # Force load the filelog data.
+ bundlerepo.bundlerepository.file(repo, 'foo')
+ if repo._cgfilespos:
+ raise error.Abort("cannot pull from full bundles",
+ hint="use `hg unbundle` instead")
+ return []
+ filestosend = self.shouldaddfilegroups(source)
+ if filestosend == NoFiles:
+ changedfiles = [f for f in changedfiles
+ if not repo.shallowmatch(f)]
+
+ return super(shallowcg1packer, self).generatefiles(
+ changedfiles, *args)
+
+ def shouldaddfilegroups(self, source):
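+ # Returns one of AllFiles, LocalFiles or NoFiles:
+ # push/bundle -> AllFiles; serve/pull -> LocalFiles when the peer
+ # advertises the bundle2 capability, otherwise AllFiles (with a
+ # warning); anything else -> NoFiles.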
+ repo = self._repo
+ if not shallowutil.isenabled(repo):
+ return AllFiles
+
+ if source == "push" or source == "bundle":
+ return AllFiles
+
+ caps = self._bundlecaps or []
+ if source == "serve" or source == "pull":
+ if constants.BUNDLE2_CAPABLITY in caps:
+ return LocalFiles
+ else:
+ # Serving to a full repo requires us to serve everything
+ repo.ui.warn(_("pulling from a shallow repo\n"))
+ return AllFiles
+
+ return NoFiles
+
+ def prune(self, rlog, missing, commonrevs):
+ if not isinstance(rlog, remotefilelog.remotefilelog):
+ return super(shallowcg1packer, self).prune(rlog, missing,
+ commonrevs)
+
+ repo = self._repo
+ results = []
+ for fnode in missing:
+ fctx = repo.filectx(rlog.filename, fileid=fnode)
+ if fctx.linkrev() not in commonrevs:
+ results.append(fnode)
+ return results
+
+ def nodechunk(self, revlog, node, prevnode, linknode):
+ prefix = ''
+ if prevnode == nullid:
+ delta = revlog.revision(node, raw=True)
+ prefix = mdiff.trivialdiffheader(len(delta))
+ else:
+ # Actually uses remotefilelog.revdiff which works on nodes, not revs
+ delta = revlog.revdiff(prevnode, node)
+ p1, p2 = revlog.parents(node)
+ flags = revlog.flags(node)
+ meta = self.builddeltaheader(node, p1, p2, prevnode, linknode, flags)
+ meta += prefix
+ l = len(meta) + len(delta)
+ yield changegroup.chunkheader(l)
+ yield meta
+ yield delta
+
+def makechangegroup(orig, repo, outgoing, version, source, *args, **kwargs):
+ if not shallowutil.isenabled(repo):
+ return orig(repo, outgoing, version, source, *args, **kwargs)
+
+ original = repo.shallowmatch
+ try:
+ # if serving, only send files the clients has patterns for
+ if source == 'serve':
+ bundlecaps = kwargs.get(r'bundlecaps')
+ includepattern = None
+ excludepattern = None
+ for cap in (bundlecaps or []):
+ if cap.startswith("includepattern="):
+ raw = cap[len("includepattern="):]
+ if raw:
+ includepattern = raw.split('\0')
+ elif cap.startswith("excludepattern="):
+ raw = cap[len("excludepattern="):]
+ if raw:
+ excludepattern = raw.split('\0')
+ if includepattern or excludepattern:
+ repo.shallowmatch = match.match(repo.root, '', None,
+ includepattern, excludepattern)
+ else:
+ repo.shallowmatch = match.always(repo.root, '')
+ return orig(repo, outgoing, version, source, *args, **kwargs)
+ finally:
+ repo.shallowmatch = original
+
+def addchangegroupfiles(orig, repo, source, revmap, trp, expectedfiles, *args):
+ if not shallowutil.isenabled(repo):
+ return orig(repo, source, revmap, trp, expectedfiles, *args)
+
+ newfiles = 0
+ visited = set()
+ revisiondatas = {}
+ queue = []
+
+ # Normal Mercurial processes each file one at a time, adding all
+ # the new revisions for that file at once. In remotefilelog a file
+ # revision may depend on a different file's revision (in the case
+ # of a rename/copy), so we must lay all revisions down across all
+ # files in topological order.
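+ # For example, if a revision of file 'b' was created by copying
+ # file 'a', its text refers to an 'a' revision that may also be in
+ # this changegroup, so that 'a' revision must be laid down first.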
+
+ # read all the file chunks but don't add them
+ progress = repo.ui.makeprogress(_('files'), total=expectedfiles)
+ while True:
+ chunkdata = source.filelogheader()
+ if not chunkdata:
+ break
+ f = chunkdata["filename"]
+ repo.ui.debug("adding %s revisions\n" % f)
+ progress.increment()
+
+ if not repo.shallowmatch(f):
+ fl = repo.file(f)
+ deltas = source.deltaiter()
+ fl.addgroup(deltas, revmap, trp)
+ continue
+
+ chain = None
+ while True:
+ # returns: (node, p1, p2, cs, deltabase, delta, flags) or None
+ revisiondata = source.deltachunk(chain)
+ if not revisiondata:
+ break
+
+ chain = revisiondata[0]
+
+ revisiondatas[(f, chain)] = revisiondata
+ queue.append((f, chain))
+
+ if f not in visited:
+ newfiles += 1
+ visited.add(f)
+
+ if chain is None:
+ raise error.Abort(_("received file revlog group is empty"))
+
+ processed = set()
+ def available(f, node, depf, depnode):
+ if depnode != nullid and (depf, depnode) not in processed:
+ if (depf, depnode) not in revisiondatas:
+ # It's not in the changegroup, assume it's already
+ # in the repo
+ return True
+ # re-add self to queue
+ queue.insert(0, (f, node))
+ # add dependency in front
+ queue.insert(0, (depf, depnode))
+ return False
+ return True
+
+ skipcount = 0
+
+ # Prefetch the non-bundled revisions that we will need
+ prefetchfiles = []
+ for f, node in queue:
+ revisiondata = revisiondatas[(f, node)]
+ # revisiondata: (node, p1, p2, cs, deltabase, delta, flags)
+ dependents = [revisiondata[1], revisiondata[2], revisiondata[4]]
+
+ for dependent in dependents:
+ if dependent == nullid or (f, dependent) in revisiondatas:
+ continue
+ prefetchfiles.append((f, hex(dependent)))
+
+ repo.fileservice.prefetch(prefetchfiles)
+
+ # Apply the revisions in topological order such that a revision
+ # is only written once its deltabase and parents have been written.
+ while queue:
+ f, node = queue.pop(0)
+ if (f, node) in processed:
+ continue
+
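+ # available() pushes unmet dependencies back onto the front of the
+ # queue; if we sweep the whole queue without writing anything, the
+ # remaining revisions can only be waiting on each other.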
+ skipcount += 1
+ if skipcount > len(queue) + 1:
+ raise error.Abort(_("circular node dependency"))
+
+ fl = repo.file(f)
+
+ revisiondata = revisiondatas[(f, node)]
+ # revisiondata: (node, p1, p2, cs, deltabase, delta, flags)
+ node, p1, p2, linknode, deltabase, delta, flags = revisiondata
+
+ if not available(f, node, f, deltabase):
+ continue
+
+ base = fl.revision(deltabase, raw=True)
+ text = mdiff.patch(base, delta)
+ if not isinstance(text, bytes):
+ text = bytes(text)
+
+ meta, text = shallowutil.parsemeta(text)
+ if 'copy' in meta:
+ copyfrom = meta['copy']
+ copynode = bin(meta['copyrev'])
+ if not available(f, node, copyfrom, copynode):
+ continue
+
+ for p in [p1, p2]:
+ if p != nullid:
+ if not available(f, node, f, p):
+ continue
+
+ fl.add(text, meta, trp, linknode, p1, p2)
+ processed.add((f, node))
+ skipcount = 0
+
+ progress.complete()
+
+ return len(revisiondatas), newfiles
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/hgext/remotefilelog/shallowrepo.py Fri Jan 18 13:28:22 2019 -0500
@@ -0,0 +1,305 @@
+# shallowrepo.py - shallow repository that uses remote filelogs
+#
+# Copyright 2013 Facebook, Inc.
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2 or any later version.
+from __future__ import absolute_import
+
+import os
+
+from mercurial.i18n import _
+from mercurial.node import hex, nullid, nullrev
+from mercurial import (
+ encoding,
+ error,
+ localrepo,
+ match,
+ scmutil,
+ sparse,
+ util,
+)
+from mercurial.utils import procutil
+from . import (
+ connectionpool,
+ constants,
+ contentstore,
+ datapack,
+ fileserverclient,
+ historypack,
+ metadatastore,
+ remotefilectx,
+ remotefilelog,
+ shallowutil,
+)
+
+if util.safehasattr(util, '_hgexecutable'):
+ # Before 5be286db
+ _hgexecutable = util.hgexecutable
+else:
+ _hgexecutable = procutil.hgexecutable
+
+# These make*stores functions are global so that other extensions can replace
+# them.
+def makelocalstores(repo):
+ """In-repo stores, like .hg/store/data; can not be discarded."""
+ localpath = os.path.join(repo.svfs.vfs.base, 'data')
+ if not os.path.exists(localpath):
+ os.makedirs(localpath)
+
+ # Instantiate local data stores
+ localcontent = contentstore.remotefilelogcontentstore(
+ repo, localpath, repo.name, shared=False)
+ localmetadata = metadatastore.remotefilelogmetadatastore(
+ repo, localpath, repo.name, shared=False)
+ return localcontent, localmetadata
+
+def makecachestores(repo):
+ """Typically machine-wide, cache of remote data; can be discarded."""
+ # Instantiate shared cache stores
+ cachepath = shallowutil.getcachepath(repo.ui)
+ cachecontent = contentstore.remotefilelogcontentstore(
+ repo, cachepath, repo.name, shared=True)
+ cachemetadata = metadatastore.remotefilelogmetadatastore(
+ repo, cachepath, repo.name, shared=True)
+
+ repo.sharedstore = cachecontent
+ repo.shareddatastores.append(cachecontent)
+ repo.sharedhistorystores.append(cachemetadata)
+
+ return cachecontent, cachemetadata
+
+def makeremotestores(repo, cachecontent, cachemetadata):
+ """These stores fetch data from a remote server."""
+ # Instantiate remote stores
+ repo.fileservice = fileserverclient.fileserverclient(repo)
+ remotecontent = contentstore.remotecontentstore(
+ repo.ui, repo.fileservice, cachecontent)
+ remotemetadata = metadatastore.remotemetadatastore(
+ repo.ui, repo.fileservice, cachemetadata)
+ return remotecontent, remotemetadata
+
+def makepackstores(repo):
+ """Packs are more efficient (to read from) cache stores."""
+ # Instantiate pack stores
+ packpath = shallowutil.getcachepackpath(repo,
+ constants.FILEPACK_CATEGORY)
+ packcontentstore = datapack.datapackstore(repo.ui, packpath)
+ packmetadatastore = historypack.historypackstore(repo.ui, packpath)
+
+ repo.shareddatastores.append(packcontentstore)
+ repo.sharedhistorystores.append(packmetadatastore)
+ shallowutil.reportpackmetrics(repo.ui, 'filestore', packcontentstore,
+ packmetadatastore)
+ return packcontentstore, packmetadatastore
+
+def makeunionstores(repo):
+ """Union stores iterate the other stores and return the first result."""
+ repo.shareddatastores = []
+ repo.sharedhistorystores = []
+
+ packcontentstore, packmetadatastore = makepackstores(repo)
+ cachecontent, cachemetadata = makecachestores(repo)
+ localcontent, localmetadata = makelocalstores(repo)
+ remotecontent, remotemetadata = makeremotestores(repo, cachecontent,
+ cachemetadata)
+
+ # Instantiate union stores
+ repo.contentstore = contentstore.unioncontentstore(
+ packcontentstore, cachecontent,
+ localcontent, remotecontent, writestore=localcontent)
+ repo.metadatastore = metadatastore.unionmetadatastore(
+ packmetadatastore, cachemetadata, localmetadata, remotemetadata,
+ writestore=localmetadata)
+
+ fileservicedatawrite = cachecontent
+ fileservicehistorywrite = cachemetadata
+ repo.fileservice.setstore(repo.contentstore, repo.metadatastore,
+ fileservicedatawrite, fileservicehistorywrite)
+ shallowutil.reportpackmetrics(repo.ui, 'filestore',
+ packcontentstore, packmetadatastore)
+
+def wraprepo(repo):
+ class shallowrepository(repo.__class__):
+ @util.propertycache
+ def name(self):
+ return self.ui.config('remotefilelog', 'reponame')
+
+ @util.propertycache
+ def fallbackpath(self):
+ path = repo.ui.config("remotefilelog", "fallbackpath",
+ repo.ui.config('paths', 'default'))
+ if not path:
+ raise error.Abort("no remotefilelog server "
+ "configured - is your .hg/hgrc trusted?")
+
+ return path
+
+ def maybesparsematch(self, *revs, **kwargs):
+ '''
+ A wrapper that allows remotefilelog to invoke sparsematch() if
+ this is a sparse repository, or returns None otherwise.
+ '''
+ if revs:
+ ret = sparse.matcher(repo, revs=revs)
+ else:
+ ret = sparse.matcher(repo)
+
+ if ret.always():
+ return None
+ return ret
+
+ def file(self, f):
+ if f[0] == '/':
+ f = f[1:]
+
+ if self.shallowmatch(f):
+ return remotefilelog.remotefilelog(self.svfs, f, self)
+ else:
+ return super(shallowrepository, self).file(f)
+
+ def filectx(self, path, *args, **kwargs):
+ if self.shallowmatch(path):
+ return remotefilectx.remotefilectx(self, path, *args, **kwargs)
+ else:
+ return super(shallowrepository, self).filectx(path, *args,
+ **kwargs)
+
+ @localrepo.unfilteredmethod
+ def commitctx(self, ctx, error=False):
+ """Add a new revision to current repository.
+ Revision information is passed via the context argument.
+ """
+
+ # some contexts already have manifest nodes; they don't need any
+ # prefetching (for example, if we're just editing a commit message
+ # we can reuse the manifest)
+ if not ctx.manifestnode():
+ # prefetch files that will likely be compared
+ m1 = ctx.p1().manifest()
+ files = []
+ for f in ctx.modified() + ctx.added():
+ fparent1 = m1.get(f, nullid)
+ if fparent1 != nullid:
+ files.append((f, hex(fparent1)))
+ self.fileservice.prefetch(files)
+ return super(shallowrepository, self).commitctx(ctx,
+ error=error)
+
+ def backgroundprefetch(self, revs, base=None, repack=False, pats=None,
+ opts=None):
+ """Runs prefetch in background with optional repack
+ """
+ cmd = [_hgexecutable(), '-R', repo.origroot, 'prefetch']
+ if repack:
+ cmd.append('--repack')
+ if revs:
+ cmd += ['-r', revs]
+ procutil.runbgcommand(cmd, encoding.environ)
+
+ def prefetch(self, revs, base=None, pats=None, opts=None):
+ """Prefetches all the necessary file revisions for the given revs
+ Optionally runs repack in background
+ """
+ with repo._lock(repo.svfs, 'prefetchlock', True, None, None,
+ _('prefetching in %s') % repo.origroot):
+ self._prefetch(revs, base, pats, opts)
+
+ def _prefetch(self, revs, base=None, pats=None, opts=None):
+ fallbackpath = self.fallbackpath
+ if fallbackpath:
+ # If we know a rev is on the server, we should fetch the server
+ # version of those files, since our local file versions might
+ # become obsolete if the local commits are stripped.
+ localrevs = repo.revs('outgoing(%s)', fallbackpath)
+ if base is not None and base != nullrev:
+ serverbase = list(repo.revs('first(reverse(::%s) - %ld)',
+ base, localrevs))
+ if serverbase:
+ base = serverbase[0]
+ else:
+ localrevs = repo
+
+ mfl = repo.manifestlog
+ mfrevlog = mfl.getstorage('')
+ if base is not None:
+ mfdict = mfl[repo[base].manifestnode()].read()
+ skip = set(mfdict.iteritems())
+ else:
+ skip = set()
+
+ # Start from a copy of the skip set to avoid constant resizing;
+ # it's likely to be very similar to the prefetch set anyway.
+ files = skip.copy()
+ serverfiles = skip.copy()
+ visited = set()
+ visited.add(nullrev)
+ revcount = len(revs)
+ progress = self.ui.makeprogress(_('prefetching'), total=revcount)
+ progress.update(0)
+ for rev in sorted(revs):
+ ctx = repo[rev]
+ if pats:
+ m = scmutil.match(ctx, pats, opts)
+ sparsematch = repo.maybesparsematch(rev)
+
+ mfnode = ctx.manifestnode()
+ mfrev = mfrevlog.rev(mfnode)
+
+ # Decompressing manifests is expensive.
+ # When possible, only read the deltas.
+ p1, p2 = mfrevlog.parentrevs(mfrev)
+ if p1 in visited and p2 in visited:
+ mfdict = mfl[mfnode].readfast()
+ else:
+ mfdict = mfl[mfnode].read()
+
+ diff = mfdict.iteritems()
+ if pats:
+ diff = (pf for pf in diff if m(pf[0]))
+ if sparsematch:
+ diff = (pf for pf in diff if sparsematch(pf[0]))
+ if rev not in localrevs:
+ serverfiles.update(diff)
+ else:
+ files.update(diff)
+
+ visited.add(mfrev)
+ progress.increment()
+
+ files.difference_update(skip)
+ serverfiles.difference_update(skip)
+ progress.complete()
+
+ # Fetch files known to be on the server
+ if serverfiles:
+ results = [(path, hex(fnode)) for (path, fnode) in serverfiles]
+ repo.fileservice.prefetch(results, force=True)
+
+ # Fetch files that may or may not be on the server
+ if files:
+ results = [(path, hex(fnode)) for (path, fnode) in files]
+ repo.fileservice.prefetch(results)
+
+ def close(self):
+ super(shallowrepository, self).close()
+ self.connectionpool.close()
+
+ repo.__class__ = shallowrepository
+
+ repo.shallowmatch = match.always(repo.root, '')
+
+ makeunionstores(repo)
+
+ repo.includepattern = repo.ui.configlist("remotefilelog", "includepattern",
+ None)
+ repo.excludepattern = repo.ui.configlist("remotefilelog", "excludepattern",
+ None)
+ if not util.safehasattr(repo, 'connectionpool'):
+ repo.connectionpool = connectionpool.connectionpool(repo)
+
+ if repo.includepattern or repo.excludepattern:
+ repo.shallowmatch = match.match(repo.root, '', None,
+ repo.includepattern, repo.excludepattern)
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/hgext/remotefilelog/shallowstore.py Fri Jan 18 13:28:22 2019 -0500
@@ -0,0 +1,17 @@
+# shallowstore.py - shallow store for interacting with shallow repos
+#
+# Copyright 2013 Facebook, Inc.
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2 or any later version.
+from __future__ import absolute_import
+
+def wrapstore(store):
+ class shallowstore(store.__class__):
+ def __contains__(self, path):
+ # Assume it exists
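+ # (a shallow repo fetches file data on demand, so the local
+ # store can't answer this; always claiming the path exists
+ # keeps store-level existence checks from failing)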
+ return True
+
+ store.__class__ = shallowstore
+
+ return store
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/hgext/remotefilelog/shallowutil.py Fri Jan 18 13:28:22 2019 -0500
@@ -0,0 +1,492 @@
+# shallowutil.py -- remotefilelog utilities
+#
+# Copyright 2014 Facebook, Inc.
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2 or any later version.
+from __future__ import absolute_import
+
+import collections
+import errno
+import hashlib
+import os
+import stat
+import struct
+import tempfile
+
+from mercurial.i18n import _
+from mercurial import (
+ error,
+ node,
+ pycompat,
+ revlog,
+ util,
+)
+from mercurial.utils import (
+ storageutil,
+ stringutil,
+)
+from . import constants
+
+if not pycompat.iswindows:
+ import grp
+
+def isenabled(repo):
+ """returns whether the repository is remotefilelog enabled or not"""
+ return constants.SHALLOWREPO_REQUIREMENT in repo.requirements
+
+def getcachekey(reponame, file, id):
+ pathhash = node.hex(hashlib.sha1(file).digest())
+ return os.path.join(reponame, pathhash[:2], pathhash[2:], id)
+
+def getlocalkey(file, id):
+ pathhash = node.hex(hashlib.sha1(file).digest())
+ return os.path.join(pathhash, id)
+
+def getcachepath(ui, allowempty=False):
+ cachepath = ui.config("remotefilelog", "cachepath")
+ if not cachepath:
+ if allowempty:
+ return None
+ else:
+ raise error.Abort(_("could not find config option "
+ "remotefilelog.cachepath"))
+ return util.expandpath(cachepath)
+
+def getcachepackpath(repo, category):
+ cachepath = getcachepath(repo.ui)
+ if category != constants.FILEPACK_CATEGORY:
+ return os.path.join(cachepath, repo.name, 'packs', category)
+ else:
+ return os.path.join(cachepath, repo.name, 'packs')
+
+def getlocalpackpath(base, category):
+ return os.path.join(base, 'packs', category)
+
+def createrevlogtext(text, copyfrom=None, copyrev=None):
+ """returns a string that matches the revlog contents in a
+ traditional revlog
+ """
+ meta = {}
+ if copyfrom or text.startswith('\1\n'):
+ if copyfrom:
+ meta['copy'] = copyfrom
+ meta['copyrev'] = copyrev
+ text = storageutil.packmeta(meta, text)
+
+ return text
+
+def parsemeta(text):
+ """parse mercurial filelog metadata"""
+ meta, size = storageutil.parsemeta(text)
+ if text.startswith('\1\n'):
+ s = text.index('\1\n', 2)
+ text = text[s + 2:]
+ return meta or {}, text
+
+def sumdicts(*dicts):
+ """Adds all the values of *dicts together into one dictionary. This assumes
+ the values in *dicts are all summable.
+
+ e.g. [{'a': 4, 'b': 2}, {'b': 3, 'c': 1}] -> {'a': 4, 'b': 5, 'c': 1}
+ """
+ result = collections.defaultdict(lambda: 0)
+ for dict in dicts:
+ for k, v in dict.iteritems():
+ result[k] += v
+ return result
+
+def prefixkeys(dict, prefix):
+ """Returns ``dict`` with ``prefix`` prepended to all its keys."""
+ result = {}
+ for k, v in dict.iteritems():
+ result[prefix + k] = v
+ return result
+
+def reportpackmetrics(ui, prefix, *stores):
+ dicts = [s.getmetrics() for s in stores]
+ dict = prefixkeys(sumdicts(*dicts), prefix + '_')
+ ui.log(prefix + "_packsizes", "\n", **pycompat.strkwargs(dict))
+
+def _parsepackmeta(metabuf):
+ """parse datapack meta, bytes (<metadata-list>) -> dict
+
+ The dict contains raw content - both keys and values are strings.
+ Upper-level business may want to convert some of them to other types like
+ integers, on their own.
+
+ raise ValueError if the data is corrupted
+ """
+ metadict = {}
+ offset = 0
+ buflen = len(metabuf)
+ while buflen - offset >= 3:
+ key = metabuf[offset:offset + 1]
+ offset += 1
+ metalen = struct.unpack_from('!H', metabuf, offset)[0]
+ offset += 2
+ if offset + metalen > buflen:
+ raise ValueError('corrupted metadata: incomplete buffer')
+ value = metabuf[offset:offset + metalen]
+ metadict[key] = value
+ offset += metalen
+ if offset != buflen:
+ raise ValueError('corrupted metadata: redundant data')
+ return metadict
+
+def _buildpackmeta(metadict):
+ """reverse of _parsepackmeta, dict -> bytes (<metadata-list>)
+
+ The dict contains raw content - both keys and values are strings.
+ Upper-level business may want to serialize some of other types (like
+ integers) to strings before calling this function.
+
+ raise ProgrammingError when metadata key is illegal, or ValueError if
+ length limit is exceeded
+ """
+ metabuf = ''
+ for k, v in sorted((metadict or {}).iteritems()):
+ if len(k) != 1:
+ raise error.ProgrammingError('packmeta: illegal key: %s' % k)
+ if len(v) > 0xfffe:
+ raise ValueError('metadata value is too long: 0x%x > 0xfffe'
+ % len(v))
+ metabuf += k
+ metabuf += struct.pack('!H', len(v))
+ metabuf += v
+ # len(metabuf) is guaranteed representable in 4 bytes, because there are
+ # only 256 keys, and for each value, len(value) <= 0xfffe.
+ return metabuf
+
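+# Illustration: each entry is a 1-byte key, a 2-byte big-endian length,
+# then the value; e.g. _buildpackmeta({'s': '10'}) == 's\x00\x0210'.
+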
+_metaitemtypes = {
+ constants.METAKEYFLAG: (int, pycompat.long),
+ constants.METAKEYSIZE: (int, pycompat.long),
+}
+
+def buildpackmeta(metadict):
+ """like _buildpackmeta, but typechecks metadict and normalize it.
+
+ This means, METAKEYSIZE and METAKEYSIZE should have integers as values,
+ and METAKEYFLAG will be dropped if its value is 0.
+ """
+ newmeta = {}
+ for k, v in (metadict or {}).iteritems():
+ expectedtype = _metaitemtypes.get(k, (bytes,))
+ if not isinstance(v, expectedtype):
+ raise error.ProgrammingError('packmeta: wrong type of key %s' % k)
+ # normalize int to binary buffer
+ if int in expectedtype:
+ # optimization: remove flag if it's 0 to save space
+ if k == constants.METAKEYFLAG and v == 0:
+ continue
+ v = int2bin(v)
+ newmeta[k] = v
+ return _buildpackmeta(newmeta)
+
+def parsepackmeta(metabuf):
+ """like _parsepackmeta, but convert fields to desired types automatically.
+
+ This means, METAKEYFLAG and METAKEYSIZE fields will be converted to
+ integers.
+ """
+ metadict = _parsepackmeta(metabuf)
+ for k, v in metadict.iteritems():
+ if k in _metaitemtypes and int in _metaitemtypes[k]:
+ metadict[k] = bin2int(v)
+ return metadict
+
+def int2bin(n):
+ """convert a non-negative integer to raw binary buffer"""
+ buf = bytearray()
+ while n > 0:
+ buf.insert(0, n & 0xff)
+ n >>= 8
+ return bytes(buf)
+
+def bin2int(buf):
+ """the reverse of int2bin, convert a binary buffer to an integer"""
+ x = 0
+ for b in bytearray(buf):
+ x <<= 8
+ x |= b
+ return x
+
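+# Round-trip examples: int2bin(0) == b'', int2bin(0x1234) == b'\x12\x34',
+# and bin2int(int2bin(n)) == n for any non-negative integer n.
+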
+def parsesizeflags(raw):
+ """given a remotefilelog blob, return (headersize, rawtextsize, flags)
+
+ see remotefilelogserver.createfileblob for the format.
+ raise RuntimeError if the content is ill-formed.
+ """
+ flags = revlog.REVIDX_DEFAULT_FLAGS
+ size = None
+ try:
+ index = raw.index('\0')
+ header = raw[:index]
+ if header.startswith('v'):
+ # v1 and above, header starts with 'v'
+ if header.startswith('v1\n'):
+ for s in header.split('\n'):
+ if s.startswith(constants.METAKEYSIZE):
+ size = int(s[len(constants.METAKEYSIZE):])
+ elif s.startswith(constants.METAKEYFLAG):
+ flags = int(s[len(constants.METAKEYFLAG):])
+ else:
+ raise RuntimeError('unsupported remotefilelog header: %s'
+ % header)
+ else:
+ # v0, str(int(size)) is the header
+ size = int(header)
+ except ValueError:
+ raise RuntimeError("unexpected remotefilelog header: illegal format")
+ if size is None:
+ raise RuntimeError("unexpected remotefilelog header: no size found")
+ return index + 1, size, flags
+
+def buildfileblobheader(size, flags, version=None):
+ """return the header of a remotefilelog blob.
+
+ see remotefilelogserver.createfileblob for the format.
+ approximately the reverse of parsesizeflags.
+
+ version could be 0 or 1, or None (auto decide).
+ """
+ # choose v0 if flags is empty, otherwise v1
+ if version is None:
+ version = int(bool(flags))
+ if version == 1:
+ header = ('v1\n%s%d\n%s%d'
+ % (constants.METAKEYSIZE, size,
+ constants.METAKEYFLAG, flags))
+ elif version == 0:
+ if flags:
+ raise error.ProgrammingError('fileblob v0 does not support flag')
+ header = '%d' % size
+ else:
+ raise error.ProgrammingError('unknown fileblob version %d' % version)
+ return header
+
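+# For instance, buildfileblobheader(123, 0) selects v0 and returns '123',
+# while any non-zero flags selects v1 and returns
+# 'v1\n' + METAKEYSIZE + '123' + '\n' + METAKEYFLAG + '<flags>'.
+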
+def ancestormap(raw):
+ offset, size, flags = parsesizeflags(raw)
+ start = offset + size
+
+ mapping = {}
+ while start < len(raw):
+ divider = raw.index('\0', start + 80)
+
+ currentnode = raw[start:(start + 20)]
+ p1 = raw[(start + 20):(start + 40)]
+ p2 = raw[(start + 40):(start + 60)]
+ linknode = raw[(start + 60):(start + 80)]
+ copyfrom = raw[(start + 80):divider]
+
+ mapping[currentnode] = (p1, p2, linknode, copyfrom)
+ start = divider + 1
+
+ return mapping
+
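+# Each record parsed above is laid out as:
+#   node (20 bytes) | p1 (20) | p2 (20) | linknode (20) | copyfrom | '\0'
+# where copyfrom is empty for revisions that aren't copies.
+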
+def readfile(path):
+ f = open(path, 'rb')
+ try:
+ result = f.read()
+
+ # we should never have empty files
+ if not result:
+ os.remove(path)
+ raise IOError("empty file: %s" % path)
+
+ return result
+ finally:
+ f.close()
+
+def unlinkfile(filepath):
+ if pycompat.iswindows:
+ # On Windows, os.unlink cannot delete readonly files
+ os.chmod(filepath, stat.S_IWUSR)
+ os.unlink(filepath)
+
+def renamefile(source, destination):
+ if pycompat.iswindows:
+ # On Windows, os.rename cannot rename readonly files
+ # and cannot overwrite destination if it exists
+ os.chmod(source, stat.S_IWUSR)
+ if os.path.isfile(destination):
+ os.chmod(destination, stat.S_IWUSR)
+ os.unlink(destination)
+
+ os.rename(source, destination)
+
+def writefile(path, content, readonly=False):
+ dirname, filename = os.path.split(path)
+ if not os.path.exists(dirname):
+ try:
+ os.makedirs(dirname)
+ except OSError as ex:
+ if ex.errno != errno.EEXIST:
+ raise
+
+ fd, temp = tempfile.mkstemp(prefix='.%s-' % filename, dir=dirname)
+ os.close(fd)
+
+ try:
+ f = util.posixfile(temp, 'wb')
+ f.write(content)
+ f.close()
+
+ if readonly:
+ mode = 0o444
+ else:
+ # tempfiles are created with 0o600, so we need to manually set the
+ # mode.
+ oldumask = os.umask(0)
+ # there's no way to get the umask without modifying it, so set it
+ # back
+ os.umask(oldumask)
+ mode = ~oldumask
+
+ renamefile(temp, path)
+ os.chmod(path, mode)
+ except Exception:
+ try:
+ unlinkfile(temp)
+ except OSError:
+ pass
+ raise
+
+def sortnodes(nodes, parentfunc):
+ """Topologically sorts the nodes, using the parentfunc to find
+ the parents of nodes."""
+ nodes = set(nodes)
+ childmap = {}
+ parentmap = {}
+ roots = []
+
+ # Build a child and parent map
+ for n in nodes:
+ parents = [p for p in parentfunc(n) if p in nodes]
+ parentmap[n] = set(parents)
+ for p in parents:
+ childmap.setdefault(p, set()).add(n)
+ if not parents:
+ roots.append(n)
+
+ roots.sort()
+ # Process roots, adding children to the queue as they become roots
+ results = []
+ while roots:
+ n = roots.pop(0)
+ results.append(n)
+ if n in childmap:
+ children = childmap[n]
+ for c in children:
+ childparents = parentmap[c]
+ childparents.remove(n)
+ if len(childparents) == 0:
+ # insert at the beginning, that way child nodes
+ # are likely to be output immediately after their
+ # parents. This gives better compression results.
+ roots.insert(0, c)
+
+ return results
+
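+# Example: for nodes {a, b, c} with parentfunc mapping b -> [a] and
+# c -> [b], the only root is a, so sortnodes returns [a, b, c]: each
+# child is queued as soon as its last in-set parent has been emitted.
+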
+def readexactly(stream, n):
+ '''read n bytes from stream.read and abort if less was available'''
+ s = stream.read(n)
+ if len(s) < n:
+ raise error.Abort(_("stream ended unexpectedly"
+ " (got %d bytes, expected %d)")
+ % (len(s), n))
+ return s
+
+def readunpack(stream, fmt):
+ data = readexactly(stream, struct.calcsize(fmt))
+ return struct.unpack(fmt, data)
+
+def readpath(stream):
+ rawlen = readexactly(stream, constants.FILENAMESIZE)
+ pathlen = struct.unpack(constants.FILENAMESTRUCT, rawlen)[0]
+ return readexactly(stream, pathlen)
+
+def readnodelist(stream):
+ rawlen = readexactly(stream, constants.NODECOUNTSIZE)
+ nodecount = struct.unpack(constants.NODECOUNTSTRUCT, rawlen)[0]
+ for i in pycompat.xrange(nodecount):
+ yield readexactly(stream, constants.NODESIZE)
+
+def readpathlist(stream):
+ rawlen = readexactly(stream, constants.PATHCOUNTSIZE)
+ pathcount = struct.unpack(constants.PATHCOUNTSTRUCT, rawlen)[0]
+ for i in pycompat.xrange(pathcount):
+ yield readpath(stream)
+
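+# Wire framing consumed by the readers above: a path is a
+# FILENAMESTRUCT-packed length followed by that many bytes; node and
+# path lists are a packed count followed by fixed-size nodes or
+# length-prefixed paths respectively.
+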
+def getgid(groupname):
+ try:
+ gid = grp.getgrnam(pycompat.fsdecode(groupname)).gr_gid
+ return gid
+ except KeyError:
+ return None
+
+def setstickygroupdir(path, gid, warn=None):
+ if gid is None:
+ return
+ try:
+ os.chown(path, -1, gid)
+ os.chmod(path, 0o2775)
+ except (IOError, OSError) as ex:
+ if warn:
+ warn(_('unable to chown/chmod on %s: %s\n') % (path, ex))
+
+def mkstickygroupdir(ui, path):
+ """Creates the given directory (if it doesn't exist) and give it a
+ particular group with setgid enabled."""
+ gid = None
+ groupname = ui.config("remotefilelog", "cachegroup")
+ if groupname:
+ gid = getgid(groupname)
+ if gid is None:
+ ui.warn(_('unable to resolve group name: %s\n') % groupname)
+
+ # we use a single stat syscall to test the existence and mode / group bit
+ st = None
+ try:
+ st = os.stat(path)
+ except OSError:
+ pass
+
+ if st:
+ # exists
+ if (st.st_mode & 0o2775) != 0o2775 or st.st_gid != gid:
+ # permission needs to be fixed
+ setstickygroupdir(path, gid, ui.warn)
+ return
+
+ oldumask = os.umask(0o002)
+ try:
+ missingdirs = [path]
+ path = os.path.dirname(path)
+ while path and not os.path.exists(path):
+ missingdirs.append(path)
+ path = os.path.dirname(path)
+
+ for path in reversed(missingdirs):
+ try:
+ os.mkdir(path)
+ except OSError as ex:
+ if ex.errno != errno.EEXIST:
+ raise
+
+ for path in missingdirs:
+ setstickygroupdir(path, gid, ui.warn)
+ finally:
+ os.umask(oldumask)
+
+def getusername(ui):
+ try:
+ return stringutil.shortuser(ui.username())
+ except Exception:
+ return 'unknown'
+
+def getreponame(ui):
+ reponame = ui.config('paths', 'default')
+ if reponame:
+ return os.path.basename(reponame)
+ return "unknown"
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/hgext/remotefilelog/shallowverifier.py Fri Jan 18 13:28:22 2019 -0500
@@ -0,0 +1,17 @@
+# shallowverifier.py - shallow repository verifier
+#
+# Copyright 2015 Facebook, Inc.
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2 or any later version.
+from __future__ import absolute_import
+
+from mercurial.i18n import _
+from mercurial import verify
+
+class shallowverifier(verify.verifier):
+ def _verifyfiles(self, filenodes, filelinkrevs):
+ """Skips files verification since repo's not guaranteed to have them"""
+ self.repo.ui.status(
+ _("skipping filelog check since remotefilelog is used\n"))
+ return 0, 0
--- a/hgext/shelve.py Wed Jan 09 20:00:35 2019 -0800
+++ b/hgext/shelve.py Fri Jan 18 13:28:22 2019 -0500
@@ -41,7 +41,6 @@
lock as lockmod,
mdiff,
merge,
- narrowspec,
node as nodemod,
patch,
phases,
@@ -137,7 +136,7 @@
raise
raise error.Abort(_("shelved change '%s' not found") % self.name)
- def applybundle(self):
+ def applybundle(self, tr):
fp = self.opener()
try:
targetphase = phases.internal
@@ -145,7 +144,6 @@
targetphase = phases.secret
gen = exchange.readbundle(self.repo.ui, fp, self.fname, self.vfs)
pretip = self.repo['tip']
- tr = self.repo.currenttransaction()
bundle2.applybundle(self.repo, gen, tr,
source='unshelve',
url='bundle:' + self.vfs.join(self.fname),
@@ -324,16 +322,12 @@
if mark:
bookmarks.activate(repo, mark)
-def _aborttransaction(repo):
+def _aborttransaction(repo, tr):
'''Abort current transaction for shelve/unshelve, but keep dirstate
'''
- tr = repo.currenttransaction()
dirstatebackupname = 'dirstate.shelve'
- narrowspecbackupname = 'narrowspec.shelve'
repo.dirstate.savebackup(tr, dirstatebackupname)
- narrowspec.savebackup(repo, narrowspecbackupname)
tr.abort()
- narrowspec.restorebackup(repo, narrowspecbackupname)
repo.dirstate.restorebackup(None, dirstatebackupname)
def getshelvename(repo, parent, opts):
@@ -430,8 +424,12 @@
shelvedfile(repo, name, 'shelve').writeinfo(info)
bases = list(mutableancestors(repo[node]))
shelvedfile(repo, name, 'hg').writebundle(bases, node)
+ # Create a matcher so that prefetch doesn't attempt to fetch the entire
+ # repository pointlessly.
+ match = scmutil.matchfiles(repo, repo[node].files())
with shelvedfile(repo, name, patchextension).opener('wb') as fp:
- cmdutil.exportfile(repo, [node], fp, opts=mdiff.diffopts(git=True))
+ cmdutil.exportfile(repo, [node], fp, opts=mdiff.diffopts(git=True),
+ match=match)
def _includeunknownfiles(repo, pats, opts, extra):
s = repo.status(match=scmutil.match(repo[None], pats, opts),
@@ -440,15 +438,11 @@
extra['shelve_unknown'] = '\0'.join(s.unknown)
repo[None].add(s.unknown)
-def _finishshelve(repo):
+def _finishshelve(repo, tr):
if phases.supportinternal(repo):
- backupname = 'dirstate.shelve'
- tr = repo.currenttransaction()
- repo.dirstate.savebackup(tr, backupname)
tr.close()
- repo.dirstate.restorebackup(None, backupname)
else:
- _aborttransaction(repo)
+ _aborttransaction(repo, tr)
def createcmd(ui, repo, pats, opts):
"""subcommand that creates a new shelve"""
@@ -478,7 +472,7 @@
# use an uncommitted transaction to generate the bundle to avoid
# pull races. ensure we don't print the abort message to stderr.
- tr = repo.transaction('commit', report=lambda x: None)
+ tr = repo.transaction('shelve', report=lambda x: None)
interactive = opts.get('interactive', False)
includeunknown = (opts.get('unknown', False) and
@@ -515,7 +509,7 @@
if origbranch != repo['.'].branch() and not _isbareshelve(pats, opts):
repo.dirstate.setbranch(origbranch)
- _finishshelve(repo)
+ _finishshelve(repo, tr)
finally:
_restoreactivebookmark(repo, activebookmark)
lockmod.release(tr, lock)
@@ -790,7 +784,7 @@
tmpwctx = repo[node]
return tmpwctx, addedbefore
-def _unshelverestorecommit(ui, repo, basename):
+def _unshelverestorecommit(ui, repo, tr, basename):
"""Recreate commit in the repository during the unshelve"""
repo = repo.unfiltered()
node = None
@@ -798,7 +792,7 @@
node = shelvedfile(repo, basename, 'shelve').readinfo()['node']
if node is None or node not in repo:
with ui.configoverride({('ui', 'quiet'): True}):
- shelvectx = shelvedfile(repo, basename, 'hg').applybundle()
+ shelvectx = shelvedfile(repo, basename, 'hg').applybundle(tr)
# We might not strip the unbundled changeset, so we should keep track of
# the unshelve node in case we need to reuse it (eg: unshelve --keep)
if node is None:
@@ -878,7 +872,7 @@
# hooks still fire and try to operate on the missing commits.
# Clean up manually to prevent this.
repo.unfiltered().changelog.strip(oldtiprev, tr)
- _aborttransaction(repo)
+ _aborttransaction(repo, tr)
def _checkunshelveuntrackedproblems(ui, repo, shelvectx):
"""Check potential problems which may result from working
@@ -1022,7 +1016,7 @@
activebookmark = _backupactivebookmark(repo)
tmpwctx, addedbefore = _commitworkingcopychanges(ui, repo, opts,
tmpwctx)
- repo, shelvectx = _unshelverestorecommit(ui, repo, basename)
+ repo, shelvectx = _unshelverestorecommit(ui, repo, tr, basename)
_checkunshelveuntrackedproblems(ui, repo, shelvectx)
branchtorestore = ''
if shelvectx.branch() != shelvectx.p1().branch():
--- a/hgext/sparse.py Wed Jan 09 20:00:35 2019 -0800
+++ b/hgext/sparse.py Fri Jan 18 13:28:22 2019 -0500
@@ -141,6 +141,7 @@
include_pat = opts.get(r'include')
exclude_pat = opts.get(r'exclude')
enableprofile_pat = opts.get(r'enable_profile')
+ narrow_pat = opts.get(r'narrow')
include = exclude = enableprofile = False
if include_pat:
pat = include_pat
@@ -153,7 +154,9 @@
enableprofile = True
if sum([include, exclude, enableprofile]) > 1:
raise error.Abort(_("too many flags specified."))
- if include or exclude or enableprofile:
+ # if --narrow is passed, the patterns are includes and excludes for the
+ # narrow clone
+ if not narrow_pat and (include or exclude or enableprofile):
def clonesparse(orig, self, node, overwrite, *args, **kwargs):
sparse.updateconfig(self.unfiltered(), pat, {}, include=include,
exclude=exclude, enableprofile=enableprofile,
@@ -207,7 +210,7 @@
def _rebuild(orig, self, parent, allfiles, changedfiles=None):
matcher = self._sparsematcher
if not matcher.always():
- allfiles = allfiles.matches(matcher)
+ allfiles = [f for f in allfiles if matcher(f)]
if changedfiles:
changedfiles = [f for f in changedfiles if matcher(f)]
--- a/hgext/sqlitestore.py Wed Jan 09 20:00:35 2019 -0800
+++ b/hgext/sqlitestore.py Fri Jan 18 13:28:22 2019 -0500
@@ -63,6 +63,7 @@
from mercurial import (
ancestor,
dagop,
+ encoding,
error,
extensions,
localrepo,
@@ -558,7 +559,8 @@
return not storageutil.filedataequivalent(self, node, fulltext)
def emitrevisions(self, nodes, nodesorder=None, revisiondata=False,
- assumehaveparentrevisions=False, deltaprevious=False):
+ assumehaveparentrevisions=False,
+ deltamode=repository.CG_DELTAMODE_STD):
if nodesorder not in ('nodes', 'storage', 'linear', None):
raise error.ProgrammingError('unhandled value for nodesorder: %s' %
nodesorder)
@@ -589,7 +591,7 @@
deltaparentfn=deltabases.__getitem__,
revisiondata=revisiondata,
assumehaveparentrevisions=assumehaveparentrevisions,
- deltaprevious=deltaprevious):
+ deltamode=deltamode):
yield delta
@@ -1020,7 +1022,7 @@
def makedb(path):
"""Construct a database handle for a database at path."""
- db = sqlite3.connect(path)
+ db = sqlite3.connect(encoding.strfromlocal(path))
db.text_factory = bytes
res = db.execute(r'PRAGMA user_version').fetchone()[0]
--- a/mercurial/__init__.py Wed Jan 09 20:00:35 2019 -0800
+++ b/mercurial/__init__.py Fri Jan 18 13:28:22 2019 -0500
@@ -40,6 +40,10 @@
# zstd is already dual-version clean, don't try and mangle it
if fullname.startswith('mercurial.zstd'):
return None
+ # rustext is built for the right python version,
+ # don't try and mangle it
+ if fullname.startswith('mercurial.rustext'):
+ return None
# pywatchman is already dual-version clean, don't try and mangle it
if fullname.startswith('hgext.fsmonitor.pywatchman'):
return None
--- a/mercurial/ancestor.py Wed Jan 09 20:00:35 2019 -0800
+++ b/mercurial/ancestor.py Fri Jan 18 13:28:22 2019 -0500
@@ -11,6 +11,7 @@
from .node import nullrev
from . import (
+ dagop,
policy,
pycompat,
)
@@ -162,6 +163,9 @@
'''grow the ancestor set by adding new bases'''
self.bases.update(newbases)
+ def basesheads(self):
+ return dagop.headrevs(self.bases, self.pfunc)
+
def removeancestorsfrom(self, revs):
'''remove all ancestors of bases from the set revs (in place)'''
bases = self.bases
--- a/mercurial/archival.py Wed Jan 09 20:00:35 2019 -0800
+++ b/mercurial/archival.py Fri Jan 18 13:28:22 2019 -0500
@@ -274,7 +274,7 @@
'zip': zipit,
}
-def archive(repo, dest, node, kind, decode=True, matchfn=None,
+def archive(repo, dest, node, kind, decode=True, match=None,
prefix='', mtime=None, subrepos=False):
'''create archive of repo as it was at node.
@@ -286,7 +286,7 @@
decode tells whether to put files through decode filters from
hgrc.
- matchfn is function to filter names of files to write to archive.
+ match is a matcher to filter names of files to write to archive.
prefix is name of path to put before every archive member.
@@ -313,22 +313,22 @@
ctx = repo[node]
archiver = archivers[kind](dest, mtime or ctx.date()[0])
+ if not match:
+ match = scmutil.matchall(repo)
+
if repo.ui.configbool("ui", "archivemeta"):
name = '.hg_archival.txt'
- if not matchfn or matchfn(name):
+ if match(name):
write(name, 0o644, False, lambda: buildmetadata(ctx))
- if matchfn:
- files = [f for f in ctx.manifest().keys() if matchfn(f)]
- else:
- files = ctx.manifest().keys()
+ files = [f for f in ctx.manifest().matches(match)]
total = len(files)
if total:
files.sort()
scmutil.prefetchfiles(repo, [ctx.rev()],
scmutil.matchfiles(repo, files))
- progress = scmutil.progress(repo.ui, _('archiving'), unit=_('files'),
- total=total)
+ progress = repo.ui.makeprogress(_('archiving'), unit=_('files'),
+ total=total)
progress.update(0)
for f in files:
ff = ctx.flags(f)
@@ -339,7 +339,7 @@
if subrepos:
for subpath in sorted(ctx.substate):
sub = ctx.workingsub(subpath)
- submatch = matchmod.subdirmatcher(subpath, matchfn)
+ submatch = matchmod.subdirmatcher(subpath, match)
total += sub.archive(archiver, prefix, submatch, decode)
if total == 0:
--- a/mercurial/branchmap.py Wed Jan 09 20:00:35 2019 -0800
+++ b/mercurial/branchmap.py Fri Jan 18 13:28:22 2019 -0500
@@ -281,7 +281,7 @@
newbranches = {}
getbranchinfo = repo.revbranchcache().branchinfo
for r in revgen:
- branch, closesbranch = getbranchinfo(r, changelog=cl)
+ branch, closesbranch = getbranchinfo(r)
newbranches.setdefault(branch, []).append(r)
if closesbranch:
self._closednodes.add(cl.node(r))
@@ -397,20 +397,23 @@
self._names = []
self._rbcnamescount = len(self._names) # number of names read at
# _rbcsnameslen
- self._namesreverse = dict((b, r) for r, b in enumerate(self._names))
def _clear(self):
self._rbcsnameslen = 0
del self._names[:]
self._rbcnamescount = 0
- self._namesreverse.clear()
self._rbcrevslen = len(self._repo.changelog)
self._rbcrevs = bytearray(self._rbcrevslen * _rbcrecsize)
+ util.clearcachedproperty(self, '_namesreverse')
- def branchinfo(self, rev, changelog=None):
+ @util.propertycache
+ def _namesreverse(self):
+ return dict((b, r) for r, b in enumerate(self._names))
+
+ def branchinfo(self, rev):
"""Return branch name and close flag for rev, using and updating
persistent cache."""
- changelog = changelog or self._repo.changelog
+ changelog = self._repo.changelog
rbcrevidx = rev * _rbcrecsize
# avoid negative index, changelog.read(nullrev) is fast without cache
@@ -419,7 +422,7 @@
# if requested rev isn't allocated, grow and cache the rev info
if len(self._rbcrevs) < rbcrevidx + _rbcrecsize:
- return self._branchinfo(rev, changelog=changelog)
+ return self._branchinfo(rev)
# fast path: extract data from cache, use it if node is matching
reponode = changelog.node(rev)[:_rbcnodelen]
@@ -447,11 +450,11 @@
self._rbcrevslen = min(self._rbcrevslen, truncate)
# fall back to slow path and make sure it will be written to disk
- return self._branchinfo(rev, changelog=changelog)
+ return self._branchinfo(rev)
- def _branchinfo(self, rev, changelog=None):
+ def _branchinfo(self, rev):
"""Retrieve branch info from changelog and update _rbcrevs"""
- changelog = changelog or self._repo.changelog
+ changelog = self._repo.changelog
b, close = changelog.branchinfo(rev)
if b in self._namesreverse:
branchidx = self._namesreverse[b]
@@ -462,7 +465,7 @@
reponode = changelog.node(rev)
if close:
branchidx |= _rbccloseflag
- self._setcachedata(rev, reponode, branchidx, changelog)
+ self._setcachedata(rev, reponode, branchidx)
return b, close
def setdata(self, branch, rev, node, close):
@@ -485,16 +488,14 @@
if r'branchinfo' in vars(self):
del self.branchinfo
- def _setcachedata(self, rev, node, branchidx, changelog=None):
+ def _setcachedata(self, rev, node, branchidx):
"""Writes the node's branch data to the in-memory cache data."""
if rev == nullrev:
return
-
- changelog = changelog or self._repo.changelog
rbcrevidx = rev * _rbcrecsize
if len(self._rbcrevs) < rbcrevidx + _rbcrecsize:
self._rbcrevs.extend('\0' *
- (len(changelog) * _rbcrecsize -
+ (len(self._repo.changelog) * _rbcrecsize -
len(self._rbcrevs)))
pack_into(_rbcrecfmt, self._rbcrevs, rbcrevidx, node, branchidx)
self._rbcrevslen = min(self._rbcrevslen, rev)
--- a/mercurial/bundlerepo.py Wed Jan 09 20:00:35 2019 -0800
+++ b/mercurial/bundlerepo.py Fri Jan 18 13:28:22 2019 -0500
@@ -374,7 +374,8 @@
rootstore = bundlemanifest(self.svfs, self._cgunpacker, linkmapper)
self.filestart = self._cgunpacker.tell()
- return manifest.manifestlog(self.svfs, self, rootstore)
+ return manifest.manifestlog(self.svfs, self, rootstore,
+ self.narrowmatch())
def _consumemanifest(self):
"""Consumes the manifest portion of the bundle, setting filestart so the
--- a/mercurial/cext/manifest.c Wed Jan 09 20:00:35 2019 -0800
+++ b/mercurial/cext/manifest.c Fri Jan 18 13:28:22 2019 -0500
@@ -38,6 +38,8 @@
#define MANIFEST_OOM -1
#define MANIFEST_NOT_SORTED -2
#define MANIFEST_MALFORMED -3
+#define MANIFEST_BOGUS_FILENAME -4
+#define MANIFEST_TOO_SHORT_LINE -5
/* get the length of the path for a line */
static size_t pathlen(line *l)
@@ -115,18 +117,33 @@
char *prev = NULL;
while (len > 0) {
line *l;
- char *next = memchr(data, '\n', len);
+ char *next;
+ if (*data == '\0') {
+ /* It's implausible there's no filename, don't
+ * even bother looking for the newline. */
+ return MANIFEST_BOGUS_FILENAME;
+ }
+ next = memchr(data, '\n', len);
if (!next) {
return MANIFEST_MALFORMED;
}
+ if ((next - data) < 42) {
+ /* We should have at least 42 bytes in a line:
+ 1 byte filename
+ 1 NUL
+ 40 bytes of hash
+ so we can give up here.
+ */
+ return MANIFEST_TOO_SHORT_LINE;
+ }
next++; /* advance past newline */
- if (!realloc_if_full(self)) {
- return MANIFEST_OOM; /* no memory */
- }
if (prev && strcmp(prev, data) > -1) {
/* This data isn't sorted, so we have to abort. */
return MANIFEST_NOT_SORTED;
}
+ if (!realloc_if_full(self)) {
+ return MANIFEST_OOM; /* no memory */
+ }
l = self->lines + ((self->numlines)++);
l->start = data;
l->len = next - data;
@@ -190,6 +207,16 @@
PyErr_Format(PyExc_ValueError,
"Manifest did not end in a newline.");
break;
+ case MANIFEST_BOGUS_FILENAME:
+ PyErr_Format(
+ PyExc_ValueError,
+ "Manifest had an entry with a zero-length filename.");
+ break;
+ case MANIFEST_TOO_SHORT_LINE:
+ PyErr_Format(
+ PyExc_ValueError,
+ "Manifest had implausibly-short line.");
+ break;
default:
PyErr_Format(PyExc_ValueError,
"Unknown problem parsing manifest.");
--- a/mercurial/cext/parsers.c Wed Jan 09 20:00:35 2019 -0800
+++ b/mercurial/cext/parsers.c Fri Jan 18 13:28:22 2019 -0500
@@ -38,85 +38,6 @@
return _dict_new_presized(expected_size);
}
-/*
- * This code assumes that a manifest is stitched together with newline
- * ('\n') characters.
- */
-static PyObject *parse_manifest(PyObject *self, PyObject *args)
-{
- PyObject *mfdict, *fdict;
- char *str, *start, *end;
- int len;
-
- if (!PyArg_ParseTuple(
- args, PY23("O!O!s#:parse_manifest", "O!O!y#:parse_manifest"),
- &PyDict_Type, &mfdict, &PyDict_Type, &fdict, &str, &len))
- goto quit;
-
- start = str;
- end = str + len;
- while (start < end) {
- PyObject *file = NULL, *node = NULL;
- PyObject *flags = NULL;
- char *zero = NULL, *newline = NULL;
- ptrdiff_t nlen;
-
- zero = memchr(start, '\0', end - start);
- if (!zero) {
- PyErr_SetString(PyExc_ValueError,
- "manifest entry has no separator");
- goto quit;
- }
-
- newline = memchr(zero + 1, '\n', end - (zero + 1));
- if (!newline) {
- PyErr_SetString(PyExc_ValueError,
- "manifest contains trailing garbage");
- goto quit;
- }
-
- file = PyBytes_FromStringAndSize(start, zero - start);
-
- if (!file)
- goto bail;
-
- nlen = newline - zero - 1;
-
- node = unhexlify(zero + 1, nlen > 40 ? 40 : (Py_ssize_t)nlen);
- if (!node)
- goto bail;
-
- if (nlen > 40) {
- flags = PyBytes_FromStringAndSize(zero + 41, nlen - 40);
- if (!flags)
- goto bail;
-
- if (PyDict_SetItem(fdict, file, flags) == -1)
- goto bail;
- }
-
- if (PyDict_SetItem(mfdict, file, node) == -1)
- goto bail;
-
- start = newline + 1;
-
- Py_XDECREF(flags);
- Py_XDECREF(node);
- Py_XDECREF(file);
- continue;
- bail:
- Py_XDECREF(flags);
- Py_XDECREF(node);
- Py_XDECREF(file);
- goto quit;
- }
-
- Py_INCREF(Py_None);
- return Py_None;
-quit:
- return NULL;
-}
-
static inline dirstateTupleObject *make_dirstate_tuple(char state, int mode,
int size, int mtime)
{
@@ -651,6 +572,17 @@
&offset, &stop)) {
return NULL;
}
+ if (offset < 0) {
+ PyErr_SetString(PyExc_ValueError,
+ "invalid negative offset in fm1readmarkers");
+ return NULL;
+ }
+ if (stop > datalen) {
+ PyErr_SetString(
+ PyExc_ValueError,
+ "stop longer than data length in fm1readmarkers");
+ return NULL;
+ }
dataend = data + datalen;
data += offset;
markers = PyList_New(0);
@@ -690,7 +622,6 @@
{"nonnormalotherparententries", nonnormalotherparententries, METH_VARARGS,
"create a set containing non-normal and other parent entries of given "
"dirstate\n"},
- {"parse_manifest", parse_manifest, METH_VARARGS, "parse a manifest\n"},
{"parse_dirstate", parse_dirstate, METH_VARARGS, "parse a dirstate\n"},
{"parse_index2", parse_index2, METH_VARARGS, "parse a revlog index\n"},
{"isasciistr", isasciistr, METH_VARARGS, "check if an ASCII string\n"},
@@ -713,7 +644,7 @@
void manifest_module_init(PyObject *mod);
void revlog_module_init(PyObject *mod);
-static const int version = 11;
+static const int version = 12;
static void module_init(PyObject *mod)
{
--- a/mercurial/cext/revlog.c Wed Jan 09 20:00:35 2019 -0800
+++ b/mercurial/cext/revlog.c Fri Jan 18 13:28:22 2019 -0500
@@ -10,11 +10,14 @@
#include <Python.h>
#include <assert.h>
#include <ctype.h>
+#include <limits.h>
#include <stddef.h>
+#include <stdlib.h>
#include <string.h>
#include "bitmanipulation.h"
#include "charencode.h"
+#include "revlog.h"
#include "util.h"
#ifdef IS_PY3K
@@ -24,7 +27,6 @@
#define PyInt_Check PyLong_Check
#define PyInt_FromLong PyLong_FromLong
#define PyInt_FromSsize_t PyLong_FromSsize_t
-#define PyInt_AS_LONG PyLong_AS_LONG
#define PyInt_AsLong PyLong_AsLong
#endif
@@ -44,15 +46,15 @@
typedef struct {
indexObject *index;
nodetreenode *nodes;
- unsigned length; /* # nodes in use */
- unsigned capacity; /* # nodes allocated */
- int depth; /* maximum depth of tree */
- int splits; /* # splits performed */
+ unsigned length; /* # nodes in use */
+ unsigned capacity; /* # nodes allocated */
+ int depth; /* maximum depth of tree */
+ int splits; /* # splits performed */
} nodetree;
typedef struct {
- PyObject_HEAD
- nodetree nt;
+ PyObject_HEAD /* ; */
+ nodetree nt;
} nodetreeObject;
/*
@@ -69,21 +71,21 @@
*/
struct indexObjectStruct {
PyObject_HEAD
- /* Type-specific fields go here. */
- PyObject *data; /* raw bytes of index */
- Py_buffer buf; /* buffer of data */
- PyObject **cache; /* cached tuples */
- const char **offsets; /* populated on demand */
- Py_ssize_t raw_length; /* original number of elements */
- Py_ssize_t length; /* current number of elements */
- PyObject *added; /* populated on demand */
- PyObject *headrevs; /* cache, invalidated on changes */
- PyObject *filteredrevs;/* filtered revs set */
- nodetree nt; /* base-16 trie */
- int ntinitialized; /* 0 or 1 */
- int ntrev; /* last rev scanned */
- int ntlookups; /* # lookups */
- int ntmisses; /* # lookups that miss the cache */
+ /* Type-specific fields go here. */
+ PyObject *data; /* raw bytes of index */
+ Py_buffer buf; /* buffer of data */
+ PyObject **cache; /* cached tuples */
+ const char **offsets; /* populated on demand */
+ Py_ssize_t raw_length; /* original number of elements */
+ Py_ssize_t length; /* current number of elements */
+ PyObject *added; /* populated on demand */
+ PyObject *headrevs; /* cache, invalidated on changes */
+ PyObject *filteredrevs; /* filtered revs set */
+ nodetree nt; /* base-16 trie */
+ int ntinitialized; /* 0 or 1 */
+ int ntrev; /* last rev scanned */
+ int ntlookups; /* # lookups */
+ int ntmisses; /* # lookups that miss the cache */
int inlined;
};
@@ -96,6 +98,7 @@
static PyObject *nullentry = NULL;
static const char nullid[20] = {0};
+static const Py_ssize_t nullrev = -1;
static Py_ssize_t inline_scan(indexObject *self, const char **offsets);
@@ -126,7 +129,7 @@
errclass = PyDict_GetItemString(dict, "RevlogError");
if (errclass == NULL) {
PyErr_SetString(PyExc_SystemError,
- "could not find RevlogError");
+ "could not find RevlogError");
goto cleanup;
}
@@ -146,7 +149,7 @@
if (self->inlined && pos > 0) {
if (self->offsets == NULL) {
self->offsets = PyMem_Malloc(self->raw_length *
- sizeof(*self->offsets));
+ sizeof(*self->offsets));
if (self->offsets == NULL)
return (const char *)PyErr_NoMemory();
inline_scan(self, self->offsets);
@@ -163,13 +166,21 @@
* The specified rev must be valid and must not be nullrev. A returned
* parent revision may be nullrev, but is guaranteed to be in valid range.
*/
-static inline int index_get_parents(indexObject *self, Py_ssize_t rev,
- int *ps, int maxrev)
+static inline int index_get_parents(indexObject *self, Py_ssize_t rev, int *ps,
+ int maxrev)
{
if (rev >= self->length) {
- PyObject *tuple = PyList_GET_ITEM(self->added, rev - self->length);
- ps[0] = (int)PyInt_AS_LONG(PyTuple_GET_ITEM(tuple, 5));
- ps[1] = (int)PyInt_AS_LONG(PyTuple_GET_ITEM(tuple, 6));
+ long tmp;
+ PyObject *tuple =
+ PyList_GET_ITEM(self->added, rev - self->length);
+ if (!pylong_to_long(PyTuple_GET_ITEM(tuple, 5), &tmp)) {
+ return -1;
+ }
+ ps[0] = (int)tmp;
+ if (!pylong_to_long(PyTuple_GET_ITEM(tuple, 6), &tmp)) {
+ return -1;
+ }
+ ps[1] = (int)tmp;
} else {
const char *data = index_deref(self, rev);
ps[0] = getbe32(data + 24);
@@ -184,6 +195,104 @@
return 0;
}
+/*
+ * Get parents of the given rev.
+ *
+ * If the specified rev is out of range, IndexError will be raised. If the
+ * revlog entry is corrupted, ValueError may be raised.
+ *
+ * Returns 0 on success or -1 on failure.
+ */
+int HgRevlogIndex_GetParents(PyObject *op, int rev, int *ps)
+{
+ int tiprev;
+ if (!op || !HgRevlogIndex_Check(op) || !ps) {
+ PyErr_BadInternalCall();
+ return -1;
+ }
+ tiprev = (int)index_length((indexObject *)op) - 1;
+ if (rev < -1 || rev > tiprev) {
+ PyErr_Format(PyExc_IndexError, "rev out of range: %d", rev);
+ return -1;
+ } else if (rev == -1) {
+ ps[0] = ps[1] = -1;
+ return 0;
+ } else {
+ return index_get_parents((indexObject *)op, rev, ps, tiprev);
+ }
+}
+
+static inline int64_t index_get_start(indexObject *self, Py_ssize_t rev)
+{
+ uint64_t offset;
+ if (rev == nullrev) {
+ return 0;
+ }
+ if (rev >= self->length) {
+ PyObject *tuple;
+ PyObject *pylong;
+ PY_LONG_LONG tmp;
+ tuple = PyList_GET_ITEM(self->added, rev - self->length);
+ pylong = PyTuple_GET_ITEM(tuple, 0);
+ tmp = PyLong_AsLongLong(pylong);
+ if (tmp == -1 && PyErr_Occurred()) {
+ return -1;
+ }
+ if (tmp < 0) {
+ PyErr_Format(PyExc_OverflowError,
+ "revlog entry size out of bound (%lld)",
+ (long long)tmp);
+ return -1;
+ }
+ offset = (uint64_t)tmp;
+ } else {
+ const char *data = index_deref(self, rev);
+ offset = getbe32(data + 4);
+ if (rev == 0) {
+ /* mask out version number for the first entry */
+ offset &= 0xFFFF;
+ } else {
+ uint32_t offset_high = getbe32(data);
+ offset |= ((uint64_t)offset_high) << 32;
+ }
+ }
+ return (int64_t)(offset >> 16);
+}
+
+static inline int index_get_length(indexObject *self, Py_ssize_t rev)
+{
+ if (rev == nullrev) {
+ return 0;
+ }
+ if (rev >= self->length) {
+ PyObject *tuple;
+ PyObject *pylong;
+ long ret;
+ tuple = PyList_GET_ITEM(self->added, rev - self->length);
+ pylong = PyTuple_GET_ITEM(tuple, 1);
+ ret = PyInt_AsLong(pylong);
+ if (ret == -1 && PyErr_Occurred()) {
+ return -1;
+ }
+ if (ret < 0 || ret > (long)INT_MAX) {
+ PyErr_Format(PyExc_OverflowError,
+ "revlog entry size out of bound (%ld)",
+ ret);
+ return -1;
+ }
+ return (int)ret;
+ } else {
+ const char *data = index_deref(self, rev);
+ int tmp = (int)getbe32(data + 8);
+ if (tmp < 0) {
+ PyErr_Format(PyExc_OverflowError,
+ "revlog entry size out of bound (%d)",
+ tmp);
+ return -1;
+ }
+ return tmp;
+ }
+}
/*
* RevlogNG format (all in big endian, data may be inlined):
@@ -206,7 +315,7 @@
Py_ssize_t length = index_length(self);
PyObject *entry;
- if (pos == -1) {
+ if (pos == nullrev) {
Py_INCREF(nullentry);
return nullentry;
}
@@ -254,9 +363,9 @@
parent_2 = getbe32(data + 28);
c_node_id = data + 32;
- entry = Py_BuildValue(tuple_format, offset_flags, comp_len,
- uncomp_len, base_rev, link_rev,
- parent_1, parent_2, c_node_id, 20);
+ entry = Py_BuildValue(tuple_format, offset_flags, comp_len, uncomp_len,
+ base_rev, link_rev, parent_1, parent_2, c_node_id,
+ 20);
if (entry) {
PyObject_GC_UnTrack(entry);
@@ -276,7 +385,7 @@
Py_ssize_t length = index_length(self);
const char *data;
- if (pos == -1)
+ if (pos == nullrev)
return nullid;
if (pos >= length)
@@ -354,29 +463,34 @@
static PyObject *index_stats(indexObject *self)
{
PyObject *obj = PyDict_New();
+ PyObject *s = NULL;
PyObject *t = NULL;
if (obj == NULL)
return NULL;
-#define istat(__n, __d) \
- do { \
- t = PyInt_FromSsize_t(self->__n); \
- if (!t) \
- goto bail; \
- if (PyDict_SetItemString(obj, __d, t) == -1) \
- goto bail; \
- Py_DECREF(t); \
+#define istat(__n, __d) \
+ do { \
+ s = PyBytes_FromString(__d); \
+ t = PyInt_FromSsize_t(self->__n); \
+ if (!s || !t) \
+ goto bail; \
+ if (PyDict_SetItem(obj, s, t) == -1) \
+ goto bail; \
+ Py_CLEAR(s); \
+ Py_CLEAR(t); \
} while (0)
if (self->added) {
Py_ssize_t len = PyList_GET_SIZE(self->added);
+ s = PyBytes_FromString("index entries added");
t = PyInt_FromSsize_t(len);
- if (!t)
+ if (!s || !t)
goto bail;
- if (PyDict_SetItemString(obj, "index entries added", t) == -1)
+ if (PyDict_SetItem(obj, s, t) == -1)
goto bail;
- Py_DECREF(t);
+ Py_CLEAR(s);
+ Py_CLEAR(t);
}
if (self->raw_length != self->length)
@@ -398,6 +512,7 @@
bail:
Py_XDECREF(obj);
+ Py_XDECREF(s);
Py_XDECREF(t);
return NULL;
}
@@ -464,7 +579,10 @@
if (iter == NULL)
return -2;
while ((iter_item = PyIter_Next(iter))) {
- iter_item_long = PyInt_AS_LONG(iter_item);
+ if (!pylong_to_long(iter_item, &iter_item_long)) {
+ Py_DECREF(iter_item);
+ return -2;
+ }
Py_DECREF(iter_item);
if (iter_item_long < min_idx)
min_idx = iter_item_long;
@@ -507,7 +625,9 @@
int parents[2];
/* Internal data structure:
- * tovisit: array of length len+1 (all revs + nullrev), filled upto lentovisit
+ * tovisit: array of length len+1 (all revs + nullrev), filled upto
+ * lentovisit
+ *
* revstates: array of length len+1 (all revs + nullrev) */
int *tovisit = NULL;
long lentovisit = 0;
@@ -516,8 +636,8 @@
/* Get arguments */
if (!PyArg_ParseTuple(args, "lO!O!O!", &minroot, &PyList_Type, &heads,
- &PyList_Type, &roots,
- &PyBool_Type, &includepatharg))
+ &PyList_Type, &roots, &PyBool_Type,
+ &includepatharg))
goto bail;
if (includepatharg == Py_True)
@@ -588,14 +708,14 @@
}
/* Add its parents to the list of nodes to visit */
- if (revnum == -1)
+ if (revnum == nullrev)
continue;
r = index_get_parents(self, revnum, parents, (int)len - 1);
if (r < 0)
goto bail;
for (i = 0; i < 2; i++) {
- if (!(revstates[parents[i] + 1] & RS_SEEN)
- && parents[i] >= minroot) {
+ if (!(revstates[parents[i] + 1] & RS_SEEN) &&
+ parents[i] >= minroot) {
tovisit[lentovisit++] = parents[i];
revstates[parents[i] + 1] |= RS_SEEN;
}
@@ -617,8 +737,9 @@
if (r < 0)
goto bail;
if (((revstates[parents[0] + 1] |
- revstates[parents[1] + 1]) & RS_REACHABLE)
- && !(revstates[i + 1] & RS_REACHABLE)) {
+ revstates[parents[1] + 1]) &
+ RS_REACHABLE) &&
+ !(revstates[i + 1] & RS_REACHABLE)) {
revstates[i + 1] |= RS_REACHABLE;
val = PyInt_FromSsize_t(i);
if (val == NULL)
@@ -665,13 +786,14 @@
goto done;
}
- phases = calloc(len, 1); /* phase per rev: {0: public, 1: draft, 2: secret} */
+ phases = calloc(
+ len, 1); /* phase per rev: {0: public, 1: draft, 2: secret} */
if (phases == NULL) {
PyErr_NoMemory();
goto done;
}
/* Put the phase information of all the roots in phases */
- numphase = PyList_GET_SIZE(roots)+1;
+ numphase = PyList_GET_SIZE(roots) + 1;
minrevallphases = len + 1;
phasessetlist = PyList_New(numphase);
if (phasessetlist == NULL)
@@ -680,18 +802,19 @@
PyList_SET_ITEM(phasessetlist, 0, Py_None);
Py_INCREF(Py_None);
- for (i = 0; i < numphase-1; i++) {
+ for (i = 0; i < numphase - 1; i++) {
phaseroots = PyList_GET_ITEM(roots, i);
phaseset = PySet_New(NULL);
if (phaseset == NULL)
goto release;
- PyList_SET_ITEM(phasessetlist, i+1, phaseset);
+ PyList_SET_ITEM(phasessetlist, i + 1, phaseset);
if (!PyList_Check(phaseroots)) {
PyErr_SetString(PyExc_TypeError,
- "roots item must be a list");
+ "roots item must be a list");
goto release;
}
- minrevphase = add_roots_get_min(self, phaseroots, i+1, phases);
+ minrevphase =
+ add_roots_get_min(self, phaseroots, i + 1, phases);
if (minrevphase == -2) /* Error from add_roots_get_min */
goto release;
minrevallphases = MIN(minrevallphases, minrevphase);
@@ -700,10 +823,11 @@
if (minrevallphases != -1) {
int parents[2];
for (i = minrevallphases; i < len; i++) {
- if (index_get_parents(self, i, parents,
- (int)len - 1) < 0)
+ if (index_get_parents(self, i, parents, (int)len - 1) <
+ 0)
goto release;
- set_phase_from_parents(phases, parents[0], parents[1], i);
+ set_phase_from_parents(phases, parents[0], parents[1],
+ i);
}
}
/* Transform phase list to a python list */
@@ -712,8 +836,8 @@
goto release;
for (i = 0; i < len; i++) {
phase = phases[i];
- /* We only store the sets of phase for non public phase, the public phase
- * is computed as a difference */
+ /* We only store the revision sets for non-public phases; the
+ * public phase is computed as a difference */
if (phase != 0) {
phaseset = PyList_GET_ITEM(phasessetlist, phase);
rev = PyInt_FromSsize_t(i);
@@ -755,8 +879,9 @@
if (filteredrevs != Py_None) {
filter = PyObject_GetAttrString(filteredrevs, "__contains__");
if (!filter) {
- PyErr_SetString(PyExc_TypeError,
- "filteredrevs has no attribute __contains__");
+ PyErr_SetString(
+ PyExc_TypeError,
+ "filteredrevs has no attribute __contains__");
goto bail;
}
}
@@ -784,15 +909,15 @@
int isfiltered;
int parents[2];
- /* If nothead[i] == 1, it means we've seen an unfiltered child of this
- * node already, and therefore this node is not filtered. So we can skip
- * the expensive check_filter step.
+ /* If nothead[i] == 1, it means we've seen an unfiltered child
+ * of this node already, and therefore this node is not
+ * filtered. So we can skip the expensive check_filter step.
*/
if (nothead[i] != 1) {
isfiltered = check_filter(filter, i);
if (isfiltered == -1) {
PyErr_SetString(PyExc_TypeError,
- "unable to check filter");
+ "unable to check filter");
goto bail;
}
@@ -845,10 +970,14 @@
int result;
if (rev >= self->length) {
- PyObject *tuple = PyList_GET_ITEM(self->added, rev - self->length);
- result = (int)PyInt_AS_LONG(PyTuple_GET_ITEM(tuple, 3));
- }
- else {
+ PyObject *tuple =
+ PyList_GET_ITEM(self->added, rev - self->length);
+ long ret;
+ if (!pylong_to_long(PyTuple_GET_ITEM(tuple, 3), &ret)) {
+ return -2;
+ }
+ result = (int)ret;
+ } else {
data = index_deref(self, rev);
if (data == NULL) {
return -2;
@@ -866,13 +995,129 @@
if (result < -1) {
PyErr_Format(
PyExc_ValueError,
- "corrupted revlog, revision base out of range: %d, %d",
- rev, result);
+ "corrupted revlog, revision base out of range: %d, %d", rev,
+ result);
return -2;
}
return result;
}
+/**
+ * Determine whether a revision is a snapshot.
+ *
+ * Only relevant in the sparse-revlog case.
+ * Callers must ensure that rev is in a valid range.
+ */
+static int index_issnapshotrev(indexObject *self, Py_ssize_t rev)
+{
+ int ps[2];
+ Py_ssize_t base;
+ while (rev >= 0) {
+ base = (Py_ssize_t)index_baserev(self, rev);
+ if (base == rev) {
+ base = -1;
+ }
+ if (base == -2) {
+ assert(PyErr_Occurred());
+ return -1;
+ }
+ if (base == -1) {
+ return 1;
+ }
+ if (index_get_parents(self, rev, ps, (int)rev) < 0) {
+ assert(PyErr_Occurred());
+ return -1;
+ }
+ if (base == ps[0] || base == ps[1]) {
+ return 0;
+ }
+ rev = base;
+ }
+ return rev == -1;
+}
+
+static PyObject *index_issnapshot(indexObject *self, PyObject *value)
+{
+ long rev;
+ int issnap;
+ Py_ssize_t length = index_length(self);
+
+ if (!pylong_to_long(value, &rev)) {
+ return NULL;
+ }
+ if (rev < -1 || rev >= length) {
+ PyErr_Format(PyExc_ValueError, "revlog index out of range: %ld",
+ rev);
+ return NULL;
+ }
+ issnap = index_issnapshotrev(self, (Py_ssize_t)rev);
+ if (issnap < 0) {
+ return NULL;
+ }
+ return PyBool_FromLong((long)issnap);
+}
+
+static PyObject *index_findsnapshots(indexObject *self, PyObject *args)
+{
+ Py_ssize_t start_rev;
+ PyObject *cache;
+ Py_ssize_t base;
+ Py_ssize_t rev;
+ PyObject *key = NULL;
+ PyObject *value = NULL;
+ const Py_ssize_t length = index_length(self);
+ if (!PyArg_ParseTuple(args, "O!n", &PyDict_Type, &cache, &start_rev)) {
+ return NULL;
+ }
+ for (rev = start_rev; rev < length; rev++) {
+ int issnap;
+ PyObject *allvalues = NULL;
+ issnap = index_issnapshotrev(self, rev);
+ if (issnap < 0) {
+ goto bail;
+ }
+ if (issnap == 0) {
+ continue;
+ }
+ base = (Py_ssize_t)index_baserev(self, rev);
+ if (base == rev) {
+ base = -1;
+ }
+ if (base == -2) {
+ assert(PyErr_Occurred());
+ goto bail;
+ }
+ key = PyInt_FromSsize_t(base);
+ allvalues = PyDict_GetItem(cache, key);
+ if (allvalues == NULL && PyErr_Occurred()) {
+ goto bail;
+ }
+ if (allvalues == NULL) {
+ int r;
+ allvalues = PyList_New(0);
+ if (!allvalues) {
+ goto bail;
+ }
+ r = PyDict_SetItem(cache, key, allvalues);
+ Py_DECREF(allvalues);
+ if (r < 0) {
+ goto bail;
+ }
+ }
+ value = PyInt_FromSsize_t(rev);
+ if (PyList_Append(allvalues, value)) {
+ goto bail;
+ }
+ Py_CLEAR(key);
+ Py_CLEAR(value);
+ }
+ Py_RETURN_NONE;
+bail:
+ Py_XDECREF(key);
+ Py_XDECREF(value);
+ return NULL;
+}
+
static PyObject *index_deltachain(indexObject *self, PyObject *args)
{
int rev, generaldelta;
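
As a usage sketch (not part of the patch): the two new methods surface on any
parsers.index instance from Python; the semantics below are inferred from the
C code above and are only meaningful on sparse-revlog repositories.

    # `index` stands for an existing mercurial.cext.parsers index object
    if index.issnapshot(rev):      # the chain of delta bases reaches nullrev
        pass                       # without ever deltaing against a parent
    cache = {}
    index.findsnapshots(cache, 0)  # fills {base_rev: [snapshot revs]},
                                   # scanning from rev 0 to the tip
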
@@ -891,13 +1136,11 @@
if (stoprev == -1 && PyErr_Occurred()) {
return NULL;
}
- }
- else if (stoparg == Py_None) {
+ } else if (stoparg == Py_None) {
stoprev = -2;
- }
- else {
+ } else {
PyErr_SetString(PyExc_ValueError,
- "stoprev must be integer or None");
+ "stoprev must be integer or None");
return NULL;
}
@@ -935,8 +1178,7 @@
if (generaldelta) {
iterrev = baserev;
- }
- else {
+ } else {
iterrev--;
}
@@ -945,7 +1187,8 @@
}
if (iterrev >= length) {
- PyErr_SetString(PyExc_IndexError, "revision outside index");
+ PyErr_SetString(PyExc_IndexError,
+ "revision outside index");
return NULL;
}
@@ -961,8 +1204,7 @@
if (iterrev == stoprev) {
stopped = 1;
- }
- else {
+ } else {
PyObject *value = PyInt_FromLong(iterrev);
if (value == NULL) {
goto bail;
@@ -989,9 +1231,279 @@
return NULL;
}
+static inline int64_t
+index_segment_span(indexObject *self, Py_ssize_t start_rev, Py_ssize_t end_rev)
+{
+ int64_t start_offset;
+ int64_t end_offset;
+ int end_size;
+ start_offset = index_get_start(self, start_rev);
+ if (start_offset < 0) {
+ return -1;
+ }
+ end_offset = index_get_start(self, end_rev);
+ if (end_offset < 0) {
+ return -1;
+ }
+ end_size = index_get_length(self, end_rev);
+ if (end_size < 0) {
+ return -1;
+ }
+ if (end_offset < start_offset) {
+ PyErr_Format(PyExc_ValueError,
+ "corrupted revlog index: inconsistent offset "
+ "between revisions (%zd) and (%zd)",
+ start_rev, end_rev);
+ return -1;
+ }
+ return (end_offset - start_offset) + (int64_t)end_size;
+}
+
+/* returns endidx so that revs[startidx:endidx] has no empty trailing revs */
+static Py_ssize_t trim_endidx(indexObject *self, const Py_ssize_t *revs,
+ Py_ssize_t startidx, Py_ssize_t endidx)
+{
+ int length;
+ while (endidx > 1 && endidx > startidx) {
+ length = index_get_length(self, revs[endidx - 1]);
+ if (length < 0) {
+ return -1;
+ }
+ if (length != 0) {
+ break;
+ }
+ endidx -= 1;
+ }
+ return endidx;
+}
+
+struct Gap {
+ int64_t size;
+ Py_ssize_t idx;
+};
+
+static int gap_compare(const void *left, const void *right)
+{
+ const struct Gap *l_left = ((const struct Gap *)left);
+ const struct Gap *l_right = ((const struct Gap *)right);
+ if (l_left->size < l_right->size) {
+ return -1;
+ } else if (l_left->size > l_right->size) {
+ return 1;
+ }
+ return 0;
+}
+static int Py_ssize_t_compare(const void *left, const void *right)
+{
+ const Py_ssize_t l_left = *(const Py_ssize_t *)left;
+ const Py_ssize_t l_right = *(const Py_ssize_t *)right;
+ if (l_left < l_right) {
+ return -1;
+ } else if (l_left > l_right) {
+ return 1;
+ }
+ return 0;
+}
+
+static PyObject *index_slicechunktodensity(indexObject *self, PyObject *args)
+{
+ /* method arguments */
+ PyObject *list_revs = NULL; /* revisions in the chain */
+ double targetdensity = 0; /* min density to achieve */
+ Py_ssize_t mingapsize = 0; /* threshold to ignore gaps */
+
+ /* other core variables */
+ Py_ssize_t idxlen = index_length(self);
+ Py_ssize_t i; /* used for various iteration */
+ PyObject *result = NULL; /* the final return of the function */
+
+ /* generic information about the delta chain being sliced */
+ Py_ssize_t num_revs = 0; /* size of the full delta chain */
+ Py_ssize_t *revs = NULL; /* native array of revisions in the chain */
+ int64_t chainpayload = 0; /* sum of all delta in the chain */
+ int64_t deltachainspan = 0; /* distance from first byte to last byte */
+
+ /* variable used for slicing the delta chain */
+ int64_t readdata = 0; /* amount of data currently planned to be read */
+ double density = 0; /* ratio of payload data to data actually read */
+ int64_t previous_end;
+ struct Gap *gaps = NULL; /* array of notable gaps in the chain */
+ Py_ssize_t num_gaps =
+ 0; /* total number of notable gaps recorded so far */
+ Py_ssize_t *selected_indices = NULL; /* indices of gap skipped over */
+ Py_ssize_t num_selected = 0; /* number of gaps skipped */
+ PyObject *chunk = NULL; /* individual slice */
+ PyObject *allchunks = NULL; /* all slices */
+ Py_ssize_t previdx;
+
+ /* parsing argument */
+ if (!PyArg_ParseTuple(args, "O!dn", &PyList_Type, &list_revs,
+ &targetdensity, &mingapsize)) {
+ goto bail;
+ }
+
+ /* If the delta chain contains a single element, we do not need slicing
+ */
+ num_revs = PyList_GET_SIZE(list_revs);
+ if (num_revs <= 1) {
+ result = PyTuple_Pack(1, list_revs);
+ goto done;
+ }
+
+ /* Turn the python list into a native integer array (for efficiency) */
+ revs = (Py_ssize_t *)calloc(num_revs, sizeof(Py_ssize_t));
+ if (revs == NULL) {
+ PyErr_NoMemory();
+ goto bail;
+ }
+ for (i = 0; i < num_revs; i++) {
+ Py_ssize_t revnum = PyInt_AsLong(PyList_GET_ITEM(list_revs, i));
+ if (revnum == -1 && PyErr_Occurred()) {
+ goto bail;
+ }
+ if (revnum < nullrev || revnum >= idxlen) {
+ PyErr_Format(PyExc_IndexError,
+ "index out of range: %zd", revnum);
+ goto bail;
+ }
+ revs[i] = revnum;
+ }
+
+ /* Compute and check various properties of the unsliced delta chain */
+ deltachainspan = index_segment_span(self, revs[0], revs[num_revs - 1]);
+ if (deltachainspan < 0) {
+ goto bail;
+ }
+
+ if (deltachainspan <= mingapsize) {
+ result = PyTuple_Pack(1, list_revs);
+ goto done;
+ }
+ chainpayload = 0;
+ for (i = 0; i < num_revs; i++) {
+ int tmp = index_get_length(self, revs[i]);
+ if (tmp < 0) {
+ goto bail;
+ }
+ chainpayload += tmp;
+ }
+
+ readdata = deltachainspan;
+ density = 1.0;
+
+ if (0 < deltachainspan) {
+ density = (double)chainpayload / (double)deltachainspan;
+ }
+
+ if (density >= targetdensity) {
+ result = PyTuple_Pack(1, list_revs);
+ goto done;
+ }
+
+ /* if chain is too sparse, look for relevant gaps */
+ gaps = (struct Gap *)calloc(num_revs, sizeof(struct Gap));
+ if (gaps == NULL) {
+ PyErr_NoMemory();
+ goto bail;
+ }
+
+ previous_end = -1;
+ for (i = 0; i < num_revs; i++) {
+ int64_t revstart;
+ int revsize;
+ revstart = index_get_start(self, revs[i]);
+ if (revstart < 0) {
+ goto bail;
+ }
+ revsize = index_get_length(self, revs[i]);
+ if (revsize < 0) {
+ goto bail;
+ }
+ if (revsize == 0) {
+ continue;
+ }
+ if (previous_end >= 0) {
+ int64_t gapsize = revstart - previous_end;
+ if (gapsize > mingapsize) {
+ gaps[num_gaps].size = gapsize;
+ gaps[num_gaps].idx = i;
+ num_gaps += 1;
+ }
+ }
+ previous_end = revstart + revsize;
+ }
+ if (num_gaps == 0) {
+ result = PyTuple_Pack(1, list_revs);
+ goto done;
+ }
+ qsort(gaps, num_gaps, sizeof(struct Gap), &gap_compare);
+
+ /* Slice the largest gaps first, as they improve the density the most */
+ selected_indices =
+ (Py_ssize_t *)malloc((num_gaps + 1) * sizeof(Py_ssize_t));
+ if (selected_indices == NULL) {
+ PyErr_NoMemory();
+ goto bail;
+ }
+
+ for (i = num_gaps - 1; i >= 0; i--) {
+ selected_indices[num_selected] = gaps[i].idx;
+ readdata -= gaps[i].size;
+ num_selected += 1;
+ if (readdata <= 0) {
+ density = 1.0;
+ } else {
+ density = (double)chainpayload / (double)readdata;
+ }
+ if (density >= targetdensity) {
+ break;
+ }
+ }
+ qsort(selected_indices, num_selected, sizeof(Py_ssize_t),
+ &Py_ssize_t_compare);
+
+ /* create the resulting slices */
+ allchunks = PyList_New(0);
+ if (allchunks == NULL) {
+ goto bail;
+ }
+ previdx = 0;
+ selected_indices[num_selected] = num_revs;
+ for (i = 0; i <= num_selected; i++) {
+ Py_ssize_t idx = selected_indices[i];
+ Py_ssize_t endidx = trim_endidx(self, revs, previdx, idx);
+ if (endidx < 0) {
+ goto bail;
+ }
+ if (previdx < endidx) {
+ chunk = PyList_GetSlice(list_revs, previdx, endidx);
+ if (chunk == NULL) {
+ goto bail;
+ }
+ if (PyList_Append(allchunks, chunk) == -1) {
+ goto bail;
+ }
+ Py_DECREF(chunk);
+ chunk = NULL;
+ }
+ previdx = idx;
+ }
+ result = allchunks;
+ goto done;
+
+bail:
+ Py_XDECREF(allchunks);
+ Py_XDECREF(chunk);
+done:
+ free(revs);
+ free(gaps);
+ free(selected_indices);
+ return result;
+}
+
static inline int nt_level(const char *node, Py_ssize_t level)
{
- int v = node[level>>1];
+ int v = node[level >> 1];
if (!(level & 1))
v >>= 4;
return v & 0xf;
@@ -1005,7 +1517,7 @@
* rest: valid rev
*/
static int nt_find(nodetree *self, const char *node, Py_ssize_t nodelen,
- int hex)
+ int hex)
{
int (*getnybble)(const char *, Py_ssize_t) = hex ? hexdigit : nt_level;
int level, maxlevel, off;
@@ -1051,10 +1563,12 @@
nodetreenode *newnodes;
newcapacity = self->capacity * 2;
if (newcapacity >= INT_MAX / sizeof(nodetreenode)) {
- PyErr_SetString(PyExc_MemoryError, "overflow in nt_new");
+ PyErr_SetString(PyExc_MemoryError,
+ "overflow in nt_new");
return -1;
}
- newnodes = realloc(self->nodes, newcapacity * sizeof(nodetreenode));
+ newnodes =
+ realloc(self->nodes, newcapacity * sizeof(nodetreenode));
if (newnodes == NULL) {
PyErr_SetString(PyExc_MemoryError, "out of memory");
return -1;
@@ -1085,7 +1599,8 @@
return 0;
}
if (v < 0) {
- const char *oldnode = index_node_existing(self->index, -(v + 2));
+ const char *oldnode =
+ index_node_existing(self->index, -(v + 2));
int noff;
if (oldnode == NULL)
@@ -1134,7 +1649,8 @@
static int nt_delete_node(nodetree *self, const char *node)
{
- /* rev==-2 happens to get encoded as 0, which is interpreted as not set */
+ /* rev==-2 happens to get encoded as 0, which is interpreted as not set
+ */
return nt_insert(self, node, -2);
}
@@ -1162,20 +1678,18 @@
return 0;
}
-static PyTypeObject indexType;
-
static int ntobj_init(nodetreeObject *self, PyObject *args)
{
PyObject *index;
unsigned capacity;
- if (!PyArg_ParseTuple(args, "O!I", &indexType, &index, &capacity))
+ if (!PyArg_ParseTuple(args, "O!I", &HgRevlogIndex_Type, &index,
+ &capacity))
return -1;
Py_INCREF(index);
- return nt_init(&self->nt, (indexObject*)index, capacity);
+ return nt_init(&self->nt, (indexObject *)index, capacity);
}
-static int nt_partialmatch(nodetree *self, const char *node,
- Py_ssize_t nodelen)
+static int nt_partialmatch(nodetree *self, const char *node, Py_ssize_t nodelen)
{
return nt_find(self, node, nodelen, 1);
}
@@ -1261,51 +1775,51 @@
}
static PyMethodDef ntobj_methods[] = {
- {"insert", (PyCFunction)ntobj_insert, METH_VARARGS,
- "insert an index entry"},
- {"shortest", (PyCFunction)ntobj_shortest, METH_VARARGS,
- "find length of shortest hex nodeid of a binary ID"},
- {NULL} /* Sentinel */
+ {"insert", (PyCFunction)ntobj_insert, METH_VARARGS,
+ "insert an index entry"},
+ {"shortest", (PyCFunction)ntobj_shortest, METH_VARARGS,
+ "find length of shortest hex nodeid of a binary ID"},
+ {NULL} /* Sentinel */
};
static PyTypeObject nodetreeType = {
- PyVarObject_HEAD_INIT(NULL, 0) /* header */
- "parsers.nodetree", /* tp_name */
- sizeof(nodetreeObject) , /* tp_basicsize */
- 0, /* tp_itemsize */
- (destructor)ntobj_dealloc, /* tp_dealloc */
- 0, /* tp_print */
- 0, /* tp_getattr */
- 0, /* tp_setattr */
- 0, /* tp_compare */
- 0, /* tp_repr */
- 0, /* tp_as_number */
- 0, /* tp_as_sequence */
- 0, /* tp_as_mapping */
- 0, /* tp_hash */
- 0, /* tp_call */
- 0, /* tp_str */
- 0, /* tp_getattro */
- 0, /* tp_setattro */
- 0, /* tp_as_buffer */
- Py_TPFLAGS_DEFAULT, /* tp_flags */
- "nodetree", /* tp_doc */
- 0, /* tp_traverse */
- 0, /* tp_clear */
- 0, /* tp_richcompare */
- 0, /* tp_weaklistoffset */
- 0, /* tp_iter */
- 0, /* tp_iternext */
- ntobj_methods, /* tp_methods */
- 0, /* tp_members */
- 0, /* tp_getset */
- 0, /* tp_base */
- 0, /* tp_dict */
- 0, /* tp_descr_get */
- 0, /* tp_descr_set */
- 0, /* tp_dictoffset */
- (initproc)ntobj_init, /* tp_init */
- 0, /* tp_alloc */
+ PyVarObject_HEAD_INIT(NULL, 0) /* header */
+ "parsers.nodetree", /* tp_name */
+ sizeof(nodetreeObject), /* tp_basicsize */
+ 0, /* tp_itemsize */
+ (destructor)ntobj_dealloc, /* tp_dealloc */
+ 0, /* tp_print */
+ 0, /* tp_getattr */
+ 0, /* tp_setattr */
+ 0, /* tp_compare */
+ 0, /* tp_repr */
+ 0, /* tp_as_number */
+ 0, /* tp_as_sequence */
+ 0, /* tp_as_mapping */
+ 0, /* tp_hash */
+ 0, /* tp_call */
+ 0, /* tp_str */
+ 0, /* tp_getattro */
+ 0, /* tp_setattro */
+ 0, /* tp_as_buffer */
+ Py_TPFLAGS_DEFAULT, /* tp_flags */
+ "nodetree", /* tp_doc */
+ 0, /* tp_traverse */
+ 0, /* tp_clear */
+ 0, /* tp_richcompare */
+ 0, /* tp_weaklistoffset */
+ 0, /* tp_iter */
+ 0, /* tp_iternext */
+ ntobj_methods, /* tp_methods */
+ 0, /* tp_members */
+ 0, /* tp_getset */
+ 0, /* tp_base */
+ 0, /* tp_dict */
+ 0, /* tp_descr_get */
+ 0, /* tp_descr_set */
+ 0, /* tp_dictoffset */
+ (initproc)ntobj_init, /* tp_init */
+ 0, /* tp_alloc */
};
static int index_init_nt(indexObject *self)
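
Since the nodetree is now a standalone Python type, a hypothetical interactive
sketch; the constructor signature follows ntobj_init above, and the method
shapes are taken from the ntobj_methods docstrings:

    from mercurial.cext import parsers
    nt = parsers.nodetree(index, 4)  # an index object plus initial capacity
    nt.insert(0)                     # add the node of revision 0 to the trie
    nt.shortest(index[0][7])         # shortest unambiguous hex prefix length
                                     # for that 20-byte binary node
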
@@ -1334,8 +1848,8 @@
* -2: not found (no exception set)
* rest: valid rev
*/
-static int index_find_node(indexObject *self,
- const char *node, Py_ssize_t nodelen)
+static int index_find_node(indexObject *self, const char *node,
+ Py_ssize_t nodelen)
{
int rev;
@@ -1393,8 +1907,13 @@
char *node;
int rev;
- if (PyInt_Check(value))
- return index_get(self, PyInt_AS_LONG(value));
+ if (PyInt_Check(value)) {
+ long idx;
+ if (!pylong_to_long(value, &idx)) {
+ return NULL;
+ }
+ return index_get(self, idx);
+ }
if (node_check(value, &node) == -1)
return NULL;
@@ -1409,7 +1928,8 @@
/*
* Fully populate the radix tree.
*/
-static int index_populate_nt(indexObject *self) {
+static int index_populate_nt(indexObject *self)
+{
int rev;
if (self->ntrev > 0) {
for (rev = self->ntrev - 1; rev >= 0; rev--) {
@@ -1524,7 +2044,10 @@
char *node;
if (PyInt_Check(value)) {
- long rev = PyInt_AS_LONG(value);
+ long rev;
+ if (!pylong_to_long(value, &rev)) {
+ return -1;
+ }
return rev >= -1 && rev < index_length(self);
}
@@ -1549,7 +2072,7 @@
* "heads(::a and ::b and ...)"
*/
static PyObject *find_gca_candidates(indexObject *self, const int *revs,
- int revcount)
+ int revcount)
{
const bitmask allseen = (1ull << revcount) - 1;
const bitmask poison = 1ull << revcount;
@@ -1614,8 +2137,7 @@
if (sp == 0) {
seen[p] = sv;
interesting++;
- }
- else if (sp != sv)
+ } else if (sp != sv)
seen[p] |= sv;
} else {
if (sp && sp < poison)
@@ -1651,8 +2173,8 @@
if (revcount > capacity) {
PyErr_Format(PyExc_OverflowError,
- "bitset size (%ld) > capacity (%ld)",
- (long)revcount, (long)capacity);
+ "bitset size (%ld) > capacity (%ld)",
+ (long)revcount, (long)capacity);
return NULL;
}
@@ -1726,8 +2248,7 @@
ninteresting -= 1;
}
}
- }
- else if (dv == dp - 1) {
+ } else if (dv == dp - 1) {
long nsp = sp | sv;
if (nsp == sp)
continue;
@@ -1815,7 +2336,7 @@
if (!PyInt_Check(obj)) {
PyErr_SetString(PyExc_TypeError,
- "arguments must all be ints");
+ "arguments must all be ints");
Py_DECREF(obj);
goto bail;
}
@@ -1826,8 +2347,7 @@
goto done;
}
if (val < 0 || val >= len) {
- PyErr_SetString(PyExc_IndexError,
- "index out of range");
+ PyErr_SetString(PyExc_IndexError, "index out of range");
goto bail;
}
/* this cheesy bloom filter lets us avoid some more
@@ -1840,12 +2360,12 @@
if (val == revs[k])
goto duplicate;
}
- }
- else repeat |= x;
+ } else
+ repeat |= x;
if (revcount >= capacity) {
PyErr_Format(PyExc_OverflowError,
- "bitset size (%d) > capacity (%d)",
- revcount, capacity);
+ "bitset size (%d) > capacity (%d)",
+ revcount, capacity);
goto bail;
}
revs[revcount++] = (int)val;
@@ -1932,11 +2452,11 @@
/* Argument changed from PySliceObject* to PyObject* in Python 3. */
#ifdef IS_PY3K
- if (PySlice_GetIndicesEx(item, length,
- &start, &stop, &step, &slicelength) < 0)
+ if (PySlice_GetIndicesEx(item, length, &start, &stop, &step,
+ &slicelength) < 0)
#else
- if (PySlice_GetIndicesEx((PySliceObject*)item, length,
- &start, &stop, &step, &slicelength) < 0)
+ if (PySlice_GetIndicesEx((PySliceObject *)item, length, &start, &stop,
+ &step, &slicelength) < 0)
#endif
return -1;
@@ -1948,19 +2468,19 @@
if (step < 0) {
stop = start + 1;
- start = stop + step*(slicelength - 1) - 1;
+ start = stop + step * (slicelength - 1) - 1;
step = -step;
}
if (step != 1) {
PyErr_SetString(PyExc_ValueError,
- "revlog index delete requires step size of 1");
+ "revlog index delete requires step size of 1");
return -1;
}
if (stop != length - 1) {
PyErr_SetString(PyExc_IndexError,
- "revlog index deletion indices are invalid");
+ "revlog index deletion indices are invalid");
return -1;
}
@@ -1999,7 +2519,7 @@
}
if (self->added)
ret = PyList_SetSlice(self->added, start - self->length,
- PyList_GET_SIZE(self->added), NULL);
+ PyList_GET_SIZE(self->added), NULL);
done:
Py_CLEAR(self->headrevs);
return ret;
@@ -2013,7 +2533,7 @@
* string deletion (shrink node->rev mapping)
*/
static int index_assign_subscript(indexObject *self, PyObject *item,
- PyObject *value)
+ PyObject *value)
{
char *node;
long rev;
@@ -2025,7 +2545,8 @@
return -1;
if (value == NULL)
- return self->ntinitialized ? nt_delete_node(&self->nt, node) : 0;
+ return self->ntinitialized ? nt_delete_node(&self->nt, node)
+ : 0;
rev = PyInt_AsLong(value);
if (rev > INT_MAX || rev < 0) {
if (!PyErr_Occurred())
@@ -2075,7 +2596,8 @@
PyObject *data_obj, *inlined_obj;
Py_ssize_t size;
- /* Initialize before argument-checking to avoid index_dealloc() crash. */
+ /* Initialize before argument-checking to avoid index_dealloc() crash.
+ */
self->raw_length = 0;
self->added = NULL;
self->cache = NULL;
@@ -2091,7 +2613,7 @@
return -1;
if (!PyObject_CheckBuffer(data_obj)) {
PyErr_SetString(PyExc_TypeError,
- "data does not support buffer interface");
+ "data does not support buffer interface");
return -1;
}
@@ -2175,96 +2697,99 @@
}
static PySequenceMethods index_sequence_methods = {
- (lenfunc)index_length, /* sq_length */
- 0, /* sq_concat */
- 0, /* sq_repeat */
- (ssizeargfunc)index_get, /* sq_item */
- 0, /* sq_slice */
- 0, /* sq_ass_item */
- 0, /* sq_ass_slice */
- (objobjproc)index_contains, /* sq_contains */
+ (lenfunc)index_length, /* sq_length */
+ 0, /* sq_concat */
+ 0, /* sq_repeat */
+ (ssizeargfunc)index_get, /* sq_item */
+ 0, /* sq_slice */
+ 0, /* sq_ass_item */
+ 0, /* sq_ass_slice */
+ (objobjproc)index_contains, /* sq_contains */
};
static PyMappingMethods index_mapping_methods = {
- (lenfunc)index_length, /* mp_length */
- (binaryfunc)index_getitem, /* mp_subscript */
- (objobjargproc)index_assign_subscript, /* mp_ass_subscript */
+ (lenfunc)index_length, /* mp_length */
+ (binaryfunc)index_getitem, /* mp_subscript */
+ (objobjargproc)index_assign_subscript, /* mp_ass_subscript */
};
static PyMethodDef index_methods[] = {
- {"ancestors", (PyCFunction)index_ancestors, METH_VARARGS,
- "return the gca set of the given revs"},
- {"commonancestorsheads", (PyCFunction)index_commonancestorsheads,
- METH_VARARGS,
- "return the heads of the common ancestors of the given revs"},
- {"clearcaches", (PyCFunction)index_clearcaches, METH_NOARGS,
- "clear the index caches"},
- {"get", (PyCFunction)index_m_get, METH_VARARGS,
- "get an index entry"},
- {"computephasesmapsets", (PyCFunction)compute_phases_map_sets,
- METH_VARARGS, "compute phases"},
- {"reachableroots2", (PyCFunction)reachableroots2, METH_VARARGS,
- "reachableroots"},
- {"headrevs", (PyCFunction)index_headrevs, METH_VARARGS,
- "get head revisions"}, /* Can do filtering since 3.2 */
- {"headrevsfiltered", (PyCFunction)index_headrevs, METH_VARARGS,
- "get filtered head revisions"}, /* Can always do filtering */
- {"deltachain", (PyCFunction)index_deltachain, METH_VARARGS,
- "determine revisions with deltas to reconstruct fulltext"},
- {"append", (PyCFunction)index_append, METH_O,
- "append an index entry"},
- {"partialmatch", (PyCFunction)index_partialmatch, METH_VARARGS,
- "match a potentially ambiguous node ID"},
- {"shortest", (PyCFunction)index_shortest, METH_VARARGS,
- "find length of shortest hex nodeid of a binary ID"},
- {"stats", (PyCFunction)index_stats, METH_NOARGS,
- "stats for the index"},
- {NULL} /* Sentinel */
+ {"ancestors", (PyCFunction)index_ancestors, METH_VARARGS,
+ "return the gca set of the given revs"},
+ {"commonancestorsheads", (PyCFunction)index_commonancestorsheads,
+ METH_VARARGS,
+ "return the heads of the common ancestors of the given revs"},
+ {"clearcaches", (PyCFunction)index_clearcaches, METH_NOARGS,
+ "clear the index caches"},
+ {"get", (PyCFunction)index_m_get, METH_VARARGS, "get an index entry"},
+ {"computephasesmapsets", (PyCFunction)compute_phases_map_sets, METH_VARARGS,
+ "compute phases"},
+ {"reachableroots2", (PyCFunction)reachableroots2, METH_VARARGS,
+ "reachableroots"},
+ {"headrevs", (PyCFunction)index_headrevs, METH_VARARGS,
+ "get head revisions"}, /* Can do filtering since 3.2 */
+ {"headrevsfiltered", (PyCFunction)index_headrevs, METH_VARARGS,
+ "get filtered head revisions"}, /* Can always do filtering */
+ {"issnapshot", (PyCFunction)index_issnapshot, METH_O,
+ "True if the object is a snapshot"},
+ {"findsnapshots", (PyCFunction)index_findsnapshots, METH_VARARGS,
+ "Gather snapshot data in a cache dict"},
+ {"deltachain", (PyCFunction)index_deltachain, METH_VARARGS,
+ "determine revisions with deltas to reconstruct fulltext"},
+ {"slicechunktodensity", (PyCFunction)index_slicechunktodensity,
+ METH_VARARGS, "determine revisions with deltas to reconstruct fulltext"},
+ {"append", (PyCFunction)index_append, METH_O, "append an index entry"},
+ {"partialmatch", (PyCFunction)index_partialmatch, METH_VARARGS,
+ "match a potentially ambiguous node ID"},
+ {"shortest", (PyCFunction)index_shortest, METH_VARARGS,
+ "find length of shortest hex nodeid of a binary ID"},
+ {"stats", (PyCFunction)index_stats, METH_NOARGS, "stats for the index"},
+ {NULL} /* Sentinel */
};
static PyGetSetDef index_getset[] = {
- {"nodemap", (getter)index_nodemap, NULL, "nodemap", NULL},
- {NULL} /* Sentinel */
+ {"nodemap", (getter)index_nodemap, NULL, "nodemap", NULL},
+ {NULL} /* Sentinel */
};
-static PyTypeObject indexType = {
- PyVarObject_HEAD_INIT(NULL, 0) /* header */
- "parsers.index", /* tp_name */
- sizeof(indexObject), /* tp_basicsize */
- 0, /* tp_itemsize */
- (destructor)index_dealloc, /* tp_dealloc */
- 0, /* tp_print */
- 0, /* tp_getattr */
- 0, /* tp_setattr */
- 0, /* tp_compare */
- 0, /* tp_repr */
- 0, /* tp_as_number */
- &index_sequence_methods, /* tp_as_sequence */
- &index_mapping_methods, /* tp_as_mapping */
- 0, /* tp_hash */
- 0, /* tp_call */
- 0, /* tp_str */
- 0, /* tp_getattro */
- 0, /* tp_setattro */
- 0, /* tp_as_buffer */
- Py_TPFLAGS_DEFAULT, /* tp_flags */
- "revlog index", /* tp_doc */
- 0, /* tp_traverse */
- 0, /* tp_clear */
- 0, /* tp_richcompare */
- 0, /* tp_weaklistoffset */
- 0, /* tp_iter */
- 0, /* tp_iternext */
- index_methods, /* tp_methods */
- 0, /* tp_members */
- index_getset, /* tp_getset */
- 0, /* tp_base */
- 0, /* tp_dict */
- 0, /* tp_descr_get */
- 0, /* tp_descr_set */
- 0, /* tp_dictoffset */
- (initproc)index_init, /* tp_init */
- 0, /* tp_alloc */
+PyTypeObject HgRevlogIndex_Type = {
+ PyVarObject_HEAD_INIT(NULL, 0) /* header */
+ "parsers.index", /* tp_name */
+ sizeof(indexObject), /* tp_basicsize */
+ 0, /* tp_itemsize */
+ (destructor)index_dealloc, /* tp_dealloc */
+ 0, /* tp_print */
+ 0, /* tp_getattr */
+ 0, /* tp_setattr */
+ 0, /* tp_compare */
+ 0, /* tp_repr */
+ 0, /* tp_as_number */
+ &index_sequence_methods, /* tp_as_sequence */
+ &index_mapping_methods, /* tp_as_mapping */
+ 0, /* tp_hash */
+ 0, /* tp_call */
+ 0, /* tp_str */
+ 0, /* tp_getattro */
+ 0, /* tp_setattro */
+ 0, /* tp_as_buffer */
+ Py_TPFLAGS_DEFAULT, /* tp_flags */
+ "revlog index", /* tp_doc */
+ 0, /* tp_traverse */
+ 0, /* tp_clear */
+ 0, /* tp_richcompare */
+ 0, /* tp_weaklistoffset */
+ 0, /* tp_iter */
+ 0, /* tp_iternext */
+ index_methods, /* tp_methods */
+ 0, /* tp_members */
+ index_getset, /* tp_getset */
+ 0, /* tp_base */
+ 0, /* tp_dict */
+ 0, /* tp_descr_get */
+ 0, /* tp_descr_set */
+ 0, /* tp_dictoffset */
+ (initproc)index_init, /* tp_init */
+ 0, /* tp_alloc */
};
/*
@@ -2283,7 +2808,7 @@
indexObject *idx;
int ret;
- idx = PyObject_New(indexObject, &indexType);
+ idx = PyObject_New(indexObject, &HgRevlogIndex_Type);
if (idx == NULL)
goto bail;
@@ -2322,37 +2847,24 @@
struct rustlazyancestorsObjectStruct {
PyObject_HEAD
- /* Type-specific fields go here. */
- indexObject *index; /* Ref kept to avoid GC'ing the index */
- void *iter; /* Rust iterator */
+ /* Type-specific fields go here. */
+ indexObject *index; /* Ref kept to avoid GC'ing the index */
+ void *iter; /* Rust iterator */
};
/* FFI exposed from Rust code */
-rustlazyancestorsObject *rustlazyancestors_init(
- indexObject *index,
- /* to pass index_get_parents_checked() */
- int (*)(indexObject *, Py_ssize_t, int*, int),
- /* intrevs vector */
- Py_ssize_t initrevslen, long *initrevs,
- long stoprev,
- int inclusive);
+rustlazyancestorsObject *rustlazyancestors_init(indexObject *index,
+ /* intrevs vector */
+ Py_ssize_t initrevslen,
+ long *initrevs, long stoprev,
+ int inclusive);
void rustlazyancestors_drop(rustlazyancestorsObject *self);
int rustlazyancestors_next(rustlazyancestorsObject *self);
int rustlazyancestors_contains(rustlazyancestorsObject *self, long rev);
-static int index_get_parents_checked(indexObject *self, Py_ssize_t rev,
- int *ps, int maxrev)
+/* CPython instance methods */
+static int rustla_init(rustlazyancestorsObject *self, PyObject *args)
{
- if (rev < 0 || rev >= index_length(self)) {
- PyErr_SetString(PyExc_ValueError, "rev out of range");
- return -1;
- }
- return index_get_parents(self, rev, ps, maxrev);
-}
-
-/* CPython instance methods */
-static int rustla_init(rustlazyancestorsObject *self,
- PyObject *args) {
PyObject *initrevsarg = NULL;
PyObject *inclusivearg = NULL;
long stoprev = 0;
@@ -2361,12 +2873,10 @@
Py_ssize_t i;
indexObject *index;
- if (!PyArg_ParseTuple(args, "O!O!lO!",
- &indexType, &index,
- &PyList_Type, &initrevsarg,
- &stoprev,
- &PyBool_Type, &inclusivearg))
- return -1;
+ if (!PyArg_ParseTuple(args, "O!O!lO!", &HgRevlogIndex_Type, &index,
+ &PyList_Type, &initrevsarg, &stoprev,
+ &PyBool_Type, &inclusivearg))
+ return -1;
Py_INCREF(index);
self->index = index;
@@ -2376,27 +2886,25 @@
Py_ssize_t linit = PyList_GET_SIZE(initrevsarg);
- initrevs = (long*)calloc(linit, sizeof(long));
+ initrevs = (long *)calloc(linit, sizeof(long));
if (initrevs == NULL) {
PyErr_NoMemory();
goto bail;
}
- for (i=0; i<linit; i++) {
+ for (i = 0; i < linit; i++) {
initrevs[i] = PyInt_AsLong(PyList_GET_ITEM(initrevsarg, i));
}
if (PyErr_Occurred())
goto bail;
- self->iter = rustlazyancestors_init(index,
- index_get_parents_checked,
- linit, initrevs,
- stoprev, inclusive);
+ self->iter =
+ rustlazyancestors_init(index, linit, initrevs, stoprev, inclusive);
if (self->iter == NULL) {
/* if this is because of GraphError::ParentOutOfRange
- * index_get_parents_checked() has already set the proper
- * ValueError */
+ * HgRevlogIndex_GetParents() has already set the proper
+ * exception */
goto bail;
}
@@ -2417,84 +2925,89 @@
PyObject_Del(self);
}
-static PyObject *rustla_next(rustlazyancestorsObject *self) {
+static PyObject *rustla_next(rustlazyancestorsObject *self)
+{
int res = rustlazyancestors_next(self->iter);
if (res == -1) {
/* Setting an explicit exception seems unnecessary
- * as examples from Python source code (Objects/rangeobjets.c and
- * Modules/_io/stringio.c) seem to demonstrate.
+ * as examples from Python source code (Objects/rangeobject.c
+ * and Modules/_io/stringio.c) seem to demonstrate.
*/
return NULL;
}
return PyInt_FromLong(res);
}
-static int rustla_contains(rustlazyancestorsObject *self, PyObject *rev) {
- if (!(PyInt_Check(rev))) {
+static int rustla_contains(rustlazyancestorsObject *self, PyObject *rev)
+{
+ long lrev;
+ if (!pylong_to_long(rev, &lrev)) {
+ PyErr_Clear();
return 0;
}
- return rustlazyancestors_contains(self->iter, PyInt_AS_LONG(rev));
+ return rustlazyancestors_contains(self->iter, lrev);
}
static PySequenceMethods rustla_sequence_methods = {
- 0, /* sq_length */
- 0, /* sq_concat */
- 0, /* sq_repeat */
- 0, /* sq_item */
- 0, /* sq_slice */
- 0, /* sq_ass_item */
- 0, /* sq_ass_slice */
- (objobjproc)rustla_contains, /* sq_contains */
+ 0, /* sq_length */
+ 0, /* sq_concat */
+ 0, /* sq_repeat */
+ 0, /* sq_item */
+ 0, /* sq_slice */
+ 0, /* sq_ass_item */
+ 0, /* sq_ass_slice */
+ (objobjproc)rustla_contains, /* sq_contains */
};
static PyTypeObject rustlazyancestorsType = {
- PyVarObject_HEAD_INIT(NULL, 0) /* header */
- "parsers.rustlazyancestors", /* tp_name */
- sizeof(rustlazyancestorsObject), /* tp_basicsize */
- 0, /* tp_itemsize */
- (destructor)rustla_dealloc, /* tp_dealloc */
- 0, /* tp_print */
- 0, /* tp_getattr */
- 0, /* tp_setattr */
- 0, /* tp_compare */
- 0, /* tp_repr */
- 0, /* tp_as_number */
- &rustla_sequence_methods, /* tp_as_sequence */
- 0, /* tp_as_mapping */
- 0, /* tp_hash */
- 0, /* tp_call */
- 0, /* tp_str */
- 0, /* tp_getattro */
- 0, /* tp_setattro */
- 0, /* tp_as_buffer */
- Py_TPFLAGS_DEFAULT, /* tp_flags */
- "Iterator over ancestors, implemented in Rust", /* tp_doc */
- 0, /* tp_traverse */
- 0, /* tp_clear */
- 0, /* tp_richcompare */
- 0, /* tp_weaklistoffset */
- 0, /* tp_iter */
- (iternextfunc)rustla_next, /* tp_iternext */
- 0, /* tp_methods */
- 0, /* tp_members */
- 0, /* tp_getset */
- 0, /* tp_base */
- 0, /* tp_dict */
- 0, /* tp_descr_get */
- 0, /* tp_descr_set */
- 0, /* tp_dictoffset */
- (initproc)rustla_init, /* tp_init */
- 0, /* tp_alloc */
+ PyVarObject_HEAD_INIT(NULL, 0) /* header */
+ "parsers.rustlazyancestors", /* tp_name */
+ sizeof(rustlazyancestorsObject), /* tp_basicsize */
+ 0, /* tp_itemsize */
+ (destructor)rustla_dealloc, /* tp_dealloc */
+ 0, /* tp_print */
+ 0, /* tp_getattr */
+ 0, /* tp_setattr */
+ 0, /* tp_compare */
+ 0, /* tp_repr */
+ 0, /* tp_as_number */
+ &rustla_sequence_methods, /* tp_as_sequence */
+ 0, /* tp_as_mapping */
+ 0, /* tp_hash */
+ 0, /* tp_call */
+ 0, /* tp_str */
+ 0, /* tp_getattro */
+ 0, /* tp_setattro */
+ 0, /* tp_as_buffer */
+ Py_TPFLAGS_DEFAULT, /* tp_flags */
+ "Iterator over ancestors, implemented in Rust", /* tp_doc */
+ 0, /* tp_traverse */
+ 0, /* tp_clear */
+ 0, /* tp_richcompare */
+ 0, /* tp_weaklistoffset */
+ 0, /* tp_iter */
+ (iternextfunc)rustla_next, /* tp_iternext */
+ 0, /* tp_methods */
+ 0, /* tp_members */
+ 0, /* tp_getset */
+ 0, /* tp_base */
+ 0, /* tp_dict */
+ 0, /* tp_descr_get */
+ 0, /* tp_descr_set */
+ 0, /* tp_dictoffset */
+ (initproc)rustla_init, /* tp_init */
+ 0, /* tp_alloc */
};
#endif /* WITH_RUST */
void revlog_module_init(PyObject *mod)
{
- indexType.tp_new = PyType_GenericNew;
- if (PyType_Ready(&indexType) < 0)
+ PyObject *caps = NULL;
+ HgRevlogIndex_Type.tp_new = PyType_GenericNew;
+ if (PyType_Ready(&HgRevlogIndex_Type) < 0)
return;
- Py_INCREF(&indexType);
- PyModule_AddObject(mod, "index", (PyObject *)&indexType);
+ Py_INCREF(&HgRevlogIndex_Type);
+ PyModule_AddObject(mod, "index", (PyObject *)&HgRevlogIndex_Type);
nodetreeType.tp_new = PyType_GenericNew;
if (PyType_Ready(&nodetreeType) < 0)
@@ -2503,19 +3016,24 @@
PyModule_AddObject(mod, "nodetree", (PyObject *)&nodetreeType);
if (!nullentry) {
- nullentry = Py_BuildValue(PY23("iiiiiiis#", "iiiiiiiy#"), 0, 0, 0,
- -1, -1, -1, -1, nullid, 20);
+ nullentry = Py_BuildValue(PY23("iiiiiiis#", "iiiiiiiy#"), 0, 0,
+ 0, -1, -1, -1, -1, nullid, 20);
}
if (nullentry)
PyObject_GC_UnTrack(nullentry);
+ caps = PyCapsule_New(HgRevlogIndex_GetParents,
+ "mercurial.cext.parsers.index_get_parents_CAPI",
+ NULL);
+ if (caps != NULL)
+ PyModule_AddObject(mod, "index_get_parents_CAPI", caps);
+
#ifdef WITH_RUST
rustlazyancestorsType.tp_new = PyType_GenericNew;
if (PyType_Ready(&rustlazyancestorsType) < 0)
return;
Py_INCREF(&rustlazyancestorsType);
PyModule_AddObject(mod, "rustlazyancestors",
- (PyObject *)&rustlazyancestorsType);
+ (PyObject *)&rustlazyancestorsType);
#endif
-
}
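
The PyCapsule registered above is how the Rust extension now obtains the
parents function, replacing the per-call C callback that was removed from
rustlazyancestors_init. A hypothetical interactive check that the export is
in place:

    from mercurial.cext import parsers
    parsers.index_get_parents_CAPI
    # a C-level consumer resolves the pointer with:
    #   PyCapsule_Import("mercurial.cext.parsers.index_get_parents_CAPI", 0)
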
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/mercurial/cext/revlog.h Fri Jan 18 13:28:22 2019 -0500
@@ -0,0 +1,19 @@
+/*
+ revlog.h - efficient revlog parsing
+
+ This software may be used and distributed according to the terms of
+ the GNU General Public License, incorporated herein by reference.
+*/
+
+#ifndef _HG_REVLOG_H_
+#define _HG_REVLOG_H_
+
+#include <Python.h>
+
+extern PyTypeObject HgRevlogIndex_Type;
+
+#define HgRevlogIndex_Check(op) PyObject_TypeCheck(op, &HgRevlogIndex_Type)
+
+int HgRevlogIndex_GetParents(PyObject *op, int rev, int *ps);
+
+#endif /* _HG_REVLOG_H_ */
--- a/mercurial/cext/util.h Wed Jan 09 20:00:35 2019 -0800
+++ b/mercurial/cext/util.h Fri Jan 18 13:28:22 2019 -0500
@@ -58,4 +58,17 @@
return _PyDict_NewPresized(((1 + expected_size) / 2) * 3);
}
+/* Convert a PyInt or PyLong to a long. Returns false if there is an
+ error, in which case an exception will already have been set. */
+static inline bool pylong_to_long(PyObject *pylong, long *out)
+{
+ *out = PyLong_AsLong(pylong);
+ /* Fast path to avoid hitting PyErr_Occurred if the value was obviously
+ * not an error. */
+ if (*out != -1) {
+ return true;
+ }
+ return PyErr_Occurred() == NULL;
+}
+
#endif /* _HG_UTIL_H_ */
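
One Python-visible effect of funnelling integer conversions through this
helper, sketched as an illustration (not taken from the patch): conversion
failures now raise instead of masquerading as a legitimate -1.

    index[2**70]    # PyLong_AsLong overflows, so an OverflowError
                    # propagates instead of a bogus lookup of rev -1
    2**70 in index  # __contains__ fails loudly for the same reason
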
--- a/mercurial/changegroup.py Wed Jan 09 20:00:35 2019 -0800
+++ b/mercurial/changegroup.py Fri Jan 18 13:28:22 2019 -0500
@@ -697,12 +697,25 @@
progress = repo.ui.makeprogress(topic, unit=_('chunks'),
total=len(nodes))
+ configtarget = repo.ui.config('devel', 'bundle.delta')
+ if configtarget not in ('', 'p1', 'full'):
+ msg = _("""config "devel.bundle.delta" has unknown value: %s""")
+ repo.ui.warn(msg % configtarget)
+
+ deltamode = repository.CG_DELTAMODE_STD
+ if forcedeltaparentprev:
+ deltamode = repository.CG_DELTAMODE_PREV
+ elif configtarget == 'p1':
+ deltamode = repository.CG_DELTAMODE_P1
+ elif configtarget == 'full':
+ deltamode = repository.CG_DELTAMODE_FULL
+
revisions = store.emitrevisions(
nodes,
nodesorder=nodesorder,
revisiondata=True,
assumehaveparentrevisions=not ellipses,
- deltaprevious=forcedeltaparentprev)
+ deltamode=deltamode)
for i, revision in enumerate(revisions):
if progress:
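
A hedged example of exercising the new developer knob; since it is read via
repo.ui.config('devel', 'bundle.delta'), both hgrc and --config forms work:

    # equivalent to --config devel.bundle.delta=full on the command line:
    repo.ui.setconfig('devel', 'bundle.delta', 'full')
    # changegroup emission then requests CG_DELTAMODE_FULL, i.e. full
    # revision texts; 'p1' likewise forces deltas against the first parent
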
@@ -1030,18 +1043,25 @@
while tmfnodes:
tree, nodes = tmfnodes.popitem()
+
+ should_visit = self._matcher.visitdir(tree[:-1] or '.')
+ if tree and not should_visit:
+ continue
+
store = mfl.getstorage(tree)
- if not self._matcher.visitdir(store.tree[:-1] or '.'):
+ if not should_visit:
# No nodes to send because this directory is out of
# the client's view of the repository (probably
- # because of narrow clones).
+ # because of narrow clones). Do this even for the root
+ # directory (tree==''), which is never skipped above.
prunednodes = []
else:
# Avoid sending any manifest nodes we can prove the
# client already has by checking linkrevs. See the
# related comment in generatefiles().
prunednodes = self._prunemanifests(store, nodes, commonrevs)
+
if tree and not prunednodes:
continue
--- a/mercurial/changelog.py Wed Jan 09 20:00:35 2019 -0800
+++ b/mercurial/changelog.py Fri Jan 18 13:28:22 2019 -0500
@@ -295,8 +295,9 @@
revlog.revlog.__init__(self, opener, indexfile, datafile=datafile,
checkambig=True, mmaplargeindex=True)
- if self._initempty:
- # changelogs don't benefit from generaldelta
+ if self._initempty and (self.version & 0xFFFF == revlog.REVLOGV1):
+ # changelogs don't benefit from generaldelta.
+
self.version &= ~revlog.FLAG_GENERALDELTA
self._generaldelta = False
@@ -346,8 +347,8 @@
def reachableroots(self, minroot, heads, roots, includepath=False):
return self.index.reachableroots2(minroot, heads, roots, includepath)
- def headrevs(self):
- if self.filteredrevs:
+ def headrevs(self, revs=None):
+ if revs is None and self.filteredrevs:
try:
return self.index.headrevsfiltered(self.filteredrevs)
# AttributeError covers non-c-extension environments and
@@ -355,7 +356,7 @@
except AttributeError:
return self._headrevs()
- return super(changelog, self).headrevs()
+ return super(changelog, self).headrevs(revs)
def strip(self, *args, **kwargs):
# XXX make something better than assert
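
A sketch of the widened signature, assuming a loaded `repo`: with no argument
the filtered C fast path is kept, and with an explicit subset the computation
is delegated to revlog.headrevs(revs).

    cl = repo.changelog
    cl.headrevs()              # heads of the filtered changelog, as before
    cl.headrevs([10, 11, 12])  # heads among just these revisions
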
--- a/mercurial/chgserver.py Wed Jan 09 20:00:35 2019 -0800
+++ b/mercurial/chgserver.py Fri Jan 18 13:28:22 2019 -0500
@@ -66,8 +66,6 @@
procutil,
)
-_log = commandserver.log
-
def _hashlist(items):
"""return sha1 hexdigest for a list"""
return node.hex(hashlib.sha1(str(items)).digest())
@@ -186,7 +184,8 @@
mtimepaths = _getmtimepaths(ui)
confighash = _confighash(ui)
mtimehash = _mtimehash(mtimepaths)
- _log('confighash = %s mtimehash = %s\n' % (confighash, mtimehash))
+ ui.log('cmdserver', 'confighash = %s mtimehash = %s\n',
+ confighash, mtimehash)
return hashstate(confighash, mtimehash, mtimepaths)
def _newchgui(srcui, csystem, attachio):
@@ -201,7 +200,7 @@
def _runsystem(self, cmd, environ, cwd, out):
# fallback to the original system method if
# a. the output stream is not stdout (e.g. stderr, cStringIO),
- # b. or stdout is redirected by protectstdio(),
+ # b. or stdout is redirected by protectfinout(),
# because the chg client is not aware of these situations and
# will behave differently (i.e. write to stdout).
if (out is not self.fout
@@ -219,7 +218,7 @@
return chgui(srcui)
-def _loadnewui(srcui, args):
+def _loadnewui(srcui, args, cdebug):
from . import dispatch # avoid cycle
newui = srcui.__class__.load()
@@ -246,6 +245,12 @@
rpath = options['repository']
path, newlui = dispatch._getlocal(newui, rpath, wd=cwd)
+ extensions.populateui(newui)
+ commandserver.setuplogging(newui, fp=cdebug)
+ if newui is not newlui:
+ extensions.populateui(newlui)
+ commandserver.setuplogging(newlui, fp=cdebug)
+
return (newui, newlui)
class channeledsystem(object):
@@ -294,7 +299,6 @@
if not cmd:
break
if cmdtable and cmd in cmdtable:
- _log('pager subcommand: %s' % cmd)
cmdtable[cmd]()
else:
raise error.Abort(_('unexpected command: %s') % cmd)
@@ -309,10 +313,11 @@
]
class chgcmdserver(commandserver.server):
- def __init__(self, ui, repo, fin, fout, sock, hashstate, baseaddress):
+ def __init__(self, ui, repo, fin, fout, sock, prereposetups,
+ hashstate, baseaddress):
super(chgcmdserver, self).__init__(
_newchgui(ui, channeledsystem(fin, fout, 'S'), self.attachio),
- repo, fin, fout)
+ repo, fin, fout, prereposetups)
self.clientsock = sock
self._ioattached = False
self._oldios = [] # original (self.ch, ui.fp, fd) before "attachio"
@@ -338,7 +343,7 @@
# distinctive from "attachio\n" command consumed by client.read()
self.clientsock.sendall(struct.pack('>cI', 'I', 1))
clientfds = util.recvfds(self.clientsock.fileno())
- _log('received fds: %r\n' % clientfds)
+ self.ui.log('chgserver', 'received fds: %r\n', clientfds)
ui = self.ui
ui.flush()
@@ -419,7 +424,7 @@
args = self._readlist()
try:
- self.ui, lui = _loadnewui(self.ui, args)
+ self.ui, lui = _loadnewui(self.ui, args, self.cdebug)
except error.ParseError as inst:
dispatch._formatparse(self.ui.warn, inst)
self.ui.flush()
@@ -444,7 +449,7 @@
if newhash.confighash != self.hashstate.confighash:
addr = _hashaddress(self.baseaddress, newhash.confighash)
insts.append('redirect %s' % addr)
- _log('validate: %s\n' % insts)
+ self.ui.log('chgserver', 'validate: %s\n', insts)
self.cresult.write('\0'.join(insts) or '\0')
def chdir(self):
@@ -456,7 +461,7 @@
path = self._readstr()
if not path:
return
- _log('chdir to %r\n' % path)
+ self.ui.log('chgserver', 'chdir to %r\n', path)
os.chdir(path)
def setumask(self):
@@ -474,7 +479,7 @@
def _setumask(self, data):
mask = struct.unpack('>I', data)[0]
- _log('setumask %r\n' % mask)
+ self.ui.log('chgserver', 'setumask %r\n', mask)
os.umask(mask)
def runcommand(self):
@@ -499,7 +504,7 @@
newenv = dict(s.split('=', 1) for s in l)
except ValueError:
raise ValueError('unexpected value in setenv request')
- _log('setenv: %r\n' % sorted(newenv.keys()))
+ self.ui.log('chgserver', 'setenv: %r\n', sorted(newenv.keys()))
encoding.environ.clear()
encoding.environ.update(newenv)
@@ -515,7 +520,7 @@
def setprocname(self):
"""Change process title"""
name = self._readstr()
- _log('setprocname: %r\n' % name)
+ self.ui.log('chgserver', 'setprocname: %r\n', name)
procutil.setprocname(name)
capabilities['setprocname'] = setprocname
@@ -602,18 +607,19 @@
def shouldexit(self):
if not self._issocketowner():
- self.ui.debug('%s is not owned, exiting.\n' % self._realaddress)
+ self.ui.log(b'chgserver', b'%s is not owned, exiting.\n',
+ self._realaddress)
return True
if time.time() - self._lastactive > self._idletimeout:
- self.ui.debug('being idle too long. exiting.\n')
+ self.ui.log(b'chgserver', b'being idle too long. exiting.\n')
return True
return False
def newconnection(self):
self._lastactive = time.time()
- def createcmdserver(self, repo, conn, fin, fout):
- return chgcmdserver(self.ui, repo, fin, fout, conn,
+ def createcmdserver(self, repo, conn, fin, fout, prereposetups):
+ return chgcmdserver(self.ui, repo, fin, fout, conn, prereposetups,
self._hashstate, self._baseaddress)
def chgunixservice(ui, repo, opts):
--- a/mercurial/cmdutil.py Wed Jan 09 20:00:35 2019 -0800
+++ b/mercurial/cmdutil.py Fri Jan 18 13:28:22 2019 -0500
@@ -732,11 +732,10 @@
rewriteutil.precheck(repo, revs, 'change branch of')
root = repo[roots.first()]
- if not root.p1().branch() == label and label in repo.branchmap():
+ rpb = {parent.branch() for parent in root.parents()}
+ if label not in rpb and label in repo.branchmap():
raise error.Abort(_("a branch of the same name already exists"))
- if repo.revs('merge() and %ld', revs):
- raise error.Abort(_("cannot change branch of a merge commit"))
if repo.revs('obsolete() and %ld', revs):
raise error.Abort(_("cannot change branch of a obsolete changeset"))
@@ -2442,10 +2441,21 @@
extra.update(wctx.extra())
user = opts.get('user') or old.user()
- date = opts.get('date') or old.date()
-
- # Parse the date to allow comparison between date and old.date()
- date = dateutil.parsedate(date)
+
+ datemaydiffer = False # date-only change should be ignored?
+ if opts.get('date') and opts.get('currentdate'):
+ raise error.Abort(_('--date and --currentdate are mutually '
+ 'exclusive'))
+ if opts.get('date'):
+ date = dateutil.parsedate(opts.get('date'))
+ elif opts.get('currentdate'):
+ date = dateutil.makedate()
+ elif (ui.configbool('rewrite', 'update-timestamp')
+ and opts.get('currentdate') is None):
+ date = dateutil.makedate()
+ datemaydiffer = True
+ else:
+ date = old.date()
if len(old.parents()) > 1:
# ctx.files() isn't reliable for merges, so fall back to the
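
A hedged usage note for the new date handling: with rewrite.update-timestamp
set, amend stamps the current date unless --date/--currentdate is given,
while datemaydiffer keeps a date-only difference from forcing a new node.

    # same effect as "[rewrite] update-timestamp = yes" in an hgrc:
    repo.ui.setconfig('rewrite', 'update-timestamp', 'yes')
    # `hg commit --amend` now records the current date, yet an amend that
    # changes nothing else still short-circuits as "nothing changed"
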
@@ -2559,7 +2569,7 @@
if ((not changes)
and newdesc == old.description()
and user == old.user()
- and date == old.date()
+ and (date == old.date() or datemaydiffer)
and pureextra == old.extra()):
# nothing changed. continuing here would create a new node
# anyway because of the amend_source noise.
@@ -2578,7 +2588,7 @@
obsmetadata = None
if opts.get('note'):
obsmetadata = {'note': encoding.fromlocal(opts['note'])}
- backup = ui.configbool('ui', 'history-editing-backup')
+ backup = ui.configbool('rewrite', 'backup-bundle')
scmutil.cleanupnodes(repo, mapping, 'amend', metadata=obsmetadata,
fixphase=True, targetphase=commitphase,
backup=backup)
--- a/mercurial/color.py Wed Jan 09 20:00:35 2019 -0800
+++ b/mercurial/color.py Fri Jan 18 13:28:22 2019 -0500
@@ -245,7 +245,7 @@
# Since "ansi" could result in terminal gibberish, we error on the
# side of selecting "win32". However, if w32effects is not defined,
# we almost certainly don't support "win32", so don't even try.
- # w32ffects is not populated when stdout is redirected, so checking
+ # w32effects is not populated when stdout is redirected, so checking
# it first avoids win32 calls in a state known to error out.
if ansienviron or not w32effects or win32.enablevtmode():
realmode = 'ansi'
@@ -487,11 +487,7 @@
ansire = re.compile(b'\033\[([^m]*)m([^\033]*)(.*)',
re.MULTILINE | re.DOTALL)
- def win32print(ui, writefunc, *msgs, **opts):
- for text in msgs:
- _win32print(ui, text, writefunc, **opts)
-
- def _win32print(ui, text, writefunc, **opts):
+ def win32print(ui, writefunc, text, **opts):
label = opts.get(r'label', '')
attr = origattr
@@ -529,7 +525,7 @@
attr = mapcolor(int(sattr), attr)
ui.flush()
_kernel32.SetConsoleTextAttribute(stdout, attr)
- writefunc(m.group(2), **opts)
+ writefunc(m.group(2))
m = re.match(ansire, m.group(3))
finally:
# Explicitly reset original attributes
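
The win32print() simplification above processes a single pre-split text chunk,
walking the same ANSI regex match by match. A small runnable sketch of that
matching loop (pure re, none of the real console-attribute handling)::

  import re

  # same pattern shape as color.py: one SGR escape, the text it styles,
  # and the rest of the buffer
  ansire = re.compile(br'\033\[([^m]*)m([^\033]*)(.*)',
                      re.MULTILINE | re.DOTALL)

  def splitansi(text):
      """Toy of the win32print loop: yield (sgr-params, text-chunk)."""
      m = re.match(ansire, text)
      while m:
          yield m.group(1), m.group(2)
          m = re.match(ansire, m.group(3))

  print(list(splitansi(b'\033[31mred\033[0m plain')))
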
--- a/mercurial/commands.py Wed Jan 09 20:00:35 2019 -0800
+++ b/mercurial/commands.py Fri Jan 18 13:28:22 2019 -0500
@@ -320,11 +320,20 @@
# to mimic the behavior of Mercurial before version 1.5
opts['file'] = True
+ if (not opts.get('user') and not opts.get('changeset')
+ and not opts.get('date') and not opts.get('file')):
+ opts['number'] = True
+
+ linenumber = opts.get('line_number') is not None
+ if linenumber and (not opts.get('changeset')) and (not opts.get('number')):
+ raise error.Abort(_('at least one of -n/-c is required for -l'))
+
rev = opts.get('rev')
if rev:
repo = scmutil.unhidehashlikerevs(repo, [rev], 'nowarn')
ctx = scmutil.revsingle(repo, rev)
+ ui.pager('annotate')
rootfm = ui.formatter('annotate', opts)
if ui.debugflag:
shorthex = pycompat.identity
@@ -358,25 +367,20 @@
formatrev = b'%d'.__mod__
formathex = shorthex
- opmap = [('user', ' ', lambda x: x.fctx.user(), ui.shortuser),
- ('rev', ' ', lambda x: scmutil.intrev(x.fctx), formatrev),
- ('node', ' ', lambda x: hex(scmutil.binnode(x.fctx)), formathex),
- ('date', ' ', lambda x: x.fctx.date(), util.cachefunc(datefunc)),
- ('path', ' ', lambda x: x.fctx.path(), pycompat.bytestr),
- ('lineno', ':', lambda x: x.lineno, pycompat.bytestr),
- ]
- opnamemap = {'rev': 'number', 'node': 'changeset', 'path': 'file',
- 'lineno': 'line_number'}
-
- if (not opts.get('user') and not opts.get('changeset')
- and not opts.get('date') and not opts.get('file')):
- opts['number'] = True
-
- linenumber = opts.get('line_number') is not None
- if linenumber and (not opts.get('changeset')) and (not opts.get('number')):
- raise error.Abort(_('at least one of -n/-c is required for -l'))
-
- ui.pager('annotate')
+ opmap = [
+ ('user', ' ', lambda x: x.fctx.user(), ui.shortuser),
+ ('rev', ' ', lambda x: scmutil.intrev(x.fctx), formatrev),
+ ('node', ' ', lambda x: hex(scmutil.binnode(x.fctx)), formathex),
+ ('date', ' ', lambda x: x.fctx.date(), util.cachefunc(datefunc)),
+ ('path', ' ', lambda x: x.fctx.path(), pycompat.bytestr),
+ ('lineno', ':', lambda x: x.lineno, pycompat.bytestr),
+ ]
+ opnamemap = {
+ 'rev': 'number',
+ 'node': 'changeset',
+ 'path': 'file',
+ 'lineno': 'line_number',
+ }
if rootfm.isplain():
def makefunc(get, fmt):
@@ -408,8 +412,7 @@
rootfm.startitem()
rootfm.data(path=abs)
if not opts.get('text') and fctx.isbinary():
- rootfm.plain(_("%s: binary file\n")
- % ((pats and m.rel(abs)) or abs))
+ rootfm.plain(_("%s: binary file\n") % m.rel(abs))
continue
fm = rootfm.nested('lines', tmpl='{rev}: {line}')
@@ -1129,6 +1132,7 @@
[('a', 'active', False,
_('show only branches that have unmerged heads (DEPRECATED)')),
('c', 'closed', False, _('show normal and closed branches')),
+ ('r', 'rev', [], _('show branch name(s) of the given rev'))
] + formatteropts,
_('[-c]'),
helpcategory=command.CATEGORY_CHANGE_ORGANIZATION,
@@ -1158,6 +1162,13 @@
"""
opts = pycompat.byteskwargs(opts)
+ revs = opts.get('rev')
+ selectedbranches = None
+ if revs:
+ revs = scmutil.revrange(repo, revs)
+ getbi = repo.revbranchcache().branchinfo
+ selectedbranches = {getbi(r)[0] for r in revs}
+
ui.pager('branches')
fm = ui.formatter('branches', opts)
hexfunc = fm.hexfunc
@@ -1165,6 +1176,8 @@
allheads = set(repo.heads())
branches = []
for tag, heads, tip, isclosed in repo.branchmap().iterbranches():
+ if selectedbranches is not None and tag not in selectedbranches:
+ continue
isactive = False
if not isclosed:
openheads = set(repo.branchmap().iteropen(heads))
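
The new `branches -r` handling above resolves the revisions to branch names
once via the rev-branch cache, then filters the iterbranches() output against
that set. A toy of the same filtering (a dict lookup standing in for
repo.revbranchcache().branchinfo)::

  def selectbranches(revs, branchinfo, iterbranches):
      """Toy of the -r filtering: branchinfo(rev)[0] mirrors the
      rev-branch cache, iterbranches the branchmap entries."""
      selected = {branchinfo(r)[0] for r in revs}
      return [b for b in iterbranches if b[0] in selected]

  info = {0: ('default',), 1: ('stable',)}
  entries = [('default', [], 0, False), ('stable', [], 1, True)]
  print(selectbranches([0], info.__getitem__, entries))
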
@@ -2249,6 +2262,8 @@
@command(
'graft',
[('r', 'rev', [], _('revisions to graft'), _('REV')),
+ ('', 'base', '',
+ _('base revision when doing the graft merge (ADVANCED)'), _('REV')),
('c', 'continue', False, _('resume interrupted graft')),
('', 'stop', False, _('stop interrupted graft')),
('', 'abort', False, _('abort interrupted graft')),
@@ -2294,6 +2309,35 @@
.. container:: verbose
+ The --base option exposes more of how graft internally uses merge with a
+ custom base revision. --base can be used to specify an ancestor other
+ than the first and only parent.
+
+ The command::
+
+ hg graft -r 345 --base 234
+
+ is thus pretty much the same as::
+
+ hg diff -r 234 -r 345 | hg import
+
+ but using merge to resolve conflicts and track moved files.
+
+ The result of a merge can thus be backported as a single commit by
+ specifying one of the merge parents as base, effectively grafting the
+ changes from the other side.
+
+ It is also possible to collapse multiple changesets and clean up history
+ by specifying another ancestor as base, much like rebase --collapse
+ --keep.
+
+ The commit message can be tweaked after the fact using commit --amend.
+
+ For using non-ancestors as the base to back out changes, see the backout
+ command and its hidden --parent option.
+
+ .. container:: verbose
+
Examples:
- copy a single change to the stable branch and edit its description::
@@ -2317,6 +2361,15 @@
hg log -r "sort(all(), date)"
+ - backport the result of a merge as a single commit::
+
+ hg graft -r 123 --base 123^
+
+ - land a feature branch as one changeset::
+
+ hg up -cr default
+ hg graft -r featureX --base "ancestor('featureX', 'default')"
+
See :hg:`help revisions` for more about specifying revisions.
Returns 0 on successful completion.
@@ -2332,11 +2385,18 @@
revs = list(revs)
revs.extend(opts.get('rev'))
+ basectx = None
+ if opts.get('base'):
+ basectx = scmutil.revsingle(repo, opts['base'], None)
# a dict of data to be stored in state file
statedata = {}
# list of new nodes created by ongoing graft
statedata['newnodes'] = []
+ if opts.get('user') and opts.get('currentuser'):
+ raise error.Abort(_('--user and --currentuser are mutually exclusive'))
+ if opts.get('date') and opts.get('currentdate'):
+ raise error.Abort(_('--date and --currentdate are mutually exclusive'))
if not opts.get('user') and opts.get('currentuser'):
opts['user'] = ui.username()
if not opts.get('date') and opts.get('currentdate'):
@@ -2411,13 +2471,16 @@
revs = scmutil.revrange(repo, revs)
skipped = set()
- # check for merges
- for rev in repo.revs('%ld and merge()', revs):
- ui.warn(_('skipping ungraftable merge revision %d\n') % rev)
- skipped.add(rev)
+ if basectx is None:
+ # check for merges
+ for rev in repo.revs('%ld and merge()', revs):
+ ui.warn(_('skipping ungraftable merge revision %d\n') % rev)
+ skipped.add(rev)
revs = [r for r in revs if r not in skipped]
if not revs:
return -1
+ if basectx is not None and len(revs) != 1:
+ raise error.Abort(_('only one revision allowed with --base'))
# Don't check in the --continue case, in effect retaining --force across
# --continues. That's because without --force, any revisions we decided to
@@ -2425,7 +2488,7 @@
# way to the graftstate. With --force, any revisions we would have otherwise
# skipped would not have been filtered out, and if they hadn't been applied
# already, they'd have been in the graftstate.
- if not (cont or opts.get('force')):
+ if not (cont or opts.get('force')) and basectx is None:
# check for ancestors of dest branch
crev = repo['.'].rev()
ancestors = repo.changelog.ancestors([crev], inclusive=True)
@@ -2522,8 +2585,9 @@
if not cont:
# perform the graft merge with p1(rev) as 'ancestor'
overrides = {('ui', 'forcemerge'): opts.get('tool', '')}
+ base = ctx.p1() if basectx is None else basectx
with ui.configoverride(overrides, 'graft'):
- stats = mergemod.graft(repo, ctx, ctx.p1(), ['local', 'graft'])
+ stats = mergemod.graft(repo, ctx, base, ['local', 'graft'])
# report any conflicts
if stats.unresolvedcount > 0:
# write out state for --continue
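
With --base, graft keeps its usual merge machinery and only swaps the
ancestor: merge revisions are no longer skipped, exactly one revision is
allowed, and basectx replaces ctx.p1(). A toy of that selection logic
(strings standing in for real changectx objects)::

  def graftbase(revs, merges, basectx, p1):
      """Toy of the checks above: without --base merge revisions are
      skipped; with --base exactly one revision is allowed and basectx
      replaces p1 as the merge ancestor."""
      if basectx is None:
          revs = [r for r in revs if r not in merges]
      elif len(revs) != 1:
          raise ValueError('only one revision allowed with --base')
      return revs, (p1 if basectx is None else basectx)

  print(graftbase([5], merges={4}, basectx='rev3', p1='rev4'))
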
@@ -4222,8 +4286,8 @@
opts = pycompat.byteskwargs(opts)
# search for a unique phase argument
targetphase = None
- for idx, name in enumerate(phases.phasenames):
- if opts.get(name, False):
+ for idx, name in enumerate(phases.cmdphasenames):
+ if opts[name]:
if targetphase is not None:
raise error.Abort(_('only one phase can be specified'))
targetphase = idx
@@ -4364,49 +4428,47 @@
revs, checkout = hg.addbranchrevs(repo, other, branches,
opts.get('rev'))
-
pullopargs = {}
- if opts.get('bookmark'):
- if not revs:
- revs = []
- # The list of bookmark used here is not the one used to actually
- # update the bookmark name. This can result in the revision pulled
- # not ending up with the name of the bookmark because of a race
- # condition on the server. (See issue 4689 for details)
- remotebookmarks = other.listkeys('bookmarks')
+
+ nodes = None
+ if opts.get('bookmark') or revs:
+ # The list of bookmarks used here is the same one used to actually
+ # update the bookmark names, avoiding the race from issue 4689. We do
+ # all lookup and bookmark queries in one go so they see the same
+ # version of the server state (issue 4700).
+ nodes = []
+ fnodes = []
+ revs = revs or []
+ if revs and not other.capable('lookup'):
+ err = _("other repository doesn't support revision lookup, "
+ "so a rev cannot be specified.")
+ raise error.Abort(err)
+ with other.commandexecutor() as e:
+ fremotebookmarks = e.callcommand('listkeys', {
+ 'namespace': 'bookmarks'
+ })
+ for r in revs:
+ fnodes.append(e.callcommand('lookup', {'key': r}))
+ remotebookmarks = fremotebookmarks.result()
remotebookmarks = bookmarks.unhexlifybookmarks(remotebookmarks)
pullopargs['remotebookmarks'] = remotebookmarks
- for b in opts['bookmark']:
+ for b in opts.get('bookmark', []):
b = repo._bookmarks.expandname(b)
if b not in remotebookmarks:
raise error.Abort(_('remote bookmark %s not found!') % b)
- revs.append(hex(remotebookmarks[b]))
-
- if revs:
- try:
- # When 'rev' is a bookmark name, we cannot guarantee that it
- # will be updated with that name because of a race condition
- # server side. (See issue 4689 for details)
- oldrevs = revs
- revs = [] # actually, nodes
- for r in oldrevs:
- with other.commandexecutor() as e:
- node = e.callcommand('lookup', {'key': r}).result()
-
- revs.append(node)
- if r == checkout:
- checkout = node
- except error.CapabilityError:
- err = _("other repository doesn't support revision lookup, "
- "so a rev cannot be specified.")
- raise error.Abort(err)
+ nodes.append(remotebookmarks[b])
+ for i, rev in enumerate(revs):
+ node = fnodes[i].result()
+ nodes.append(node)
+ if rev == checkout:
+ checkout = node
wlock = util.nullcontextmanager()
if opts.get('update'):
wlock = repo.wlock()
with wlock:
pullopargs.update(opts.get('opargs', {}))
- modheads = exchange.pull(repo, other, heads=revs,
+ modheads = exchange.pull(repo, other, heads=nodes,
force=opts.get('force'),
bookmarks=opts.get('bookmark', ()),
opargs=pullopargs).cgresult
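
The pull rework above queues the listkeys call and every lookup on one
command executor before reading any result, so bookmarks and nodes reflect a
single view of the server state (issues 4689/4700). A toy future-based
executor showing the queue-then-resolve pattern (hypothetical names
throughout)::

  class toyexecutor(object):
      """Stand-in for peer.commandexecutor(): every call is queued and
      returns a future; all results come from one snapshot of state."""
      def __init__(self, snapshot):
          self.snapshot = snapshot
      def __enter__(self):
          return self
      def __exit__(self, *exc):
          return False
      def callcommand(self, name, args):
          value = self.snapshot[name](args)
          return type('fut', (), {'result': lambda self: value})()

  state = {'listkeys': lambda a: {'@': 'abc123'},
           'lookup': lambda a: a['key'] + '-node'}
  with toyexecutor(state) as e:
      fbooks = e.callcommand('listkeys', {'namespace': 'bookmarks'})
      fnodes = [e.callcommand('lookup', {'key': r}) for r in ['tip']]
      # results are read only after everything is queued, mirroring the
      # single round trip in the hunk above
      books, nodes = fbooks.result(), [f.result() for f in fnodes]
  print(books, nodes)
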
@@ -4450,6 +4512,7 @@
_('a specific branch you would like to push'), _('BRANCH')),
('', 'new-branch', False, _('allow pushing a new branch')),
('', 'pushvars', [], _('variables that can be sent to server (ADVANCED)')),
+ ('', 'publish', False, _('push the changeset as public (EXPERIMENTAL)')),
] + remoteopts,
_('[-f] [-r REV]... [-e CMD] [--remotecmd CMD] [DEST]'),
helpcategory=command.CATEGORY_REMOTE_REPO_MANAGEMENT,
@@ -4567,6 +4630,7 @@
pushop = exchange.push(repo, other, opts.get('force'), revs=revs,
newbranch=opts.get('new_branch'),
bookmarks=opts.get('bookmark', ()),
+ publish=opts.get('publish'),
opargs=opargs)
result = not pushop.cgresult
@@ -4871,8 +4935,7 @@
if mark:
if markcheck:
- with repo.wvfs(f) as fobj:
- fdata = fobj.read()
+ fdata = repo.wvfs.tryread(f)
if filemerge.hasconflictmarkers(fdata) and \
ms[f] != mergemod.MERGE_RECORD_RESOLVED:
hasconflictmarkers.append(f)
--- a/mercurial/commandserver.py Wed Jan 09 20:00:35 2019 -0800
+++ b/mercurial/commandserver.py Fri Jan 18 13:28:22 2019 -0500
@@ -26,23 +26,17 @@
from . import (
encoding,
error,
+ loggingutil,
+ pycompat,
+ repocache,
util,
+ vfs as vfsmod,
)
from .utils import (
+ cborutil,
procutil,
)
-logfile = None
-
-def log(*args):
- if not logfile:
- return
-
- for a in args:
- logfile.write(str(a))
-
- logfile.flush()
-
class channeledoutput(object):
"""
Write data to out in the following format:
@@ -70,6 +64,34 @@
raise AttributeError(attr)
return getattr(self.out, attr)
+class channeledmessage(object):
+ """
+ Write encoded message and metadata to out in the following format:
+
+ data length (unsigned int),
+ encoded message and metadata, as a flat key-value dict.
+
+ Each message should have a 'type' attribute. Messages of unknown type
+ should be ignored.
+ """
+
+ # teach ui that write() can take **opts
+ structured = True
+
+ def __init__(self, out, channel, encodename, encodefn):
+ self._cout = channeledoutput(out, channel)
+ self.encoding = encodename
+ self._encodefn = encodefn
+
+ def write(self, data, **opts):
+ opts = pycompat.byteskwargs(opts)
+ if data is not None:
+ opts[b'data'] = data
+ self._cout.write(self._encodefn(opts))
+
+ def __getattr__(self, attr):
+ return getattr(self._cout, attr)
+
class channeledinput(object):
"""
Read data from in_.
@@ -156,24 +178,28 @@
raise AttributeError(attr)
return getattr(self.in_, attr)
+_messageencoders = {
+ b'cbor': lambda v: b''.join(cborutil.streamencode(v)),
+}
+
+def _selectmessageencoder(ui):
+ # experimental config: cmdserver.message-encodings
+ encnames = ui.configlist(b'cmdserver', b'message-encodings')
+ for n in encnames:
+ f = _messageencoders.get(n)
+ if f:
+ return n, f
+ raise error.Abort(b'no supported message encodings: %s'
+ % b' '.join(encnames))
+
class server(object):
"""
Listens for commands on fin, runs them and writes the output on a channel
based stream to fout.
"""
- def __init__(self, ui, repo, fin, fout):
+ def __init__(self, ui, repo, fin, fout, prereposetups=None):
self.cwd = encoding.getcwd()
- # developer config: cmdserver.log
- logpath = ui.config("cmdserver", "log")
- if logpath:
- global logfile
- if logpath == '-':
- # write log on a special 'd' (debug) channel
- logfile = channeledoutput(fout, 'd')
- else:
- logfile = open(logpath, 'a')
-
if repo:
# the ui here is really the repo ui so take its baseui so we don't
# end up with its local configuration
@@ -183,12 +209,28 @@
else:
self.ui = ui
self.repo = self.repoui = None
+ self._prereposetups = prereposetups
+ self.cdebug = channeledoutput(fout, 'd')
self.cerr = channeledoutput(fout, 'e')
self.cout = channeledoutput(fout, 'o')
self.cin = channeledinput(fin, fout, 'I')
self.cresult = channeledoutput(fout, 'r')
+ if self.ui.config(b'cmdserver', b'log') == b'-':
+ # switch log stream of server's ui to the 'd' (debug) channel
+ # (don't touch repo.ui as its lifetime is longer than the server)
+ self.ui = self.ui.copy()
+ setuplogging(self.ui, repo=None, fp=self.cdebug)
+
+ # TODO: add this to help/config.txt when stabilized
+ # ``channel``
+ # Use separate channel for structured output. (Command-server only)
+ self.cmsg = None
+ if ui.config(b'ui', b'message-output') == b'channel':
+ encname, encfn = _selectmessageencoder(ui)
+ self.cmsg = channeledmessage(fout, b'm', encname, encfn)
+
self.client = fin
def cleanup(self):
@@ -254,7 +296,8 @@
ui.setconfig('ui', 'nontty', 'true', 'commandserver')
req = dispatch.request(args[:], copiedui, self.repo, self.cin,
- self.cout, self.cerr)
+ self.cout, self.cerr, self.cmsg,
+ prereposetups=self._prereposetups)
try:
ret = dispatch.dispatch(req) & 255
@@ -289,6 +332,8 @@
hellomsg += '\n'
hellomsg += 'encoding: ' + encoding.encoding
hellomsg += '\n'
+ if self.cmsg:
+ hellomsg += 'message-encoding: %s\n' % self.cmsg.encoding
hellomsg += 'pid: %d' % procutil.getpid()
if util.safehasattr(os, 'getpgid'):
hellomsg += '\n'
@@ -307,6 +352,41 @@
return 0
+def setuplogging(ui, repo=None, fp=None):
+ """Set up server logging facility
+
+ If cmdserver.log is '-', log messages will be sent to the given fp,
+ which should be the 'd' channel while a client is connected, and the
+ stderr of the server process otherwise.
+ """
+ # developer config: cmdserver.log
+ logpath = ui.config(b'cmdserver', b'log')
+ if not logpath:
+ return
+ # developer config: cmdserver.track-log
+ tracked = set(ui.configlist(b'cmdserver', b'track-log'))
+
+ if logpath == b'-' and fp:
+ logger = loggingutil.fileobjectlogger(fp, tracked)
+ elif logpath == b'-':
+ logger = loggingutil.fileobjectlogger(ui.ferr, tracked)
+ else:
+ logpath = os.path.abspath(util.expandpath(logpath))
+ # developer config: cmdserver.max-log-files
+ maxfiles = ui.configint(b'cmdserver', b'max-log-files')
+ # developer config: cmdserver.max-log-size
+ maxsize = ui.configbytes(b'cmdserver', b'max-log-size')
+ vfs = vfsmod.vfs(os.path.dirname(logpath))
+ logger = loggingutil.filelogger(vfs, os.path.basename(logpath), tracked,
+ maxfiles=maxfiles, maxsize=maxsize)
+
+ targetuis = {ui}
+ if repo:
+ targetuis.add(repo.baseui)
+ targetuis.add(repo.ui)
+ for u in targetuis:
+ u.setlogger(b'cmdserver', logger)
+
class pipeservice(object):
def __init__(self, ui, repo, opts):
self.ui = ui
@@ -319,9 +399,9 @@
ui = self.ui
# redirect stdio to null device so that broken extensions or in-process
# hooks will never cause corruption of channel protocol.
- with procutil.protectedstdio(ui.fin, ui.fout) as (fin, fout):
+ with ui.protectedfinout() as (fin, fout):
+ sv = server(ui, self.repo, fin, fout)
try:
- sv = server(ui, self.repo, fin, fout)
return sv.serve()
finally:
sv.cleanup()
@@ -343,12 +423,12 @@
# same state inherited from parent.
random.seed()
-def _serverequest(ui, repo, conn, createcmdserver):
+def _serverequest(ui, repo, conn, createcmdserver, prereposetups):
fin = conn.makefile(r'rb')
fout = conn.makefile(r'wb')
sv = None
try:
- sv = createcmdserver(repo, conn, fin, fout)
+ sv = createcmdserver(repo, conn, fin, fout, prereposetups)
try:
sv.serve()
# handle exceptions that may be raised by command server. most of
@@ -407,10 +487,10 @@
def newconnection(self):
"""Called when main process notices new connection"""
- def createcmdserver(self, repo, conn, fin, fout):
+ def createcmdserver(self, repo, conn, fin, fout, prereposetups):
"""Create new command server instance; called in the process that
serves for the current connection"""
- return server(self.ui, repo, fin, fout)
+ return server(self.ui, repo, fin, fout, prereposetups)
class unixforkingservice(object):
"""
@@ -427,18 +507,31 @@
raise error.Abort(_('no socket path specified with --address'))
self._servicehandler = handler or unixservicehandler(ui)
self._sock = None
+ self._mainipc = None
+ self._workeripc = None
self._oldsigchldhandler = None
self._workerpids = set() # updated by signal handler; do not iterate
self._socketunlinked = None
+ # experimental config: cmdserver.max-repo-cache
+ maxlen = ui.configint(b'cmdserver', b'max-repo-cache')
+ if maxlen < 0:
+ raise error.Abort(_('negative max-repo-cache size not allowed'))
+ self._repoloader = repocache.repoloader(ui, maxlen)
def init(self):
self._sock = socket.socket(socket.AF_UNIX)
+ # IPC channel from many workers to one main process; this is actually
+ # a uni-directional pipe, but is backed by a DGRAM socket so each
+ # message can be easily separated.
+ o = socket.socketpair(socket.AF_UNIX, socket.SOCK_DGRAM)
+ self._mainipc, self._workeripc = o
self._servicehandler.bindsocket(self._sock, self.address)
if util.safehasattr(procutil, 'unblocksignal'):
procutil.unblocksignal(signal.SIGCHLD)
o = signal.signal(signal.SIGCHLD, self._sigchldhandler)
self._oldsigchldhandler = o
self._socketunlinked = False
+ self._repoloader.start()
def _unlinksocket(self):
if not self._socketunlinked:
@@ -448,7 +541,10 @@
def _cleanup(self):
signal.signal(signal.SIGCHLD, self._oldsigchldhandler)
self._sock.close()
+ self._mainipc.close()
+ self._workeripc.close()
self._unlinksocket()
+ self._repoloader.stop()
# don't kill child processes as they have active clients, just wait
self._reapworkers(0)
@@ -462,7 +558,10 @@
exiting = False
h = self._servicehandler
selector = selectors.DefaultSelector()
- selector.register(self._sock, selectors.EVENT_READ)
+ selector.register(self._sock, selectors.EVENT_READ,
+ self._acceptnewconnection)
+ selector.register(self._mainipc, selectors.EVENT_READ,
+ self._handlemainipc)
while True:
if not exiting and h.shouldexit():
# clients can no longer connect() to the domain socket, so
@@ -473,47 +572,69 @@
self._unlinksocket()
exiting = True
try:
- ready = selector.select(timeout=h.pollinterval)
+ events = selector.select(timeout=h.pollinterval)
except OSError as inst:
# selectors2 raises ETIMEDOUT if timeout exceeded while
# handling signal interrupt. That's probably wrong, but
# we can easily get around it.
if inst.errno != errno.ETIMEDOUT:
raise
- ready = []
- if not ready:
+ events = []
+ if not events:
# only exit if we completed all queued requests
if exiting:
break
continue
- try:
- conn, _addr = self._sock.accept()
- except socket.error as inst:
- if inst.args[0] == errno.EINTR:
- continue
- raise
+ for key, _mask in events:
+ key.data(key.fileobj, selector)
+ selector.close()
+
+ def _acceptnewconnection(self, sock, selector):
+ h = self._servicehandler
+ try:
+ conn, _addr = sock.accept()
+ except socket.error as inst:
+ if inst.args[0] == errno.EINTR:
+ return
+ raise
- pid = os.fork()
- if pid:
+ # Future improvement: On Python 3.7, maybe gc.freeze() can be used
+ # to prevent COW memory from being touched by GC.
+ # https://instagram-engineering.com/
+ # copy-on-write-friendly-python-garbage-collection-ad6ed5233ddf
+ pid = os.fork()
+ if pid:
+ try:
+ self.ui.log(b'cmdserver', b'forked worker process (pid=%d)\n',
+ pid)
+ self._workerpids.add(pid)
+ h.newconnection()
+ finally:
+ conn.close() # release handle in parent process
+ else:
+ try:
+ selector.close()
+ sock.close()
+ self._mainipc.close()
+ self._runworker(conn)
+ conn.close()
+ self._workeripc.close()
+ os._exit(0)
+ except: # never return, hence no re-raises
try:
- self.ui.debug('forked worker process (pid=%d)\n' % pid)
- self._workerpids.add(pid)
- h.newconnection()
+ self.ui.traceback(force=True)
finally:
- conn.close() # release handle in parent process
- else:
- try:
- selector.close()
- self._sock.close()
- self._runworker(conn)
- conn.close()
- os._exit(0)
- except: # never return, hence no re-raises
- try:
- self.ui.traceback(force=True)
- finally:
- os._exit(255)
- selector.close()
+ os._exit(255)
+
+ def _handlemainipc(self, sock, selector):
+ """Process messages sent from a worker"""
+ try:
+ path = sock.recv(32768) # large enough to receive path
+ except socket.error as inst:
+ if inst.args[0] == errno.EINTR:
+ return
+ raise
+ self._repoloader.load(path)
def _sigchldhandler(self, signal, frame):
self._reapworkers(os.WNOHANG)
@@ -533,7 +654,7 @@
if pid == 0:
# no waitable child processes
return
- self.ui.debug('worker process exited (pid=%d)\n' % pid)
+ self.ui.log(b'cmdserver', b'worker process exited (pid=%d)\n', pid)
self._workerpids.discard(pid)
def _runworker(self, conn):
@@ -541,6 +662,29 @@
_initworkerprocess()
h = self._servicehandler
try:
- _serverequest(self.ui, self.repo, conn, h.createcmdserver)
+ _serverequest(self.ui, self.repo, conn, h.createcmdserver,
+ prereposetups=[self._reposetup])
finally:
gc.collect() # trigger __del__ since worker process uses os._exit
+
+ def _reposetup(self, ui, repo):
+ if not repo.local():
+ return
+
+ class unixcmdserverrepo(repo.__class__):
+ def close(self):
+ super(unixcmdserverrepo, self).close()
+ try:
+ self._cmdserveripc.send(self.root)
+ except socket.error:
+ self.ui.log(b'cmdserver',
+ b'failed to send repo root to master\n')
+
+ repo.__class__ = unixcmdserverrepo
+ repo._cmdserveripc = self._workeripc
+
+ cachedrepo = self._repoloader.get(repo.root)
+ if cachedrepo is None:
+ return
+ repo.ui.log(b'repocache', b'repo from cache: %s\n', repo.root)
+ repocache.copycache(cachedrepo, repo)
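
channeledmessage reuses the channel framing for the new 'm' channel, carrying
an encoded key-value payload. A sketch of one such frame, assuming the
channel-byte plus big-endian unsigned-int length packing used by the output
channels (an assumption here), with json standing in for the cbor encoder::

  import json
  import struct

  def frame(channel, payload):
      # assumed framing: channel byte, unsigned int payload length
      # (big-endian), then the payload itself
      return struct.pack('>cI', channel, len(payload)) + payload

  def message(data, **opts):
      # channeledmessage folds 'data' into the metadata dict before
      # encoding; json is only a stand-in for cborutil.streamencode
      if data is not None:
          opts['data'] = data
      return frame(b'm', json.dumps(opts).encode('ascii'))

  print(message('commit ready', type='status'))
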
--- a/mercurial/configitems.py Wed Jan 09 20:00:35 2019 -0800
+++ b/mercurial/configitems.py Fri Jan 18 13:28:22 2019 -0500
@@ -173,6 +173,21 @@
coreconfigitem('cmdserver', 'log',
default=None,
)
+coreconfigitem('cmdserver', 'max-log-files',
+ default=7,
+)
+coreconfigitem('cmdserver', 'max-log-size',
+ default='1 MB',
+)
+coreconfigitem('cmdserver', 'max-repo-cache',
+ default=0,
+)
+coreconfigitem('cmdserver', 'message-encodings',
+ default=list,
+)
+coreconfigitem('cmdserver', 'track-log',
+ default=lambda: ['chgserver', 'cmdserver', 'repocache'],
+)
coreconfigitem('color', '.*',
default=None,
generic=True,
@@ -329,6 +344,9 @@
coreconfigitem('devel', 'bundle2.debug',
default=False,
)
+coreconfigitem('devel', 'bundle.delta',
+ default='',
+)
coreconfigitem('devel', 'cache-vfs',
default=None,
)
@@ -443,6 +461,9 @@
coreconfigitem('experimental', 'archivemetatemplate',
default=dynamicdefault,
)
+coreconfigitem('experimental', 'auto-publish',
+ default='publish',
+)
coreconfigitem('experimental', 'bundle-phases',
default=False,
)
@@ -535,9 +556,6 @@
coreconfigitem('experimental', 'mergetempdirprefix',
default=None,
)
-coreconfigitem('experimental', 'mmapindexthreshold',
- default=None,
-)
coreconfigitem('experimental', 'narrow',
default=False,
)
@@ -679,7 +697,7 @@
default=None,
)
coreconfigitem('format', 'sparse-revlog',
- default=False,
+ default=True,
)
coreconfigitem('format', 'usefncache',
default=True,
@@ -699,6 +717,14 @@
coreconfigitem('fsmonitor', 'warn_update_file_count',
default=50000,
)
+coreconfigitem('help', 'hidden-command\..*',
+ default=False,
+ generic=True,
+)
+coreconfigitem('help', 'hidden-topic\..*',
+ default=False,
+ generic=True,
+)
coreconfigitem('hooks', '.*',
default=dynamicdefault,
generic=True,
@@ -961,6 +987,17 @@
coreconfigitem('push', 'pushvars.server',
default=False,
)
+coreconfigitem('storage', 'mmap-threshold',
+ default='1MB',
+ alias=[('experimental', 'mmapindexthreshold')],
+)
+coreconfigitem('rewrite', 'backup-bundle',
+ default=True,
+ alias=[('ui', 'history-editing-backup')],
+)
+coreconfigitem('rewrite', 'update-timestamp',
+ default=False,
+)
coreconfigitem('storage', 'new-repo-backend',
default='revlogv1',
)
@@ -1135,9 +1172,6 @@
coreconfigitem('ui', 'graphnodetemplate',
default=None,
)
-coreconfigitem('ui', 'history-editing-backup',
- default=True,
-)
coreconfigitem('ui', 'interactive',
default=None,
)
@@ -1170,6 +1204,9 @@
'{ifeq(branch, "default", "", "{branch} ")}'
'- {author|user}: {desc|firstline}')
)
+coreconfigitem('ui', 'message-output',
+ default='stdio',
+)
coreconfigitem('ui', 'nontty',
default=False,
)
@@ -1182,6 +1219,9 @@
coreconfigitem('ui', 'patch',
default=None,
)
+coreconfigitem('ui', 'pre-merge-tool-output-template',
+ default=None,
+)
coreconfigitem('ui', 'portablefilenames',
default='warn',
)
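
Several of the new items keep the old option names as aliases (e.g.
rewrite.backup-bundle falling back to ui.history-editing-backup), so existing
configurations keep working. A toy of that resolution order::

  def configwithalias(cfg, section, name, aliases, default):
      """Toy of coreconfigitem alias handling: the new (section, name)
      wins, then each aliased old location, then the default."""
      for loc in [(section, name)] + aliases:
          if loc in cfg:
              return cfg[loc]
      return default

  usercfg = {('ui', 'history-editing-backup'): False}  # legacy spelling
  print(configwithalias(usercfg, 'rewrite', 'backup-bundle',
                        [('ui', 'history-editing-backup')], True))
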
--- a/mercurial/context.py Wed Jan 09 20:00:35 2019 -0800
+++ b/mercurial/context.py Fri Jan 18 13:28:22 2019 -0500
@@ -562,9 +562,7 @@
@propertycache
def _changeid(self):
- if r'_changeid' in self.__dict__:
- return self._changeid
- elif r'_changectx' in self.__dict__:
+ if r'_changectx' in self.__dict__:
return self._changectx.rev()
elif r'_descendantrev' in self.__dict__:
# this file context was created from a revision with a known
@@ -704,17 +702,27 @@
if fctx._customcmp:
return fctx.cmp(self)
- if (fctx._filenode is None
- and (self._repo._encodefilterpats
- # if file data starts with '\1\n', empty metadata block is
- # prepended, which adds 4 bytes to filelog.size().
- or self.size() - 4 == fctx.size())
- or self.size() == fctx.size()):
+ if self._filenode is None:
+ raise error.ProgrammingError(
+ 'filectx.cmp() must be reimplemented if not backed by revlog')
+
+ if fctx._filenode is None:
+ if self._repo._encodefilterpats:
+ # can't rely on size() because wdir content may be decoded
+ return self._filelog.cmp(self._filenode, fctx.data())
+ if self.size() - 4 == fctx.size():
+ # size() can match:
+ # if file data starts with '\1\n', empty metadata block is
+ # prepended, which adds 4 bytes to filelog.size().
+ return self._filelog.cmp(self._filenode, fctx.data())
+ if self.size() == fctx.size():
+ # size() matches: need to compare content
return self._filelog.cmp(self._filenode, fctx.data())
+ # size() differs
return True
- def _adjustlinkrev(self, srcrev, inclusive=False):
+ def _adjustlinkrev(self, srcrev, inclusive=False, stoprev=None):
"""return the first ancestor of <srcrev> introducing <fnode>
If the linkrev of the file revision does not point to an ancestor of
@@ -723,6 +731,10 @@
:srcrev: the changeset revision we search ancestors from
:inclusive: if true, the src revision will also be checked
+ :stoprev: an optional revision to stop the walk at. If no introduction
+ of this file content could be found before this floor
+ revision, the function will return "None" and stop its
+ iteration.
"""
repo = self._repo
cl = repo.unfiltered().changelog
@@ -750,6 +762,8 @@
fnode = self._filenode
path = self._path
for a in iteranc:
+ if stoprev is not None and a < stoprev:
+ return None
ac = cl.read(a) # get changeset data (we avoid object creation)
if path in ac[3]: # checking the 'files' field.
# The file has been touched, check if the content is
@@ -762,6 +776,16 @@
# result in a crash somewhere else at some point.
return lkr
+ def isintroducedafter(self, changelogrev):
+ """True if a filectx has been introduced after a given floor revision
+ """
+ if self.linkrev() >= changelogrev:
+ return True
+ introrev = self._introrev(stoprev=changelogrev)
+ if introrev is None:
+ return False
+ return introrev >= changelogrev
+
def introrev(self):
"""return the rev of the changeset which introduced this file revision
@@ -771,10 +795,34 @@
'linkrev-shadowing' when a file revision is used by multiple
changesets.
"""
+ return self._introrev()
+
+ def _introrev(self, stoprev=None):
+ """
+ Same as `introrev` but with an extra argument to limit the changelog
+ iteration range for some internal use cases.
+
+ If `stoprev` is set, the introducing revision will not be searched
+ past that `stoprev` revision and "None" might be returned. This is
+ useful to limit the iteration range.
+ """
+ toprev = None
attrs = vars(self)
- hastoprev = (r'_changeid' in attrs or r'_changectx' in attrs)
- if hastoprev:
- return self._adjustlinkrev(self.rev(), inclusive=True)
+ if r'_changeid' in attrs:
+ # We have a cached value already
+ toprev = self._changeid
+ elif r'_changectx' in attrs:
+ # We know which changelog entry we are coming from
+ toprev = self._changectx.rev()
+
+ if toprev is not None:
+ return self._adjustlinkrev(toprev, inclusive=True, stoprev=stoprev)
+ elif r'_descendantrev' in attrs:
+ introrev = self._adjustlinkrev(self._descendantrev, stoprev=stoprev)
+ # be nice and cache the result of the computation
+ if introrev is not None:
+ self._changeid = introrev
+ return introrev
else:
return self.linkrev()
@@ -1832,11 +1880,11 @@
# files.
if 'l' in self.p1()[component].flags():
raise error.Abort("error: %s conflicts with symlink %s "
- "in %s." % (path, component,
+ "in %d." % (path, component,
self.p1().rev()))
else:
raise error.Abort("error: '%s' conflicts with file '%s' in "
- "%s." % (path, component,
+ "%d." % (path, component,
self.p1().rev()))
# Test that each new directory to be created to write this path from p2
@@ -1970,6 +2018,12 @@
to resolve a conflict.
"""
keys = []
+ # This won't be perfect, but can help performance significantly when
+ # using things like remotefilelog.
+ scmutil.prefetchfiles(
+ self.repo(), [self.p1().rev()],
+ scmutil.matchfiles(self.repo(), self._cache.keys()))
+
for path in self._cache.keys():
cache = self._cache[path]
try:
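
isintroducedafter() above lets copy tracing bound the potentially expensive
linkrev-adjustment walk: the ancestor scan gives up with None once it drops
below the stoprev floor. A toy of the bounded walk (a predicate standing in
for the 'files' field check)::

  def adjustlinkrev(ancestors, introduces, stoprev=None):
      """Toy of the bounded walk: scan ancestors newest-first for the one
      introducing the file content; give up with None below stoprev."""
      for a in ancestors:
          if stoprev is not None and a < stoprev:
              return None  # floor reached without finding an introduction
          if introduces(a):
              return a

  revs = [9, 7, 4, 2]
  print(adjustlinkrev(revs, lambda r: r == 2, stoprev=5))  # -> None
  print(adjustlinkrev(revs, lambda r: r == 7, stoprev=5))  # -> 7
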
--- a/mercurial/copies.py Wed Jan 09 20:00:35 2019 -0800
+++ b/mercurial/copies.py Fri Jan 18 13:28:22 2019 -0500
@@ -46,22 +46,21 @@
# - quit when interesting revs is zero
cl = repo.changelog
- working = len(cl) # pseudo rev for the working directory
if a is None:
- a = working
+ a = node.wdirrev
if b is None:
- b = working
+ b = node.wdirrev
side = {a: -1, b: 1}
visit = [-a, -b]
heapq.heapify(visit)
interesting = len(visit)
hascommonancestor = False
- limit = working
+ limit = node.wdirrev
while interesting:
r = -heapq.heappop(visit)
- if r == working:
+ if r == node.wdirrev:
parents = [cl.rev(p) for p in repo.dirstate.parents()]
else:
parents = cl.parentrevs(r)
@@ -132,14 +131,14 @@
return t
-def _tracefile(fctx, am, limit=-1):
+def _tracefile(fctx, am, limit=node.nullrev):
"""return file context that is the ancestor of fctx present in ancestor
manifest am, stopping after the first ancestor lower than limit"""
for f in fctx.ancestors():
if am.get(f.path(), None) == f.filenode():
return f
- if limit >= 0 and f.linkrev() < limit and f.rev() < limit:
+ if limit >= 0 and not f.isintroducedafter(limit):
return None
def _dirstatecopies(d, match=None):
@@ -171,7 +170,7 @@
% (a, b))
limit = _findlimit(repo, a.rev(), b.rev())
if limit is None:
- limit = -1
+ limit = node.nullrev
if debug:
dbg('debug.copies: search limit: %d\n' % limit)
am = a.manifest()
--- a/mercurial/dagop.py Wed Jan 09 20:00:35 2019 -0800
+++ b/mercurial/dagop.py Fri Jan 18 13:28:22 2019 -0500
@@ -764,13 +764,12 @@
the input set.
"""
headrevs = set(revs)
+ parents = set([node.nullrev])
+ up = parents.update
for rev in revs:
- for prev in parentsfn(rev):
- headrevs.discard(prev)
-
- headrevs.discard(node.nullrev)
-
+ up(parentsfn(rev))
+ headrevs.difference_update(parents)
return headrevs
def headrevssubset(revsfn, parentrevsfn, startrev=None, stoprevs=None):
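
The headrevs() rewrite computes heads as the input set minus the union of all
parents, replacing per-parent discards with one bulk set difference. A
runnable equivalent::

  def headrevs(revs, parentsfn):
      """revs minus every revision that is a parent of some rev in revs;
      nullrev (-1) is seeded so it is never reported as a head."""
      heads = set(revs)
      parents = {-1}
      for rev in revs:
          parents.update(parentsfn(rev))
      heads.difference_update(parents)
      return heads

  # linear 0-1-2 plus a branch 1-3: the heads are 2 and 3
  parentmap = {0: [-1], 1: [0], 2: [1], 3: [1]}
  print(sorted(headrevs({0, 1, 2, 3}, parentmap.__getitem__)))
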
--- a/mercurial/debugcommands.py Wed Jan 09 20:00:35 2019 -0800
+++ b/mercurial/debugcommands.py Fri Jan 18 13:28:22 2019 -0500
@@ -1172,7 +1172,7 @@
if not util.safehasattr(index, 'stats'):
raise error.Abort(_('debugindexstats only works with native code'))
for k, v in sorted(index.stats().items()):
- ui.write('%s: %s\n' % (k, v))
+ ui.write('%s: %d\n' % (k, v))
@command('debuginstall', [] + cmdutil.formatteropts, '', norepo=True)
def debuginstall(ui, **opts):
@@ -2751,8 +2751,9 @@
@command('debugupgraderepo', [
('o', 'optimize', [], _('extra optimization to perform'), _('NAME')),
('', 'run', False, _('performs an upgrade')),
+ ('', 'backup', True, _('keep the old repository content around')),
])
-def debugupgraderepo(ui, repo, run=False, optimize=None):
+def debugupgraderepo(ui, repo, run=False, optimize=None, backup=True):
"""upgrade a repository to use different features
If no arguments are specified, the repository is evaluated for upgrade
@@ -2771,7 +2772,8 @@
should complete almost instantaneously and the chances of a consumer being
unable to access the repository should be low.
"""
- return upgrade.upgraderepo(ui, repo, run=run, optimize=optimize)
+ return upgrade.upgraderepo(ui, repo, run=run, optimize=optimize,
+ backup=backup)
@command('debugwalk', cmdutil.walkopts, _('[OPTION]... [FILE]...'),
inferrepo=True)
--- a/mercurial/default.d/mergetools.rc Wed Jan 09 20:00:35 2019 -0800
+++ b/mercurial/default.d/mergetools.rc Fri Jan 18 13:28:22 2019 -0500
@@ -101,14 +101,14 @@
beyondcompare3.diffargs=/lro /lefttitle=$plabel1 /righttitle=$clabel /solo /expandall $parent $child
; Linux version of Beyond Compare
-bcompare.args=$local $other $base -mergeoutput=$output -ro -lefttitle=parent1 -centertitle=base -righttitle=parent2 -outputtitle=merged -automerge -reviewconflicts -solo
+bcompare.args=$local $other $base -mergeoutput=$output -ro -lefttitle=$labellocal -centertitle=$labelbase -righttitle=$labelother -outputtitle=merged -automerge -reviewconflicts -solo
bcompare.gui=True
bcompare.priority=-1
bcompare.diffargs=-lro -lefttitle=$plabel1 -righttitle=$clabel -solo -expandall $parent $child
; OS X version of Beyond Compare
bcomposx.executable = /Applications/Beyond Compare.app/Contents/MacOS/bcomp
-bcomposx.args=$local $other $base -mergeoutput=$output -ro -lefttitle=parent1 -centertitle=base -righttitle=parent2 -outputtitle=merged -automerge -reviewconflicts -solo
+bcomposx.args=$local $other $base -mergeoutput=$output -ro -lefttitle=$labellocal -centertitle=$labelbase -righttitle=$labelother -outputtitle=merged -automerge -reviewconflicts -solo
bcomposx.gui=True
bcomposx.priority=-1
bcomposx.diffargs=-lro -lefttitle=$plabel1 -righttitle=$clabel -solo -expandall $parent $child
--- a/mercurial/dirstate.py Wed Jan 09 20:00:35 2019 -0800
+++ b/mercurial/dirstate.py Fri Jan 18 13:28:22 2019 -0500
@@ -317,7 +317,7 @@
return copies
def setbranch(self, branch):
- self._branch = encoding.fromlocal(branch)
+ self.__class__._branch.set(self, encoding.fromlocal(branch))
f = self._opener('branch', 'w', atomictemp=True, checkambig=True)
try:
f.write(self._branch + '\n')
--- a/mercurial/dirstateguard.py Wed Jan 09 20:00:35 2019 -0800
+++ b/mercurial/dirstateguard.py Fri Jan 18 13:28:22 2019 -0500
@@ -37,7 +37,7 @@
self._narrowspecbackupname = ('narrowspec.backup.%s.%d' %
(name, id(self)))
repo.dirstate.savebackup(repo.currenttransaction(), self._backupname)
- narrowspec.savebackup(repo, self._narrowspecbackupname)
+ narrowspec.savewcbackup(repo, self._narrowspecbackupname)
self._active = True
def __del__(self):
@@ -56,12 +56,12 @@
self._repo.dirstate.clearbackup(self._repo.currenttransaction(),
self._backupname)
- narrowspec.clearbackup(self._repo, self._narrowspecbackupname)
+ narrowspec.clearwcbackup(self._repo, self._narrowspecbackupname)
self._active = False
self._closed = True
def _abort(self):
- narrowspec.restorebackup(self._repo, self._narrowspecbackupname)
+ narrowspec.restorewcbackup(self._repo, self._narrowspecbackupname)
self._repo.dirstate.restorebackup(self._repo.currenttransaction(),
self._backupname)
self._active = False
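
The setbranch() fix assigns through the descriptor's set() method so the
cache bookkeeping is updated along with the value, instead of a plain
attribute assignment that bypasses it. A toy cache descriptor illustrating
the pattern (not the real filecache implementation)::

  class cachedprop(object):
      """Toy non-data descriptor cache: __get__ computes and stores the
      value; set() stores a value *and* updates the bookkeeping that a
      plain attribute assignment would silently skip."""
      def __init__(self, func):
          self.func, self.name = func, func.__name__
      def __get__(self, obj, objtype=None):
          value = obj.__dict__[self.name] = self.func(obj)
          obj._cachebook[self.name] = 'computed'
          return value
      def set(self, obj, value):
          obj.__dict__[self.name] = value
          obj._cachebook[self.name] = 'assigned'

  class dirstateish(object):
      def __init__(self):
          self._cachebook = {}
      @cachedprop
      def _branch(self):
          return 'default'

  d = dirstateish()
  type(d)._branch.set(d, 'stable')  # the pattern setbranch now uses
  print(d._branch, d._cachebook)
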
--- a/mercurial/dispatch.py Wed Jan 09 20:00:35 2019 -0800
+++ b/mercurial/dispatch.py Fri Jan 18 13:28:22 2019 -0500
@@ -37,6 +37,7 @@
hook,
profiling,
pycompat,
+ registrar,
scmutil,
ui as uimod,
util,
@@ -49,7 +50,7 @@
class request(object):
def __init__(self, args, ui=None, repo=None, fin=None, fout=None,
- ferr=None, prereposetups=None):
+ ferr=None, fmsg=None, prereposetups=None):
self.args = args
self.ui = ui
self.repo = repo
@@ -58,6 +59,8 @@
self.fin = fin
self.fout = fout
self.ferr = ferr
+ # separate stream for status/error messages
+ self.fmsg = fmsg
# remember options pre-parsed by _earlyparseopts()
self.earlyoptions = {}
@@ -204,6 +207,8 @@
req.ui.fout = req.fout
if req.ferr:
req.ui.ferr = req.ferr
+ if req.fmsg:
+ req.ui.fmsg = req.fmsg
except error.Abort as inst:
ferr.write(_("abort: %s\n") % inst)
if inst.hint:
@@ -243,11 +248,19 @@
req.ui.flush()
if req.ui.logblockedtimes:
req.ui._blockedtimes['command_duration'] = duration * 1000
- req.ui.log('uiblocked', 'ui blocked ms',
+ req.ui.log('uiblocked', 'ui blocked ms\n',
**pycompat.strkwargs(req.ui._blockedtimes))
- req.ui.log("commandfinish", "%s exited %d after %0.2f seconds\n",
- msg, ret & 255, duration,
- canonical_command=req.canonical_command)
+ return_code = ret & 255
+ req.ui.log(
+ "commandfinish",
+ "%s exited %d after %0.2f seconds\n",
+ msg,
+ return_code,
+ duration,
+ return_code=return_code,
+ duration=duration,
+ canonical_command=req.canonical_command,
+ )
try:
req._runexithandlers()
except: # exiting, so no re-raises
@@ -503,6 +516,7 @@
return ui.system(cmd, environ=env,
blockedtag='alias_%s' % self.name)
self.fn = fn
+ self.alias = True
self._populatehelp(ui, name, shdef, self.fn)
return
@@ -530,6 +544,7 @@
self.fn, self.opts = tableentry
cmdhelp = None
+ self.alias = True
self._populatehelp(ui, name, cmd, self.fn, cmdhelp)
except error.UnknownCommand:
@@ -543,7 +558,7 @@
def _populatehelp(self, ui, name, cmd, fn, defaulthelp=None):
# confine strings to be passed to i18n.gettext()
cfg = {}
- for k in ('doc', 'help'):
+ for k in ('doc', 'help', 'category'):
v = ui.config('alias', '%s:%s' % (name, k), None)
if v is None:
continue
@@ -558,11 +573,14 @@
# drop prefix in old-style help lines so hg shows the alias
self.help = self.help[4 + len(cmd):]
+ self.owndoc = 'doc' in cfg
doc = cfg.get('doc', pycompat.getdoc(fn))
if doc is not None:
doc = pycompat.sysstr(doc)
self.__doc__ = doc
+ self.helpcategory = cfg.get('category', registrar.command.CATEGORY_NONE)
+
@property
def args(self):
args = pycompat.maplist(util.expandpath, self.givenargs)
@@ -613,6 +631,7 @@
self.definition = definition
self.cmdtable = cmdtable.copy()
self.source = source
+ self.alias = True
@util.propertycache
def _aliasdef(self):
@@ -847,6 +866,9 @@
# Check abbreviation/ambiguity of shell alias.
shellaliasfn = _checkshellalias(lui, ui, args)
if shellaliasfn:
+ # no additional configs will be set, set up the ui instances
+ for ui_ in uis:
+ extensions.populateui(ui_)
return shellaliasfn()
# check for fallback encoding
@@ -929,6 +951,10 @@
for ui_ in uis:
ui_.disablepager()
+ # configs are fully loaded, set up the ui instances
+ for ui_ in uis:
+ extensions.populateui(ui_)
+
if options['version']:
return commands.version_(ui)
if options['help']:
@@ -948,6 +974,7 @@
repo.ui.fin = ui.fin
repo.ui.fout = ui.fout
repo.ui.ferr = ui.ferr
+ repo.ui.fmsg = ui.fmsg
else:
try:
repo = hg.repository(ui, path=path,
--- a/mercurial/exchange.py Wed Jan 09 20:00:35 2019 -0800
+++ b/mercurial/exchange.py Fri Jan 18 13:28:22 2019 -0500
@@ -40,6 +40,7 @@
streamclone,
url as urlmod,
util,
+ wireprototypes,
)
from .utils import (
stringutil,
@@ -333,6 +334,34 @@
heads = cl.heads()
return discovery.outgoing(repo, common, heads)
+def _checkpublish(pushop):
+ repo = pushop.repo
+ ui = repo.ui
+ behavior = ui.config('experimental', 'auto-publish')
+ if pushop.publish or behavior not in ('warn', 'confirm', 'abort'):
+ return
+ remotephases = listkeys(pushop.remote, 'phases')
+ if not remotephases.get('publishing', False):
+ return
+
+ if pushop.revs is None:
+ published = repo.filtered('served').revs('not public()')
+ else:
+ published = repo.revs('::%ln - public()', pushop.revs)
+ if published:
+ if behavior == 'warn':
+ ui.warn(_('%i changesets about to be published\n')
+ % len(published))
+ elif behavior == 'confirm':
+ if ui.promptchoice(_('push and publish %i changesets (yn)?'
+ '$$ &Yes $$ &No') % len(published)):
+ raise error.Abort(_('user quit'))
+ elif behavior == 'abort':
+ msg = _('push would publish %i changesets') % len(published)
+ hint = _("use --publish or adjust 'experimental.auto-publish'"
+ " config")
+ raise error.Abort(msg, hint=hint)
+
def _forcebundle1(op):
"""return true if a pull/push must use bundle1
@@ -358,7 +387,7 @@
"""
def __init__(self, repo, remote, force=False, revs=None, newbranch=False,
- bookmarks=(), pushvars=None):
+ bookmarks=(), publish=False, pushvars=None):
# repo we push from
self.repo = repo
self.ui = repo.ui
@@ -420,6 +449,8 @@
self.pkfailcb = {}
# an iterable of pushvars or None
self.pushvars = pushvars
+ # publish pushed changesets
+ self.publish = publish
@util.propertycache
def futureheads(self):
@@ -477,7 +508,7 @@
def push(repo, remote, force=False, revs=None, newbranch=False, bookmarks=(),
- opargs=None):
+ publish=False, opargs=None):
'''Push outgoing changesets (limited by revs) from a local
repository to remote. Return an integer:
- None means nothing to push
@@ -489,7 +520,7 @@
if opargs is None:
opargs = {}
pushop = pushoperation(repo, remote, force, revs, newbranch, bookmarks,
- **pycompat.strkwargs(opargs))
+ publish, **pycompat.strkwargs(opargs))
if pushop.remote.local():
missing = (set(pushop.repo.requirements)
- pushop.remote.local().supported)
@@ -530,6 +561,7 @@
lock or util.nullcontextmanager(), \
pushop.trmanager or util.nullcontextmanager():
pushop.repo.checkpush(pushop)
+ _checkpublish(pushop)
_pushdiscovery(pushop)
if not _forcebundle1(pushop):
_pushbundle2(pushop)
@@ -629,7 +661,10 @@
# XXX Beware that revset break if droots is not strictly
# XXX root we may want to ensure it is but it is costly
fallback = list(unfi.set(revset, droots, pushop.fallbackheads))
- if not outgoing.missing:
+ if not pushop.remotephases.publishing and pushop.publish:
+ future = list(unfi.set('%ln and (not public() or %ln::)',
+ pushop.futureheads, droots))
+ elif not outgoing.missing:
future = fallback
else:
# adds changeset we are going to push as draft
@@ -1633,6 +1668,13 @@
kwargs['common'] = pullop.common
kwargs['heads'] = pullop.heads or pullop.rheads
+ # check server supports narrow and then adding includepats and excludepats
+ servernarrow = pullop.remote.capable(wireprototypes.NARROWCAP)
+ if servernarrow and pullop.includepats:
+ kwargs['includepats'] = pullop.includepats
+ if servernarrow and pullop.excludepats:
+ kwargs['excludepats'] = pullop.excludepats
+
if streaming:
kwargs['cg'] = False
kwargs['stream'] = True
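
_checkpublish() above gates a push that would publish draft changesets to a
publishing server, honoring experimental.auto-publish. A toy of the behavior
dispatch (print/input/SystemExit standing in for ui.warn, ui.promptchoice and
error.Abort)::

  def checkpublish(behavior, publishing_remote, published, explicit_publish):
      """Toy of _checkpublish: 'warn' prints, 'confirm' asks, 'abort'
      refuses; anything else (or --publish, or a non-publishing remote)
      lets the push proceed."""
      if explicit_publish or behavior not in ('warn', 'confirm', 'abort'):
          return
      if not publishing_remote or not published:
          return
      if behavior == 'warn':
          print('%i changesets about to be published' % len(published))
      elif behavior == 'confirm':
          if input('push and publish %i changesets (yn)? '
                   % len(published)) != 'y':
              raise SystemExit('user quit')
      else:
          raise SystemExit('push would publish %i changesets'
                           % len(published))

  checkpublish('warn', True, [1, 2, 3], explicit_publish=False)
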
--- a/mercurial/exchangev2.py Wed Jan 09 20:00:35 2019 -0800
+++ b/mercurial/exchangev2.py Fri Jan 18 13:28:22 2019 -0500
@@ -98,7 +98,7 @@
relevantcsetnodes = set()
clnode = repo.changelog.node
- for rev in repo.revs(b'ancestors(%ln, %d)',
+ for rev in repo.revs(b'ancestors(%ln, %s)',
pullheads, pullop.depth - 1):
relevantcsetnodes.add(clnode(rev))
--- a/mercurial/exewrapper.c Wed Jan 09 20:00:35 2019 -0800
+++ b/mercurial/exewrapper.c Fri Jan 18 13:28:22 2019 -0500
@@ -7,6 +7,7 @@
GNU General Public License version 2 or any later version.
*/
+#include <Python.h>
#include <stdio.h>
#include <tchar.h>
#include <windows.h>
@@ -46,6 +47,10 @@
void(__cdecl * Py_SetPythonHome)(TCHAR * home);
int(__cdecl * Py_Main)(int argc, TCHAR *argv[]);
+#if PY_MAJOR_VERSION >= 3
+ Py_LegacyWindowsStdioFlag = 1;
+#endif
+
if (GetModuleFileName(NULL, pyscript, _countof(pyscript)) == 0) {
err = "GetModuleFileName failed";
goto bail;
--- a/mercurial/extensions.py Wed Jan 09 20:00:35 2019 -0800
+++ b/mercurial/extensions.py Fri Jan 18 13:28:22 2019 -0500
@@ -121,13 +121,12 @@
return mod
def _reportimporterror(ui, err, failed, next):
- # note: this ui.debug happens before --debug is processed,
+ # note: this ui.log happens before --debug is processed,
# Use --config ui.debug=1 to see them.
- if ui.configbool('devel', 'debug.extensions'):
- ui.debug('debug.extensions: - could not import %s (%s): trying %s\n'
- % (failed, stringutil.forcebytestr(err), next))
- if ui.debugflag:
- ui.traceback()
+ ui.log(b'extension', b' - could not import %s (%s): trying %s\n',
+ failed, stringutil.forcebytestr(err), next)
+ if ui.debugflag and ui.configbool('devel', 'debug.extensions'):
+ ui.traceback()
def _rejectunicode(name, xs):
if isinstance(xs, (list, set, tuple)):
@@ -166,7 +165,7 @@
_rejectunicode(t, o._table)
_validatecmdtable(ui, getattr(mod, 'cmdtable', {}))
-def load(ui, name, path, log=lambda *a: None, loadingtime=None):
+def load(ui, name, path, loadingtime=None):
if name.startswith('hgext.') or name.startswith('hgext/'):
shortname = name[6:]
else:
@@ -175,11 +174,11 @@
return None
if shortname in _extensions:
return _extensions[shortname]
- log(' - loading extension: %r\n', shortname)
+ ui.log(b'extension', b' - loading extension: %s\n', shortname)
_extensions[shortname] = None
- with util.timedcm('load extension %r', shortname) as stats:
+ with util.timedcm('load extension %s', shortname) as stats:
mod = _importext(name, path, bind(_reportimporterror, ui))
- log(' > %r extension loaded in %s\n', shortname, stats)
+ ui.log(b'extension', b' > %s extension loaded in %s\n', shortname, stats)
if loadingtime is not None:
loadingtime[shortname] += stats.elapsed
@@ -189,19 +188,21 @@
# of Mercurial.
minver = getattr(mod, 'minimumhgversion', None)
if minver and util.versiontuple(minver, 2) > util.versiontuple(n=2):
- ui.warn(_('(third party extension %s requires version %s or newer '
- 'of Mercurial; disabling)\n') % (shortname, minver))
+ msg = _('(third party extension %s requires version %s or newer '
+ 'of Mercurial (current: %s); disabling)\n')
+ ui.warn(msg % (shortname, minver, util.version()))
return
- log(' - validating extension tables: %r\n', shortname)
+ ui.log(b'extension', b' - validating extension tables: %s\n', shortname)
_validatetables(ui, mod)
_extensions[shortname] = mod
_order.append(shortname)
- log(' - invoking registered callbacks: %r\n', shortname)
- with util.timedcm('callbacks extension %r', shortname) as stats:
+ ui.log(b'extension', b' - invoking registered callbacks: %s\n',
+ shortname)
+ with util.timedcm('callbacks extension %s', shortname) as stats:
for fn in _aftercallbacks.get(shortname, []):
fn(loaded=True)
- log(' > callbacks completed in %s\n', stats)
+ ui.log(b'extension', b' > callbacks completed in %s\n', stats)
return mod
def _runuisetup(name, ui):
@@ -225,6 +226,8 @@
except TypeError:
if pycompat.getargspec(extsetup).args:
raise
+ ui.deprecwarn("extsetup for '%s' must take a ui argument"
+ % name, "4.9")
extsetup() # old extsetup with no ui argument
except Exception as inst:
ui.traceback(force=True)
@@ -234,28 +237,25 @@
return True
def loadall(ui, whitelist=None):
- if ui.configbool('devel', 'debug.extensions'):
- log = lambda msg, *values: ui.debug('debug.extensions: ',
- msg % values, label='debug.extensions')
- else:
- log = lambda *a, **kw: None
loadingtime = collections.defaultdict(int)
result = ui.configitems("extensions")
if whitelist is not None:
result = [(k, v) for (k, v) in result if k in whitelist]
newindex = len(_order)
- log('loading %sextensions\n', 'additional ' if newindex else '')
- log('- processing %d entries\n', len(result))
+ ui.log(b'extension', b'loading %sextensions\n',
+ 'additional ' if newindex else '')
+ ui.log(b'extension', b'- processing %d entries\n', len(result))
with util.timedcm('load all extensions') as stats:
for (name, path) in result:
if path:
if path[0:1] == '!':
if name not in _disabledextensions:
- log(' - skipping disabled extension: %r\n', name)
+ ui.log(b'extension',
+ b' - skipping disabled extension: %s\n', name)
_disabledextensions[name] = path[1:]
continue
try:
- load(ui, name, path, log, loadingtime)
+ load(ui, name, path, loadingtime)
except Exception as inst:
msg = stringutil.forcebytestr(inst)
if path:
@@ -268,8 +268,8 @@
ui.warn(_("*** (%s)\n") % inst.hint)
ui.traceback()
- log('> loaded %d extensions, total time %s\n',
- len(_order) - newindex, stats)
+ ui.log(b'extension', b'> loaded %d extensions, total time %s\n',
+ len(_order) - newindex, stats)
# list of (objname, loadermod, loadername) tuple:
# - objname is the name of an object in extension module,
# from which extra information is loaded
@@ -282,52 +282,55 @@
('configtable', configitems, 'loadconfigtable'),
]
- log('- loading configtable attributes\n')
+ ui.log(b'extension', b'- loading configtable attributes\n')
_loadextra(ui, newindex, earlyextraloaders)
broken = set()
- log('- executing uisetup hooks\n')
+ ui.log(b'extension', b'- executing uisetup hooks\n')
with util.timedcm('all uisetup') as alluisetupstats:
for name in _order[newindex:]:
- log(' - running uisetup for %r\n', name)
- with util.timedcm('uisetup %r', name) as stats:
+ ui.log(b'extension', b' - running uisetup for %s\n', name)
+ with util.timedcm('uisetup %s', name) as stats:
if not _runuisetup(name, ui):
- log(' - the %r extension uisetup failed\n', name)
+ ui.log(b'extension',
+ b' - the %s extension uisetup failed\n', name)
broken.add(name)
- log(' > uisetup for %r took %s\n', name, stats)
+ ui.log(b'extension', b' > uisetup for %s took %s\n', name, stats)
loadingtime[name] += stats.elapsed
- log('> all uisetup took %s\n', alluisetupstats)
+ ui.log(b'extension', b'> all uisetup took %s\n', alluisetupstats)
- log('- executing extsetup hooks\n')
+ ui.log(b'extension', b'- executing extsetup hooks\n')
with util.timedcm('all extsetup') as allextetupstats:
for name in _order[newindex:]:
if name in broken:
continue
- log(' - running extsetup for %r\n', name)
- with util.timedcm('extsetup %r', name) as stats:
+ ui.log(b'extension', b' - running extsetup for %s\n', name)
+ with util.timedcm('extsetup %s', name) as stats:
if not _runextsetup(name, ui):
- log(' - the %r extension extsetup failed\n', name)
+ ui.log(b'extension',
+ b' - the %s extension extsetup failed\n', name)
broken.add(name)
- log(' > extsetup for %r took %s\n', name, stats)
+ ui.log(b'extension', b' > extsetup for %s took %s\n', name, stats)
loadingtime[name] += stats.elapsed
- log('> all extsetup took %s\n', allextetupstats)
+ ui.log(b'extension', b'> all extsetup took %s\n', allextetupstats)
for name in broken:
- log(' - disabling broken %r extension\n', name)
+ ui.log(b'extension', b' - disabling broken %s extension\n', name)
_extensions[name] = None
# Call aftercallbacks that were never met.
- log('- executing remaining aftercallbacks\n')
+ ui.log(b'extension', b'- executing remaining aftercallbacks\n')
with util.timedcm('aftercallbacks') as stats:
for shortname in _aftercallbacks:
if shortname in _extensions:
continue
for fn in _aftercallbacks[shortname]:
- log(' - extension %r not loaded, notify callbacks\n',
- shortname)
+ ui.log(b'extension',
+ b' - extension %s not loaded, notify callbacks\n',
+ shortname)
fn(loaded=False)
- log('> remaining aftercallbacks completed in %s\n', stats)
+ ui.log(b'extension', b'> remaining aftercallbacks completed in %s\n', stats)
# loadall() is called multiple times and lingering _aftercallbacks
# entries could result in double execution. See issue4646.
@@ -351,7 +354,7 @@
# - loadermod is the module where loader is placed
# - loadername is the name of the function,
# which takes (ui, extensionname, extraobj) arguments
- log('- loading extension registration objects\n')
+ ui.log(b'extension', b'- loading extension registration objects\n')
extraloaders = [
('cmdtable', commands, 'loadcmdtable'),
('colortable', color, 'loadcolortable'),
@@ -364,14 +367,15 @@
]
with util.timedcm('load registration objects') as stats:
_loadextra(ui, newindex, extraloaders)
- log('> extension registration object loading took %s\n', stats)
+ ui.log(b'extension', b'> extension registration object loading took %s\n',
+ stats)
# Report per extension loading time (except reposetup)
for name in sorted(loadingtime):
- extension_msg = '> extension %s take a total of %s to load\n'
- log(extension_msg, name, util.timecount(loadingtime[name]))
+ ui.log(b'extension', b'> extension %s took a total of %s to load\n',
+ name, util.timecount(loadingtime[name]))
- log('extension loading complete\n')
+ ui.log(b'extension', b'extension loading complete\n')
def _loadextra(ui, newindex, extraloaders):
for name in _order[newindex:]:
@@ -404,6 +408,25 @@
else:
_aftercallbacks.setdefault(extension, []).append(callback)
+def populateui(ui):
+ """Run extension hooks on the given ui to populate additional members,
+ extend the class dynamically, etc.
+
+ This will be called after the configuration is loaded, and/or extensions
+ are loaded. In general, it's called once per ui instance, but in
+ command-server and hgweb, this may be called more than once with the
+ same ui.
+ """
+ for name, mod in extensions(ui):
+ hook = getattr(mod, 'uipopulate', None)
+ if not hook:
+ continue
+ try:
+ hook(ui)
+ except Exception as inst:
+ ui.traceback(force=True)
+ ui.warn(_('*** failed to populate ui by extension %s: %s\n')
+ % (name, stringutil.forcebytestr(inst)))
+
def bind(func, *args):
'''Partial function application
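
populateui() gives extensions a per-ui-instance hook: any enabled extension
exposing a uipopulate function is called for each ui after configs and
extensions load. A minimal extension module sketch under that protocol
(hypothetical extension name)::

  # myext.py - minimal sketch of an extension using the new hook; the
  # function names follow the protocol populateui() looks up
  def uipopulate(ui):
      # called for each ui instance once configs/extensions are loaded;
      # unlike uisetup, this may run several times in a command server
      ui.setconfig(b'myext', b'populated', b'yes', b'myext')

  def uisetup(ui):
      # still runs once per process, before any uipopulate call
      pass
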
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/mercurial/exthelper.py Fri Jan 18 13:28:22 2019 -0500
@@ -0,0 +1,300 @@
+# Copyright 2012 Logilab SA <contact@logilab.fr>
+# Pierre-Yves David <pierre-yves.david@ens-lyon.org>
+# Octobus <contact@octobus.net>
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2 or any later version.
+
+#####################################################################
+### Extension helper ###
+#####################################################################
+
+from __future__ import absolute_import
+
+from . import (
+ commands,
+ error,
+ extensions,
+ registrar,
+)
+
+class exthelper(object):
+ """Helper for modular extension setup
+
+ A single helper should be instantiated for each module of an
+ extension, where a command or function needs to be wrapped, or a
+ command, extension hook, fileset, revset or template needs to be
+ registered. Helper methods are then used as decorators for
+ these various purposes. If an extension spans multiple modules,
+ all helper instances should be merged in the main module.
+
+ All decorators return the original function and may be chained.
+
+ Aside from the helper functions with examples below, several
+ registrar method aliases are available for adding commands,
+ configitems, filesets, revsets, and templates. Simply decorate
+ the appropriate methods, and assign the corresponding exthelper
+ variable to a module level variable of the extension. The
+ extension loading mechanism will handle the rest.
+
+ example::
+
+ # ext.py
+ eh = exthelper.exthelper()
+
+ # As needed:
+ cmdtable = eh.cmdtable
+ configtable = eh.configtable
+ filesetpredicate = eh.filesetpredicate
+ revsetpredicate = eh.revsetpredicate
+ templatekeyword = eh.templatekeyword
+
+ @eh.command('mynewcommand',
+ [('r', 'rev', [], _('operate on these revisions'))],
+ _('-r REV...'),
+ helpcategory=command.CATEGORY_XXX)
+ def newcommand(ui, repo, *revs, **opts):
+ # implementation goes here
+
+ eh.configitem('experimental', 'foo',
+ default=False,
+ )
+
+ @eh.filesetpredicate('lfs()')
+ def filesetbabar(mctx, x):
+ return mctx.predicate(...)
+
+ @eh.revsetpredicate('hidden')
+ def revsetbabar(repo, subset, x):
+ args = revset.getargs(x, 0, 0, 'babar accept no argument')
+ return [r for r in subset if 'babar' in repo[r].description()]
+
+ @eh.templatekeyword('babar')
+ def kwbabar(ctx):
+ return 'babar'
+ """
+
+ def __init__(self):
+ self._uipopulatecallables = []
+ self._uicallables = []
+ self._extcallables = []
+ self._repocallables = []
+ self._commandwrappers = []
+ self._extcommandwrappers = []
+ self._functionwrappers = []
+ self.cmdtable = {}
+ self.command = registrar.command(self.cmdtable)
+ self.configtable = {}
+ self.configitem = registrar.configitem(self.configtable)
+ self.filesetpredicate = registrar.filesetpredicate()
+ self.revsetpredicate = registrar.revsetpredicate()
+ self.templatekeyword = registrar.templatekeyword()
+
+ def merge(self, other):
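+ """Merge the registered items of ``other`` into this helper
+
+ A hypothetical sketch of a multi-module extension (module names are
+ illustrative)::
+
+     # ext/__init__.py
+     from . import submodule
+     eh = exthelper.exthelper()
+     eh.merge(submodule.eh)
+ """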
+ self._uicallables.extend(other._uicallables)
+ self._uipopulatecallables.extend(other._uipopulatecallables)
+ self._extcallables.extend(other._extcallables)
+ self._repocallables.extend(other._repocallables)
+ self.filesetpredicate._merge(other.filesetpredicate)
+ self.revsetpredicate._merge(other.revsetpredicate)
+ self.templatekeyword._merge(other.templatekeyword)
+ self._commandwrappers.extend(other._commandwrappers)
+ self._extcommandwrappers.extend(other._extcommandwrappers)
+ self._functionwrappers.extend(other._functionwrappers)
+ self.cmdtable.update(other.cmdtable)
+ for section, items in other.configtable.iteritems():
+ if section in self.configtable:
+ self.configtable[section].update(items)
+ else:
+ self.configtable[section] = items
+
+ def finaluisetup(self, ui):
+ """Method to be used as the extension uisetup
+
+ The following operations belong here:
+
+ - Changes to ui.__class__ . The ui object that will be used to run the
+ command has not yet been created. Changes made here will affect ui
+ objects created after this, and in particular the ui that will be
+ passed to runcommand
+ - Command wraps (extensions.wrapcommand)
+ - Changes that need to be visible to other extensions: because
+ initialization occurs in phases (all extensions run uisetup, then all
+ run extsetup), a change made here will be visible to other extensions
+ during extsetup
+ - Monkeypatch or wrap function (extensions.wrapfunction) of dispatch
+ module members
+ - Setup of pre-* and post-* hooks
+ - pushkey setup
+ """
+ for command, wrapper, opts in self._commandwrappers:
+ entry = extensions.wrapcommand(commands.table, command, wrapper)
+ if opts:
+ for opt in opts:
+ entry[1].append(opt)
+ for cont, funcname, wrapper in self._functionwrappers:
+ extensions.wrapfunction(cont, funcname, wrapper)
+ for c in self._uicallables:
+ c(ui)
+
+ def finaluipopulate(self, ui):
+ """Method to be used as the extension uipopulate
+
+ This is called once per ui instance to:
+
+ - Set up additional ui members
+ - Update configuration by ``ui.setconfig()``
+ - Extend the class dynamically
+ """
+ for c in self._uipopulatecallables:
+ c(ui)
+
+ def finalextsetup(self, ui):
+ """Method to be used as a the extension extsetup
+
+ The following operations belong here:
+
+ - Changes depending on the status of other extensions. (if
+ extensions.find('mq'))
+ - Add a global option to all commands
+ """
+ knownexts = {}
+
+ for ext, command, wrapper, opts in self._extcommandwrappers:
+ if ext not in knownexts:
+ try:
+ e = extensions.find(ext)
+ except KeyError:
+ # Extension isn't enabled, so don't bother trying to wrap
+ # it.
+ continue
+ knownexts[ext] = e.cmdtable
+ entry = extensions.wrapcommand(knownexts[ext], command, wrapper)
+ if opts:
+ for opt in opts:
+ entry[1].append(opt)
+
+ for c in self._extcallables:
+ c(ui)
+
+ def finalreposetup(self, ui, repo):
+ """Method to be used as the extension reposetup
+
+ The following operations belong here:
+
+ - All hooks but pre-* and post-*
+ - Modify configuration variables
+ - Changes to repo.__class__, repo.dirstate.__class__
+ """
+ for c in self._repocallables:
+ c(ui, repo)
+
+ def uisetup(self, call):
+ """Decorated function will be executed during uisetup
+
+ example::
+
+ @eh.uisetup
+ def setupbabar(ui):
+ print 'this is uisetup!'
+ """
+ self._uicallables.append(call)
+ return call
+
+ def uipopulate(self, call):
+ """Decorated function will be executed during uipopulate
+
+ example::
+
+ @eh.uipopulate
+ def setupfoo(ui):
+ print 'this is uipopulate!'
+ """
+ self._uipopulatecallables.append(call)
+ return call
+
+ def extsetup(self, call):
+ """Decorated function will be executed during extsetup
+
+ example::
+
+ @eh.extsetup
+ def setupcelestine(ui):
+ print 'this is extsetup!'
+ """
+ self._extcallables.append(call)
+ return call
+
+ def reposetup(self, call):
+ """Decorated function will be executed during reposetup
+
+ example::
+
+ @eh.reposetup
+ def setupzephir(ui, repo):
+ print 'this is reposetup!'
+ """
+ self._repocallables.append(call)
+ return call
+
+ def wrapcommand(self, command, extension=None, opts=None):
+ """Decorated function is a command wrapper
+
+ The name of the command must be given as the decorator argument.
+ The wrapping is installed during `uisetup`.
+
+ If the optional `extension` argument is provided, the wrapping will be
+ applied to that extension's command table. This argument must be a
+ string naming the extension, which will be looked up using
+ `extensions.find`; if that extension is not enabled, the wrapping is
+ silently skipped. Wrappings that apply to an extension are installed
+ during `extsetup`.
+
+ example::
+
+ @eh.wrapcommand('summary')
+ def wrapsummary(orig, ui, repo, *args, **kwargs):
+ ui.note('Barry!')
+ return orig(ui, repo, *args, **kwargs)
+
+ The `opts` argument allows specifying a list of tuples for additional
+ options for the command. See ``mercurial.fancyopts.fancyopts()`` for
+ the format of the tuple.
+
+ """
+ if opts is None:
+ opts = []
+ else:
+ for opt in opts:
+ if not isinstance(opt, tuple):
+ raise error.ProgrammingError('opts must be list of tuples')
+ if len(opt) not in (4, 5):
+ msg = 'each opt tuple must contain 4 or 5 values'
+ raise error.ProgrammingError(msg)
+
+ def dec(wrapper):
+ if extension is None:
+ self._commandwrappers.append((command, wrapper, opts))
+ else:
+ self._extcommandwrappers.append((extension, command, wrapper,
+ opts))
+ return wrapper
+ return dec
+
+ def wrapfunction(self, container, funcname):
+ """Decorated function is a function wrapper
+
+ This decorator takes two arguments, the container and the name of the
+ function to wrap. The wrapping is performed during `uisetup`.
+ (there is no extension support)
+
+ example::
+
+ @eh.wrapfunction(discovery, 'checkheads')
+ def wrapcheckheads(orig, *args, **kwargs):
+ ui.note('His head smashed in and his heart cut out')
+ return orig(*args, **kwargs)
+ """
+ def dec(wrapper):
+ self._functionwrappers.append((container, funcname, wrapper))
+ return wrapper
+ return dec
--- a/mercurial/filelog.py Wed Jan 09 20:00:35 2019 -0800
+++ b/mercurial/filelog.py Fri Jan 18 13:28:22 2019 -0500
@@ -92,11 +92,11 @@
def emitrevisions(self, nodes, nodesorder=None,
revisiondata=False, assumehaveparentrevisions=False,
- deltaprevious=False):
+ deltamode=repository.CG_DELTAMODE_STD):
return self._revlog.emitrevisions(
nodes, nodesorder=nodesorder, revisiondata=revisiondata,
assumehaveparentrevisions=assumehaveparentrevisions,
- deltaprevious=deltaprevious)
+ deltamode=deltamode)
def addrevision(self, revisiondata, transaction, linkrev, p1, p2,
node=None, flags=revlog.REVIDX_DEFAULT_FLAGS,
--- a/mercurial/filemerge.py Wed Jan 09 20:00:35 2019 -0800
+++ b/mercurial/filemerge.py Fri Jan 18 13:28:22 2019 -0500
@@ -13,7 +13,11 @@
import shutil
from .i18n import _
-from .node import nullid, short
+from .node import (
+ hex,
+ nullid,
+ short,
+)
from . import (
encoding,
@@ -27,6 +31,7 @@
tagmerge,
templatekw,
templater,
+ templateutil,
util,
)
@@ -536,6 +541,44 @@
raise error.InMemoryMergeConflictsError('in-memory merge does not support '
'external merge tools')
+def _describemerge(ui, repo, mynode, fcl, fcb, fco, env, toolpath, args):
+ tmpl = ui.config('ui', 'pre-merge-tool-output-template')
+ if not tmpl:
+ return
+
+ mappingdict = templateutil.mappingdict
+ props = {'ctx': fcl.changectx(),
+ 'node': hex(mynode),
+ 'path': fcl.path(),
+ 'local': mappingdict({'ctx': fcl.changectx(),
+ 'fctx': fcl,
+ 'node': hex(mynode),
+ 'name': _('local'),
+ 'islink': 'l' in fcl.flags(),
+ 'label': env['HG_MY_LABEL']}),
+ 'base': mappingdict({'ctx': fcb.changectx(),
+ 'fctx': fcb,
+ 'name': _('base'),
+ 'islink': 'l' in fcb.flags(),
+ 'label': env['HG_BASE_LABEL']}),
+ 'other': mappingdict({'ctx': fco.changectx(),
+ 'fctx': fco,
+ 'name': _('other'),
+ 'islink': 'l' in fco.flags(),
+ 'label': env['HG_OTHER_LABEL']}),
+ 'toolpath': toolpath,
+ 'toolargs': args}
+
+ # TODO: make all of this something that can be specified on a per-tool basis
+ tmpl = templater.unquotestring(tmpl)
+
+ # Not using cmdutil.rendertemplate here since importing cmdutil causes
+ # import errors for this module.
+ tres = formatter.templateresources(ui, repo)
+ t = formatter.maketemplater(ui, tmpl, defaults=templatekw.keywords,
+ resources=tres)
+ ui.status(t.renderdefault(props))
+
def _xmerge(repo, mynode, orig, fcd, fco, fca, toolconf, files, labels=None):
tool, toolpath, binary, symlink, scriptfn = toolconf
if fcd.isabsent() or fco.isabsent():
@@ -584,6 +627,7 @@
if scriptfn is None:
cmd = toolpath + ' ' + args
repo.ui.debug('launching merge tool: %s\n' % cmd)
+ _describemerge(ui, repo, mynode, fcd, fca, fco, env, toolpath, args)
r = ui.system(cmd, cwd=repo.root, environ=env,
blockedtag='mergetool')
else:
--- a/mercurial/help.py Wed Jan 09 20:00:35 2019 -0800
+++ b/mercurial/help.py Fri Jan 18 13:28:22 2019 -0500
@@ -119,7 +119,6 @@
TOPIC_CATEGORY_CONCEPTS: 'Concepts',
TOPIC_CATEGORY_MISC: 'Miscellaneous',
TOPIC_CATEGORY_NONE: 'Uncategorized topics',
- TOPIC_CATEGORY_NONE: 'Uncategorized topics',
}
def listexts(header, exts, indent=1, showdeprecated=False):
@@ -160,6 +159,8 @@
if shortopt:
so = '-' + shortopt
lo = '--' + longopt
+ if default is True:
+ lo = '--[no-]' + longopt
if isinstance(default, fancyopts.customopt):
default = default.getdefaultvalue()
@@ -168,7 +169,10 @@
# the %s-shows-repr property to handle integers etc. To
# match that behavior on Python 3, we do str(default) and
# then convert it to bytes.
- desc += _(" (default: %s)") % pycompat.bytestr(default)
+ defaultstr = pycompat.bytestr(default)
+ if default is True:
+ defaultstr = _("on")
+ desc += _(" (default: %s)") % defaultstr
if isinstance(default, list):
lo += " %s [+]" % optlabel
@@ -191,13 +195,31 @@
if notomitted:
rst.append('\n\n.. container:: notomitted\n\n %s\n\n' % notomitted)
-def filtercmd(ui, cmd, kw, doc):
+def filtercmd(ui, cmd, func, kw, doc):
if not ui.debugflag and cmd.startswith("debug") and kw != "debug":
+ # Debug command, and user is not looking for those.
return True
- if not ui.verbose and doc and any(w in doc for w in _exclkeywords):
+ if not ui.verbose:
+ if not kw and not doc:
+ # Command had no documentation, no point in showing it by default.
+ return True
+ if getattr(func, 'alias', False) and not getattr(func, 'owndoc', False):
+ # Alias didn't have its own documentation.
+ return True
+ if doc and any(w in doc for w in _exclkeywords):
+ # Documentation has excluded keywords.
+ return True
+ if kw == "shortlist" and not getattr(func, 'helpbasic', False):
+ # We're presenting the short list but the command is not basic.
+ return True
+ if ui.configbool('help', 'hidden-command.%s' % cmd):
+ # Configuration explicitly hides the command.
return True
return False
+def filtertopic(ui, topic):
+ return ui.configbool('help', 'hidden-topic.%s' % topic, False)
+
def topicmatch(ui, commands, kw):
"""Return help topics matching kw.
@@ -218,20 +240,23 @@
if (sum(map(lowercontains, names))
or lowercontains(header)
or (callable(doc) and lowercontains(doc(ui)))):
- results['topics'].append((names[0], header))
+ name = names[0]
+ if not filtertopic(ui, name):
+ results['topics'].append((names[0], header))
for cmd, entry in commands.table.iteritems():
if len(entry) == 3:
summary = entry[2]
else:
summary = ''
# translate docs *before* searching there
- docs = _(pycompat.getdoc(entry[0])) or ''
+ func = entry[0]
+ docs = _(pycompat.getdoc(func)) or ''
if kw in cmd or lowercontains(summary) or lowercontains(docs):
doclines = docs.splitlines()
if doclines:
summary = doclines[0]
cmdname = cmdutil.parsealiases(cmd)[0]
- if filtercmd(ui, cmdname, kw, docs):
+ if filtercmd(ui, cmdname, func, kw, docs):
continue
results['commands'].append((cmdname, summary))
for name, docs in itertools.chain(
@@ -251,12 +276,13 @@
for cmd, entry in getattr(mod, 'cmdtable', {}).iteritems():
if kw in cmd or (len(entry) > 2 and lowercontains(entry[2])):
cmdname = cmdutil.parsealiases(cmd)[0]
- cmddoc = pycompat.getdoc(entry[0])
+ func = entry[0]
+ cmddoc = pycompat.getdoc(func)
if cmddoc:
cmddoc = gettext(cmddoc).splitlines()[0]
else:
cmddoc = _('(no help text available)')
- if filtercmd(ui, cmdname, kw, cmddoc):
+ if filtercmd(ui, cmdname, func, kw, cmddoc):
continue
results['extensioncommands'].append((cmdname, cmddoc))
return results
@@ -289,6 +315,8 @@
loaddoc('changegroups', subdir='internals')),
(['config'], _('Config Registrar'),
loaddoc('config', subdir='internals')),
+ (['extensions', 'extension'], _('Extension API'),
+ loaddoc('extensions', subdir='internals')),
(['requirements'], _('Repository Requirements'),
loaddoc('requirements', subdir='internals')),
(['revlogs'], _('Revision Logs'),
@@ -530,14 +558,8 @@
func = e[0]
if select and not select(f):
continue
- if (not select and name != 'shortlist' and
- func.__module__ != commands.__name__):
- continue
- if name == "shortlist":
- if not getattr(func, 'helpbasic', False):
- continue
doc = pycompat.getdoc(func)
- if filtercmd(ui, f, name, doc):
+ if filtercmd(ui, f, func, name, doc):
continue
doc = gettext(doc)
if not doc:
@@ -594,7 +616,8 @@
ex = opts.get
anyopts = (ex(r'keyword') or not (ex(r'command') or ex(r'extension')))
if not name and anyopts:
- exts = listexts(_('enabled extensions:'), extensions.enabled())
+ exts = listexts(_('enabled extensions:'), extensions.enabled(),
+ showdeprecated=ui.verbose)
if exts:
rst.append('\n')
rst.extend(exts)
@@ -609,7 +632,10 @@
else:
category = TOPIC_CATEGORY_NONE
- topiccats.setdefault(category, []).append((names[0], header))
+ topicname = names[0]
+ if not filtertopic(ui, topicname):
+ topiccats.setdefault(category, []).append(
+ (topicname, header))
# Check that all categories have an order.
missing_order = set(topiccats.keys()) - set(TOPIC_CATEGORY_ORDER)
--- a/mercurial/help/config.txt Wed Jan 09 20:00:35 2019 -0800
+++ b/mercurial/help/config.txt Fri Jan 18 13:28:22 2019 -0500
@@ -462,7 +462,7 @@
(default: False)
``status.terse``
- Default value for the --terse flag, which condenes status output.
+ Default value for the --terse flag, which condenses status output.
(default: empty)
``update.check``
@@ -1756,6 +1756,9 @@
possible. Some progress bars only offer indeterminate information, while others
have a definite end point.
+``debug``
+ Whether to print debug info when updating the progress bar. (default: False)
+
``delay``
Number of seconds (float) before showing the progress bar. (default: 3)
@@ -1806,6 +1809,16 @@
Alias definitions for revsets. See :hg:`help revsets` for details.
+``rewrite``
+-----------
+
+``backup-bundle``
+ Whether to save stripped changesets to a bundle file. (default: True)
+
+``update-timestamp``
+ If true, updates the date and time of the changeset to the current time.
+ In the current version, this only applies to :hg:`amend`.
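+
+  For example, a hypothetical configuration::
+
+    [rewrite]
+    backup-bundle = False
+    update-timestamp = True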
+
``storage``
-----------
@@ -2246,6 +2259,14 @@
Can be overridden per-merge-tool, see the ``[merge-tools]`` section.
+``message-output``
+ Where to write status and error messages. (default: ``stdio``)
+
+ ``stderr``
+ Everything to stderr.
+ ``stdio``
+ Status to stdout, and error to stderr.
+
``origbackuppath``
The path to a directory used to store generated .orig files. If the path is
not a directory, one will be created. If set, files stored in this
@@ -2296,6 +2317,16 @@
On Windows, this configuration option is ignored and the command aborted.
+``pre-merge-tool-output-template``
+ A template that is printed before executing an external merge tool. This can
+ be used to print out additional context that might be useful to have during
+ the conflict resolution, such as the description of the various commits
+ involved or bookmarks/tags.
+
+ Additional information is available in the ``local``, ``base``, and ``other``
+ dicts. For example: ``{local.label}``, ``{base.name}``, or
+ ``{other.islink}``.
+
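+  For example, a hypothetical minimal template::
+
+    [ui]
+    pre-merge-tool-output-template = 'merging {path} using {toolpath}\n'
+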
``quiet``
Reduce the amount of output printed.
(default: False)
--- a/mercurial/help/hgignore.txt Wed Jan 09 20:00:35 2019 -0800
+++ b/mercurial/help/hgignore.txt Fri Jan 18 13:28:22 2019 -0500
@@ -59,14 +59,17 @@
Regular expression, Python/Perl syntax.
``glob``
Shell-style glob.
+``rootglob``
+ A variant of ``glob`` that is rooted (see below).
The chosen syntax stays in effect when parsing all patterns that
follow, until another syntax is selected.
-Neither glob nor regexp patterns are rooted. A glob-syntax pattern of
-the form ``*.c`` will match a file ending in ``.c`` in any directory,
-and a regexp pattern of the form ``\.c$`` will do the same. To root a
-regexp pattern, start it with ``^``.
+Neither ``glob`` nor regexp patterns are rooted. A glob-syntax
+pattern of the form ``*.c`` will match a file ending in ``.c`` in any
+directory, and a regexp pattern of the form ``\.c$`` will do the
+same. To root a regexp pattern, start it with ``^``. To get the same
+effect with glob-syntax, you have to use ``rootglob``.
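+
+For example, this hypothetical ``.hgignore`` ignores ``*.c`` files only at
+the repository root::
+
+  syntax: rootglob
+  *.c
+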
Subdirectories can have their own .hgignore settings by adding
``subinclude:path/to/subdir/.hgignore`` to the root ``.hgignore``. See
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/mercurial/help/internals/extensions.txt Fri Jan 18 13:28:22 2019 -0500
@@ -0,0 +1,354 @@
+Extensions allow new features to be created and used directly from the
+main hg command line as if they were built-in commands. Extensions have
+full access to the *internal* API.
+
+Use of Mercurial's internal API very likely makes your code subject to
+Mercurial's license. Before going any further, read the License page.
+
+There are NO guarantees that third-party code calling into Mercurial's
+internals won't break from release to release. If you do use Mercurial's API
+for published third-party code, we expect you to test your code before each
+major Mercurial release. This will prevent various bug reports from your users
+when they upgrade their copy of Mercurial.
+
+File Layout
+===========
+
+Extensions are usually written as simple python modules. Larger ones are
+better split into multiple modules of a single package (see the convert
+extension). The package root module gives its name to the extension and
+implements the ``cmdtable`` and optional callbacks described below.
+
+Command table
+=============
+
+To write your own extension, your python module can provide an optional dict
+named ``cmdtable`` with entries describing each command. Commands should be
+registered in the ``cmdtable`` using the ``@command`` decorator.
+
+Example using ``@command`` decorator (requires Mercurial 1.9)::
+
+ from mercurial import cmdutil
+ from mercurial.i18n import _
+
+ cmdtable = {}
+ command = cmdutil.command(cmdtable)
+
+ @command('print-parents',
+ [('s', 'short', None, _('print short form')),
+ ('l', 'long', None, _('print long form'))],
+ _('[options] node'))
+ def printparents(ui, repo, node, **opts):
+ ...
+
+The cmdtable dictionary
+-----------------------
+
+Each key of the ``cmdtable`` dictionary is a new command name; each value is
+a tuple containing:
+
+1. the function to be called when the command is used.
+2. a list of options the command can take.
+3. a command line synopsis for the command (the function docstring is used for
+ the full help).
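+
+For example, the ``@command`` decorator above produces a ``cmdtable`` entry
+roughly equivalent to this hand-written sketch::
+
+    cmdtable = {
+        'print-parents': (
+            printparents,
+            [('s', 'short', None, _('print short form')),
+             ('l', 'long', None, _('print long form'))],
+            _('[options] node'),
+        ),
+    }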
+
+List of options
+---------------
+
+All the command flag options are documented in the mercurial/fancyopts.py
+sources.
+
+The options list is a list of tuples containing:
+
+1. the short option letter, or ``''`` if no short option is available
+ (for example, ``o`` for a ``-o`` option).
+2. the long option name (for example, ``option`` for a ``--option`` option).
+3. a default value for the option.
+4. a help string for the option (the "hg newcommand" part may be omitted;
+ only the options and parameters substring is needed).
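+
+For example, a hypothetical entry defining a ``-n``/``--num`` option that
+defaults to ``10``::
+
+    ('n', 'num', 10, _('limit number of results'))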
+
+Command function signatures
+---------------------------
+
+Functions that implement new commands always receive a ``ui`` and usually
+a ``repo`` parameter. The remaining parameters are taken from the command
+line items that don't start with a dash and are passed in the same order
+they were written. Parameters without a default value are required.
+
+If there is no repo to be associated with the command and consequently no
+``repo`` passed, then ``norepo=True`` should be passed to the ``@command``
+decorator::
+
+ @command('mycommand', [], norepo=True)
+ def mycommand(ui, **opts):
+ ...
+
+For examples of ``norepo``, see the convert extension.
+
+Command function docstrings
+===========================
+
+The docstring of your function is used as the main help text, shown by
+``hg help mycommand``. The docstring should be formatted using a simple
+subset of reStructuredText markup. The supported constructs include:
+
+Paragraphs::
+
+ This is a paragraph.
+
+ Paragraphs are separated
+ by blank lines.
+
+A verbatim block is introduced with a double colon followed by an indented
+block. The double colon is turned into a single colon on display::
+
+ Some text::
+
+ verbatim
+ text
+ !!
+
+We have field lists::
+
+ :key1: value1
+ :key2: value2
+
+Bullet lists::
+
+ - foo
+ - bar
+
+Enumerated lists::
+
+ 1. foo
+ 2. bar
+
+Inline markup::
+
+ ``*bold*``, ``monospace``, :hg:`command`
+
+Mark Mercurial commands with ``:hg:`` to make a nice link to the corresponding
+documentation. We'll expand the support if new constructs can be parsed
+without too much trouble.
+
+Communicating with the user
+===========================
+
+Besides the ``ui`` methods, like ``ui.write(*msg)`` or
+``ui.prompt(msg, default="y")``, an extension can add help text for each
+of its commands and the extension itself.
+
+The module docstring is used as the help string shown by
+``hg help extensionname``. Similarly, the docstring of the function that
+implements a command is shown when ``hg help command`` is invoked.
+
+Setup Callbacks
+===============
+
+Extensions are loaded in phases. All extensions are processed in a given phase
+before the next phase begins. In the first phase, all extension modules are
+loaded and registered with Mercurial. This means that you can find all enabled
+extensions with ``extensions.find`` in the following phases.
+
+Extension setup
+---------------
+
+There are two callbacks to be called when extensions are loaded, named
+``uisetup`` and ``extsetup``. ``uisetup`` is called first for each extension,
+then ``extsetup`` is called. This means ``extsetup`` can be useful in case
+one extension optionally depends on another extension.
+
+Both ``uisetup`` and ``extsetup`` receive a ui object with the local
+repository configuration::
+
+ def uisetup(ui):
+ # ...
+
+ def extsetup(ui):
+ # ...
+
+Be aware that ``uisetup`` is NOT the function to configure a ``ui`` instance.
+It's called only once per process, not per ``ui`` instance. Also, any changes
+made to the ``ui`` may be discarded because the ``ui`` here has only
+temporarily loaded the local configuration. So, it's generally wrong to call
+`ui.setconfig()` in these callbacks. Notable exceptions are setting
+``pre/post-<command>`` hooks and extending ``ui.__class__``.
+
+In Mercurial 1.3.1 or earlier, ``extsetup`` takes no argument.
+
+Command table setup
+-------------------
+
+After ``extsetup``, the ``cmdtable`` is copied into the global command table
+in Mercurial.
+
+Ui instance setup
+-----------------
+
+The optional ``uipopulate`` is called for each ``ui`` instance after
+configuration is loaded, where extensions can set up additional ui members,
+update configuration by ``ui.setconfig()``, and extend the class dynamically.
+
+Typically there are three ``ui`` instances involved in command execution:
+
+``req.ui`` (or ``repo.baseui``)
+ Only system and user configurations are loaded into it.
+``lui``
+ Local repository configuration is loaded as well. This will be used at
+ early dispatching stage where a repository isn't available.
+``repo.ui``
+ The fully-loaded ``ui`` used after a repository is instantiated. This
+ will be created from the ``req.ui`` per repository.
+
+In command server and hgweb, this may be called more than once for the same
+``ui`` instance.
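+
+For example, a hypothetical ``uipopulate`` sketch::
+
+    def uipopulate(ui):
+        # per-instance setup; ui.setconfig() is safe here
+        if ui.configbool('myext', 'quiet-by-default'):
+            ui.setconfig('ui', 'quiet', True, 'myext')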
+
+(New in Mercurial 4.9)
+
+Repository setup
+----------------
+
+Extensions can implement an optional callback named ``reposetup``. It is
+called after the main Mercurial repository initialization, and can be used
+to set up any local state the extension might need.
+
+As other command functions it receives an ``ui`` object and a ``repo`` object
+(no additional parameters for this, though)::
+
+ def reposetup(ui, repo):
+ # do initialization here.
+
+It is important to take into account that the ``ui`` object that is received
+by the ``reposetup`` function is not the same as the one received by the
+``uisetup`` and ``extsetup`` functions. This is particularly important when
+setting up hooks as described in the following section, since not all hooks
+use the same ``ui`` object and hence different hooks must be configured in
+different setup functions.
+
+Wrapping methods on the ui and repo classes
+-------------------------------------------
+
+Because extensions can be loaded *per repository*, you should avoid using
+``extensions.wrapfunction()`` on methods of the ``ui`` and ``repo`` objects.
+Instead, create a subclass of the specific class of the instance passed into
+the ``*setup()`` hook; e.g. use ``ui.__class__`` as the base class, then
+reassign your new class to ``ui.__class__`` again. Mercurial will then use
+your updated ``ui`` or ``repo`` instance only for repositories where your
+extension is enabled (or copies thereof, reusing your new class).
+
+For example::
+
+ def uisetup(ui):
+ class echologui(ui.__class__):
+ def log(self, service, *msg, **opts):
+ if msg:
+ self.write('%s: %s\n' % (service, msg[0] % msg[1:]))
+ super(echologui, self).log(service, *msg, **opts)
+
+ ui.__class__ = echologui
+
+Configuring Hooks
+=================
+
+Some extensions must use hooks to do their work. These required hooks can
+be configured manually by the user by modifying the ``[hook]`` section of
+their hgrc, but they can also be configured automatically by calling the
+``ui.setconfig('hooks', ...)`` function in one of the setup functions
+described above.
+
+The main difference between manually modifying the hooks section in the hgrc
+and using ``ui.setconfig()`` is that when using ``ui.setconfig()`` you have
+access to the actual hook function object, which you can pass directly to
+``ui.setconfig()``, while when you use the hooks section of the hgrc file
+you must refer to the hook function by using the
+``python:modulename.functionname`` idiom (e.g. ``python:hgext.notify.hook``).
+
+For example::
+
+ # Define hooks -- note that the actual function name is irrelevant.
+ def preupdatehook(ui, repo, **kwargs):
+ ui.write("Pre-update hook triggered\n")
+
+ def updatehook(ui, repo, **kwargs):
+ ui.write("Update hook triggered\n")
+
+ def uisetup(ui):
+ # When pre-<cmd> and post-<cmd> hooks are configured by means of
+ # the ui.setconfig() function, you must use the ui object passed
+ # to uisetup or extsetup.
+ ui.setconfig("hooks", "pre-update.myextension", preupdatehook)
+
+ def reposetup(ui, repo):
+ # Repository-specific hooks can be configured here. These include
+ # the update hook.
+ ui.setconfig("hooks", "update.myextension", updatehook)
+
+Note how different hooks may need to be configured in different setup
+functions. In the example you can see that the ``update`` hook must be
+configured in the ``reposetup`` function, while the ``pre-update`` hook
+must be configured in the ``uisetup`` or ``extsetup`` functions.
+
+Marking compatible versions
+===========================
+
+Every extension should use the ``testedwith`` variable to specify Mercurial
+releases it's known to be compatible with. This helps us and users diagnose
+where problems are coming from::
+
+ testedwith = '2.0 2.0.1 2.1 2.1.1 2.1.2'
+
+Do not use the ``internal`` marker in third-party extensions; we will
+immediately drop all bug reports mentioning your extension if we catch you
+doing this.
+
+Similarly, an extension can use the ``buglink`` variable to specify how users
+should report issues with the extension. This link will be included in the
+error message if the extension produces errors::
+
+ buglink = 'https://bitbucket.org/USER/REPO/issues'
+
+Wrap up: what belongs where?
+============================
+
+You will find here a list of the most common tasks, based on setups from
+the extensions included in Mercurial core.
+
+uisetup
+-------
+
+* Changes to ``ui.__class__`` . The ``ui`` object that will be used to run
+ the command has not yet been created. Changes made here will affect ``ui``
+ objects created after this, and in particular the ``ui`` that will be passed
+ to ``runcommand``
+* Command wraps (``extensions.wrapcommand``)
+* Changes that need to be visible by other extensions: because initialization
+ occurs in phases (all extensions run ``uisetup``, then all run ``extsetup``),
+ a change made here will be visible by other extensions during ``extsetup``.
+* Monkeypatches or function wraps (``extensions.wrapfunction``) of ``dispatch``
+ module members
+* Set up ``pre-*`` and ``post-*`` hooks. (DEPRECATED. ``uipopulate`` is
+ preferred on Mercurial 4.9 and later.)
+* ``pushkey`` setup
+
+extsetup
+--------
+
+* Changes depending on the status of other extensions. (``if extensions.find('mq')``)
+* Add a global option to all commands
+* Extend revsets
+
+uipopulate
+----------
+
+* Modify ``ui`` instance attributes and configuration variables.
+* Changes to ``ui.__class__`` per instance.
+* Set up all hooks per scoped configuration.
+
+reposetup
+---------
+
+* Set up all hooks but ``pre-*`` and ``post-*``. (DEPRECATED. ``uipopulate`` is
+ preferred on Mercurial 4.9 and later.)
+* Modify configuration variables
+* Changes to ``repo.__class__``, ``repo.dirstate.__class__``
--- a/mercurial/help/internals/revlogs.txt Wed Jan 09 20:00:35 2019 -0800
+++ b/mercurial/help/internals/revlogs.txt Fri Jan 18 13:28:22 2019 -0500
@@ -11,8 +11,8 @@
links to its *parent* entries. The collective metadata is referred
to as the *index* and the revision data is the *data*.
-Revision data is stored as a series of compressed deltas against previous
-revisions.
+Revision data is stored as a series of compressed deltas against
+ancestor revisions.
Revlogs are written in an append-only fashion. We never need to rewrite
a file to insert nor do we need to remove data. Rolling back in-progress
@@ -35,9 +35,6 @@
significant half of the integer is the format/version short. The other
short holds feature flags that dictate behavior of the revlog.
-Only 1 bit of the format/version short is currently used. Remaining
-bits are reserved for future use.
-
The following values for the format/version short are defined:
0
@@ -53,15 +50,22 @@
beyond 32-bit header.
The feature flags short consists of bit flags. Where 0 is the least
-significant bit, the following bit offsets define flags:
+significant bit, the defined flags vary by revlog version.
+
+Version 0 revlogs have no defined flags and the presence of a flag
+is considered an error.
+
+Version 1 revlogs have the following flags at the specified bit offsets:
0
Store revision data inline.
1
Generaldelta encoding.
-2-15
- Reserved for future use.
+Version 2 revlogs have the following flags at the specified bit offsets:
+
+0
+ Store revision data inline.
The following header values are common:
@@ -153,8 +157,10 @@
(In development. Format not finalized or stable.)
-Version 2 is currently identical to version 1. This will obviously
-change.
+Version 2 is identical to version 1 with the following differences.
+
+There is no dedicated *generaldelta* revlog format flag. Instead,
+the feature is implied to always be enabled.
Delta Chains
============
--- a/mercurial/help/internals/wireprotocolv2.txt Wed Jan 09 20:00:35 2019 -0800
+++ b/mercurial/help/internals/wireprotocolv2.txt Fri Jan 18 13:28:22 2019 -0500
@@ -426,8 +426,10 @@
has no file revisions data. This means that all referenced file revisions
in the queried set of changeset revisions will be sent.
-TODO we'll probably want a more complicated mechanism for the client to
-specify which ancestor revisions are known.
+TODO we want a more complicated mechanism for the client to specify which
+ancestor revisions are known. This is needed so intelligent deltas can be
+emitted and so updated linknodes can be sent if the client needs to adjust
+its linknodes for existing file nodes to older changeset revisions.
TODO we may want to make linknodes an array so multiple changesets can be
marked as introducing a file revision, since this can occur with e.g. hidden
changesets.
--- a/mercurial/help/patterns.txt Wed Jan 09 20:00:35 2019 -0800
+++ b/mercurial/help/patterns.txt Fri Jan 18 13:28:22 2019 -0500
@@ -20,7 +20,9 @@
To use an extended glob, start a name with ``glob:``. Globs are rooted
at the current directory; a glob such as ``*.c`` will only match files
-in the current directory ending with ``.c``.
+in the current directory ending with ``.c``. ``rootglob:`` can be used
+instead of ``glob:`` for a glob that is rooted at the root of the
+repository.
The supported glob syntax extensions are ``**`` to match any string
across path separators and ``{a,b}`` to mean "a or b".
@@ -64,6 +66,7 @@
foo/*.c any name ending in ".c" in the directory foo
foo/**.c any name ending in ".c" in any subdirectory of foo
including itself.
+ rootglob:*.c any name ending in ".c" in the root of the repository
Regexp examples::
--- a/mercurial/hg.py Wed Jan 09 20:00:35 2019 -0800
+++ b/mercurial/hg.py Fri Jan 18 13:28:22 2019 -0500
@@ -38,6 +38,7 @@
narrowspec,
node,
phases,
+ repository as repositorymod,
scmutil,
sshpeer,
statichttprepo,
@@ -160,23 +161,19 @@
obj = _peerlookup(path).instance(ui, path, create, intents=intents,
createopts=createopts)
ui = getattr(obj, "ui", ui)
- if ui.configbool('devel', 'debug.extensions'):
- log = lambda msg, *values: ui.debug('debug.extensions: ',
- msg % values, label='debug.extensions')
- else:
- log = lambda *a, **kw: None
for f in presetupfuncs or []:
f(ui, obj)
- log('- executing reposetup hooks\n')
+ ui.log(b'extension', b'- executing reposetup hooks\n')
with util.timedcm('all reposetup') as allreposetupstats:
for name, module in extensions.extensions(ui):
- log(' - running reposetup for %s\n' % (name,))
+ ui.log(b'extension', b' - running reposetup for %s\n', name)
hook = getattr(module, 'reposetup', None)
if hook:
with util.timedcm('reposetup %r', name) as stats:
hook(ui, obj)
- log(' > reposetup for %r took %s\n', name, stats)
- log('> all reposetup took %s\n', allreposetupstats)
+ ui.log(b'extension', b' > reposetup for %s took %s\n',
+ name, stats)
+ ui.log(b'extension', b'> all reposetup took %s\n', allreposetupstats)
if not obj.local():
for f in wirepeersetupfuncs:
f(ui, obj)
@@ -270,6 +267,7 @@
})
postshare(srcrepo, r, defaultpath=defaultpath)
+ r = repository(ui, dest)
_postshareupdate(r, update, checkout=checkout)
return r
@@ -334,6 +332,9 @@
template = ('[paths]\n'
'default = %s\n')
destrepo.vfs.write('hgrc', util.tonativeeol(template % default))
+ if repositorymod.NARROW_REQUIREMENT in sourcerepo.requirements:
+ with destrepo.wlock():
+ narrowspec.copytoworkingcopy(destrepo)
def _postshareupdate(repo, update, checkout=None):
"""Maybe perform a working directory update after a shared repo is created.
@@ -451,15 +452,14 @@
defaultpath = source
sharerepo = repository(ui, path=sharepath)
- share(ui, sharerepo, dest=dest, update=False, bookmarks=False,
- defaultpath=defaultpath)
+ destrepo = share(ui, sharerepo, dest=dest, update=False, bookmarks=False,
+ defaultpath=defaultpath)
# We need to perform a pull against the dest repo to fetch bookmarks
# and other non-store data that isn't shared by default. In the case of
# non-existing shared repo, this means we pull from the remote twice. This
# is a bit weird. But at the time it was implemented, there wasn't an easy
# way to pull just non-changegroup data.
- destrepo = repository(ui, path=dest)
exchange.pull(destrepo, srcpeer, heads=revs)
_postshareupdate(destrepo, update)
@@ -735,8 +735,9 @@
local = destpeer.local()
if local:
if narrow:
- with local.lock():
+ with local.wlock(), local.lock():
local.setnarrowpats(storeincludepats, storeexcludepats)
+ narrowspec.copytoworkingcopy(local)
u = util.url(abspath)
defaulturl = bytes(u)
--- a/mercurial/hgweb/hgweb_mod.py Wed Jan 09 20:00:35 2019 -0800
+++ b/mercurial/hgweb/hgweb_mod.py Fri Jan 18 13:28:22 2019 -0500
@@ -22,6 +22,7 @@
from .. import (
encoding,
error,
+ extensions,
formatter,
hg,
hook,
@@ -212,6 +213,8 @@
u = baseui.copy()
else:
u = uimod.ui.load()
+ extensions.loadall(u)
+ extensions.populateui(u)
r = hg.repository(u, repo)
else:
# we trust caller to give us a private copy
--- a/mercurial/hgweb/hgwebdir_mod.py Wed Jan 09 20:00:35 2019 -0800
+++ b/mercurial/hgweb/hgwebdir_mod.py Fri Jan 18 13:28:22 2019 -0500
@@ -30,6 +30,7 @@
configitems,
encoding,
error,
+ extensions,
hg,
profiling,
pycompat,
@@ -268,6 +269,10 @@
self.lastrefresh = 0
self.motd = None
self.refresh()
+ if not baseui:
+ # set up environment for new ui
+ extensions.loadall(self.ui)
+ extensions.populateui(self.ui)
def refresh(self):
if self.ui:
@@ -304,6 +309,7 @@
paths = self.conf
elif isinstance(self.conf, dict):
paths = self.conf.items()
+ extensions.populateui(u)
repos = findrepos(paths)
for prefix, root in u.configitems('collections'):
--- a/mercurial/hgweb/server.py Wed Jan 09 20:00:35 2019 -0800
+++ b/mercurial/hgweb/server.py Fri Jan 18 13:28:22 2019 -0500
@@ -94,7 +94,7 @@
try:
self.do_hgweb()
except socket.error as inst:
- if inst[0] != errno.EPIPE:
+ if inst.errno != errno.EPIPE:
raise
def do_POST(self):
--- a/mercurial/hgweb/webcommands.py Wed Jan 09 20:00:35 2019 -0800
+++ b/mercurial/hgweb/webcommands.py Fri Jan 18 13:28:22 2019 -0500
@@ -1216,8 +1216,7 @@
bodyfh = web.res.getbodyfile()
- archival.archive(web.repo, bodyfh, cnode, artype, prefix=name,
- matchfn=match,
+ archival.archive(web.repo, bodyfh, cnode, artype, prefix=name, match=match,
subrepos=web.configbool("web", "archivesubrepos"))
return []
--- a/mercurial/hook.py Wed Jan 09 20:00:35 2019 -0800
+++ b/mercurial/hook.py Fri Jan 18 13:28:22 2019 -0500
@@ -102,7 +102,7 @@
(hname, exc.args[0]))
else:
ui.warn(_('error: %s hook raised an exception: '
- '%s\n') % (hname, encoding.strtolocal(str(exc))))
+ '%s\n') % (hname, stringutil.forcebytestr(exc)))
if throw:
raise
if not ui.tracebackflag:
--- a/mercurial/httpconnection.py Wed Jan 09 20:00:35 2019 -0800
+++ b/mercurial/httpconnection.py Fri Jan 18 13:28:22 2019 -0500
@@ -92,6 +92,18 @@
prefix = auth.get('prefix')
if not prefix:
continue
+
+ prefixurl = util.url(prefix)
+ if prefixurl.user and prefixurl.user != user:
+ # If a username was set in the prefix, it must match the username in
+ # the URI.
+ continue
+
+ # The URI passed in has been stripped of credentials, so erase the user
+ # here to allow simpler matching.
+ prefixurl.user = None
+ prefix = bytes(prefixurl)
+
p = prefix.split('://', 1)
if len(p) > 1:
schemes, prefix = [p[0]], p[1]
--- a/mercurial/keepalive.py Wed Jan 09 20:00:35 2019 -0800
+++ b/mercurial/keepalive.py Fri Jan 18 13:28:22 2019 -0500
@@ -636,7 +636,7 @@
self.sentbytescount += len(str)
except socket.error as v:
reraise = True
- if v[0] == errno.EPIPE: # Broken pipe
+ if v.args[0] == errno.EPIPE: # Broken pipe
if self._HTTPConnection__state == httplib._CS_REQ_SENT:
self._broken_pipe_resp = None
self._broken_pipe_resp = self.getresponse()
--- a/mercurial/localrepo.py Wed Jan 09 20:00:35 2019 -0800
+++ b/mercurial/localrepo.py Fri Jan 18 13:28:22 2019 -0500
@@ -91,11 +91,16 @@
def __get__(self, repo, type=None):
if repo is None:
return self
- return super(_basefilecache, self).__get__(repo.unfiltered(), type)
- def __set__(self, repo, value):
- return super(_basefilecache, self).__set__(repo.unfiltered(), value)
- def __delete__(self, repo):
- return super(_basefilecache, self).__delete__(repo.unfiltered())
+ # proxy to unfiltered __dict__ since filtered repo has no entry
+ unfi = repo.unfiltered()
+ try:
+ return unfi.__dict__[self.sname]
+ except KeyError:
+ pass
+ return super(_basefilecache, self).__get__(unfi, type)
+
+ def set(self, repo, value):
+ return super(_basefilecache, self).set(repo.unfiltered(), value)
class repofilecache(_basefilecache):
"""filecache for files in .hg but outside of .hg/store"""
@@ -358,7 +363,7 @@
# Increment the sub-version when the revlog v2 format changes to lock out old
# clients.
-REVLOGV2_REQUIREMENT = 'exp-revlogv2.0'
+REVLOGV2_REQUIREMENT = 'exp-revlogv2.1'
# A repository with the sparserevlog feature will have delta chains that
# can spread over a larger span. Sparse reading cuts these large spans into
@@ -446,15 +451,10 @@
# The .hg/hgrc file may load extensions or contain config options
# that influence repository construction. Attempt to load it and
# process any new extensions that it may have pulled in.
- try:
- ui.readconfig(hgvfs.join(b'hgrc'), root=wdirvfs.base)
- # Run this before extensions.loadall() so extensions can be
- # automatically enabled.
+ if loadhgrc(ui, wdirvfs, hgvfs, requirements):
afterhgrcload(ui, wdirvfs, hgvfs, requirements)
- except IOError:
- pass
- else:
extensions.loadall(ui)
+ extensions.populateui(ui)
# Set of module names of extensions loaded for this repository.
extensionmodulenames = {m.__name__ for n, m in extensions.extensions(ui)}
@@ -508,6 +508,8 @@
else:
storebasepath = hgvfs.base
cachepath = hgvfs.join(b'cache')
+ wcachepath = hgvfs.join(b'wcache')
+
# The store has changed over time and the exact layout is dictated by
# requirements. The store interface abstracts differences across all
@@ -522,6 +524,9 @@
# The cache vfs is used to manage cache files.
cachevfs = vfsmod.vfs(cachepath, cacheaudited=True)
cachevfs.createmode = store.createmode
+ # The cache vfs is used to manage cache files related to the working copy
+ wcachevfs = vfsmod.vfs(wcachepath, cacheaudited=True)
+ wcachevfs.createmode = store.createmode
# Now resolve the type for the repository object. We do this by repeatedly
# calling a factory function to produces types for specific aspects of the
@@ -544,6 +549,7 @@
storevfs=storevfs,
storeoptions=storevfs.options,
cachevfs=cachevfs,
+ wcachevfs=wcachevfs,
extensionmodulenames=extensionmodulenames,
extrastate=extrastate,
baseclasses=bases)
@@ -574,9 +580,28 @@
sharedpath=storebasepath,
store=store,
cachevfs=cachevfs,
+ wcachevfs=wcachevfs,
features=features,
intents=intents)
+def loadhgrc(ui, wdirvfs, hgvfs, requirements):
+ """Load hgrc files/content into a ui instance.
+
+ This is called during repository opening to load any additional
+ config files or settings relevant to the current repository.
+
+ Returns a bool indicating whether any additional configs were loaded.
+
+ Extensions should monkeypatch this function to modify how per-repo
+ configs are loaded. For example, an extension may wish to pull in
+ configs from alternate files or sources.
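+
+ A hypothetical wrapper sketch, installed via
+ ``extensions.wrapfunction(localrepo, 'loadhgrc', wrapper)``::
+
+     def wrapper(orig, ui, wdirvfs, hgvfs, requirements):
+         loaded = orig(ui, wdirvfs, hgvfs, requirements)
+         # load extra per-repo config here (illustrative only)
+         return loaded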
+ """
+ try:
+ ui.readconfig(hgvfs.join(b'hgrc'), root=wdirvfs.base)
+ return True
+ except IOError:
+ return False
+
def afterhgrcload(ui, wdirvfs, hgvfs, requirements):
"""Perform additional actions after .hg/hgrc is loaded.
@@ -733,8 +758,7 @@
if 0 <= chainspan:
options[b'maxdeltachainspan'] = chainspan
- mmapindexthreshold = ui.configbytes(b'experimental',
- b'mmapindexthreshold')
+ mmapindexthreshold = ui.configbytes(b'storage', b'mmap-threshold')
if mmapindexthreshold is not None:
options[b'mmapindexthreshold'] = mmapindexthreshold
@@ -791,7 +815,7 @@
if path[0] == b'/':
path = path[1:]
- return filelog.narrowfilelog(self.svfs, path, self.narrowmatch())
+ return filelog.narrowfilelog(self.svfs, path, self._storenarrowmatch)
def makefilestorage(requirements, features, **kwargs):
"""Produce a type conforming to ``ilocalrepositoryfilestorage``."""
@@ -872,7 +896,7 @@
}
def __init__(self, baseui, ui, origroot, wdirvfs, hgvfs, requirements,
- supportedrequirements, sharedpath, store, cachevfs,
+ supportedrequirements, sharedpath, store, cachevfs, wcachevfs,
features, intents=None):
"""Create a new local repository instance.
@@ -915,6 +939,9 @@
cachevfs
``vfs.vfs`` used for cache files.
+ wcachevfs
+ ``vfs.vfs`` used for cache files related to the working copy.
+
features
``set`` of bytestrings defining features/capabilities of this
instance.
@@ -937,6 +964,7 @@
self.sharedpath = sharedpath
self.store = store
self.cachevfs = cachevfs
+ self.wcachevfs = wcachevfs
self.features = features
self.filtername = None
@@ -1012,12 +1040,12 @@
path = path[len(repo.path) + 1:]
if path.startswith('cache/'):
msg = 'accessing cache with vfs instead of cachevfs: "%s"'
- repo.ui.develwarn(msg % path, stacklevel=2, config="cache-vfs")
- if path.startswith('journal.'):
+ repo.ui.develwarn(msg % path, stacklevel=3, config="cache-vfs")
+ if path.startswith('journal.') or path.startswith('undo.'):
# journal is covered by 'lock'
if repo._currentlock(repo._lockref) is None:
repo.ui.develwarn('write with no lock: "%s"' % path,
- stacklevel=2, config='check-locks')
+ stacklevel=3, config='check-locks')
elif repo._currentlock(repo._wlockref) is None:
# rest of vfs files are covered by 'wlock'
#
@@ -1026,7 +1054,7 @@
if path.startswith(prefix):
return
repo.ui.develwarn('write with no wlock: "%s"' % path,
- stacklevel=2, config='check-locks')
+ stacklevel=3, config='check-locks')
return ret
return checkvfs
@@ -1045,7 +1073,7 @@
path = path[len(repo.sharedpath) + 1:]
if repo._currentlock(repo._lockref) is None:
repo.ui.develwarn('write with no lock: "%s"' % path,
- stacklevel=3)
+ stacklevel=4)
return ret
return checksvfs
@@ -1162,7 +1190,8 @@
@storecache('00manifest.i')
def manifestlog(self):
rootstore = manifest.manifestrevlog(self.svfs)
- return manifest.manifestlog(self.svfs, self, rootstore)
+ return manifest.manifestlog(self.svfs, self, rootstore,
+ self._storenarrowmatch)
@repofilecache('dirstate')
def dirstate(self):
@@ -1195,9 +1224,17 @@
return narrowspec.load(self)
@storecache(narrowspec.FILENAME)
+ def _storenarrowmatch(self):
+ if repository.NARROW_REQUIREMENT not in self.requirements:
+ return matchmod.always(self.root, '')
+ include, exclude = self.narrowpats
+ return narrowspec.match(self.root, include=include, exclude=exclude)
+
+ @storecache(narrowspec.FILENAME)
def _narrowmatch(self):
if repository.NARROW_REQUIREMENT not in self.requirements:
return matchmod.always(self.root, '')
+ narrowspec.checkworkingcopynarrowspec(self)
include, exclude = self.narrowpats
return narrowspec.match(self.root, include=include, exclude=exclude)
@@ -1325,9 +1362,8 @@
Returns a revset.abstractsmartset, which is a list-like interface
that contains integer revisions.
'''
- expr = revsetlang.formatspec(expr, *args)
- m = revset.match(None, expr)
- return m(self)
+ tree = revsetlang.spectree(expr, *args)
+ return revset.makematcher(tree)(self)
def set(self, expr, *args):
'''Find revisions matching a revset and emit changectx instances.
@@ -1399,10 +1435,11 @@
tags, tt = self._findtags()
else:
tags = self._tagscache.tags
+ rev = self.changelog.rev
for k, v in tags.iteritems():
try:
# ignore tags to unknown nodes
- self.changelog.rev(v)
+ rev(v)
t[k] = v
except (error.LookupError, ValueError):
pass
@@ -1570,7 +1607,7 @@
self.dirstate.copy(None, f)
def filectx(self, path, changeid=None, fileid=None, changectx=None):
- """changeid can be a changeset revision, node, or tag.
+ """changeid must be a changeset revision, if specified.
fileid can be a file revision or node."""
return context.filectx(self, path, changeid, fileid,
changectx=changectx)
@@ -1799,6 +1836,7 @@
# discard all changes (including ones already written
# out) in this transaction
narrowspec.restorebackup(self, 'journal.narrowspec')
+ narrowspec.restorewcbackup(self, 'journal.narrowspec.dirstate')
repo.dirstate.restorebackup(None, 'journal.dirstate')
repo.invalidate(clearfilecache=True)
@@ -1875,6 +1913,8 @@
def _journalfiles(self):
return ((self.svfs, 'journal'),
+ (self.svfs, 'journal.narrowspec'),
+ (self.vfs, 'journal.narrowspec.dirstate'),
(self.vfs, 'journal.dirstate'),
(self.vfs, 'journal.branch'),
(self.vfs, 'journal.desc'),
@@ -1887,6 +1927,7 @@
@unfilteredmethod
def _writejournal(self, desc):
self.dirstate.savebackup(None, 'journal.dirstate')
+ narrowspec.savewcbackup(self, 'journal.narrowspec.dirstate')
narrowspec.savebackup(self, 'journal.narrowspec')
self.vfs.write("journal.branch",
encoding.fromlocal(self.dirstate.branch()))
@@ -1976,6 +2017,7 @@
dsguard.close()
narrowspec.restorebackup(self, 'undo.narrowspec')
+ narrowspec.restorewcbackup(self, 'undo.narrowspec.dirstate')
self.dirstate.restorebackup(None, 'undo.dirstate')
try:
branch = self.vfs.read('undo.branch')
@@ -2877,11 +2919,11 @@
if scmutil.gdinitconfig(ui):
requirements.add('generaldelta')
+ # experimental config: format.sparse-revlog
+ if ui.configbool('format', 'sparse-revlog'):
+ requirements.add(SPARSEREVLOG_REQUIREMENT)
if ui.configbool('experimental', 'treemanifest'):
requirements.add('treemanifest')
- # experimental config: format.sparse-revlog
- if ui.configbool('format', 'sparse-revlog'):
- requirements.add(SPARSEREVLOG_REQUIREMENT)
revlogv2 = ui.config('experimental', 'revlogv2')
if revlogv2 == 'enable-unstable-format-and-corrupt-my-data':
@@ -2992,6 +3034,9 @@
wdirvfs.makedirs()
hgvfs.makedir(notindexed=True)
+ if 'sharedrepo' not in createopts:
+ hgvfs.mkdir(b'cache')
+ hgvfs.mkdir(b'wcache')
if b'store' in requirements and 'sharedrepo' not in createopts:
hgvfs.mkdir(b'store')
--- a/mercurial/logcmdutil.py Wed Jan 09 20:00:35 2019 -0800
+++ b/mercurial/logcmdutil.py Fri Jan 18 13:28:22 2019 -0500
@@ -55,8 +55,8 @@
return limit
def diffordiffstat(ui, repo, diffopts, node1, node2, match,
- changes=None, stat=False, fp=None, prefix='',
- root='', listsubrepos=False, hunksfilterfn=None):
+ changes=None, stat=False, fp=None, graphwidth=0,
+ prefix='', root='', listsubrepos=False, hunksfilterfn=None):
'''show diff or diffstat.'''
if root:
relroot = pathutil.canonpath(repo.root, repo.getcwd(), root)
@@ -76,7 +76,7 @@
diffopts = diffopts.copy(context=0, noprefix=False)
width = 80
if not ui.plain():
- width = ui.termwidth()
+ width = ui.termwidth() - graphwidth
chunks = repo[node2].diff(repo[node1], match, changes, opts=diffopts,
prefix=prefix, relroot=relroot,
@@ -130,12 +130,13 @@
def _makehunksfilter(self, ctx):
return None
- def showdiff(self, ui, ctx, diffopts, stat=False):
+ def showdiff(self, ui, ctx, diffopts, graphwidth=0, stat=False):
repo = ctx.repo()
node = ctx.node()
prev = ctx.p1().node()
diffordiffstat(ui, repo, diffopts, prev, node,
match=self._makefilematcher(ctx), stat=stat,
+ graphwidth=graphwidth,
hunksfilterfn=self._makehunksfilter(ctx))
def changesetlabels(ctx):
@@ -193,6 +194,7 @@
def _show(self, ctx, copies, props):
'''show a single changeset or file revision'''
changenode = ctx.node()
+ graphwidth = props.get('graphwidth', 0)
if self.ui.quiet:
self.ui.write("%s\n" % scmutil.formatchangeid(ctx),
@@ -285,7 +287,7 @@
label='log.summary')
self.ui.write("\n")
- self._showpatch(ctx)
+ self._showpatch(ctx, graphwidth)
def _showobsfate(self, ctx):
# TODO: do not depend on templater
@@ -304,13 +306,15 @@
'''empty method used by extension as a hook point
'''
- def _showpatch(self, ctx):
+ def _showpatch(self, ctx, graphwidth=0):
if self._includestat:
- self._differ.showdiff(self.ui, ctx, self._diffopts, stat=True)
+ self._differ.showdiff(self.ui, ctx, self._diffopts,
+ graphwidth, stat=True)
if self._includestat and self._includediff:
self.ui.write("\n")
if self._includediff:
- self._differ.showdiff(self.ui, ctx, self._diffopts, stat=False)
+ self._differ.showdiff(self.ui, ctx, self._diffopts,
+ graphwidth, stat=False)
if self._includestat or self._includediff:
self.ui.write("\n")
@@ -433,6 +437,7 @@
props['ctx'] = ctx
props['index'] = index = next(self._counter)
props['revcache'] = {'copies': copies}
+ graphwidth = props.get('graphwidth', 0)
# write separator, which wouldn't work well with the header part below
# since there's inherently a conflict between header (across items) and
@@ -453,7 +458,7 @@
# write changeset metadata, then patch if requested
key = self._parts[self._tref]
self.ui.write(self.t.render(key, props))
- self._showpatch(ctx)
+ self._showpatch(ctx, graphwidth)
if self._parts['footer']:
if not self.footer:
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/mercurial/loggingutil.py Fri Jan 18 13:28:22 2019 -0500
@@ -0,0 +1,121 @@
+# loggingutil.py - utility for logging events
+#
+# Copyright 2010 Nicolas Dumazet
+# Copyright 2013 Facebook, Inc.
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2 or any later version.
+
+from __future__ import absolute_import
+
+import errno
+
+from . import (
+ pycompat,
+)
+
+from .utils import (
+ dateutil,
+ procutil,
+ stringutil,
+)
+
+def openlogfile(ui, vfs, name, maxfiles=0, maxsize=0):
+ """Open log file in append mode, with optional rotation
+
+ If maxsize > 0, the log file will be rotated up to maxfiles.
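+
+ A hypothetical usage sketch (file name and sizes are illustrative)::
+
+     with openlogfile(ui, vfs, b'blackbox.log',
+                      maxfiles=7, maxsize=1048576) as fp:
+         fp.write(b'a log line\n')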
+ """
+ def rotate(oldpath, newpath):
+ try:
+ vfs.unlink(newpath)
+ except OSError as err:
+ if err.errno != errno.ENOENT:
+ ui.debug("warning: cannot remove '%s': %s\n" %
+ (newpath, err.strerror))
+ try:
+ if newpath:
+ vfs.rename(oldpath, newpath)
+ except OSError as err:
+ if err.errno != errno.ENOENT:
+ ui.debug("warning: cannot rename '%s' to '%s': %s\n" %
+ (oldpath, newpath, err.strerror))
+
+ if maxsize > 0:
+ try:
+ st = vfs.stat(name)
+ except OSError:
+ pass
+ else:
+ if st.st_size >= maxsize:
+ path = vfs.join(name)
+ for i in pycompat.xrange(maxfiles - 1, 1, -1):
+ rotate(oldpath='%s.%d' % (path, i - 1),
+ newpath='%s.%d' % (path, i))
+ rotate(oldpath=path,
+ newpath=maxfiles > 0 and path + '.1')
+ return vfs(name, 'a', makeparentdirs=False)
+
+def _formatlogline(msg):
+ date = dateutil.datestr(format=b'%Y/%m/%d %H:%M:%S')
+ pid = procutil.getpid()
+ return b'%s (%d)> %s' % (date, pid, msg)
+
+def _matchevent(event, tracked):
+ return b'*' in tracked or event in tracked
+
+class filelogger(object):
+ """Basic logger backed by physical file with optional rotation"""
+
+ def __init__(self, vfs, name, tracked, maxfiles=0, maxsize=0):
+ self._vfs = vfs
+ self._name = name
+ self._trackedevents = set(tracked)
+ self._maxfiles = maxfiles
+ self._maxsize = maxsize
+
+ def tracked(self, event):
+ return _matchevent(event, self._trackedevents)
+
+ def log(self, ui, event, msg, opts):
+ line = _formatlogline(msg)
+ try:
+ with openlogfile(ui, self._vfs, self._name,
+ maxfiles=self._maxfiles,
+ maxsize=self._maxsize) as fp:
+ fp.write(line)
+ except IOError as err:
+ ui.debug(b'cannot write to %s: %s\n'
+ % (self._name, stringutil.forcebytestr(err)))
+
+class fileobjectlogger(object):
+ """Basic logger backed by file-like object"""
+
+ def __init__(self, fp, tracked):
+ self._fp = fp
+ self._trackedevents = set(tracked)
+
+ def tracked(self, event):
+ return _matchevent(event, self._trackedevents)
+
+ def log(self, ui, event, msg, opts):
+ line = _formatlogline(msg)
+ try:
+ self._fp.write(line)
+ self._fp.flush()
+ except IOError as err:
+ ui.debug(b'cannot write to %s: %s\n'
+ % (stringutil.forcebytestr(self._fp.name),
+ stringutil.forcebytestr(err)))
+
+class proxylogger(object):
+ """Forward log events to another logger to be set later"""
+
+ def __init__(self):
+ self.logger = None
+
+ def tracked(self, event):
+ return self.logger is not None and self.logger.tracked(event)
+
+ def log(self, ui, event, msg, opts):
+ assert self.logger is not None
+ self.logger.log(ui, event, msg, opts)
--- a/mercurial/manifest.py Wed Jan 09 20:00:35 2019 -0800
+++ b/mercurial/manifest.py Fri Jan 18 13:28:22 2019 -0500
@@ -1135,7 +1135,10 @@
return m1.diff(m2, clean=clean)
result = {}
emptytree = treemanifest()
- def _diff(t1, t2):
+
+ def _iterativediff(t1, t2, stack):
+ """compares two tree manifests and append new tree-manifests which
+ needs to be compared to stack"""
if t1._node == t2._node and not t1._dirty and not t2._dirty:
return
t1._load()
@@ -1144,11 +1147,11 @@
for d, m1 in t1._dirs.iteritems():
m2 = t2._dirs.get(d, emptytree)
- _diff(m1, m2)
+ stack.append((m1, m2))
for d, m2 in t2._dirs.iteritems():
if d not in t1._dirs:
- _diff(emptytree, m2)
+ stack.append((emptytree, m2))
for fn, n1 in t1._files.iteritems():
fl1 = t1._flags.get(fn, '')
@@ -1164,7 +1167,12 @@
fl2 = t2._flags.get(fn, '')
result[t2._subpath(fn)] = ((None, ''), (n2, fl2))
- _diff(self, m2)
+ stackls = []
+ _iterativediff(self, m2, stackls)
+ while stackls:
+ t1, t2 = stackls.pop()
+ # stackls is populated in the function call
+ _iterativediff(t1, t2, stackls)
return result
def unmodifiedsince(self, m2):
@@ -1575,11 +1583,11 @@
def emitrevisions(self, nodes, nodesorder=None,
revisiondata=False, assumehaveparentrevisions=False,
- deltaprevious=False):
+ deltamode=repository.CG_DELTAMODE_STD):
return self._revlog.emitrevisions(
nodes, nodesorder=nodesorder, revisiondata=revisiondata,
assumehaveparentrevisions=assumehaveparentrevisions,
- deltaprevious=deltaprevious)
+ deltamode=deltamode)
def addgroup(self, deltas, linkmapper, transaction, addrevisioncb=None):
return self._revlog.addgroup(deltas, linkmapper, transaction,
@@ -1636,7 +1644,7 @@
of the list of files in the given commit. Consumers of the output of this
class do not care about the implementation details of the actual manifests
they receive (i.e. tree or flat or lazily loaded, etc)."""
- def __init__(self, opener, repo, rootstore):
+ def __init__(self, opener, repo, rootstore, narrowmatch):
usetreemanifest = False
cachesize = 4
@@ -1649,7 +1657,7 @@
self._rootstore = rootstore
self._rootstore._setupmanifestcachehooks(repo)
- self._narrowmatch = repo.narrowmatch()
+ self._narrowmatch = narrowmatch
# A cache of the manifestctx or treemanifestctx for each directory
self._dirmancache = {}
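
The _iterativediff() conversion above is the standard way to avoid
Python's recursion limit on deep trees: work that would have recursed is
appended to an explicit stack and a driver loop pops until the stack
drains. The pattern in miniature, with a hypothetical Node class:

    class Node(object):
        def __init__(self, name, children=()):
            self.name = name
            self.children = list(children)

    def visitall(root):
        out = []
        stack = [root]
        while stack:
            node = stack.pop()
            out.append(node.name)        # per-node work
            stack.extend(node.children)  # defer children to the stack
        return out

    tree = Node('/', [Node('a'), Node('b', [Node('b/c')])])
    print(visitall(tree))  # ['/', 'b', 'b/c', 'a']
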
--- a/mercurial/match.py Wed Jan 09 20:00:35 2019 -0800
+++ b/mercurial/match.py Fri Jan 18 13:28:22 2019 -0500
@@ -25,6 +25,7 @@
)
allpatternkinds = ('re', 'glob', 'path', 'relglob', 'relpath', 'relre',
+ 'rootglob',
'listfile', 'listfile0', 'set', 'include', 'subinclude',
'rootfilesin')
cwdrelativepatternkinds = ('relpath', 'glob')
@@ -221,7 +222,7 @@
for kind, pat in [_patsplit(p, default) for p in patterns]:
if kind in cwdrelativepatternkinds:
pat = pathutil.canonpath(root, cwd, pat, auditor)
- elif kind in ('relglob', 'path', 'rootfilesin'):
+ elif kind in ('relglob', 'path', 'rootfilesin', 'rootglob'):
pat = util.normpath(pat)
elif kind in ('listfile', 'listfile0'):
try:
@@ -1057,14 +1058,14 @@
i, n = 0, len(pat)
res = ''
group = 0
- escape = util.stringutil.reescape
+ escape = util.stringutil.regexbytesescapemap.get
def peek():
return i < n and pat[i:i + 1]
while i < n:
c = pat[i:i + 1]
i += 1
if c not in '*?[{},\\':
- res += escape(c)
+ res += escape(c, c)
elif c == '*':
if peek() == '*':
i += 1
@@ -1105,11 +1106,11 @@
p = peek()
if p:
i += 1
- res += escape(p)
+ res += escape(p, p)
else:
- res += escape(c)
+ res += escape(c, c)
else:
- res += escape(c)
+ res += escape(c, c)
return res
def _regex(kind, pat, globsuffix):
@@ -1137,7 +1138,7 @@
if pat.startswith('^'):
return pat
return '.*' + pat
- if kind == 'glob':
+ if kind in ('glob', 'rootglob'):
return _globre(pat) + globsuffix
raise error.ProgrammingError('not a regex pattern: %s:%s' % (kind, pat))
@@ -1184,33 +1185,59 @@
else:
return regex, lambda f: any(mf(f) for mf in matchfuncs)
+MAX_RE_SIZE = 20000
+
+def _joinregexes(regexps):
+ """gather multiple regular expressions into a single one"""
+ return '|'.join(regexps)
+
def _buildregexmatch(kindpats, globsuffix):
"""Build a match function from a list of kinds and kindpats,
- return regexp string and a matcher function."""
+ return regexp string and a matcher function.
+
+ Test that a too-large input is rejected
+ >>> _buildregexmatch([
+ ... (b'relglob', b'?' * MAX_RE_SIZE, b'')
+ ... ], b'$')
+ Traceback (most recent call last):
+ ...
+ Abort: matcher pattern is too long (20009 bytes)
+ """
try:
- regex = '(?:%s)' % '|'.join([_regex(k, p, globsuffix)
- for (k, p, s) in kindpats])
- if len(regex) > 20000:
- raise OverflowError
- return regex, _rematcher(regex)
- except OverflowError:
- # We're using a Python with a tiny regex engine and we
- # made it explode, so we'll divide the pattern list in two
- # until it works
- l = len(kindpats)
- if l < 2:
- raise
- regexa, a = _buildregexmatch(kindpats[:l//2], globsuffix)
- regexb, b = _buildregexmatch(kindpats[l//2:], globsuffix)
- return regex, lambda s: a(s) or b(s)
+ allgroups = []
+ regexps = [_regex(k, p, globsuffix) for (k, p, s) in kindpats]
+ fullregexp = _joinregexes(regexps)
+
+ startidx = 0
+ groupsize = 0
+ for idx, r in enumerate(regexps):
+ piecesize = len(r)
+ if piecesize > MAX_RE_SIZE:
+ msg = _("matcher pattern is too long (%d bytes)") % piecesize
+ raise error.Abort(msg)
+ elif (groupsize + piecesize) > MAX_RE_SIZE:
+ group = regexps[startidx:idx]
+ allgroups.append(_joinregexes(group))
+ startidx = idx
+ groupsize = 0
+ groupsize += piecesize + 1
+
+ if startidx == 0:
+ func = _rematcher(fullregexp)
+ else:
+ group = regexps[startidx:]
+ allgroups.append(_joinregexes(group))
+ allmatchers = [_rematcher(g) for g in allgroups]
+ func = lambda s: any(m(s) for m in allmatchers)
+ return fullregexp, func
except re.error:
for k, p, s in kindpats:
try:
- _rematcher('(?:%s)' % _regex(k, p, globsuffix))
+ _rematcher(_regex(k, p, globsuffix))
except re.error:
if s:
raise error.Abort(_("%s: invalid pattern (%s): %s") %
- (s, k, p))
+ (s, k, p))
else:
raise error.Abort(_("invalid pattern (%s): %s") % (k, p))
raise error.Abort(_("invalid pattern"))
@@ -1226,7 +1253,7 @@
r = []
d = []
for kind, pat, source in kindpats:
- if kind == 'glob': # find the non-glob prefix
+ if kind in ('glob', 'rootglob'): # find the non-glob prefix
root = []
for p in pat.split('/'):
if '[' in p or '{' in p or '*' in p or '?' in p:
@@ -1325,14 +1352,21 @@
syntax: glob # defaults following lines to non-rooted globs
re:pattern # non-rooted regular expression
glob:pattern # non-rooted glob
+ rootglob:pat # rooted glob (same root as ^ in regexps)
pattern # pattern of the current default type
if sourceinfo is set, returns a list of tuples:
(pattern, lineno, originalline). This is useful to debug ignore patterns.
'''
- syntaxes = {'re': 'relre:', 'regexp': 'relre:', 'glob': 'relglob:',
- 'include': 'include', 'subinclude': 'subinclude'}
+ syntaxes = {
+ 're': 'relre:',
+ 'regexp': 'relre:',
+ 'glob': 'relglob:',
+ 'rootglob': 'rootglob:',
+ 'include': 'include',
+ 'subinclude': 'subinclude',
+ }
syntax = 'relre:'
patterns = []
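
_buildregexmatch() now caps each compiled alternation at MAX_RE_SIZE so
small regex engines cannot blow up, while still returning the full
regexp for display. A simplified sketch of the grouping strategy
(hypothetical helper and cap, not the patch's exact bookkeeping):

    import re

    def groupedmatcher(pieces, maxsize=20):
        groups, current, size = [], [], 0
        for p in pieces:
            if len(p) > maxsize:
                raise ValueError('pattern is too long (%d bytes)' % len(p))
            if size + len(p) > maxsize and current:
                groups.append('|'.join(current))
                current, size = [], 0
            current.append(p)
            size += len(p) + 1  # +1 for the '|' separator
        if current:
            groups.append('|'.join(current))
        matchers = [re.compile(g).match for g in groups]
        return lambda s: any(m(s) for m in matchers)

    m = groupedmatcher(['foo.*', 'bar[0-9]+', 'baz'])
    print(bool(m('bar42')))  # True
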
--- a/mercurial/merge.py Wed Jan 09 20:00:35 2019 -0800
+++ b/mercurial/merge.py Fri Jan 18 13:28:22 2019 -0500
@@ -478,6 +478,13 @@
f.write(_pack(format, key, len(data), data))
f.close()
+ @staticmethod
+ def getlocalkey(path):
+ """hash the path of a local file context for storage in the .hg/merge
+ directory."""
+
+ return hex(hashlib.sha1(path).digest())
+
def add(self, fcl, fco, fca, fd):
"""add a new (potentially?) conflicting file the merge state
fcl: file context for local,
@@ -488,11 +495,11 @@
note: also write the local version to the `.hg/merge` directory.
"""
if fcl.isabsent():
- hash = nullhex
+ localkey = nullhex
else:
- hash = hex(hashlib.sha1(fcl.path()).digest())
- self._repo.vfs.write('merge/' + hash, fcl.data())
- self._state[fd] = [MERGE_RECORD_UNRESOLVED, hash, fcl.path(),
+ localkey = mergestate.getlocalkey(fcl.path())
+ self._repo.vfs.write('merge/' + localkey, fcl.data())
+ self._state[fd] = [MERGE_RECORD_UNRESOLVED, localkey, fcl.path(),
fca.path(), hex(fca.filenode()),
fco.path(), hex(fco.filenode()),
fcl.flags()]
@@ -551,7 +558,7 @@
MERGE_RECORD_DRIVER_RESOLVED):
return True, 0
stateentry = self._state[dfile]
- state, hash, lfile, afile, anode, ofile, onode, flags = stateentry
+ state, localkey, lfile, afile, anode, ofile, onode, flags = stateentry
octx = self._repo[self._other]
extras = self.extras(dfile)
anccommitnode = extras.get('ancestorlinknode')
@@ -559,7 +566,7 @@
actx = self._repo[anccommitnode]
else:
actx = None
- fcd = self._filectxorabsent(hash, wctx, dfile)
+ fcd = self._filectxorabsent(localkey, wctx, dfile)
fco = self._filectxorabsent(onode, octx, ofile)
# TODO: move this to filectxorabsent
fca = self._repo.filectx(afile, fileid=anode, changectx=actx)
@@ -577,8 +584,8 @@
flags = flo
if preresolve:
# restore local
- if hash != nullhex:
- f = self._repo.vfs('merge/' + hash)
+ if localkey != nullhex:
+ f = self._repo.vfs('merge/' + localkey)
wctx[dfile].write(f.read(), flags)
f.close()
else:
@@ -1538,8 +1545,27 @@
unresolvedcount = attr.ib()
def isempty(self):
- return (not self.updatedcount and not self.mergedcount
- and not self.removedcount and not self.unresolvedcount)
+ return not (self.updatedcount or self.mergedcount
+ or self.removedcount or self.unresolvedcount)
+
+def emptyactions():
+ """create an actions dict, to be populated and passed to applyupdates()"""
+ return dict((m, [])
+ for m in (
+ ACTION_ADD,
+ ACTION_ADD_MODIFIED,
+ ACTION_FORGET,
+ ACTION_GET,
+ ACTION_CHANGED_DELETED,
+ ACTION_DELETED_CHANGED,
+ ACTION_REMOVE,
+ ACTION_DIR_RENAME_MOVE_LOCAL,
+ ACTION_LOCAL_DIR_RENAME_GET,
+ ACTION_MERGE,
+ ACTION_EXEC,
+ ACTION_KEEP,
+ ACTION_PATH_CONFLICT,
+ ACTION_PATH_CONFLICT_RESOLVE))
def applyupdates(repo, actions, wctx, mctx, overwrite, labels=None):
"""apply the merge action list to the working directory
@@ -2090,22 +2116,7 @@
del actionbyfile[f]
# Convert to dictionary-of-lists format
- actions = dict((m, [])
- for m in (
- ACTION_ADD,
- ACTION_ADD_MODIFIED,
- ACTION_FORGET,
- ACTION_GET,
- ACTION_CHANGED_DELETED,
- ACTION_DELETED_CHANGED,
- ACTION_REMOVE,
- ACTION_DIR_RENAME_MOVE_LOCAL,
- ACTION_LOCAL_DIR_RENAME_GET,
- ACTION_MERGE,
- ACTION_EXEC,
- ACTION_KEEP,
- ACTION_PATH_CONFLICT,
- ACTION_PATH_CONFLICT_RESOLVE))
+ actions = emptyactions()
for f, (m, args, msg) in actionbyfile.iteritems():
if m not in actions:
actions[m] = []
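
Hashing the path gives getlocalkey() a flat, filesystem-safe name under
.hg/merge no matter how deeply nested the tracked file is; '/' never
reaches the file system. For example:

    import hashlib
    # equivalent to hex(hashlib.sha1(path).digest())
    print(hashlib.sha1(b'deep/nested/path.txt').hexdigest())
    # -> a stable 40-character hex key, usable as 'merge/<key>'
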
--- a/mercurial/narrowspec.py Wed Jan 09 20:00:35 2019 -0800
+++ b/mercurial/narrowspec.py Fri Jan 18 13:28:22 2019 -0500
@@ -13,12 +13,16 @@
from . import (
error,
match as matchmod,
+ merge,
repository,
sparse,
util,
)
+# The file in .hg/store/ that indicates which paths exist in the store
FILENAME = 'narrowspec'
+# The file in .hg/ that indicates which paths exist in the dirstate
+DIRSTATE_FILENAME = 'narrowspec.dirstate'
# Pattern prefixes that are allowed in narrow patterns. This list MUST
# only contain patterns that are fast and safe to evaluate. Keep in mind
@@ -127,6 +131,18 @@
return matchmod.match(root, '', [], include=include or [],
exclude=exclude or [])
+def parseconfig(ui, spec):
+ # maybe we should care about the profiles returned too
+ includepats, excludepats, profiles = sparse.parseconfig(ui, spec, 'narrow')
+ if profiles:
+ raise error.Abort(_("including other spec files using '%include' is not"
+ " supported in narrowspec"))
+
+ validatepatterns(includepats)
+ validatepatterns(excludepats)
+
+ return includepats, excludepats
+
def load(repo):
try:
spec = repo.svfs.read(FILENAME)
@@ -136,17 +152,8 @@
if e.errno == errno.ENOENT:
return set(), set()
raise
- # maybe we should care about the profiles returned too
- includepats, excludepats, profiles = sparse.parseconfig(repo.ui, spec,
- 'narrow')
- if profiles:
- raise error.Abort(_("including other spec files using '%include' is not"
- " supported in narrowspec"))
- validatepatterns(includepats)
- validatepatterns(excludepats)
-
- return includepats, excludepats
+ return parseconfig(repo.ui, spec)
def save(repo, includepats, excludepats):
validatepatterns(includepats)
@@ -154,19 +161,38 @@
spec = format(includepats, excludepats)
repo.svfs.write(FILENAME, spec)
+def copytoworkingcopy(repo):
+ spec = repo.svfs.read(FILENAME)
+ repo.vfs.write(DIRSTATE_FILENAME, spec)
+
def savebackup(repo, backupname):
if repository.NARROW_REQUIREMENT not in repo.requirements:
return
+ svfs = repo.svfs
+ svfs.tryunlink(backupname)
+ util.copyfile(svfs.join(FILENAME), svfs.join(backupname), hardlink=True)
+
+def restorebackup(repo, backupname):
+ if repository.NARROW_REQUIREMENT not in repo.requirements:
+ return
+ util.rename(repo.svfs.join(backupname), repo.svfs.join(FILENAME))
+
+def savewcbackup(repo, backupname):
+ if repository.NARROW_REQUIREMENT not in repo.requirements:
+ return
vfs = repo.vfs
vfs.tryunlink(backupname)
- util.copyfile(repo.svfs.join(FILENAME), vfs.join(backupname), hardlink=True)
+ # It may not exist in old repos
+ if vfs.exists(DIRSTATE_FILENAME):
+ util.copyfile(vfs.join(DIRSTATE_FILENAME), vfs.join(backupname),
+ hardlink=True)
-def restorebackup(repo, backupname):
+def restorewcbackup(repo, backupname):
if repository.NARROW_REQUIREMENT not in repo.requirements:
return
- util.rename(repo.vfs.join(backupname), repo.svfs.join(FILENAME))
+ util.rename(repo.vfs.join(backupname), repo.vfs.join(DIRSTATE_FILENAME))
-def clearbackup(repo, backupname):
+def clearwcbackup(repo, backupname):
if repository.NARROW_REQUIREMENT not in repo.requirements:
return
repo.vfs.unlink(backupname)
@@ -223,3 +249,66 @@
else:
res_includes = set(req_includes)
return res_includes, res_excludes, invalid_includes
+
+# These two are extracted for extensions (specifically for Google's CitC file
+# system)
+def _deletecleanfiles(repo, files):
+ for f in files:
+ repo.wvfs.unlinkpath(f)
+
+def _writeaddedfiles(repo, pctx, files):
+ actions = merge.emptyactions()
+ addgaction = actions[merge.ACTION_GET].append
+ mf = repo['.'].manifest()
+ for f in files:
+ if not repo.wvfs.exists(f):
+ addgaction((f, (mf.flags(f), False), "narrowspec updated"))
+ merge.applyupdates(repo, actions, wctx=repo[None],
+ mctx=repo['.'], overwrite=False)
+
+def checkworkingcopynarrowspec(repo):
+ storespec = repo.svfs.tryread(FILENAME)
+ wcspec = repo.vfs.tryread(DIRSTATE_FILENAME)
+ if wcspec != storespec:
+ raise error.Abort(_("working copy's narrowspec is stale"),
+ hint=_("run 'hg tracked --update-working-copy'"))
+
+def updateworkingcopy(repo, assumeclean=False):
+ """updates the working copy and dirstate from the store narrowspec
+
+ When assumeclean=True, files that are not known to be clean will also
+ be deleted. It is then up to the caller to make sure they are clean.
+ """
+ oldspec = repo.vfs.tryread(DIRSTATE_FILENAME)
+ newspec = repo.svfs.tryread(FILENAME)
+
+ oldincludes, oldexcludes = parseconfig(repo.ui, oldspec)
+ newincludes, newexcludes = parseconfig(repo.ui, newspec)
+ oldmatch = match(repo.root, include=oldincludes, exclude=oldexcludes)
+ newmatch = match(repo.root, include=newincludes, exclude=newexcludes)
+ addedmatch = matchmod.differencematcher(newmatch, oldmatch)
+ removedmatch = matchmod.differencematcher(oldmatch, newmatch)
+
+ ds = repo.dirstate
+ lookup, status = ds.status(removedmatch, subrepos=[], ignored=False,
+ clean=True, unknown=False)
+ trackeddirty = status.modified + status.added
+ clean = status.clean
+ if assumeclean:
+ assert not trackeddirty
+ clean.extend(lookup)
+ else:
+ trackeddirty.extend(lookup)
+ _deletecleanfiles(repo, clean)
+ for f in sorted(trackeddirty):
+ repo.ui.status(_('not deleting possibly dirty file %s\n') % f)
+ for f in clean + trackeddirty:
+ ds.drop(f)
+
+ repo.narrowpats = newincludes, newexcludes
+ repo._narrowmatch = newmatch
+ pctx = repo['.']
+ newfiles = [f for f in pctx.manifest().walk(addedmatch) if f not in ds]
+ for f in newfiles:
+ ds.normallookup(f)
+ _writeaddedfiles(repo, pctx, newfiles)
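
updateworkingcopy() is driven by simple matcher set algebra: whatever
the new spec matches but the old one did not must be written out, and
the reverse must be deleted when clean. The same relationship with
plain sets, as a hypothetical illustration:

    old = {'dir1/', 'dir2/'}
    new = {'dir1/', 'dir3/'}
    added = new - old    # like differencematcher(newmatch, oldmatch)
    removed = old - new  # like differencematcher(oldmatch, newmatch)
    print(sorted(added), sorted(removed))  # ['dir3/'] ['dir2/']
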
--- a/mercurial/obsolete.py Wed Jan 09 20:00:35 2019 -0800
+++ b/mercurial/obsolete.py Fri Jan 18 13:28:22 2019 -0500
@@ -916,11 +916,12 @@
cl = repo.changelog
torev = cl.nodemap.get
tonode = cl.node
+ obsstore = repo.obsstore
for rev in repo.revs('(not public()) and (not obsolete())'):
# We only evaluate mutable, non-obsolete revision
node = tonode(rev)
# (future) A cache of predecessors may worth if split is very common
- for pnode in obsutil.allpredecessors(repo.obsstore, [node],
+ for pnode in obsutil.allpredecessors(obsstore, [node],
ignoreflags=bumpedfix):
prev = torev(pnode) # unfiltered! but so is phasecache
if (prev is not None) and (phase(repo, prev) <= public):
--- a/mercurial/obsutil.py Wed Jan 09 20:00:35 2019 -0800
+++ b/mercurial/obsutil.py Fri Jan 18 13:28:22 2019 -0500
@@ -148,10 +148,11 @@
remaining = set(nodes)
seen = set(remaining)
+ prec = obsstore.predecessors.get
while remaining:
current = remaining.pop()
yield current
- for mark in obsstore.predecessors.get(current, ()):
+ for mark in prec(current, ()):
# ignore marker flagged with specified flag
if mark[2] & ignoreflags:
continue
@@ -396,12 +397,14 @@
This is a first and basic implementation, with many shortcoming.
"""
- diffopts = diffutil.diffallopts(leftctx.repo().ui, {'git': True})
+ # leftctx.repo() and rightctx.repo() are the same here
+ repo = leftctx.repo()
+ diffopts = diffutil.diffallopts(repo.ui, {'git': True})
# Leftctx or right ctx might be filtered, so we need to use the contexts
# with an unfiltered repository to safely compute the diff
- leftunfi = leftctx._repo.unfiltered()[leftctx.rev()]
+ leftunfi = repo.unfiltered()[leftctx.rev()]
leftdiff = leftunfi.diff(opts=diffopts)
- rightunfi = rightctx._repo.unfiltered()[rightctx.rev()]
+ rightunfi = repo.unfiltered()[rightctx.rev()]
rightdiff = rightunfi.diff(opts=diffopts)
left, right = (0, 0)
@@ -708,7 +711,8 @@
if part not in newss:
newss.append(part)
productresult.append(newss)
- markss = productresult
+ if productresult:
+ markss = productresult
succssets.extend(markss)
# remove duplicated and subset
seen = []
--- a/mercurial/phases.py Wed Jan 09 20:00:35 2019 -0800
+++ b/mercurial/phases.py Fri Jan 18 13:28:22 2019 -0500
@@ -133,8 +133,9 @@
allphases = range(internal + 1)
trackedphases = allphases[1:]
# record phase names
+cmdphasenames = ['public', 'draft', 'secret'] # known to `hg phase` command
phasenames = [None] * len(allphases)
-phasenames[:3] = ['public', 'draft', 'secret']
+phasenames[:len(cmdphasenames)] = cmdphasenames
phasenames[archived] = 'archived'
phasenames[internal] = 'internal'
# record phase property
--- a/mercurial/policy.py Wed Jan 09 20:00:35 2019 -0800
+++ b/mercurial/policy.py Fri Jan 18 13:28:22 2019 -0500
@@ -69,7 +69,7 @@
(r'cext', r'bdiff'): 3,
(r'cext', r'mpatch'): 1,
(r'cext', r'osutil'): 4,
- (r'cext', r'parsers'): 11,
+ (r'cext', r'parsers'): 12,
}
# map import request to other package or module
--- a/mercurial/posix.py Wed Jan 09 20:00:35 2019 -0800
+++ b/mercurial/posix.py Fri Jan 18 13:28:22 2019 -0500
@@ -153,7 +153,7 @@
# Turn off all +x bits
os.chmod(f, s & 0o666)
-def copymode(src, dst, mode=None):
+def copymode(src, dst, mode=None, enforcewritable=False):
'''Copy the file mode from the file at path src to dst.
If src doesn't exist, we're using mode instead. If mode is None, we're
using umask.'''
@@ -166,7 +166,13 @@
if st_mode is None:
st_mode = ~umask
st_mode &= 0o666
- os.chmod(dst, st_mode)
+
+ new_mode = st_mode
+
+ if enforcewritable:
+ new_mode |= stat.S_IWUSR
+
+ os.chmod(dst, new_mode)
def checkexec(path):
"""
@@ -182,7 +188,7 @@
try:
EXECFLAGS = stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH
basedir = os.path.join(path, '.hg')
- cachedir = os.path.join(basedir, 'cache')
+ cachedir = os.path.join(basedir, 'wcache')
storedir = os.path.join(basedir, 'store')
if not os.path.exists(cachedir):
try:
@@ -255,7 +261,7 @@
# mktemp is not racy because symlink creation will fail if the
# file already exists
while True:
- cachedir = os.path.join(path, '.hg', 'cache')
+ cachedir = os.path.join(path, '.hg', 'wcache')
checklink = os.path.join(cachedir, 'checklink')
# try fast path, read only
if os.path.islink(checklink):
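
enforcewritable only ever widens the mode: it ORs in the owner-write
bit, so a read-only source mode cannot produce an unwritable
destination. In terms of mode arithmetic:

    import stat
    st_mode = 0o444                    # read-only source
    new_mode = st_mode | stat.S_IWUSR  # what enforcewritable adds
    print(oct(new_mode))               # '0o644' (0o444 | 0o200)
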
--- a/mercurial/pure/bdiff.py Wed Jan 09 20:00:35 2019 -0800
+++ b/mercurial/pure/bdiff.py Fri Jan 18 13:28:22 2019 -0500
@@ -90,13 +90,3 @@
text = re.sub('[ \t\r]+', ' ', text)
text = text.replace(' \n', '\n')
return text
-
-def splitnewlines(text):
- '''like str.splitlines, but only split on newlines.'''
- lines = [l + '\n' for l in text.split('\n')]
- if lines:
- if lines[-1] == '\n':
- lines.pop()
- else:
- lines[-1] = lines[-1][:-1]
- return lines
--- a/mercurial/pycompat.py Wed Jan 09 20:00:35 2019 -0800
+++ b/mercurial/pycompat.py Fri Jan 18 13:28:22 2019 -0500
@@ -403,7 +403,8 @@
isjython = sysplatform.startswith(b'java')
-isdarwin = sysplatform == b'darwin'
+isdarwin = sysplatform.startswith(b'darwin')
+islinux = sysplatform.startswith(b'linux')
isposix = osname == b'posix'
iswindows = osname == b'nt'
--- a/mercurial/registrar.py Wed Jan 09 20:00:35 2019 -0800
+++ b/mercurial/registrar.py Fri Jan 18 13:28:22 2019 -0500
@@ -73,6 +73,25 @@
return func
+ def _merge(self, registrarbase):
+ """Merge the entries of the given registrar object into this one.
+
+ The other registrar object must not contain any entries already in the
+ current one, or a ProgrammingError is raised. Additionally, the types
+ of the two registrars must match.
+ """
+ if not isinstance(registrarbase, type(self)):
+ msg = "cannot merge different types of registrar"
+ raise error.ProgrammingError(msg)
+
+ dups = set(registrarbase._table).intersection(self._table)
+
+ if dups:
+ msg = 'duplicate registration for names: "%s"' % '", "'.join(dups)
+ raise error.ProgrammingError(msg)
+
+ self._table.update(registrarbase._table)
+
def _parsefuncdecl(self, decl):
"""Parse function declaration and return the name of function in it
"""
@@ -169,6 +188,10 @@
"""
# Command categories for grouping them in help output.
+ # These can also be specified for aliases, like:
+ # [alias]
+ # myalias = something
+ # myalias:category = repo
CATEGORY_REPO_CREATION = 'repo'
CATEGORY_REMOTE_REPO_MANAGEMENT = 'remote'
CATEGORY_COMMITTING = 'commit'
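
_merge() is a plain table update guarded by a duplicate check, so
colliding names abort before anything is copied. The same check with
ordinary dicts, as an illustrative sketch:

    table = {'foo': 1}
    other = {'bar': 2}
    dups = set(other).intersection(table)
    if dups:
        raise ValueError('duplicate registration for names: "%s"'
                         % '", "'.join(sorted(dups)))
    table.update(other)
    print(table)  # {'foo': 1, 'bar': 2}
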
--- a/mercurial/repair.py Wed Jan 09 20:00:35 2019 -0800
+++ b/mercurial/repair.py Fri Jan 18 13:28:22 2019 -0500
@@ -108,8 +108,9 @@
repo = repo.unfiltered()
repo.destroying()
+ vfs = repo.vfs
+ cl = repo.changelog
- cl = repo.changelog
# TODO handle undo of merge sets
if isinstance(nodelist, str):
nodelist = [nodelist]
@@ -152,31 +153,13 @@
stripobsidx = [i for i, m in enumerate(repo.obsstore)
if m in obsmarkers]
- # For a set s, max(parents(s) - s) is the same as max(heads(::s - s)), but
- # is much faster
- newbmtarget = repo.revs('max(parents(%ld) - (%ld))', tostrip, tostrip)
- if newbmtarget:
- newbmtarget = repo[newbmtarget.first()].node()
- else:
- newbmtarget = '.'
+ newbmtarget, updatebm = _bookmarkmovements(repo, tostrip)
- bm = repo._bookmarks
- updatebm = []
- for m in bm:
- rev = repo[bm[m]].rev()
- if rev in tostrip:
- updatebm.append(m)
-
- # create a changegroup for all the branches we need to keep
backupfile = None
- vfs = repo.vfs
node = nodelist[-1]
if backup:
- backupfile = backupbundle(repo, stripbases, cl.heads(), node, topic)
- repo.ui.status(_("saved backup bundle to %s\n") %
- vfs.join(backupfile))
- repo.ui.log("backupbundle", "saved backup bundle to %s\n",
- vfs.join(backupfile))
+ backupfile = _createstripbackup(repo, stripbases, node, topic)
+ # create a changegroup for all the branches we need to keep
tmpbundlefile = None
if saveheads:
# do not compress temporary bundle if we remove it from disk later
@@ -188,7 +171,7 @@
tmpbundlefile = backupbundle(repo, savebases, saveheads, node, 'temp',
compress=False, obsolescence=False)
- with ui.uninterruptable():
+ with ui.uninterruptible():
try:
with repo.transaction("strip") as tr:
# TODO this code violates the interface abstraction of the
@@ -237,7 +220,7 @@
with repo.transaction('repair') as tr:
bmchanges = [(m, repo[newbmtarget].node()) for m in updatebm]
- bm.applychanges(repo, tr, bmchanges)
+ repo._bookmarks.applychanges(repo, tr, bmchanges)
# remove undo files
for undovfs, undofile in repo.undofiles():
@@ -269,6 +252,36 @@
# extensions can use it
return backupfile
+def _bookmarkmovements(repo, tostrip):
+ # compute necessary bookmark movement
+ bm = repo._bookmarks
+ updatebm = []
+ for m in bm:
+ rev = repo[bm[m]].rev()
+ if rev in tostrip:
+ updatebm.append(m)
+ newbmtarget = None
+ if updatebm: # don't compute anything if there is no bookmark to move anyway
+ # For a set s, max(parents(s) - s) is the same as max(heads(::s - s)),
+ # but is much faster
+ newbmtarget = repo.revs('max(parents(%ld) - (%ld))', tostrip, tostrip)
+ if newbmtarget:
+ newbmtarget = repo[newbmtarget.first()].node()
+ else:
+ newbmtarget = '.'
+ return newbmtarget, updatebm
+
+def _createstripbackup(repo, stripbases, node, topic):
+ # backup the changeset we are about to strip
+ vfs = repo.vfs
+ cl = repo.changelog
+ backupfile = backupbundle(repo, stripbases, cl.heads(), node, topic)
+ repo.ui.status(_("saved backup bundle to %s\n") %
+ vfs.join(backupfile))
+ repo.ui.log("backupbundle", "saved backup bundle to %s\n",
+ vfs.join(backupfile))
+ return backupfile
+
def safestriproots(ui, repo, nodes):
"""return list of roots of nodes where descendants are covered by nodes"""
torev = repo.unfiltered().changelog.rev
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/mercurial/repocache.py Fri Jan 18 13:28:22 2019 -0500
@@ -0,0 +1,131 @@
+# repocache.py - in-memory repository cache for long-running services
+#
+# Copyright 2018 Yuya Nishihara <yuya@tcha.org>
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2 or any later version.
+
+from __future__ import absolute_import
+
+import collections
+import gc
+import threading
+
+from . import (
+ error,
+ hg,
+ obsolete,
+ scmutil,
+ util,
+)
+
+class repoloader(object):
+ """Load repositories in background thread
+
+ This is designed for a forking server. A cached repo cannot be obtained
+ until the server fork()s a worker and the loader thread stops.
+ """
+
+ def __init__(self, ui, maxlen):
+ self._ui = ui.copy()
+ self._cache = util.lrucachedict(max=maxlen)
+ # use deque and Event instead of Queue since deque can discard
+ # old items to keep at most maxlen items.
+ self._inqueue = collections.deque(maxlen=maxlen)
+ self._accepting = False
+ self._newentry = threading.Event()
+ self._thread = None
+
+ def start(self):
+ assert not self._thread
+ if self._inqueue.maxlen == 0:
+ # no need to spawn loader thread as the cache is disabled
+ return
+ self._accepting = True
+ self._thread = threading.Thread(target=self._mainloop)
+ self._thread.start()
+
+ def stop(self):
+ if not self._thread:
+ return
+ self._accepting = False
+ self._newentry.set()
+ self._thread.join()
+ self._thread = None
+ self._cache.clear()
+ self._inqueue.clear()
+
+ def load(self, path):
+ """Request to load the specified repository in background"""
+ self._inqueue.append(path)
+ self._newentry.set()
+
+ def get(self, path):
+ """Return a cached repo if available
+
+ This function must be called after fork(), where the loader thread
+ is stopped. Otherwise, the returned repo might be updated by the
+ loader thread.
+ """
+ if self._thread and self._thread.is_alive():
+ raise error.ProgrammingError(b'cannot obtain cached repo while '
+ b'loader is active')
+ return self._cache.peek(path, None)
+
+ def _mainloop(self):
+ while self._accepting:
+ # Avoid heavy GC after fork(), which would cancel the benefit of
+ # COW. We assume that GIL is acquired while GC is underway in the
+ # loader thread. If that isn't true, we might have to move
+ # gc.collect() to the main thread so that fork() would never stop
+ # the thread where GC is in progress.
+ gc.collect()
+
+ self._newentry.wait()
+ while self._accepting:
+ self._newentry.clear()
+ try:
+ path = self._inqueue.popleft()
+ except IndexError:
+ break
+ scmutil.callcatch(self._ui, lambda: self._load(path))
+
+ def _load(self, path):
+ start = util.timer()
+ # TODO: repo should be recreated if storage configuration changed
+ try:
+ # pop before loading so inconsistent state wouldn't be exposed
+ repo = self._cache.pop(path)
+ except KeyError:
+ repo = hg.repository(self._ui, path).unfiltered()
+ _warmupcache(repo)
+ repo.ui.log(b'repocache', b'loaded repo into cache: %s (in %.3fs)\n',
+ path, util.timer() - start)
+ self._cache.insert(path, repo)
+
+# TODO: think about proper API of preloading cache
+def _warmupcache(repo):
+ repo.invalidateall()
+ repo.changelog
+ repo.obsstore._all
+ repo.obsstore.successors
+ repo.obsstore.predecessors
+ repo.obsstore.children
+ for name in obsolete.cachefuncs:
+ obsolete.getrevs(repo, name)
+ repo._phasecache.loadphaserevs(repo)
+
+# TODO: think about proper API of attaching preloaded attributes
+def copycache(srcrepo, destrepo):
+ """Copy cached attributes from srcrepo to destrepo"""
+ destfilecache = destrepo._filecache
+ srcfilecache = srcrepo._filecache
+ if 'changelog' in srcfilecache:
+ destfilecache['changelog'] = ce = srcfilecache['changelog']
+ ce.obj.opener = ce.obj._realopener = destrepo.svfs
+ if 'obsstore' in srcfilecache:
+ destfilecache['obsstore'] = ce = srcfilecache['obsstore']
+ ce.obj.svfs = destrepo.svfs
+ if '_phasecache' in srcfilecache:
+ destfilecache['_phasecache'] = ce = srcfilecache['_phasecache']
+ ce.obj.opener = destrepo.svfs
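
The intended lifecycle, sketched below with a hypothetical repository
path: the parent process keeps the loader thread warming the cache, and
a fork()ed worker, where the loader thread no longer runs, reads from
it:

    import os
    from mercurial import ui as uimod
    from mercurial import repocache

    myui = uimod.ui.load()
    loader = repocache.repoloader(myui, maxlen=4)
    loader.start()
    loader.load(b'/srv/repos/foo')  # warm up in the background
    # ... accept a connection, then fork a worker:
    pid = os.fork()
    if pid == 0:
        # child: the loader thread does not survive fork(), so get()
        # will not raise; it returns None on a cache miss
        repo = loader.get(b'/srv/repos/foo')
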
--- a/mercurial/repository.py Wed Jan 09 20:00:35 2019 -0800
+++ b/mercurial/repository.py Fri Jan 18 13:28:22 2019 -0500
@@ -39,6 +39,11 @@
REVISION_FLAGS_KNOWN = (
REVISION_FLAG_CENSORED | REVISION_FLAG_ELLIPSIS | REVISION_FLAG_EXTSTORED)
+CG_DELTAMODE_STD = b'default'
+CG_DELTAMODE_PREV = b'previous'
+CG_DELTAMODE_FULL = b'fulltext'
+CG_DELTAMODE_P1 = b'p1'
+
class ipeerconnection(interfaceutil.Interface):
"""Represents a "connection" to a repository.
@@ -614,7 +619,7 @@
nodesorder=None,
revisiondata=False,
assumehaveparentrevisions=False,
- deltaprevious=False):
+ deltamode=CG_DELTAMODE_STD):
"""Produce ``irevisiondelta`` for revisions.
Given an iterable of nodes, emits objects conforming to the
@@ -657,10 +662,10 @@
The ``linknode`` attribute on the returned ``irevisiondelta`` may not
be set and it is the caller's responsibility to resolve it, if needed.
- If ``deltaprevious`` is True and revision data is requested, all
- revision data should be emitted as deltas against the revision
- emitted just prior. The initial revision should be a delta against
- its 1st parent.
+ If ``deltamode`` is CG_DELTAMODE_PREV and revision data is requested,
+ all revision data should be emitted as deltas against the revision
+ emitted just prior. The initial revision should be a delta against its
+ 1st parent.
"""
class ifilemutation(interfaceutil.Interface):
@@ -1430,6 +1435,12 @@
Typically .hg/cache.
""")
+ wcachevfs = interfaceutil.Attribute(
+ """A VFS used to access the cache directory dedicated to working copy
+
+ Typically .hg/wcache.
+ """)
+
filteredrevcache = interfaceutil.Attribute(
"""Holds sets of revisions to be filtered.""")
@@ -1466,7 +1477,7 @@
narrowpats = interfaceutil.Attribute(
"""Matcher patterns for this repository's narrowspec.""")
- def narrowmatch():
+ def narrowmatch(match=None, includeexact=False):
"""Obtain a matcher for the narrowspec."""
def setnarrowpats(newincludes, newexcludes):
--- a/mercurial/revlog.py Wed Jan 09 20:00:35 2019 -0800
+++ b/mercurial/revlog.py Fri Jan 18 13:28:22 2019 -0500
@@ -97,6 +97,11 @@
REVIDX_RAWTEXT_CHANGING_FLAGS
parsers = policy.importmod(r'parsers')
+try:
+ from . import rustext
+ rustext.__name__ # force actual import (see hgdemandimport)
+except ImportError:
+ rustext = None
# Aliased for performance.
_zlibdecompress = zlib.decompress
@@ -347,6 +352,7 @@
# When True, indexfile is opened with checkambig=True at writing, to
# avoid file stat ambiguity.
self._checkambig = checkambig
+ self._mmaplargeindex = mmaplargeindex
self._censorable = censorable
# 3-tuple of (node, rev, text) for a raw revision.
self._revisioncache = None
@@ -375,45 +381,51 @@
# custom flags.
self._flagprocessors = dict(_flagprocessors)
+ # 2-tuple of file handles being used for active writing.
+ self._writinghandles = None
+
+ self._loadindex()
+
+ def _loadindex(self):
mmapindexthreshold = None
- v = REVLOG_DEFAULT_VERSION
- opts = getattr(opener, 'options', None)
- if opts is not None:
- if 'revlogv2' in opts:
- # version 2 revlogs always use generaldelta.
- v = REVLOGV2 | FLAG_GENERALDELTA | FLAG_INLINE_DATA
- elif 'revlogv1' in opts:
- if 'generaldelta' in opts:
- v |= FLAG_GENERALDELTA
- else:
- v = 0
- if 'chunkcachesize' in opts:
- self._chunkcachesize = opts['chunkcachesize']
- if 'maxchainlen' in opts:
- self._maxchainlen = opts['maxchainlen']
- if 'deltabothparents' in opts:
- self._deltabothparents = opts['deltabothparents']
- self._lazydeltabase = bool(opts.get('lazydeltabase', False))
- if 'compengine' in opts:
- self._compengine = opts['compengine']
- if 'maxdeltachainspan' in opts:
- self._maxdeltachainspan = opts['maxdeltachainspan']
- if mmaplargeindex and 'mmapindexthreshold' in opts:
- mmapindexthreshold = opts['mmapindexthreshold']
- self._sparserevlog = bool(opts.get('sparse-revlog', False))
- withsparseread = bool(opts.get('with-sparse-read', False))
- # sparse-revlog forces sparse-read
- self._withsparseread = self._sparserevlog or withsparseread
- if 'sparse-read-density-threshold' in opts:
- self._srdensitythreshold = opts['sparse-read-density-threshold']
- if 'sparse-read-min-gap-size' in opts:
- self._srmingapsize = opts['sparse-read-min-gap-size']
- if opts.get('enableellipsis'):
- self._flagprocessors[REVIDX_ELLIPSIS] = ellipsisprocessor
-
- # revlog v0 doesn't have flag processors
- for flag, processor in opts.get(b'flagprocessors', {}).iteritems():
- _insertflagprocessor(flag, processor, self._flagprocessors)
+ opts = getattr(self.opener, 'options', {}) or {}
+
+ if 'revlogv2' in opts:
+ newversionflags = REVLOGV2 | FLAG_INLINE_DATA
+ elif 'revlogv1' in opts:
+ newversionflags = REVLOGV1 | FLAG_INLINE_DATA
+ if 'generaldelta' in opts:
+ newversionflags |= FLAG_GENERALDELTA
+ else:
+ newversionflags = REVLOG_DEFAULT_VERSION
+
+ if 'chunkcachesize' in opts:
+ self._chunkcachesize = opts['chunkcachesize']
+ if 'maxchainlen' in opts:
+ self._maxchainlen = opts['maxchainlen']
+ if 'deltabothparents' in opts:
+ self._deltabothparents = opts['deltabothparents']
+ self._lazydeltabase = bool(opts.get('lazydeltabase', False))
+ if 'compengine' in opts:
+ self._compengine = opts['compengine']
+ if 'maxdeltachainspan' in opts:
+ self._maxdeltachainspan = opts['maxdeltachainspan']
+ if self._mmaplargeindex and 'mmapindexthreshold' in opts:
+ mmapindexthreshold = opts['mmapindexthreshold']
+ self._sparserevlog = bool(opts.get('sparse-revlog', False))
+ withsparseread = bool(opts.get('with-sparse-read', False))
+ # sparse-revlog forces sparse-read
+ self._withsparseread = self._sparserevlog or withsparseread
+ if 'sparse-read-density-threshold' in opts:
+ self._srdensitythreshold = opts['sparse-read-density-threshold']
+ if 'sparse-read-min-gap-size' in opts:
+ self._srmingapsize = opts['sparse-read-min-gap-size']
+ if opts.get('enableellipsis'):
+ self._flagprocessors[REVIDX_ELLIPSIS] = ellipsisprocessor
+
+ # revlog v0 doesn't have flag processors
+ for flag, processor in opts.get(b'flagprocessors', {}).iteritems():
+ _insertflagprocessor(flag, processor, self._flagprocessors)
if self._chunkcachesize <= 0:
raise error.RevlogError(_('revlog chunk cache size %r is not '
@@ -422,45 +434,61 @@
raise error.RevlogError(_('revlog chunk cache size %r is not a '
'power of 2') % self._chunkcachesize)
- self._loadindex(v, mmapindexthreshold)
-
- def _loadindex(self, v, mmapindexthreshold):
indexdata = ''
self._initempty = True
try:
with self._indexfp() as f:
if (mmapindexthreshold is not None and
self.opener.fstat(f).st_size >= mmapindexthreshold):
+ # TODO: should .close() to release resources without
+ # relying on Python GC
indexdata = util.buffer(util.mmapread(f))
else:
indexdata = f.read()
if len(indexdata) > 0:
- v = versionformat_unpack(indexdata[:4])[0]
+ versionflags = versionformat_unpack(indexdata[:4])[0]
self._initempty = False
+ else:
+ versionflags = newversionflags
except IOError as inst:
if inst.errno != errno.ENOENT:
raise
- self.version = v
- self._inline = v & FLAG_INLINE_DATA
- self._generaldelta = v & FLAG_GENERALDELTA
- flags = v & ~0xFFFF
- fmt = v & 0xFFFF
+ versionflags = newversionflags
+
+ self.version = versionflags
+
+ flags = versionflags & ~0xFFFF
+ fmt = versionflags & 0xFFFF
+
if fmt == REVLOGV0:
if flags:
raise error.RevlogError(_('unknown flags (%#04x) in version %d '
'revlog %s') %
(flags >> 16, fmt, self.indexfile))
+
+ self._inline = False
+ self._generaldelta = False
+
elif fmt == REVLOGV1:
if flags & ~REVLOGV1_FLAGS:
raise error.RevlogError(_('unknown flags (%#04x) in version %d '
'revlog %s') %
(flags >> 16, fmt, self.indexfile))
+
+ self._inline = versionflags & FLAG_INLINE_DATA
+ self._generaldelta = versionflags & FLAG_GENERALDELTA
+
elif fmt == REVLOGV2:
if flags & ~REVLOGV2_FLAGS:
raise error.RevlogError(_('unknown flags (%#04x) in version %d '
'revlog %s') %
(flags >> 16, fmt, self.indexfile))
+
+ self._inline = versionflags & FLAG_INLINE_DATA
+ # generaldelta implied by version 2 revlogs.
+ self._generaldelta = True
+
else:
raise error.RevlogError(_('unknown version (%d) in revlog %s') %
(fmt, self.indexfile))
@@ -505,8 +533,21 @@
@contextlib.contextmanager
def _datareadfp(self, existingfp=None):
"""file object suitable to read data"""
+ # Use explicit file handle, if given.
if existingfp is not None:
yield existingfp
+
+ # Use a file handle being actively used for writes, if available.
+ # There is some danger to doing this because reads will seek the
+ # file. However, _writeentry() performs a SEEK_END before all writes,
+ # so we should be safe.
+ elif self._writinghandles:
+ if self._inline:
+ yield self._writinghandles[0]
+ else:
+ yield self._writinghandles[1]
+
+ # Otherwise open a new file handle.
else:
if self._inline:
func = self._indexfp
@@ -752,7 +793,7 @@
return chain, stopped
def ancestors(self, revs, stoprev=0, inclusive=False):
- """Generate the ancestors of 'revs' in reverse topological order.
+ """Generate the ancestors of 'revs' in reverse revision order.
Does not generate revs lower than stoprev.
See the documentation for ancestor.lazyancestors for more details."""
@@ -763,12 +804,17 @@
for r in revs:
checkrev(r)
# and we're sure ancestors aren't filtered as well
- if util.safehasattr(parsers, 'rustlazyancestors'):
- return ancestor.rustlazyancestors(
- self.index, revs,
- stoprev=stoprev, inclusive=inclusive)
- return ancestor.lazyancestors(self._uncheckedparentrevs, revs,
- stoprev=stoprev, inclusive=inclusive)
+
+ if rustext is not None:
+ lazyancestors = rustext.ancestor.LazyAncestors
+ arg = self.index
+ elif util.safehasattr(parsers, 'rustlazyancestors'):
+ lazyancestors = ancestor.rustlazyancestors
+ arg = self.index
+ else:
+ lazyancestors = ancestor.lazyancestors
+ arg = self._uncheckedparentrevs
+ return lazyancestors(arg, revs, stoprev=stoprev, inclusive=inclusive)
def descendants(self, revs):
return dagop.descendantrevs(revs, self.revs, self.parentrevs)
@@ -849,6 +895,8 @@
if common is None:
common = [nullrev]
+ if rustext is not None:
+ return rustext.ancestor.MissingAncestors(self.index, common)
return ancestor.incrementalmissingancestors(self.parentrevs, common)
def findmissingrevs(self, common=None, heads=None):
@@ -1056,11 +1104,13 @@
assert heads
return (orderedout, roots, heads)
- def headrevs(self):
- try:
- return self.index.headrevs()
- except AttributeError:
- return self._headrevs()
+ def headrevs(self, revs=None):
+ if revs is None:
+ try:
+ return self.index.headrevs()
+ except AttributeError:
+ return self._headrevs()
+ return dagop.headrevs(revs, self.parentrevs)
def computephases(self, roots):
return self.index.computephasesmapsets(roots)
@@ -1342,6 +1392,8 @@
original seek position will NOT be restored.
Returns a str or buffer of raw byte data.
+
+ Raises if the requested number of bytes could not be read.
"""
# Cache data both forward and backward around the requested
# data, in a fixed size window. This helps speed up operations
@@ -1353,9 +1405,26 @@
with self._datareadfp(df) as df:
df.seek(realoffset)
d = df.read(reallength)
+
self._cachesegment(realoffset, d)
if offset != realoffset or reallength != length:
- return util.buffer(d, offset - realoffset, length)
+ startoffset = offset - realoffset
+ if len(d) - startoffset < length:
+ raise error.RevlogError(
+ _('partial read of revlog %s; expected %d bytes from '
+ 'offset %d, got %d') %
+ (self.indexfile if self._inline else self.datafile,
+ length, realoffset, len(d) - startoffset))
+
+ return util.buffer(d, startoffset, length)
+
+ if len(d) < length:
+ raise error.RevlogError(
+ _('partial read of revlog %s; expected %d bytes from offset '
+ '%d, got %d') %
+ (self.indexfile if self._inline else self.datafile,
+ length, offset, len(d)))
+
return d
def _getsegment(self, offset, length, df=None):
@@ -1498,15 +1567,25 @@
def issnapshot(self, rev):
"""tells whether rev is a snapshot
"""
+ if not self._sparserevlog:
+ return self.deltaparent(rev) == nullrev
+ elif util.safehasattr(self.index, 'issnapshot'):
+ # directly assign the method to cache the testing and access
+ self.issnapshot = self.index.issnapshot
+ return self.issnapshot(rev)
if rev == nullrev:
return True
- deltap = self.deltaparent(rev)
- if deltap == nullrev:
+ entry = self.index[rev]
+ base = entry[3]
+ if base == rev:
+ return True
+ if base == nullrev:
return True
- p1, p2 = self.parentrevs(rev)
- if deltap in (p1, p2):
+ p1 = entry[5]
+ p2 = entry[6]
+ if base == p1 or base == p2:
return False
- return self.issnapshot(deltap)
+ return self.issnapshot(base)
def snapshotdepth(self, rev):
"""number of snapshot in the chain before this one"""
@@ -1731,10 +1810,13 @@
if fp:
fp.flush()
fp.close()
-
- with self._datafp('w') as df:
+ # We can't use the cached file handle after close(). So prevent
+ # its usage.
+ self._writinghandles = None
+
+ with self._indexfp('r') as ifh, self._datafp('w') as dfh:
for r in self:
- df.write(self._getsegmentforrevs(r, r)[1])
+ dfh.write(self._getsegmentforrevs(r, r, df=ifh)[1])
with self._indexfp('w') as fp:
self.version &= ~FLAG_INLINE_DATA
@@ -1977,7 +2059,9 @@
# if the file was seeked to before the end. See issue4943 for more.
#
# We work around this issue by inserting a seek() before writing.
- # Note: This is likely not necessary on Python 3.
+ # Note: This is likely not necessary on Python 3. However, because
+ # the file handle is reused for reads and may be seeked there, we need
+ # to be careful before changing this.
ifh.seek(0, os.SEEK_END)
if dfh:
dfh.seek(0, os.SEEK_END)
@@ -2010,6 +2094,9 @@
this revlog and the node that was added.
"""
+ if self._writinghandles:
+ raise error.ProgrammingError('cannot nest addgroup() calls')
+
nodes = []
r = len(self)
@@ -2029,6 +2116,9 @@
if dfh:
dfh.flush()
ifh.flush()
+
+ self._writinghandles = (ifh, dfh)
+
try:
deltacomputer = deltautil.deltacomputer(self)
# loop through our set of deltas
@@ -2090,7 +2180,10 @@
ifh.close()
dfh = self._datafp("a+")
ifh = self._indexfp("a+")
+ self._writinghandles = (ifh, dfh)
finally:
+ self._writinghandles = None
+
if dfh:
dfh.close()
ifh.close()
@@ -2205,7 +2298,8 @@
return res
def emitrevisions(self, nodes, nodesorder=None, revisiondata=False,
- assumehaveparentrevisions=False, deltaprevious=False):
+ assumehaveparentrevisions=False,
+ deltamode=repository.CG_DELTAMODE_STD):
if nodesorder not in ('nodes', 'storage', 'linear', None):
raise error.ProgrammingError('unhandled value for nodesorder: %s' %
nodesorder)
@@ -2213,6 +2307,10 @@
if nodesorder is None and not self._generaldelta:
nodesorder = 'storage'
+ if (not self._storedeltachains and
+ deltamode != repository.CG_DELTAMODE_PREV):
+ deltamode = repository.CG_DELTAMODE_FULL
+
return storageutil.emitrevisions(
self, nodes, nodesorder, revlogrevisiondelta,
deltaparentfn=self.deltaparent,
@@ -2220,10 +2318,9 @@
rawsizefn=self.rawsize,
revdifffn=self.revdiff,
flagsfn=self.flags,
- sendfulltext=not self._storedeltachains,
+ deltamode=deltamode,
revisiondata=revisiondata,
- assumehaveparentrevisions=assumehaveparentrevisions,
- deltaprevious=deltaprevious)
+ assumehaveparentrevisions=assumehaveparentrevisions)
DELTAREUSEALWAYS = 'always'
DELTAREUSESAMEREVS = 'samerevs'
@@ -2234,7 +2331,7 @@
DELTAREUSEALL = {'always', 'samerevs', 'never', 'fulladd'}
def clone(self, tr, destrevlog, addrevisioncb=None,
- deltareuse=DELTAREUSESAMEREVS, deltabothparents=None):
+ deltareuse=DELTAREUSESAMEREVS, forcedeltabothparents=None):
"""Copy this revlog to another, possibly with format changes.
The destination revlog will contain the same revisions and nodes.
@@ -2268,9 +2365,9 @@
deltas will be recomputed if the delta's parent isn't a parent of the
revision.
- In addition to the delta policy, the ``deltabothparents`` argument
- controls whether to compute deltas against both parents for merges.
- By default, the current default is used.
+ In addition to the delta policy, the ``forcedeltabothparents``
+ argument controls whether to force compute deltas against both parents
+ for merges. By default, the current default is used.
"""
if deltareuse not in self.DELTAREUSEALL:
raise ValueError(_('value for deltareuse invalid: %s') % deltareuse)
@@ -2293,7 +2390,7 @@
elif deltareuse == self.DELTAREUSESAMEREVS:
destrevlog._lazydeltabase = False
- destrevlog._deltabothparents = deltabothparents or oldamd
+ destrevlog._deltabothparents = forcedeltabothparents or oldamd
populatecachedelta = deltareuse in (self.DELTAREUSEALWAYS,
self.DELTAREUSESAMEREVS)
@@ -2412,7 +2509,7 @@
self.opener.rename(newrl.datafile, self.datafile)
self.clearcaches()
- self._loadindex(self.version, None)
+ self._loadindex()
def verifyintegrity(self, state):
"""Verifies the integrity of the revlog.
--- a/mercurial/revlogutils/constants.py Wed Jan 09 20:00:35 2019 -0800
+++ b/mercurial/revlogutils/constants.py Fri Jan 18 13:28:22 2019 -0500
@@ -20,13 +20,15 @@
# Dummy value until file format is finalized.
# Reminder: change the bounds check in revlog.__init__ when this is changed.
REVLOGV2 = 0xDEAD
+# Shared across v1 and v2.
FLAG_INLINE_DATA = (1 << 16)
+# Only used by v1, implied by v2.
FLAG_GENERALDELTA = (1 << 17)
REVLOG_DEFAULT_FLAGS = FLAG_INLINE_DATA
REVLOG_DEFAULT_FORMAT = REVLOGV1
REVLOG_DEFAULT_VERSION = REVLOG_DEFAULT_FORMAT | REVLOG_DEFAULT_FLAGS
REVLOGV1_FLAGS = FLAG_INLINE_DATA | FLAG_GENERALDELTA
-REVLOGV2_FLAGS = REVLOGV1_FLAGS
+REVLOGV2_FLAGS = FLAG_INLINE_DATA
# revlog index flags
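
REVLOGV2_FLAGS is the set of header bits a version 2 revlog may legally
carry; since generaldelta is now implied rather than stored, a v2 header
carrying that bit fails validation. The bit arithmetic of the check:

    FLAG_INLINE_DATA = 1 << 16
    FLAG_GENERALDELTA = 1 << 17
    REVLOGV2_FLAGS = FLAG_INLINE_DATA

    header = FLAG_INLINE_DATA | FLAG_GENERALDELTA
    print(bool(header & ~REVLOGV2_FLAGS))  # True -> rejected as unknown
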
--- a/mercurial/revlogutils/deltas.py Wed Jan 09 20:00:35 2019 -0800
+++ b/mercurial/revlogutils/deltas.py Fri Jan 18 13:28:22 2019 -0500
@@ -10,7 +10,6 @@
from __future__ import absolute_import
import collections
-import heapq
import struct
# import stuff from node for others to import from revlog
@@ -31,6 +30,7 @@
from .. import (
error,
mdiff,
+ util,
)
# maximum <delta-chain-data>/<revision-text-length> ratio
@@ -39,18 +39,24 @@
class _testrevlog(object):
"""minimalist fake revlog to use in doctests"""
- def __init__(self, data, density=0.5, mingap=0):
+ def __init__(self, data, density=0.5, mingap=0, snapshot=()):
"""data is an list of revision payload boundaries"""
self._data = data
self._srdensitythreshold = density
self._srmingapsize = mingap
+ self._snapshot = set(snapshot)
+ self.index = None
def start(self, rev):
+ if rev == nullrev:
+ return 0
if rev == 0:
return 0
return self._data[rev - 1]
def end(self, rev):
+ if rev == nullrev:
+ return 0
return self._data[rev]
def length(self, rev):
@@ -59,7 +65,12 @@
def __len__(self):
return len(self._data)
-def slicechunk(revlog, revs, deltainfo=None, targetsize=None):
+ def issnapshot(self, rev):
+ if rev == nullrev:
+ return True
+ return rev in self._snapshot
+
+def slicechunk(revlog, revs, targetsize=None):
"""slice revs to reduce the amount of unrelated data to be read from disk.
``revs`` is sliced into groups that should be read in one time.
@@ -76,7 +87,7 @@
If individual revisions chunk are larger than this limit, they will still
be raised individually.
- >>> revlog = _testrevlog([
+ >>> data = [
... 5, #00 (5)
... 10, #01 (5)
... 12, #02 (2)
@@ -93,7 +104,8 @@
... 85, #13 (11)
... 86, #14 (1)
... 91, #15 (5)
- ... ])
+ ... ]
+ >>> revlog = _testrevlog(data, snapshot=range(16))
>>> list(slicechunk(revlog, list(range(16))))
[[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]]
@@ -111,19 +123,23 @@
[[0], [11], [13], [15]]
>>> list(slicechunk(revlog, [0, 11, 13, 15], targetsize=20))
[[0], [11], [13, 15]]
+
+ Slicing involving nullrev
+ >>> list(slicechunk(revlog, [-1, 0, 11, 13, 15], targetsize=20))
+ [[-1, 0], [11], [13, 15]]
+ >>> list(slicechunk(revlog, [-1, 13, 15], targetsize=5))
+ [[-1], [13], [15]]
"""
if targetsize is not None:
targetsize = max(targetsize, revlog._srmingapsize)
# targetsize should not be specified when evaluating delta candidates:
# * targetsize is used to ensure we stay within specification when reading,
- # * deltainfo is used to pick are good delta chain when writing.
- if not (deltainfo is None or targetsize is None):
- msg = 'cannot use `targetsize` with a `deltainfo`'
- raise error.ProgrammingError(msg)
- for chunk in _slicechunktodensity(revlog, revs,
- deltainfo,
- revlog._srdensitythreshold,
- revlog._srmingapsize):
+ densityslicing = getattr(revlog.index, 'slicechunktodensity', None)
+ if densityslicing is None:
+ densityslicing = lambda x, y, z: _slicechunktodensity(revlog, x, y, z)
+ for chunk in densityslicing(revs,
+ revlog._srdensitythreshold,
+ revlog._srmingapsize):
for subchunk in _slicechunktosize(revlog, chunk, targetsize):
yield subchunk
@@ -135,7 +151,7 @@
happens when "minimal gap size" interrupted the slicing or when chain are
built in a way that create large blocks next to each other.
- >>> revlog = _testrevlog([
+ >>> data = [
... 3, #0 (3)
... 5, #1 (2)
... 6, #2 (1)
@@ -145,7 +161,10 @@
... 12, #6 (1)
... 13, #7 (1)
... 14, #8 (1)
- ... ])
+ ... ]
+
+ == All snapshots cases ==
+ >>> revlog = _testrevlog(data, snapshot=range(9))
Cases where chunk is already small enough
>>> list(_slicechunktosize(revlog, [0], 3))
@@ -180,40 +199,111 @@
[[1], [3]]
>>> list(_slicechunktosize(revlog, [3, 4, 5], 2))
[[3], [5]]
+
+ == No Snapshot cases ==
+ >>> revlog = _testrevlog(data)
+
+ Cases where chunk is already small enough
+ >>> list(_slicechunktosize(revlog, [0], 3))
+ [[0]]
+ >>> list(_slicechunktosize(revlog, [6, 7], 3))
+ [[6, 7]]
+ >>> list(_slicechunktosize(revlog, [0], None))
+ [[0]]
+ >>> list(_slicechunktosize(revlog, [6, 7], None))
+ [[6, 7]]
+
+ cases where we need actual slicing
+ >>> list(_slicechunktosize(revlog, [0, 1], 3))
+ [[0], [1]]
+ >>> list(_slicechunktosize(revlog, [1, 3], 3))
+ [[1], [3]]
+ >>> list(_slicechunktosize(revlog, [1, 2, 3], 3))
+ [[1], [2, 3]]
+ >>> list(_slicechunktosize(revlog, [3, 5], 3))
+ [[3], [5]]
+ >>> list(_slicechunktosize(revlog, [3, 4, 5], 3))
+ [[3], [4, 5]]
+ >>> list(_slicechunktosize(revlog, [5, 6, 7, 8], 3))
+ [[5], [6, 7, 8]]
+ >>> list(_slicechunktosize(revlog, [0, 1, 2, 3, 4, 5, 6, 7, 8], 3))
+ [[0], [1, 2], [3], [5], [6, 7, 8]]
+
+ Case with too large individual chunk (must return valid chunk)
+ >>> list(_slicechunktosize(revlog, [0, 1], 2))
+ [[0], [1]]
+ >>> list(_slicechunktosize(revlog, [1, 3], 1))
+ [[1], [3]]
+ >>> list(_slicechunktosize(revlog, [3, 4, 5], 2))
+ [[3], [5]]
+
+ == mixed case ==
+ >>> revlog = _testrevlog(data, snapshot=[0, 1, 2])
+ >>> list(_slicechunktosize(revlog, list(range(9)), 5))
+ [[0, 1], [2], [3, 4, 5], [6, 7, 8]]
"""
assert targetsize is None or 0 <= targetsize
- if targetsize is None or segmentspan(revlog, revs) <= targetsize:
+ startdata = revlog.start(revs[0])
+ enddata = revlog.end(revs[-1])
+ fullspan = enddata - startdata
+ if targetsize is None or fullspan <= targetsize:
yield revs
return
startrevidx = 0
- startdata = revlog.start(revs[0])
- endrevidx = 0
+ endrevidx = 1
iterrevs = enumerate(revs)
next(iterrevs) # skip first rev.
+ # first step: get snapshots out of the way
for idx, r in iterrevs:
span = revlog.end(r) - startdata
- if span <= targetsize:
- endrevidx = idx
+ snapshot = revlog.issnapshot(r)
+ if span <= targetsize and snapshot:
+ endrevidx = idx + 1
else:
- chunk = _trimchunk(revlog, revs, startrevidx, endrevidx + 1)
+ chunk = _trimchunk(revlog, revs, startrevidx, endrevidx)
if chunk:
yield chunk
startrevidx = idx
startdata = revlog.start(r)
- endrevidx = idx
- yield _trimchunk(revlog, revs, startrevidx)
+ endrevidx = idx + 1
+ if not snapshot:
+ break
-def _slicechunktodensity(revlog, revs, deltainfo=None, targetdensity=0.5,
+ # for the others, we use binary slicing to quickly converge toward valid
+ # chunks (otherwise, we might end up looking for start/end of many
+ # revisions). This logic is not looking for the perfect slicing point; it
+ # focuses on quickly converging toward valid chunks.
+ nbitem = len(revs)
+ while (enddata - startdata) > targetsize:
+ endrevidx = nbitem
+ if nbitem - startrevidx <= 1:
+ break # protect against individual chunk larger than limit
+ localenddata = revlog.end(revs[endrevidx - 1])
+ span = localenddata - startdata
+ while span > targetsize:
+ if endrevidx - startrevidx <= 1:
+ break # protect against individual chunk larger than limit
+ endrevidx -= (endrevidx - startrevidx) // 2
+ localenddata = revlog.end(revs[endrevidx - 1])
+ span = localenddata - startdata
+ chunk = _trimchunk(revlog, revs, startrevidx, endrevidx)
+ if chunk:
+ yield chunk
+ startrevidx = endrevidx
+ startdata = revlog.start(revs[startrevidx])
+
+ chunk = _trimchunk(revlog, revs, startrevidx)
+ if chunk:
+ yield chunk
+
+def _slicechunktodensity(revlog, revs, targetdensity=0.5,
mingapsize=0):
"""slice revs to reduce the amount of unrelated data to be read from disk.
``revs`` is sliced into groups that should be read in one time.
Assume that revs are sorted.
- ``deltainfo`` is a _deltainfo instance of a revision that we would append
- to the top of the revlog.
-
The initial chunk is sliced until the overall density (payload/chunks-span
ratio) is above `targetdensity`. No gap smaller than `mingapsize` is
skipped.
@@ -264,21 +354,14 @@
yield revs
return
- nextrev = len(revlog)
- nextoffset = revlog.end(nextrev - 1)
-
- if deltainfo is None:
- deltachainspan = segmentspan(revlog, revs)
- chainpayload = sum(length(r) for r in revs)
- else:
- deltachainspan = deltainfo.distance
- chainpayload = deltainfo.compresseddeltalen
+ deltachainspan = segmentspan(revlog, revs)
if deltachainspan < mingapsize:
yield revs
return
readdata = deltachainspan
+ chainpayload = sum(length(r) for r in revs)
if deltachainspan:
density = chainpayload / float(deltachainspan)
@@ -289,21 +372,12 @@
yield revs
return
- if deltainfo is not None and deltainfo.deltalen:
- revs = list(revs)
- revs.append(nextrev)
-
# Store the gaps in a heap to have them sorted by decreasing size
- gapsheap = []
- heapq.heapify(gapsheap)
+ gaps = []
prevend = None
for i, rev in enumerate(revs):
- if rev < nextrev:
- revstart = start(rev)
- revlen = length(rev)
- else:
- revstart = nextoffset
- revlen = deltainfo.deltalen
+ revstart = start(rev)
+ revlen = length(rev)
# Skip empty revisions to form larger holes
if revlen == 0:
@@ -313,30 +387,31 @@
gapsize = revstart - prevend
# only consider holes that are large enough
if gapsize > mingapsize:
- heapq.heappush(gapsheap, (-gapsize, i))
+ gaps.append((gapsize, i))
prevend = revstart + revlen
+ # sort the gaps to pop them from largest to smallest
+ gaps.sort()
# Collect the indices of the largest holes until the density is acceptable
- indicesheap = []
- heapq.heapify(indicesheap)
- while gapsheap and density < targetdensity:
- oppgapsize, gapidx = heapq.heappop(gapsheap)
+ selected = []
+ while gaps and density < targetdensity:
+ gapsize, gapidx = gaps.pop()
- heapq.heappush(indicesheap, gapidx)
+ selected.append(gapidx)
- # the gap sizes are stored as negatives to be sorted decreasingly
- # by the heap
+ # the gaps list is sorted ascending, so popping from the end yields
+ # the largest remaining gap first
- readdata -= (-oppgapsize)
+ readdata -= gapsize
if readdata > 0:
density = chainpayload / float(readdata)
else:
density = 1.0
+ selected.sort()
# Cut the revs at collected indices
previdx = 0
- while indicesheap:
- idx = heapq.heappop(indicesheap)
+ for idx in selected:
chunk = _trimchunk(revlog, revs, previdx, idx)
if chunk:
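
Replacing the two heaps with one sorted list keeps the behavior:
sorting the (gapsize, index) pairs ascending and popping from the end
visits gaps from largest to smallest, and re-sorting the selected
indices restores revision order for the cut points. In miniature:

    gaps = [(4, 1), (10, 3), (2, 7)]  # hypothetical (gapsize, index) pairs
    gaps.sort()
    selected = []
    while gaps:
        gapsize, gapidx = gaps.pop()  # largest remaining gap first
        selected.append(gapidx)
    selected.sort()                   # cut points back in revision order
    print(selected)                   # [1, 3, 7]
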
@@ -401,7 +476,7 @@
return revs[startidx:endidx]
-def segmentspan(revlog, revs, deltainfo=None):
+def segmentspan(revlog, revs):
"""Get the byte span of a segment of revisions
revs is a sorted array of revision numbers
@@ -427,13 +502,7 @@
"""
if not revs:
return 0
- if deltainfo is not None and len(revlog) <= revs[-1]:
- if len(revs) == 1:
- return deltainfo.deltalen
- offset = revlog.end(len(revlog) - 1)
- end = deltainfo.deltalen + offset
- else:
- end = revlog.end(revs[-1])
+ end = revlog.end(revs[-1])
return end - revlog.start(revs[0])
def _textfromdelta(fh, revlog, baserev, delta, p1, p2, flags, expectednode):
@@ -489,45 +558,23 @@
# deltas we need to apply -- bounding it limits the amount of CPU
# we consume.
- if revlog._sparserevlog:
- # As sparse-read will be used, we can consider that the distance,
- # instead of being the span of the whole chunk,
- # is the span of the largest read chunk
- base = deltainfo.base
-
- if base != nullrev:
- deltachain = revlog._deltachain(base)[0]
- else:
- deltachain = []
-
- # search for the first non-snapshot revision
- for idx, r in enumerate(deltachain):
- if not revlog.issnapshot(r):
- break
- deltachain = deltachain[idx:]
- chunks = slicechunk(revlog, deltachain, deltainfo)
- all_span = [segmentspan(revlog, revs, deltainfo)
- for revs in chunks]
- distance = max(all_span)
- else:
- distance = deltainfo.distance
-
textlen = revinfo.textlen
defaultmax = textlen * 4
maxdist = revlog._maxdeltachainspan
if not maxdist:
- maxdist = distance # ensure the conditional pass
+ maxdist = deltainfo.distance # ensure the conditional pass
maxdist = max(maxdist, defaultmax)
- if revlog._sparserevlog and maxdist < revlog._srmingapsize:
- # In multiple place, we are ignoring irrelevant data range below a
- # certain size. Be also apply this tradeoff here and relax span
- # constraint for small enought content.
- maxdist = revlog._srmingapsize
# Bad delta from read span:
#
# If the span of data read is larger than the maximum allowed.
- if maxdist < distance:
+ #
+ # In the sparse-revlog case, we rely on the associated "sparse reading"
+ # to avoid issues related to the span of data read. In theory, it would
+ # be possible to build a pathological revlog where the delta pattern
+ # leads to too many reads. However, this does not happen in practice at
+ # all. So we skip the span check entirely.
+ if not revlog._sparserevlog and maxdist < deltainfo.distance:
return False
# Bad delta from new delta size:
@@ -567,6 +614,11 @@
return True
+# If a revision's full text is this many times bigger than a base candidate's
+# full text, it is very unlikely that the base will produce a valid delta. We
+# no longer consider such candidates.
+LIMIT_BASE2TEXT = 500
+
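
To make the new cutoff concrete, a rough sketch with invented numbers
(only LIMIT_BASE2TEXT comes from the patch above):

    # illustrative arithmetic only: with a 1 MiB full text, candidates whose
    # raw size is below textlen // LIMIT_BASE2TEXT are dropped outright
    textlen = 1048576
    LIMIT_BASE2TEXT = 500
    minbase = textlen // LIMIT_BASE2TEXT   # == 2097 bytes, roughly 2 KiB
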
def _candidategroups(revlog, textlen, p1, p2, cachedelta):
"""Provides group of revision to be tested as delta base
@@ -580,6 +632,7 @@
deltalength = revlog.length
deltaparent = revlog.deltaparent
+ sparse = revlog._sparserevlog
good = None
deltas_limit = textlen * LIMIT_DELTA2TEXT
@@ -599,6 +652,10 @@
or deltalength(rev))):
tested.add(rev)
rev = deltaparent(rev)
+ # no need to try a delta against nullrev, this will be done as a
+ # last resort.
+ if rev == nullrev:
+ continue
# filter out revision we tested already
if rev in tested:
continue
@@ -606,13 +663,22 @@
# filter out delta base that will never produce good delta
if deltas_limit < revlog.length(rev):
continue
- # no need to try a delta against nullrev, this will be done as a
- # last resort.
- if rev == nullrev:
+ if sparse and revlog.rawsize(rev) < (textlen // LIMIT_BASE2TEXT):
continue
# no delta for rawtext-changing revs (see "candelta" for why)
if revlog.flags(rev) & REVIDX_RAWTEXT_CHANGING_FLAGS:
continue
+ # If we reach here, we are about to build and test a delta.
+ # The delta building process will compute the chaininfo in all
+ # cases; since that computation is cached, it is fine to access
+ # it here too.
+ chainlen, chainsize = revlog._chaininfo(rev)
+ # if chain will be too long, skip base
+ if revlog._maxchainlen and chainlen >= revlog._maxchainlen:
+ continue
+ # if the chain already has too much data, skip this base
+ if deltas_limit < chainsize:
+ continue
group.append(rev)
if group:
# XXX: in the sparse revlog case, group can become large,
@@ -623,11 +689,14 @@
def _findsnapshots(revlog, cache, start_rev):
"""find snapshot from start_rev to tip"""
- deltaparent = revlog.deltaparent
- issnapshot = revlog.issnapshot
- for rev in revlog.revs(start_rev):
- if issnapshot(rev):
- cache[deltaparent(rev)].append(rev)
+ if util.safehasattr(revlog.index, 'findsnapshots'):
+ revlog.index.findsnapshots(cache, start_rev)
+ else:
+ deltaparent = revlog.deltaparent
+ issnapshot = revlog.issnapshot
+ for rev in revlog.revs(start_rev):
+ if issnapshot(rev):
+ cache[deltaparent(rev)].append(rev)
def _refinedgroups(revlog, p1, p2, cachedelta):
good = None
@@ -644,7 +713,8 @@
if good is not None:
yield None
return
- for candidates in _rawgroups(revlog, p1, p2, cachedelta):
+ snapshots = collections.defaultdict(list)
+ for candidates in _rawgroups(revlog, p1, p2, cachedelta, snapshots):
good = yield candidates
if good is not None:
break
@@ -665,12 +735,8 @@
break
good = yield (base,)
# refine snapshot up
- #
- # XXX the _findsnapshots call can be expensive and is "duplicated" with
- # the one done in `_rawgroups`. Once we start working on performance,
- # we should make the two logics share this computation.
- snapshots = collections.defaultdict(list)
- _findsnapshots(revlog, snapshots, good + 1)
+ if not snapshots:
+ _findsnapshots(revlog, snapshots, good + 1)
previous = None
while good != previous:
previous = good
@@ -680,7 +746,7 @@
# we have found nothing
yield None
-def _rawgroups(revlog, p1, p2, cachedelta):
+def _rawgroups(revlog, p1, p2, cachedelta, snapshots=None):
"""Provides group of revision to be tested as delta base
This lower level function focuses on emitting deltas that are theoretically interesting
@@ -710,7 +776,9 @@
yield parents
if sparse and parents:
- snapshots = collections.defaultdict(list) # map: base-rev: snapshot-rev
+ if snapshots is None:
+ # map: base-rev: snapshot-rev
+ snapshots = collections.defaultdict(list)
# See if we can use an existing snapshot in the parent chains to use as
# a base for a new intermediate-snapshot
#
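
Taken together, the slicing changes above replace the two heaps with plain
sorted lists. The following is a minimal standalone sketch of the resulting
algorithm, not Mercurial's actual `slicechunk` API: `revs` is a sorted list,
and the `start`/`length` callables standing in for the revlog accessors are
assumptions of this sketch.

    def slicedensity(revs, start, length, targetdensity=0.5, mingapsize=0):
        """Sketch: yield slices of `revs` dense enough to read in one go."""
        span = start(revs[-1]) + length(revs[-1]) - start(revs[0])
        payload = sum(length(r) for r in revs)
        readdata = span
        density = payload / float(span) if span else 1.0
        if span < mingapsize or density >= targetdensity:
            yield revs
            return
        # collect the gaps in a plain list instead of a heap
        gaps = []
        prevend = None
        for i, rev in enumerate(revs):
            if length(rev) == 0:
                continue                  # empty revs merge into larger holes
            if prevend is not None:
                gapsize = start(rev) - prevend
                if gapsize > mingapsize:
                    gaps.append((gapsize, i))
            prevend = start(rev) + length(rev)
        # sort ascending, then pop from the end: largest gaps first
        gaps.sort()
        selected = []
        while gaps and density < targetdensity:
            gapsize, gapidx = gaps.pop()
            selected.append(gapidx)
            readdata -= gapsize
            density = payload / float(readdata) if readdata > 0 else 1.0
        # cut the revs at the selected indices, in index order
        selected.sort()
        previdx = 0
        for idx in selected + [len(revs)]:
            chunk = revs[previdx:idx]
            if chunk:
                yield chunk
            previdx = idx

    # e.g. four 10-byte revs with a 180-byte hole in the middle:
    offsets, sizes = [0, 10, 200, 210], [10, 10, 10, 10]
    chunks = list(slicedensity([0, 1, 2, 3],
                               offsets.__getitem__, sizes.__getitem__))
    # -> [[0, 1], [2, 3]]
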
--- a/mercurial/revset.py Wed Jan 09 20:00:35 2019 -0800
+++ b/mercurial/revset.py Fri Jan 18 13:28:22 2019 -0500
@@ -125,6 +125,13 @@
return baseset([x])
return baseset()
+def rawsmartset(repo, subset, x, order):
+ """argument is already a smartset, use that directly"""
+ if order == followorder:
+ return subset & x
+ else:
+ return x & subset
+
def rangeset(repo, subset, x, y, order):
m = getset(repo, fullreposet(repo), x)
n = getset(repo, fullreposet(repo), y)
@@ -218,6 +225,15 @@
def relationset(repo, subset, x, y, order):
raise error.ParseError(_("can't use a relation in this context"))
+def generationsrel(repo, subset, x, rel, n, order):
+ # TODO: support range, rewrite tests, and drop startdepth argument
+ # from ancestors() and descendants() predicates
+ if n <= 0:
+ n = -n
+ return _ancestors(repo, subset, x, startdepth=n, stopdepth=n + 1)
+ else:
+ return _descendants(repo, subset, x, startdepth=n, stopdepth=n + 1)
+
def relsubscriptset(repo, subset, x, y, z, order):
# this is a pretty basic implementation of the 'x#y[z]' operator, still
# experimental so undocumented. see the wiki for further ideas.
@@ -225,17 +241,11 @@
rel = getsymbol(y)
n = getinteger(z, _("relation subscript must be an integer"))
- # TODO: perhaps this should be a table of relation functions
- if rel in ('g', 'generations'):
- # TODO: support range, rewrite tests, and drop startdepth argument
- # from ancestors() and descendants() predicates
- if n <= 0:
- n = -n
- return _ancestors(repo, subset, x, startdepth=n, stopdepth=n + 1)
- else:
- return _descendants(repo, subset, x, startdepth=n, stopdepth=n + 1)
+ if rel in subscriptrelations:
+ return subscriptrelations[rel](repo, subset, x, rel, n, order)
- raise error.UnknownIdentifier(rel, ['generations'])
+ relnames = [r for r in subscriptrelations.keys() if len(r) > 1]
+ raise error.UnknownIdentifier(rel, relnames)
def subscriptset(repo, subset, x, y, order):
raise error.ParseError(_("can't use a subscript in this context"))
@@ -466,9 +476,6 @@
for name, bmrev in repo._bookmarks.iteritems():
if matcher(name):
matchrevs.add(bmrev)
- if not matchrevs:
- raise error.RepoLookupError(_("no bookmarks exist"
- " that match '%s'") % pattern)
for bmrev in matchrevs:
bms.add(repo[bmrev].rev())
else:
@@ -1161,9 +1168,19 @@
# argument set should never define order
if order == defineorder:
order = followorder
- s = getset(repo, subset, x, order=order)
- ps = parents(repo, subset, x)
- return s - ps
+ inputset = getset(repo, fullreposet(repo), x, order=order)
+ wdirparents = None
+ if node.wdirrev in inputset:
+ # a bit slower, but not a common case, so good enough for now
+ wdirparents = [p.rev() for p in repo[None].parents()]
+ inputset = set(inputset)
+ inputset.discard(node.wdirrev)
+ heads = repo.changelog.headrevs(inputset)
+ if wdirparents is not None:
+ heads.difference_update(wdirparents)
+ heads.add(node.wdirrev)
+ heads = baseset(heads)
+ return subset & heads
@predicate('hidden()', safe=True)
def hidden(repo, subset, x):
@@ -1330,9 +1347,6 @@
for name, ns in repo.names.iteritems():
if matcher(name):
namespaces.add(ns)
- if not namespaces:
- raise error.RepoLookupError(_("no namespace exists"
- " that match '%s'") % pattern)
names = set()
for ns in namespaces:
@@ -2219,6 +2233,12 @@
"ancestor": ancestorspec,
"parent": parentspec,
"parentpost": parentpost,
+ "smartset": rawsmartset,
+}
+
+subscriptrelations = {
+ "g": generationsrel,
+ "generations": generationsrel,
}
def lookupfn(repo):
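
With the relation now table-driven, the `#` subscript operator (still
experimental and undocumented) keeps its previous behaviour; for
illustration:

    $ hg log -r '.#generations[-2]'   # exactly the grandparents of '.'
    $ hg log -r 'tip#g[1]'            # children of tip, via the short alias
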
--- a/mercurial/revsetlang.py Wed Jan 09 20:00:35 2019 -0800
+++ b/mercurial/revsetlang.py Fri Jan 18 13:28:22 2019 -0500
@@ -15,6 +15,7 @@
node,
parser,
pycompat,
+ smartset,
util,
)
from .utils import (
@@ -332,7 +333,7 @@
elif op == 'negate':
s = getstring(x[1], _("can't negate that"))
return _analyze(('string', '-' + s))
- elif op in ('string', 'symbol'):
+ elif op in ('string', 'symbol', 'smartset'):
return x
elif op == 'rangeall':
return (op, None)
@@ -372,7 +373,7 @@
return 0, x
op = x[0]
- if op in ('string', 'symbol'):
+ if op in ('string', 'symbol', 'smartset'):
return 0.5, x # single revisions are small
elif op == 'and':
wa, ta = _optimize(x[1])
@@ -534,7 +535,8 @@
def foldconcat(tree):
"""Fold elements to be concatenated by `##`
"""
- if not isinstance(tree, tuple) or tree[0] in ('string', 'symbol'):
+ if (not isinstance(tree, tuple)
+ or tree[0] in ('string', 'symbol', 'smartset')):
return tree
if tree[0] == '_concat':
pending = [tree]
@@ -583,7 +585,7 @@
def _formatargtype(c, arg):
if c == 'd':
- return '%d' % int(arg)
+ return 'rev(%d)' % int(arg)
elif c == 's':
return _quote(arg)
elif c == 'r':
@@ -607,7 +609,7 @@
elif l == 1:
return _formatargtype(t, s[0])
elif t == 'd':
- return "_intlist('%s')" % "\0".join('%d' % int(a) for a in s)
+ return _formatintlist(s)
elif t == 's':
return "_list(%s)" % _quote("\0".join(s))
elif t == 'n':
@@ -621,6 +623,17 @@
m = l // 2
return '(%s or %s)' % (_formatlistexp(s[:m], t), _formatlistexp(s[m:], t))
+def _formatintlist(data):
+ try:
+ l = len(data)
+ if l == 0:
+ return "_list('')"
+ elif l == 1:
+ return _formatargtype('d', data[0])
+ return "_intlist('%s')" % "\0".join('%d' % int(a) for a in data)
+ except (TypeError, ValueError):
+ raise error.ParseError(_('invalid argument for revspec'))
+
def _formatparamexp(args, t):
return ', '.join(_formatargtype(t, a) for a in args)
@@ -638,7 +651,7 @@
Supported arguments:
%r = revset expression, parenthesized
- %d = int(arg), no quoting
+ %d = rev(int(arg)), no quoting
%s = string(arg), escaped and single-quoted
%b = arg.branch(), escaped and single-quoted
%n = hex(arg), single-quoted
@@ -650,9 +663,9 @@
>>> formatspec(b'%r:: and %lr', b'10 or 11', (b"this()", b"that()"))
'(10 or 11):: and ((this()) or (that()))'
>>> formatspec(b'%d:: and not %d::', 10, 20)
- '10:: and not 20::'
+ 'rev(10):: and not rev(20)::'
>>> formatspec(b'%ld or %ld', [], [1])
- "_list('') or 1"
+ "_list('') or rev(1)"
>>> formatspec(b'keyword(%s)', b'foo\\xe9')
"keyword('foo\\\\xe9')"
>>> b = lambda: b'default'
@@ -666,6 +679,50 @@
>>> formatspec(b'%ls', [b'a', b"'"])
"_list('a\\\\x00\\\\'')"
'''
+ parsed = _parseargs(expr, args)
+ ret = []
+ for t, arg in parsed:
+ if t is None:
+ ret.append(arg)
+ elif t == 'baseset':
+ if isinstance(arg, set):
+ arg = sorted(arg)
+ ret.append(_formatintlist(list(arg)))
+ else:
+ raise error.ProgrammingError("unknown revspec item type: %r" % t)
+ return b''.join(ret)
+
+def spectree(expr, *args):
+ """similar to formatspec but return a parsed and optimized tree"""
+ parsed = _parseargs(expr, args)
+ ret = []
+ inputs = []
+ for t, arg in parsed:
+ if t is None:
+ ret.append(arg)
+ elif t == 'baseset':
+ newtree = ('smartset', smartset.baseset(arg))
+ inputs.append(newtree)
+ ret.append("$")
+ else:
+ raise error.ProgrammingError("unknown revspec item type: %r" % t)
+ expr = b''.join(ret)
+ tree = _parsewith(expr, syminitletters=_aliassyminitletters)
+ tree = parser.buildtree(tree, ('symbol', '$'), *inputs)
+ tree = foldconcat(tree)
+ tree = analyze(tree)
+ tree = optimize(tree)
+ return tree
+
+def _parseargs(expr, args):
+ """parse the expression and replace all inexpensive args
+
+ return a list of tuples [(arg-type, arg-value)]
+
+ Arg-type can be:
+ * None: a string ready to be concatenated into a final spec
+ * 'baseset': an iterable of revisions
+ """
expr = pycompat.bytestr(expr)
argiter = iter(args)
ret = []
@@ -673,16 +730,16 @@
while pos < len(expr):
q = expr.find('%', pos)
if q < 0:
- ret.append(expr[pos:])
+ ret.append((None, expr[pos:]))
break
- ret.append(expr[pos:q])
+ ret.append((None, expr[pos:q]))
pos = q + 1
try:
d = expr[pos]
except IndexError:
raise error.ParseError(_('incomplete revspec format character'))
if d == '%':
- ret.append(d)
+ ret.append((None, d))
pos += 1
continue
@@ -692,19 +749,28 @@
raise error.ParseError(_('missing argument for revspec'))
f = _formatlistfuncs.get(d)
if f:
- # a list of some type
+ # a list of some type, might be expensive, do not replace
pos += 1
+ islist = (d == 'l')
try:
d = expr[pos]
except IndexError:
raise error.ParseError(_('incomplete revspec format character'))
+ if islist and d == 'd' and arg:
+ # we don't create a baseset yet, because it comes with an
+ # extra cost. If we are going to serialize it, we had better
+ # skip that step.
+ ret.append(('baseset', arg))
+ pos += 1
+ continue
try:
- ret.append(f(list(arg), d))
+ ret.append((None, f(list(arg), d)))
except (TypeError, ValueError):
raise error.ParseError(_('invalid argument for revspec'))
else:
+ # a single entry, not expensive, replace
try:
- ret.append(_formatargtype(d, arg))
+ ret.append((None, _formatargtype(d, arg)))
except (TypeError, ValueError):
raise error.ParseError(_('invalid argument for revspec'))
pos += 1
@@ -714,7 +780,7 @@
raise error.ParseError(_('too many revspec arguments specified'))
except StopIteration:
pass
- return ''.join(ret)
+ return ret
def prettyformat(tree):
return parser.prettyformat(tree, ('string', 'symbol'))
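
A short interactive illustration of the new behaviour, mirroring the
doctests above:

    >>> from mercurial import revsetlang
    >>> revsetlang.formatspec(b'%d:: and not %d::', 10, 20)
    'rev(10):: and not rev(20)::'
    >>> revsetlang.formatspec(b'%ld', [5])
    'rev(5)'

`spectree()` accepts the same format string but, for `%ld` arguments, splices
an in-memory `smartset.baseset` into the parsed tree through the `$`
placeholder instead of serializing the list into the expression.
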
--- a/mercurial/rewriteutil.py Wed Jan 09 20:00:35 2019 -0800
+++ b/mercurial/rewriteutil.py Fri Jan 18 13:28:22 2019 -0500
@@ -27,10 +27,10 @@
hint = _("no changeset checked out")
raise error.Abort(msg, hint=hint)
- publicrevs = repo.revs('%ld and public()', revs)
if len(repo[None].parents()) > 1:
raise error.Abort(_("cannot %s while merging") % action)
+ publicrevs = repo.revs('%ld and public()', revs)
if publicrevs:
msg = _("cannot %s public changesets") % (action)
hint = _("see 'hg help phases' for details")
--- a/mercurial/scmutil.py Wed Jan 09 20:00:35 2019 -0800
+++ b/mercurial/scmutil.py Fri Jan 18 13:28:22 2019 -0500
@@ -12,7 +12,6 @@
import hashlib
import os
import re
-import socket
import subprocess
import weakref
@@ -212,6 +211,8 @@
ui.error(_("abort: file censored %s!\n") % inst)
except error.StorageError as inst:
ui.error(_("abort: %s!\n") % inst)
+ if inst.hint:
+ ui.error(_("(%s)\n") % inst.hint)
except error.InterventionRequired as inst:
ui.error("%s\n" % inst)
if inst.hint:
@@ -268,8 +269,6 @@
# Commands shouldn't sys.exit directly, but give a return code.
# Just in case catch this and and pass exit code to caller.
return inst.code
- except socket.error as inst:
- ui.error(_("abort: %s\n") % stringutil.forcebytestr(inst.args[-1]))
return -1
@@ -721,7 +720,7 @@
allspecs = []
for spec in specs:
if isinstance(spec, int):
- spec = revsetlang.formatspec('rev(%d)', spec)
+ spec = revsetlang.formatspec('%d', spec)
allspecs.append(spec)
return repo.anyrevs(allspecs, user=True, localalias=localalias)
@@ -812,21 +811,29 @@
raise error.ParseError(msg)
return files[0]
+def getorigvfs(ui, repo):
+ """return a vfs suitable to save 'orig' file
+
+ return None if no special directory is configured"""
+ origbackuppath = ui.config('ui', 'origbackuppath')
+ if not origbackuppath:
+ return None
+ return vfs.vfs(repo.wvfs.join(origbackuppath))
+
def origpath(ui, repo, filepath):
'''customize where .orig files are created
Fetch user defined path from config file: [ui] origbackuppath = <path>
Fall back to default (filepath with .orig suffix) if not specified
'''
- origbackuppath = ui.config('ui', 'origbackuppath')
- if not origbackuppath:
+ origvfs = getorigvfs(ui, repo)
+ if origvfs is None:
return filepath + ".orig"
# Convert filepath from an absolute path into a path inside the repo.
filepathfromroot = util.normpath(os.path.relpath(filepath,
start=repo.root))
- origvfs = vfs.vfs(repo.wjoin(origbackuppath))
origbackupdir = origvfs.dirname(filepathfromroot)
if not origvfs.isdir(origbackupdir) or origvfs.islink(origbackupdir):
ui.note(_('creating directory: %s\n') % origvfs.join(origbackupdir))
@@ -891,32 +898,33 @@
repls[key] = value
replacements = repls
+ # Unfiltered repo is needed since nodes in replacements might be hidden.
+ unfi = repo.unfiltered()
+
# Calculate bookmark movements
if moves is None:
moves = {}
- # Unfiltered repo is needed since nodes in replacements might be hidden.
- unfi = repo.unfiltered()
- for oldnodes, newnodes in replacements.items():
- for oldnode in oldnodes:
- if oldnode in moves:
- continue
- if len(newnodes) > 1:
- # usually a split, take the one with biggest rev number
- newnode = next(unfi.set('max(%ln)', newnodes)).node()
- elif len(newnodes) == 0:
- # move bookmark backwards
- allreplaced = []
- for rep in replacements:
- allreplaced.extend(rep)
- roots = list(unfi.set('max((::%n) - %ln)', oldnode,
- allreplaced))
- if roots:
- newnode = roots[0].node()
+ for oldnodes, newnodes in replacements.items():
+ for oldnode in oldnodes:
+ if oldnode in moves:
+ continue
+ if len(newnodes) > 1:
+ # usually a split, take the one with biggest rev number
+ newnode = next(unfi.set('max(%ln)', newnodes)).node()
+ elif len(newnodes) == 0:
+ # move bookmark backwards
+ allreplaced = []
+ for rep in replacements:
+ allreplaced.extend(rep)
+ roots = list(unfi.set('max((::%n) - %ln)', oldnode,
+ allreplaced))
+ if roots:
+ newnode = roots[0].node()
+ else:
+ newnode = nullid
else:
- newnode = nullid
- else:
- newnode = newnodes[0]
- moves[oldnode] = newnode
+ newnode = newnodes[0]
+ moves[oldnode] = newnode
allnewnodes = [n for ns in replacements.values() for n in ns]
toretract = {}
@@ -1166,7 +1174,7 @@
wctx.copy(origsrc, dst)
def writerequires(opener, requirements):
- with opener('requires', 'w') as fp:
+ with opener('requires', 'w', atomictemp=True) as fp:
for r in sorted(requirements):
fp.write("%s\n" % r)
@@ -1249,16 +1257,15 @@
results cached. The decorated function is called. The results are stashed
away in a ``_filecache`` dict on the object whose method is decorated.
- On subsequent access, the cached result is returned.
-
- On external property set operations, stat() calls are performed and the new
- value is cached.
+ On subsequent access, the cached result is used as it is set to the
+ instance dictionary.
- On property delete operations, cached data is removed.
+ On external property set/delete operations, the caller must update the
+ corresponding _filecache entry appropriately. Use __class__.<attr>.set()
+ instead of directly setting <attr>.
- When using the property API, cached data is always returned, if available:
- no stat() is performed to check if the file has changed and if the function
- needs to be called to reflect file changes.
+ When using the property API, the cached data is always used if available.
+ No stat() is performed to check if the file has changed.
Others can muck about with the state of the ``_filecache`` dict. e.g. they
can populate an entry before the property's getter is called. In this case,
@@ -1291,10 +1298,8 @@
# if accessed on the class, return the descriptor itself.
if obj is None:
return self
- # do we need to check if the file changed?
- if self.sname in obj.__dict__:
- assert self.name in obj._filecache, self.name
- return obj.__dict__[self.sname]
+
+ assert self.sname not in obj.__dict__
entry = obj._filecache.get(self.name)
@@ -1314,7 +1319,10 @@
obj.__dict__[self.sname] = entry.obj
return entry.obj
- def __set__(self, obj, value):
+ # don't implement __set__(), which would make __dict__ lookup as slow as
+ # function call.
+
+ def set(self, obj, value):
if self.name not in obj._filecache:
# we add an entry for the missing value because X in __dict__
# implies X in _filecache
@@ -1327,12 +1335,6 @@
ce.obj = value # update cached copy
obj.__dict__[self.sname] = value # update copy returned by obj.x
- def __delete__(self, obj):
- try:
- del obj.__dict__[self.sname]
- except KeyError:
- raise AttributeError(self.sname)
-
def extdatasource(repo, source):
"""Gather a map of rev -> value dict from the specified source
@@ -1410,12 +1412,14 @@
**kwargs)
class progress(object):
- def __init__(self, ui, topic, unit="", total=None):
+ def __init__(self, ui, updatebar, topic, unit="", total=None):
self.ui = ui
self.pos = 0
self.topic = topic
self.unit = unit
self.total = total
+ self.debug = ui.configbool('progress', 'debug')
+ self._updatebar = updatebar
def __enter__(self):
return self
@@ -1428,25 +1432,38 @@
if total:
self.total = total
self.pos = pos
- self._print(item)
+ self._updatebar(self.topic, self.pos, item, self.unit, self.total)
+ if self.debug:
+ self._printdebug(item)
def increment(self, step=1, item="", total=None):
self.update(self.pos + step, item, total)
def complete(self):
- self.ui.progress(self.topic, None)
+ self.pos = None
+ self.unit = ""
+ self.total = None
+ self._updatebar(self.topic, self.pos, "", self.unit, self.total)
- def _print(self, item):
- self.ui.progress(self.topic, self.pos, item, self.unit,
- self.total)
+ def _printdebug(self, item):
+ unit = ''  # avoid an unbound name when no unit is set
+ if self.unit:
+ unit = ' ' + self.unit
+ if item:
+ item = ' ' + item
+
+ if self.total:
+ pct = 100.0 * self.pos / self.total
+ self.ui.debug('%s:%s %d/%d%s (%4.2f%%)\n'
+ % (self.topic, item, self.pos, self.total, unit, pct))
+ else:
+ self.ui.debug('%s:%s %d%s\n' % (self.topic, item, self.pos, unit))
def gdinitconfig(ui):
"""helper function to know if a repo should be created as general delta
"""
# experimental config: format.generaldelta
return (ui.configbool('format', 'generaldelta')
- or ui.configbool('format', 'usegeneraldelta')
- or ui.configbool('format', 'sparse-revlog'))
+ or ui.configbool('format', 'usegeneraldelta'))
def gddeltaconfig(ui):
"""helper function to know if incoming delta should be optimised
--- a/mercurial/server.py Wed Jan 09 20:00:35 2019 -0800
+++ b/mercurial/server.py Fri Jan 18 13:28:22 2019 -0500
@@ -155,9 +155,11 @@
def _createcmdservice(ui, repo, opts):
mode = opts['cmdserver']
try:
- return _cmdservicemap[mode](ui, repo, opts)
+ servicefn = _cmdservicemap[mode]
except KeyError:
raise error.Abort(_('unknown mode %s') % mode)
+ commandserver.setuplogging(ui, repo)
+ return servicefn(ui, repo, opts)
def _createhgwebservice(ui, repo, opts):
# this way we can check if something was given in the command-line
--- a/mercurial/setdiscovery.py Wed Jan 09 20:00:35 2019 -0800
+++ b/mercurial/setdiscovery.py Fri Jan 18 13:28:22 2019 -0500
@@ -102,6 +102,8 @@
:headrevs: set of head revisions in local DAG to consider
:revs: set of revs to discover
:size: the maximum size of the sample"""
+ if len(revs) <= size:
+ return list(revs)
sample = set(repo.revs('heads(%ld)', revs))
if len(sample) >= size:
@@ -112,6 +114,8 @@
return sample
def _takefullsample(repo, headrevs, revs, size):
+ if len(revs) <= size:
+ return list(revs)
sample = set(repo.revs('heads(%ld)', revs))
# update from heads
@@ -157,6 +161,72 @@
sample = set(random.sample(sample, desiredlen))
return sample
+class partialdiscovery(object):
+ """an object representing ongoing discovery
+
+ Fed with data from the remote repository, this object keeps track of the
+ current set of changesets in various states:
+
+ - common: revs also known remotely
+ - undecided: revs we don't have information on yet
+ - missing: revs missing remotely
+ (all tracked revisions are known locally)
+ """
+
+ def __init__(self, repo, targetheads):
+ self._repo = repo
+ self._targetheads = targetheads
+ self._common = repo.changelog.incrementalmissingrevs()
+ self._undecided = None
+ self.missing = set()
+
+ def addcommons(self, commons):
+ """registrer nodes known as common"""
+ self._common.addbases(commons)
+ self._common.removeancestorsfrom(self.undecided)
+
+ def addmissings(self, missings):
+ """registrer some nodes as missing"""
+ newmissing = self._repo.revs('%ld::%ld', missings, self.undecided)
+ if newmissing:
+ self.missing.update(newmissing)
+ self.undecided.difference_update(newmissing)
+
+ def addinfo(self, sample):
+ """consume an iterable of (rev, known) tuples"""
+ common = set()
+ missing = set()
+ for rev, known in sample:
+ if known:
+ common.add(rev)
+ else:
+ missing.add(rev)
+ if common:
+ self.addcommons(common)
+ if missing:
+ self.addmissings(missing)
+
+ def hasinfo(self):
+ """return True is we have any clue about the remote state"""
+ return self._common.hasbases()
+
+ def iscomplete(self):
+ """True if all the necessary data have been gathered"""
+ return self._undecided is not None and not self._undecided
+
+ @property
+ def undecided(self):
+ if self._undecided is not None:
+ return self._undecided
+ self._undecided = set(self._common.missingancestors(self._targetheads))
+ return self._undecided
+
+ def commonheads(self):
+ """the heads of the known common set"""
+ # heads(common) == heads(common.bases) since common represents
+ # common.bases and all its ancestors
+ return self._common.basesheads()
+
def findcommonheads(ui, local, remote,
initialsamplesize=100,
fullsamplesize=200,
@@ -223,36 +293,17 @@
# full blown discovery
- # own nodes I know we both know
+ disco = partialdiscovery(local, ownheads)
# treat remote heads (and maybe own heads) as a first implicit sample
# response
- common = cl.incrementalmissingrevs(srvheads)
- commoninsample = set(n for i, n in enumerate(sample) if yesno[i])
- common.addbases(commoninsample)
- # own nodes where I don't know if remote knows them
- undecided = set(common.missingancestors(ownheads))
- # own nodes I know remote lacks
- missing = set()
+ disco.addcommons(srvheads)
+ disco.addinfo(zip(sample, yesno))
full = False
progress = ui.makeprogress(_('searching'), unit=_('queries'))
- while undecided:
-
- if sample:
- missinginsample = [n for i, n in enumerate(sample) if not yesno[i]]
+ while not disco.iscomplete():
- if missing:
- missing.update(local.revs('descendants(%ld) - descendants(%ld)',
- missinginsample, missing))
- else:
- missing.update(local.revs('descendants(%ld)', missinginsample))
-
- undecided.difference_update(missing)
-
- if not undecided:
- break
-
- if full or common.hasbases():
+ if full or disco.hasinfo():
if full:
ui.note(_("sampling from both directions\n"))
else:
@@ -264,15 +315,12 @@
ui.debug("taking quick initial sample\n")
samplefunc = _takequicksample
targetsize = initialsamplesize
- if len(undecided) < targetsize:
- sample = list(undecided)
- else:
- sample = samplefunc(local, ownheads, undecided, targetsize)
+ sample = samplefunc(local, ownheads, disco.undecided, targetsize)
roundtrips += 1
progress.update(roundtrips)
ui.debug("query %i; still undecided: %i, sample size is: %i\n"
- % (roundtrips, len(undecided), len(sample)))
+ % (roundtrips, len(disco.undecided), len(sample)))
# indices between sample and externalized version must match
sample = list(sample)
@@ -283,15 +331,9 @@
full = True
- if sample:
- commoninsample = set(n for i, n in enumerate(sample) if yesno[i])
- common.addbases(commoninsample)
- common.removeancestorsfrom(undecided)
+ disco.addinfo(zip(sample, yesno))
- # heads(common) == heads(common.bases) since common represents common.bases
- # and all its ancestors
- # The presence of nullrev will confuse heads(). So filter it out.
- result = set(local.revs('heads(%ld)', common.bases - {nullrev}))
+ result = disco.commonheads()
elapsed = util.timer() - start
progress.complete()
ui.debug("%d total queries in %.4fs\n" % (roundtrips, elapsed))
--- a/mercurial/sparse.py Wed Jan 09 20:00:35 2019 -0800
+++ b/mercurial/sparse.py Fri Jan 18 13:28:22 2019 -0500
@@ -7,7 +7,6 @@
from __future__ import absolute_import
-import collections
import hashlib
import os
@@ -247,7 +246,7 @@
actions.append((file, None, message))
dropped.append(file)
- typeactions = collections.defaultdict(list)
+ typeactions = mergemod.emptyactions()
typeactions['r'] = actions
mergemod.applyupdates(repo, typeactions, repo[None], repo['.'], False)
@@ -380,7 +379,7 @@
fctx = repo[None][file]
actions.append((file, (fctx.flags(), False), message))
- typeactions = collections.defaultdict(list)
+ typeactions = mergemod.emptyactions()
typeactions['g'] = actions
mergemod.applyupdates(repo, typeactions, repo[None], repo['.'],
False)
@@ -483,11 +482,8 @@
dropped.append(file)
# Apply changes to disk
- typeactions = dict((m, [])
- for m in 'a f g am cd dc r dm dg m e k p pr'.split())
+ typeactions = mergemod.emptyactions()
for f, (m, args, msg) in actions.iteritems():
- if m not in typeactions:
- typeactions[m] = []
typeactions[m].append((f, args, msg))
mergemod.applyupdates(repo, typeactions, repo[None], repo['.'], False)
--- a/mercurial/state.py Wed Jan 09 20:00:35 2019 -0800
+++ b/mercurial/state.py Fri Jan 18 13:28:22 2019 -0500
@@ -13,8 +13,8 @@
The class has methods that can store the data to disk in a file under the
.hg/ directory.
-We store the data on disk in cbor, for which we use the third party cbor library
-to serialize and deserialize data.
+We store the data on disk in the CBOR format.
"""
from __future__ import absolute_import
--- a/mercurial/statichttprepo.py Wed Jan 09 20:00:35 2019 -0800
+++ b/mercurial/statichttprepo.py Fri Jan 18 13:28:22 2019 -0500
@@ -187,7 +187,8 @@
self.requirements = requirements
rootmanifest = manifest.manifestrevlog(self.svfs)
- self.manifestlog = manifest.manifestlog(self.svfs, self, rootmanifest)
+ self.manifestlog = manifest.manifestlog(self.svfs, self, rootmanifest,
+ self.narrowmatch())
self.changelog = changelog.changelog(self.svfs)
self._tags = None
self.nodetagscache = None
--- a/mercurial/statprof.py Wed Jan 09 20:00:35 2019 -0800
+++ b/mercurial/statprof.py Fri Jan 18 13:28:22 2019 -0500
@@ -206,8 +206,10 @@
__slots__ = (u'path', u'lineno', u'function', u'source')
def __init__(self, path, lineno, function):
+ assert isinstance(path, bytes)
self.path = path
self.lineno = lineno
+ assert isinstance(function, bytes)
self.function = function
self.source = None
@@ -236,7 +238,7 @@
lineno = self.lineno - 1
fp = None
try:
- fp = open(self.path)
+ fp = open(self.path, 'rb')
for i, line in enumerate(fp):
if i == lineno:
self.source = line.strip()
@@ -272,8 +274,10 @@
stack = []
while frame:
- stack.append(CodeSite.get(frame.f_code.co_filename, frame.f_lineno,
- frame.f_code.co_name))
+ stack.append(CodeSite.get(
+ pycompat.sysbytes(frame.f_code.co_filename),
+ frame.f_lineno,
+ pycompat.sysbytes(frame.f_code.co_name)))
frame = frame.f_back
return Sample(stack, time)
@@ -370,7 +374,7 @@
file.write("%d\0%s\n" % (time, '\0'.join(sites)))
def load_data(path):
- lines = open(path, 'r').read().splitlines()
+ lines = open(path, 'rb').read().splitlines()
state.accumulated_time = [float(value) for value in lines[0].split()]
state.samples = []
@@ -510,9 +514,9 @@
for stat in stats:
site = stat.site
- sitelabel = '%s:%d:%s' % (pycompat.fsencode(site.filename()),
+ sitelabel = '%s:%d:%s' % (site.filename(),
site.lineno,
- pycompat.sysbytes(site.function))
+ site.function)
fp.write(b'%6.2f %9.2f %9.2f %s\n' % (
stat.selfpercent(), stat.totalseconds(),
stat.selfseconds(), sitelabel))
@@ -530,7 +534,7 @@
grouped = defaultdict(list)
for stat in stats:
- grouped[stat.site.filename() + r":" + stat.site.function].append(stat)
+ grouped[stat.site.filename() + b":" + stat.site.function].append(stat)
# compute sums for each function
functiondata = []
@@ -559,7 +563,7 @@
function[3], # total percent
function[1], # total cum sec
function[2], # total self sec
- pycompat.sysbytes(function[0]))) # file:function
+ function[0])) # file:function
function[4].sort(reverse=True, key=lambda i: i.selfseconds())
for stat in function[4]:
@@ -694,7 +698,7 @@
' %4.1f%% %s %s'
liststring = listpattern % (node.count / root.count * 100,
filename, function)
- codepattern = '%' + str(55 - len(liststring)) + 's %s: %s'
+ codepattern = '%' + ('%d' % (55 - len(liststring))) + 's %d: %s'
codestring = codepattern % ('line', site.lineno, site.getsource(30))
finalstring = liststring + codestring
@@ -775,7 +779,10 @@
stack = []
for frame in sample.stack:
- stack.append((frame.path, frame.lineno, frame.function))
+ stack.append(
+ (pycompat.sysstr(frame.path),
+ frame.lineno,
+ pycompat.sysstr(frame.function)))
samples.append((sample.time, stack))
--- a/mercurial/store.py Wed Jan 09 20:00:35 2019 -0800
+++ b/mercurial/store.py Fri Jan 18 13:28:22 2019 -0500
@@ -24,6 +24,22 @@
parsers = policy.importmod(r'parsers')
+def _matchtrackedpath(path, matcher):
+ """parses a fncache entry and returns whether the entry is tracking a path
+ matched by matcher or not.
+
+ If matcher is None, returns True"""
+
+ if matcher is None:
+ return True
+ path = decodedir(path)
+ if path.startswith('data/'):
+ return matcher(path[len('data/'):-len('.i')])
+ elif path.startswith('meta/'):
+ return matcher.visitdir(path[len('meta/'):-len('/00manifest.i')] or '.')
+
+ raise error.ProgrammingError("cannot decode path %s" % path)
+
# This avoids a collision between a file named foo and a dir named
# foo.i or foo.d
def _encodedir(path):
@@ -417,6 +433,8 @@
a = decodefilename(a)
except KeyError:
a = None
+ if a is not None and not _matchtrackedpath(a, matcher):
+ continue
yield a, b, size
def join(self, f):
@@ -433,6 +451,8 @@
self.vfs = vfs
self.entries = None
self._dirty = False
+ # set of new additions to fncache
+ self.addls = set()
def _load(self):
'''fill the entries from the fncache file'''
@@ -455,23 +475,36 @@
def write(self, tr):
if self._dirty:
assert self.entries is not None
+ self.entries = self.entries | self.addls
+ self.addls = set()
tr.addbackup('fncache')
fp = self.vfs('fncache', mode='wb', atomictemp=True)
if self.entries:
fp.write(encodedir('\n'.join(self.entries) + '\n'))
fp.close()
self._dirty = False
+ if self.addls:
+ # if we have just new entries, let's append them to the fncache
+ tr.addbackup('fncache')
+ fp = self.vfs('fncache', mode='ab', atomictemp=True)
+ if self.addls:
+ fp.write(encodedir('\n'.join(self.addls) + '\n'))
+ fp.close()
+ self.entries = None
+ self.addls = set()
def add(self, fn):
if self.entries is None:
self._load()
if fn not in self.entries:
- self._dirty = True
- self.entries.add(fn)
+ self.addls.add(fn)
def remove(self, fn):
if self.entries is None:
self._load()
+ if fn in self.addls:
+ self.addls.remove(fn)
+ return
try:
self.entries.remove(fn)
self._dirty = True
@@ -479,6 +512,8 @@
pass
def __contains__(self, fn):
+ if fn in self.addls:
+ return True
if self.entries is None:
self._load()
return fn in self.entries
@@ -486,9 +521,9 @@
def __iter__(self):
if self.entries is None:
self._load()
- return iter(self.entries)
+ return iter(self.entries | self.addls)
-class _fncachevfs(vfsmod.abstractvfs, vfsmod.proxyvfs):
+class _fncachevfs(vfsmod.proxyvfs):
def __init__(self, vfs, fnc, encode):
vfsmod.proxyvfs.__init__(self, vfs)
self.fncache = fnc
@@ -542,6 +577,8 @@
def datafiles(self, matcher=None):
for f in sorted(self.fncache):
+ if not _matchtrackedpath(f, matcher):
+ continue
ef = self.encode(f)
try:
yield f, ef, self.getsize(ef)
@@ -560,6 +597,7 @@
def invalidatecaches(self):
self.fncache.entries = None
+ self.fncache.addls = set()
def markremoved(self, fn):
self.fncache.remove(fn)
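
For reference, `_matchtrackedpath` maps fncache entries to matcher queries as
follows (paths invented for illustration):

    # 'data/foo/bar.txt.i'    -> matcher('foo/bar.txt')
    # 'meta/foo/00manifest.i' -> matcher.visitdir('foo')
    # 'meta/00manifest.i'     -> matcher.visitdir('.')
    # anything else           -> ProgrammingError
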
--- a/mercurial/streamclone.py Wed Jan 09 20:00:35 2019 -0800
+++ b/mercurial/streamclone.py Fri Jan 18 13:28:22 2019 -0500
@@ -545,10 +545,6 @@
Returns a 3-tuple of (file count, file size, data iterator).
"""
- # temporarily raise error until we add storage level logic
- if includes or excludes:
- raise error.Abort(_("server does not support narrow stream clones"))
-
with repo.lock():
entries = []
--- a/mercurial/subrepo.py Wed Jan 09 20:00:35 2019 -0800
+++ b/mercurial/subrepo.py Fri Jan 18 13:28:22 2019 -0500
@@ -1810,11 +1810,15 @@
if not opts.get(r'no_backup'):
status = self.status(None)
names = status.modified
+ origvfs = scmutil.getorigvfs(self.ui, self._subparent)
+ if origvfs is None:
+ origvfs = self.wvfs
for name in names:
bakname = scmutil.origpath(self.ui, self._subparent, name)
self.ui.note(_('saving current version of %s as %s\n') %
(name, bakname))
- self.wvfs.rename(name, bakname)
+ name = self.wvfs.join(name)
+ origvfs.rename(name, bakname)
if not opts.get(r'dry_run'):
self.get(substate, overwrite=True)
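
In practice, with a configuration such as (path invented):

    $ cat >> .hg/hgrc <<'EOF'
    [ui]
    origbackuppath = .hg/origbackups
    EOF

backups of modified files in a Mercurial subrepository are now written through
the origbackuppath vfs rather than renamed in place inside the subrepo working
copy.
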
--- a/mercurial/tags.py Wed Jan 09 20:00:35 2019 -0800
+++ b/mercurial/tags.py Fri Jan 18 13:28:22 2019 -0500
@@ -481,8 +481,7 @@
duration = util.timer() - starttime
ui.log('tagscache',
- '%d/%d cache hits/lookups in %0.4f '
- 'seconds\n',
+ '%d/%d cache hits/lookups in %0.4f seconds\n',
fnodescache.hitcount, fnodescache.lookupcount, duration)
return cachefnode
--- a/mercurial/templatefuncs.py Wed Jan 09 20:00:35 2019 -0800
+++ b/mercurial/templatefuncs.py Fri Jan 18 13:28:22 2019 -0500
@@ -20,6 +20,7 @@
error,
minirst,
obsutil,
+ pycompat,
registrar,
revset as revsetmod,
revsetlang,
@@ -559,7 +560,6 @@
if len(args) > 1:
formatargs = [evalfuncarg(context, mapping, a) for a in args[1:]]
revs = query(revsetlang.formatspec(raw, *formatargs))
- revs = list(revs)
else:
cache = context.resource(mapping, 'cache')
revsetcache = cache.setdefault("revsetcache", {})
@@ -567,7 +567,6 @@
revs = revsetcache[raw]
else:
revs = query(raw)
- revs = list(revs)
revsetcache[raw] = revs
return templatekw.showrevslist(context, mapping, "revision", revs)
@@ -583,6 +582,40 @@
return minirst.format(text, style=style, keep=['verbose'])
+@templatefunc('search(pattern, text)')
+def search(context, mapping, args):
+ """Look for the first text matching the regular expression pattern.
+ Groups are accessible as ``{1}``, ``{2}``, ... in %-mapped template."""
+ if len(args) != 2:
+ # i18n: "search" is a keyword
+ raise error.ParseError(_(b'search expects two arguments'))
+
+ pat = evalstring(context, mapping, args[0])
+ src = evalstring(context, mapping, args[1])
+ try:
+ patre = re.compile(pat)
+ except re.error:
+ # i18n: "search" is a keyword
+ raise error.ParseError(_(b'search got an invalid pattern: %s') % pat)
+ # named groups shouldn't shadow *reserved* resource keywords
+ badgroups = (context.knownresourcekeys()
+ & set(pycompat.byteskwargs(patre.groupindex)))
+ if badgroups:
+ raise error.ParseError(
+ # i18n: "search" is a keyword
+ _(b'invalid group %(group)s in search pattern: %(pat)s')
+ % {b'group': b', '.join("'%s'" % g for g in sorted(badgroups)),
+ b'pat': pat})
+
+ match = patre.search(src)
+ if not match:
+ return templateutil.mappingnone()
+
+ lm = {b'0': match.group(0)}
+ lm.update((b'%d' % i, v) for i, v in enumerate(match.groups(), 1))
+ lm.update(pycompat.byteskwargs(match.groupdict()))
+ return templateutil.mappingdict(lm, tmpl=b'{0}')
+
@templatefunc('separate(sep, args...)', argspec='sep *args')
def separate(context, mapping, args):
"""Add a separator between non-empty arguments."""
--- a/mercurial/templatekw.py Wed Jan 09 20:00:35 2019 -0800
+++ b/mercurial/templatekw.py Fri Jan 18 13:28:22 2019 -0500
@@ -153,10 +153,13 @@
return dict(zip([s.split(':', 1)[0] for s in columns.splitlines()],
i18n._(columns).splitlines(True)))
+# basic internal templates
+_changeidtmpl = '{rev}:{node|formatnode}'
+
# default templates internally used for rendering of lists
defaulttempl = {
- 'parent': '{rev}:{node|formatnode} ',
- 'manifest': '{rev}:{node|formatnode}',
+ 'parent': _changeidtmpl + ' ',
+ 'manifest': _changeidtmpl,
'file_copy': '{name} ({source})',
'envvar': '{key}={value}',
'extra': '{key}={value|stringescape}'
@@ -688,17 +691,31 @@
return templateutil.mappinglist(data)
+@templatekeyword('p1', requires={'ctx'})
+def showp1(context, mapping):
+ """Changeset. The changeset's first parent. ``{p1.rev}`` for the revision
+ number, and ``{p1.node}`` for the identification hash."""
+ ctx = context.resource(mapping, 'ctx')
+ return templateutil.mappingdict({'ctx': ctx.p1()}, tmpl=_changeidtmpl)
+
+@templatekeyword('p2', requires={'ctx'})
+def showp2(context, mapping):
+ """Changeset. The changeset's second parent. ``{p2.rev}`` for the revision
+ number, and ``{p2.node}`` for the identification hash."""
+ ctx = context.resource(mapping, 'ctx')
+ return templateutil.mappingdict({'ctx': ctx.p2()}, tmpl=_changeidtmpl)
+
@templatekeyword('p1rev', requires={'ctx'})
def showp1rev(context, mapping):
"""Integer. The repository-local revision number of the changeset's
- first parent, or -1 if the changeset has no parents."""
+ first parent, or -1 if the changeset has no parents. (DEPRECATED)"""
ctx = context.resource(mapping, 'ctx')
return ctx.p1().rev()
@templatekeyword('p2rev', requires={'ctx'})
def showp2rev(context, mapping):
"""Integer. The repository-local revision number of the changeset's
- second parent, or -1 if the changeset has no second parent."""
+ second parent, or -1 if the changeset has no second parent. (DEPRECATED)"""
ctx = context.resource(mapping, 'ctx')
return ctx.p2().rev()
@@ -706,7 +723,7 @@
def showp1node(context, mapping):
"""String. The identification hash of the changeset's first parent,
as a 40 digit hexadecimal string. If the changeset has no parents, all
- digits are 0."""
+ digits are 0. (DEPRECATED)"""
ctx = context.resource(mapping, 'ctx')
return ctx.p1().hex()
@@ -714,7 +731,7 @@
def showp2node(context, mapping):
"""String. The identification hash of the changeset's second
parent, as a 40 digit hexadecimal string. If the changeset has no second
- parent, all digits are 0."""
+ parent, all digits are 0. (DEPRECATED)"""
ctx = context.resource(mapping, 'ctx')
return ctx.p2().hex()
@@ -757,7 +774,10 @@
"""helper to generate a list of revisions in which a mapped template will
be evaluated"""
repo = context.resource(mapping, 'repo')
- f = _showcompatlist(context, mapping, name, ['%d' % r for r in revs])
+ # revs may be a smartset; don't compute it until f() has to be evaluated
+ def f():
+ srevs = ['%d' % r for r in revs]
+ return _showcompatlist(context, mapping, name, srevs)
return _hybrid(f, revs,
lambda x: {name: x, 'ctx': repo[x]},
pycompat.identity, keytype=int)
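
The new keywords render through the shared `_changeidtmpl` and support member
access, per the docstrings above (output shape illustrative):

    $ hg log -r . -T '{p1}\n'                # e.g. '41:abc123def456'
    $ hg log -r . -T '{p1.rev} {p2.rev}\n'   # '-1' when there is no parent
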
--- a/mercurial/templater.py Wed Jan 09 20:00:35 2019 -0800
+++ b/mercurial/templater.py Fri Jan 18 13:28:22 2019 -0500
@@ -49,6 +49,14 @@
represents mappings (i.e. a list of dicts), which may have default
output format.
+mappingdict
+ represents a single mapping (i.e. a dict), which may have default output
+ format.
+
+mappingnone
+ represents None of Optional[mappable], which will be mapped to an empty
+ string by % operation.
+
mappedgenerator
a lazily-evaluated list of byte strings, which is e.g. a result of %
operation.
@@ -370,9 +378,7 @@
if not exp:
raise error.ParseError(_("missing argument"))
t = exp[0]
- if t in curmethods:
- return curmethods[t](exp, context)
- raise error.ParseError(_("unknown method '%s'") % t)
+ return curmethods[t](exp, context)
# template evaluation
@@ -492,6 +498,10 @@
def buildkeyvaluepair(exp, content):
raise error.ParseError(_("can't use a key-value pair in this context"))
+def buildlist(exp, context):
+ raise error.ParseError(_("can't use a list in this context"),
+ hint=_('check place of comma and parens'))
+
# methods to interpret function arguments or inner expressions (e.g. {_(x)})
exprmethods = {
"integer": lambda e, c: (templateutil.runinteger, e[1]),
@@ -504,6 +514,7 @@
"%": buildmap,
"func": buildfunc,
"keyvalue": buildkeyvaluepair,
+ "list": buildlist,
"+": lambda e, c: buildarithmetic(e, c, lambda a, b: a + b),
"-": lambda e, c: buildarithmetic(e, c, lambda a, b: a - b),
"negate": buildnegate,
--- a/mercurial/templateutil.py Wed Jan 09 20:00:35 2019 -0800
+++ b/mercurial/templateutil.py Fri Jan 18 13:28:22 2019 -0500
@@ -472,6 +472,42 @@
def tobool(self, context, mapping):
return bool(self._mappings)
+class mappingdict(mappable, _mappingsequence):
+ """Wrapper for a single template mapping
+
+ This isn't a real sequence: the underlying dict holds a single mapping
+ and won't be iterated, but it shares most of the _mappingsequence
+ functions.
+ """
+
+ def __init__(self, mapping, name=None, tmpl=None):
+ super(mappingdict, self).__init__(name, tmpl)
+ self._mapping = mapping
+
+ def tomap(self, context):
+ return self._mapping
+
+ def tobool(self, context, mapping):
+ # no idea when a template mapping should be considered empty, but
+ # a mapping dict should have at least one item in practice, so always
+ # mark this as non-empty.
+ return True
+
+ def tovalue(self, context, mapping):
+ return super(mappingdict, self).tovalue(context, mapping)[0]
+
+class mappingnone(wrappedvalue):
+ """Wrapper for None, but supports map operation
+
+ This represents None of Optional[mappable]. It's similar to
+ mappinglist([]), but the underlying value is not [] but None.
+ """
+
+ def __init__(self):
+ super(mappingnone, self).__init__(None)
+
+ def itermaps(self, context):
+ return iter([])
+
class mappedgenerator(wrapped):
"""Wrapper for generator of strings which acts as a list
--- a/mercurial/testing/storage.py Wed Jan 09 20:00:35 2019 -0800
+++ b/mercurial/testing/storage.py Fri Jan 18 13:28:22 2019 -0500
@@ -741,7 +741,8 @@
# forceprevious=True forces a delta against the previous revision.
# Special case for initial revision.
- gen = f.emitrevisions([node0], revisiondata=True, deltaprevious=True)
+ gen = f.emitrevisions([node0], revisiondata=True,
+ deltamode=repository.CG_DELTAMODE_PREV)
rev = next(gen)
self.assertEqual(rev.node, node0)
@@ -758,7 +759,7 @@
next(gen)
gen = f.emitrevisions([node0, node2], revisiondata=True,
- deltaprevious=True)
+ deltamode=repository.CG_DELTAMODE_PREV)
rev = next(gen)
self.assertEqual(rev.node, node0)
--- a/mercurial/transaction.py Wed Jan 09 20:00:35 2019 -0800
+++ b/mercurial/transaction.py Fri Jan 18 13:28:22 2019 -0500
@@ -21,6 +21,9 @@
pycompat,
util,
)
+from .utils import (
+ stringutil,
+)
version = 2
@@ -344,9 +347,13 @@
files.append(vfs(name, 'w', atomictemp=True,
checkambig=checkambig))
genfunc(*files)
+ for f in files:
+ f.close()
+ # skip discard() loop since we're sure no open file remains
+ del files[:]
finally:
for f in files:
- f.close()
+ f.discard()
return any
@active
@@ -582,8 +589,10 @@
self._vfsmap, self._entries, self._backupentries,
False, checkambigfiles=self._checkambigfiles)
self._report(_("rollback completed\n"))
- except BaseException:
+ except BaseException as exc:
self._report(_("rollback failed - please run hg recover\n"))
+ self._report(_("(failure reason: %s)\n")
+ % stringutil.forcebytestr(exc))
finally:
self._journal = None
self._releasefn(self, False) # notify failure of transaction
--- a/mercurial/ui.py Wed Jan 09 20:00:35 2019 -0800
+++ b/mercurial/ui.py Fri Jan 18 13:28:22 2019 -0500
@@ -30,6 +30,7 @@
encoding,
error,
formatter,
+ loggingutil,
progress,
pycompat,
rcutil,
@@ -228,10 +229,14 @@
self._uninterruptible = False
if src:
- self.fout = src.fout
- self.ferr = src.ferr
- self.fin = src.fin
+ self._fout = src._fout
+ self._ferr = src._ferr
+ self._fin = src._fin
+ self._fmsg = src._fmsg
+ self._fmsgout = src._fmsgout
+ self._fmsgerr = src._fmsgerr
self._finoutredirected = src._finoutredirected
+ self._loggers = src._loggers.copy()
self.pageractive = src.pageractive
self._disablepager = src._disablepager
self._tweaked = src._tweaked
@@ -253,10 +258,14 @@
self.httppasswordmgrdb = src.httppasswordmgrdb
self._blockedtimes = src._blockedtimes
else:
- self.fout = procutil.stdout
- self.ferr = procutil.stderr
- self.fin = procutil.stdin
+ self._fout = procutil.stdout
+ self._ferr = procutil.stderr
+ self._fin = procutil.stdin
+ self._fmsg = None
+ self._fmsgout = self.fout # configurable
+ self._fmsgerr = self.ferr # configurable
self._finoutredirected = False
+ self._loggers = {}
self.pageractive = False
self._disablepager = False
self._tweaked = False
@@ -339,7 +348,7 @@
(util.timer() - starttime) * 1000
@contextlib.contextmanager
- def uninterruptable(self):
+ def uninterruptible(self):
"""Mark an operation as unsafe.
Most operations on a repository are safe to interrupt, but a
@@ -353,7 +362,7 @@
enabled = self.interactive()
if self._uninterruptible or not enabled:
# if nointerrupt support is turned off, the process isn't
- # interactive, or we're already in an uninterruptable
+ # interactive, or we're already in an uninterruptible
# block, do nothing.
yield
return
@@ -362,7 +371,7 @@
self.warn(
_("press ^C again to terminate immediately (dangerous)\n"))
return True
- with procutil.uninterruptable(warn):
+ with procutil.uninterruptible(warn):
try:
self._uninterruptible = True
yield
@@ -413,7 +422,7 @@
if self.plain():
for k in ('debug', 'fallbackencoding', 'quiet', 'slash',
- 'logtemplate', 'statuscopies', 'style',
+ 'logtemplate', 'message-output', 'statuscopies', 'style',
'traceback', 'verbose'):
if k in cfg['ui']:
del cfg['ui'][k]
@@ -466,6 +475,7 @@
if section in (None, 'ui'):
# update ui options
+ self._fmsgout, self._fmsgerr = _selectmsgdests(self)
self.debugflag = self.configbool('ui', 'debug')
self.verbose = self.debugflag or self.configbool('ui', 'verbose')
self.quiet = not self.debugflag and self.configbool('ui', 'quiet')
@@ -481,6 +491,14 @@
self._trustusers.update(self.configlist('trusted', 'users'))
self._trustgroups.update(self.configlist('trusted', 'groups'))
+ if section in (None, b'devel', b'ui') and self.debugflag:
+ tracked = set()
+ if self.configbool(b'devel', b'debug.extensions'):
+ tracked.add(b'extension')
+ if tracked:
+ logger = loggingutil.fileobjectlogger(self._ferr, tracked)
+ self.setlogger(b'debug', logger)
+
def backupconfig(self, section, item):
return (self._ocfg.backup(section, item),
self._tcfg.backup(section, item),
@@ -881,6 +899,43 @@
def paths(self):
return paths(self)
+ @property
+ def fout(self):
+ return self._fout
+
+ @fout.setter
+ def fout(self, f):
+ self._fout = f
+ self._fmsgout, self._fmsgerr = _selectmsgdests(self)
+
+ @property
+ def ferr(self):
+ return self._ferr
+
+ @ferr.setter
+ def ferr(self, f):
+ self._ferr = f
+ self._fmsgout, self._fmsgerr = _selectmsgdests(self)
+
+ @property
+ def fin(self):
+ return self._fin
+
+ @fin.setter
+ def fin(self, f):
+ self._fin = f
+
+ @property
+ def fmsg(self):
+ """Stream dedicated for status/error messages; may be None if
+ fout/ferr are used"""
+ return self._fmsg
+
+ @fmsg.setter
+ def fmsg(self, f):
+ self._fmsg = f
+ self._fmsgout, self._fmsgerr = _selectmsgdests(self)
+
def pushbuffer(self, error=False, subproc=False, labeled=False):
"""install a buffer to capture standard output of the ui object
@@ -910,6 +965,13 @@
return "".join(self._buffers.pop())
+ def _isbuffered(self, dest):
+ if dest is self._fout:
+ return bool(self._buffers)
+ if dest is self._ferr:
+ return bool(self._bufferstates and self._bufferstates[-1][0])
+ return False
+
def canwritewithoutlabels(self):
'''check if write skips the label'''
if self._buffers and not self._bufferapplylabels:
@@ -937,81 +999,75 @@
"cmdname.type" is recommended. For example, status issues
a label of "status.modified" for modified files.
'''
- if self._buffers:
+ self._write(self._fout, *args, **opts)
+
+ def write_err(self, *args, **opts):
+ self._write(self._ferr, *args, **opts)
+
+ def _write(self, dest, *args, **opts):
+ if self._isbuffered(dest):
if self._bufferapplylabels:
label = opts.get(r'label', '')
self._buffers[-1].extend(self.label(a, label) for a in args)
else:
self._buffers[-1].extend(args)
else:
- self._writenobuf(*args, **opts)
+ self._writenobuf(dest, *args, **opts)
- def _writenobuf(self, *args, **opts):
- if self._colormode == 'win32':
- # windows color printing is its own can of crab, defer to
- # the color module and that is it.
- color.win32print(self, self._write, *args, **opts)
- else:
- msgs = args
- if self._colormode is not None:
- label = opts.get(r'label', '')
- msgs = [self.label(a, label) for a in args]
- self._write(*msgs, **opts)
+ def _writenobuf(self, dest, *args, **opts):
+ self._progclear()
+ msg = b''.join(args)
- def _write(self, *msgs, **opts):
- self._progclear()
# opencode timeblockedsection because this is a critical path
starttime = util.timer()
try:
- self.fout.write(''.join(msgs))
+ if dest is self._ferr and not getattr(self._fout, 'closed', False):
+ self._fout.flush()
+ if getattr(dest, 'structured', False):
+ # channel for machine-readable output with metadata, where
+ # no extra colorization is necessary.
+ dest.write(msg, **opts)
+ elif self._colormode == 'win32':
+ # windows color printing is its own can of crab, defer to
+ # the color module and that is it.
+ color.win32print(self, dest.write, msg, **opts)
+ else:
+ if self._colormode is not None:
+ label = opts.get(r'label', '')
+ msg = self.label(msg, label)
+ dest.write(msg)
+ # stderr may be buffered under win32 when redirected to files,
+ # including stdout.
+ if dest is self._ferr and not getattr(self._ferr, 'closed', False):
+ dest.flush()
except IOError as err:
+ if (dest is self._ferr
+ and err.errno in (errno.EPIPE, errno.EIO, errno.EBADF)):
+ # no way to report the error, so ignore it
+ return
raise error.StdioError(err)
finally:
self._blockedtimes['stdio_blocked'] += \
(util.timer() - starttime) * 1000
- def write_err(self, *args, **opts):
- self._progclear()
- if self._bufferstates and self._bufferstates[-1][0]:
- self.write(*args, **opts)
- elif self._colormode == 'win32':
- # windows color printing is its own can of crab, defer to
- # the color module and that is it.
- color.win32print(self, self._write_err, *args, **opts)
- else:
- msgs = args
- if self._colormode is not None:
- label = opts.get(r'label', '')
- msgs = [self.label(a, label) for a in args]
- self._write_err(*msgs, **opts)
+ def _writemsg(self, dest, *args, **opts):
+ _writemsgwith(self._write, dest, *args, **opts)
- def _write_err(self, *msgs, **opts):
- try:
- with self.timeblockedsection('stdio'):
- if not getattr(self.fout, 'closed', False):
- self.fout.flush()
- for a in msgs:
- self.ferr.write(a)
- # stderr may be buffered under win32 when redirected to files,
- # including stdout.
- if not getattr(self.ferr, 'closed', False):
- self.ferr.flush()
- except IOError as inst:
- if inst.errno not in (errno.EPIPE, errno.EIO, errno.EBADF):
- raise error.StdioError(inst)
+ def _writemsgnobuf(self, dest, *args, **opts):
+ _writemsgwith(self._writenobuf, dest, *args, **opts)
def flush(self):
# opencode timeblockedsection because this is a critical path
starttime = util.timer()
try:
try:
- self.fout.flush()
+ self._fout.flush()
except IOError as err:
if err.errno not in (errno.EPIPE, errno.EIO, errno.EBADF):
raise error.StdioError(err)
finally:
try:
- self.ferr.flush()
+ self._ferr.flush()
except IOError as err:
if err.errno not in (errno.EPIPE, errno.EIO, errno.EBADF):
raise error.StdioError(err)
@@ -1024,6 +1080,39 @@
return False
return procutil.isatty(fh)
+ def protectfinout(self):
+ """Duplicate ui streams and redirect original if they are stdio
+
+ Returns (fin, fout) which point to the original ui fds, but may be
+ copy of them. The returned streams can be considered "owned" in that
+ print(), exec(), etc. never reach to them.
+ """
+ if self._finoutredirected:
+ # if already redirected, protectstdio() would just create another
+ # nullfd pair, which is equivalent to returning self._fin/_fout.
+ return self._fin, self._fout
+ fin, fout = procutil.protectstdio(self._fin, self._fout)
+ self._finoutredirected = (fin, fout) != (self._fin, self._fout)
+ return fin, fout
+
+ def restorefinout(self, fin, fout):
+ """Restore ui streams from possibly duplicated (fin, fout)"""
+ if (fin, fout) == (self._fin, self._fout):
+ return
+ procutil.restorestdio(self._fin, self._fout, fin, fout)
+ # protectfinout() won't create more than one duplicated streams,
+ # so we can just turn the redirection flag off.
+ self._finoutredirected = False
+
+ @contextlib.contextmanager
+ def protectedfinout(self):
+ """Run code block with protected standard streams"""
+ fin, fout = self.protectfinout()
+ try:
+ yield fin, fout
+ finally:
+ self.restorefinout(fin, fout)
+
def disablepager(self):
self._disablepager = True
@@ -1200,7 +1289,11 @@
"chunkselector": [
"text",
"curses",
- ]
+ ],
+ "histedit": [
+ "text",
+ "curses",
+ ],
}
# Feature-specific interface
@@ -1261,7 +1354,7 @@
if i is None:
# some environments replace stdin without implementing isatty
# usually those are non-interactive
- return self._isatty(self.fin)
+ return self._isatty(self._fin)
return i
@@ -1299,7 +1392,7 @@
if i is None:
# some environments replace stdout without implementing isatty
# usually those are non-interactive
- return self._isatty(self.fout)
+ return self._isatty(self._fout)
return i
@@ -1308,9 +1401,9 @@
# because they have to be text streams with *no buffering*. Instead,
# we use rawinput() only if call_readline() will be invoked by
# PyOS_Readline(), so no I/O will be made at Python layer.
- usereadline = (self._isatty(self.fin) and self._isatty(self.fout)
- and procutil.isstdin(self.fin)
- and procutil.isstdout(self.fout))
+ usereadline = (self._isatty(self._fin) and self._isatty(self._fout)
+ and procutil.isstdin(self._fin)
+ and procutil.isstdout(self._fout))
if usereadline:
try:
# magically add command line editing support, where
@@ -1332,9 +1425,9 @@
if pycompat.oslinesep == b'\r\n' and line.endswith(b'\r'):
line = line[:-1]
else:
- self.fout.write(b' ')
- self.fout.flush()
- line = self.fin.readline()
+ self._fout.write(b' ')
+ self._fout.flush()
+ line = self._fin.readline()
if not line:
raise EOFError
line = line.rstrip(pycompat.oslinesep)
@@ -1345,17 +1438,23 @@
"""Prompt user with msg, read response.
If ui is not interactive, the default is returned.
"""
+ return self._prompt(msg, default=default)
+
+ def _prompt(self, msg, **opts):
+ default = opts[r'default']
if not self.interactive():
- self.write(msg, ' ', default or '', "\n")
+ self._writemsg(self._fmsgout, msg, ' ', type='prompt', **opts)
+ self._writemsg(self._fmsgout, default or '', "\n",
+ type='promptecho')
return default
- self._writenobuf(msg, label='ui.prompt')
+ self._writemsgnobuf(self._fmsgout, msg, type='prompt', **opts)
self.flush()
try:
r = self._readline()
if not r:
r = default
if self.configbool('ui', 'promptecho'):
- self.write(r, "\n")
+ self._writemsg(self._fmsgout, r, "\n", type='promptecho')
return r
except EOFError:
raise error.ResponseExpected()
@@ -1402,21 +1501,23 @@
msg, choices = self.extractchoices(prompt)
resps = [r for r, t in choices]
while True:
- r = self.prompt(msg, resps[default])
+ r = self._prompt(msg, default=resps[default], choices=choices)
if r.lower() in resps:
return resps.index(r.lower())
- self.write(_("unrecognized response\n"))
+ # TODO: shouldn't it be a warning?
+ self._writemsg(self._fmsgout, _("unrecognized response\n"))
def getpass(self, prompt=None, default=None):
if not self.interactive():
return default
try:
- self.write_err(self.label(prompt or _('password: '), 'ui.prompt'))
+ self._writemsg(self._fmsgerr, prompt or _('password: '),
+ type='prompt', password=True)
# disable getpass() only if explicitly specified. it's still valid
# to interact with tty even if fin is not a tty.
with self.timeblockedsection('stdio'):
if self.configbool('ui', 'nontty'):
- l = self.fin.readline()
+ l = self._fin.readline()
if not l:
raise EOFError
return l.rstrip('\n')
@@ -1431,24 +1532,21 @@
This adds an output label of "ui.status".
'''
if not self.quiet:
- opts[r'label'] = opts.get(r'label', '') + ' ui.status'
- self.write(*msg, **opts)
+ self._writemsg(self._fmsgout, type='status', *msg, **opts)
def warn(self, *msg, **opts):
'''write warning message to output (stderr)
This adds an output label of "ui.warning".
'''
- opts[r'label'] = opts.get(r'label', '') + ' ui.warning'
- self.write_err(*msg, **opts)
+ self._writemsg(self._fmsgerr, type='warning', *msg, **opts)
def error(self, *msg, **opts):
'''write error message to output (stderr)
This adds an output label of "ui.error".
'''
- opts[r'label'] = opts.get(r'label', '') + ' ui.error'
- self.write_err(*msg, **opts)
+ self._writemsg(self._fmsgerr, type='error', *msg, **opts)
def note(self, *msg, **opts):
'''write note to output (if ui.verbose is True)
@@ -1456,8 +1554,7 @@
This adds an output label of "ui.note".
'''
if self.verbose:
- opts[r'label'] = opts.get(r'label', '') + ' ui.note'
- self.write(*msg, **opts)
+ self._writemsg(self._fmsgout, type='note', *msg, **opts)
def debug(self, *msg, **opts):
'''write debug message to output (if ui.debugflag is True)
@@ -1465,8 +1562,8 @@
This adds an output label of "ui.debug".
'''
if self.debugflag:
- opts[r'label'] = opts.get(r'label', '') + ' ui.debug'
- self.write(*msg, **opts)
+ self._writemsg(self._fmsgout, type='debug', *msg, **opts)
+ self.log(b'debug', b'%s', b''.join(msg))
def edit(self, text, user, extra=None, editform=None, pending=None,
repopath=None, action=None):
@@ -1542,7 +1639,7 @@
# the tail end instead
cmdsuffix = cmd.translate(None, _keepalnum)[-85:]
blockedtag = 'unknown_system_' + cmdsuffix
- out = self.fout
+ out = self._fout
if any(s[1] for s in self._bufferstates):
out = self
with self.timeblockedsection(blockedtag):
@@ -1627,39 +1724,71 @@
All topics should be marked closed by setting pos to None at
termination.
'''
- if self._progbar is not None:
- self._progbar.progress(topic, pos, item=item, unit=unit,
- total=total)
- if pos is None or not self.configbool('progress', 'debug'):
- return
-
- if unit:
- unit = ' ' + unit
- if item:
- item = ' ' + item
-
- if total:
- pct = 100.0 * pos / total
- self.debug('%s:%s %d/%d%s (%4.2f%%)\n'
- % (topic, item, pos, total, unit, pct))
+ self.deprecwarn("use ui.makeprogress() instead of ui.progress()",
+ "5.1")
+ progress = self.makeprogress(topic, unit, total)
+ if pos is not None:
+ progress.update(pos, item=item)
else:
- self.debug('%s:%s %d%s\n' % (topic, item, pos, unit))
+ progress.complete()
def makeprogress(self, topic, unit="", total=None):
- '''exists only so low-level modules won't need to import scmutil'''
- return scmutil.progress(self, topic, unit, total)
+ """Create a progress helper for the specified topic"""
+ if getattr(self._fmsgerr, 'structured', False):
+ # channel for machine-readable output with metadata, just send
+ # raw information
+ # TODO: consider porting some useful information (e.g. estimated
+ # time) from progbar. we might want to support update delay to
+ # reduce the cost of transferring progress messages.
+ def updatebar(topic, pos, item, unit, total):
+ self._fmsgerr.write(None, type=b'progress', topic=topic,
+ pos=pos, item=item, unit=unit, total=total)
+ elif self._progbar is not None:
+ updatebar = self._progbar.progress
+ else:
+ def updatebar(topic, pos, item, unit, total):
+ pass
+ return scmutil.progress(self, updatebar, topic, unit, total)
- def log(self, service, *msg, **opts):
+ def getlogger(self, name):
+ """Returns a logger of the given name; or None if not registered"""
+ return self._loggers.get(name)
+
+ def setlogger(self, name, logger):
+ """Install logger which can be identified later by the given name
+
+ More than one logger can be registered. Use an extension or module
+ name to uniquely identify the logger instance.
+ """
+ self._loggers[name] = logger
+
+ def log(self, event, msgfmt, *msgargs, **opts):
'''hook for logging facility extensions
- service should be a readily-identifiable subsystem, which will
+ event should be a readily-identifiable subsystem, which will
allow filtering.
- *msg should be a newline-terminated format string to log, and
- then any values to %-format into that format string.
+ msgfmt should be a newline-terminated format string to log, and
+ *msgargs are %-formatted into it.
**opts currently has no defined meanings.
'''
+ if not self._loggers:
+ return
+ activeloggers = [l for l in self._loggers.itervalues()
+ if l.tracked(event)]
+ if not activeloggers:
+ return
+ msg = msgfmt % msgargs
+ opts = pycompat.byteskwargs(opts)
+ # guard against recursion from e.g. ui.debug()
+ registeredloggers = self._loggers
+ self._loggers = {}
+ try:
+ for logger in activeloggers:
+ logger.log(self, event, msg, opts)
+ finally:
+ self._loggers = registeredloggers
def label(self, msg, label):
'''style msg based on supplied label
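
ui.log() now dispatches to registered logger objects instead of being a bare hook. A sketch of a logger satisfying the interface the new code relies on, namely tracked() and log(); the class name `printlogger` is hypothetical:

    class printlogger(object):
        def tracked(self, event):
            # only interested in one event type
            return event == b'develwarn'

        def log(self, ui, event, msg, opts):
            ui.ferr.write(b'[%s] %s' % (event, msg))

    ui.setlogger(b'myext', printlogger())
    ui.log(b'develwarn', b'%s at:\n', b'something suspicious')
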
@@ -1687,7 +1816,7 @@
msg = 'devel-warn: ' + msg
stacklevel += 1 # get in develwarn
if self.tracebackflag:
- util.debugstacktrace(msg, stacklevel, self.ferr, self.fout)
+ util.debugstacktrace(msg, stacklevel, self._ferr, self._fout)
self.log('develwarn', '%s at:\n%s' %
(msg, ''.join(util.getstackframes(stacklevel))))
else:
@@ -1920,3 +2049,29 @@
def haveprogbar():
return _progresssingleton is not None
+
+def _selectmsgdests(ui):
+ name = ui.config(b'ui', b'message-output')
+ if name == b'channel':
+ if ui.fmsg:
+ return ui.fmsg, ui.fmsg
+ else:
+ # fall back to ferr if channel isn't ready so that status/error
+ # messages can be printed
+ return ui.ferr, ui.ferr
+ if name == b'stdio':
+ return ui.fout, ui.ferr
+ if name == b'stderr':
+ return ui.ferr, ui.ferr
+ raise error.Abort(b'invalid ui.message-output destination: %s' % name)
+
+def _writemsgwith(write, dest, *args, **opts):
+ """Write ui message with the given ui._write*() function
+
+ The specified message type is translated to a 'ui.<type>' label if the dest
+ isn't a structured channel, so that the message will be colorized.
+ """
+ # TODO: maybe change 'type' to a mandatory option
+ if r'type' in opts and not getattr(dest, 'structured', False):
+ opts[r'label'] = opts.get(r'label', '') + ' ui.%s' % opts.pop(r'type')
+ write(dest, *args, **opts)
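
Taken together, a warn() call now flows through _writemsg() to the destination picked by _selectmsgdests(). A toy trace of the label translation, with `fakedest` and `write` as illustration-only stand-ins for a ui stream and a ui._write*() method:

    class fakedest(object):
        structured = False    # a plain stream, not a message channel

    def write(dest, *args, **opts):
        print(args, opts)

    # ui.warn(b'no match\n') becomes roughly:
    _writemsgwith(write, fakedest(), b'no match\n', type='warning')
    # prints: (b'no match\n',) {'label': ' ui.warning'}
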
--- a/mercurial/unionrepo.py Wed Jan 09 20:00:35 2019 -0800
+++ b/mercurial/unionrepo.py Fri Jan 18 13:28:22 2019 -0500
@@ -212,7 +212,8 @@
def manifestlog(self):
rootstore = unionmanifest(self.svfs, self.repo2.svfs,
self.unfiltered()._clrev)
- return manifest.manifestlog(self.svfs, self, rootstore)
+ return manifest.manifestlog(self.svfs, self, rootstore,
+ self.narrowmatch())
def _clrev(self, rev2):
"""map from repo2 changelog rev to temporary rev in self.changelog"""
--- a/mercurial/upgrade.py Wed Jan 09 20:00:35 2019 -0800
+++ b/mercurial/upgrade.py Fri Jan 18 13:28:22 2019 -0500
@@ -142,7 +142,7 @@
return self.name == other.name
def __ne__(self, other):
- return not self == other
+ return not (self == other)
def __hash__(self):
return hash(self.name)
@@ -269,7 +269,7 @@
_requirement = localrepo.SPARSEREVLOG_REQUIREMENT
- default = False
+ default = True
description = _('in order to limit disk reading and memory usage on older '
'versions, the span of a delta chain from its root to its
@@ -348,6 +348,19 @@
return deficiencies
+# search without '-' to support older form on newer client.
+#
+# We don't enforce backward compatibility for debug commands, so this
+# might eventually be dropped. However, having to use two different
+# forms in scripts when comparing results is annoying enough to add
+# backward compatibility for a while.
+legacy_opts_map = {
+ 'redeltaparent': 're-delta-parent',
+ 'redeltamultibase': 're-delta-multibase',
+ 'redeltaall': 're-delta-all',
+ 'redeltafulladd': 're-delta-fulladd',
+}
+
def findoptimizations(repo):
"""Determine optimisation that could be used during upgrade"""
# These are unconditionally added. There is logic later that figures out
@@ -355,7 +368,7 @@
optimizations = []
optimizations.append(improvement(
- name='redeltaparent',
+ name='re-delta-parent',
type=optimisation,
description=_('deltas within internal storage will be recalculated to '
'choose an optimal base revision where this was not '
@@ -368,7 +381,7 @@
'base revision if needed')))
optimizations.append(improvement(
- name='redeltamultibase',
+ name='re-delta-multibase',
type=optimisation,
description=_('deltas within internal storage will be recalculated '
'against multiple base revision and the smallest '
@@ -385,7 +398,7 @@
'significantly')))
optimizations.append(improvement(
- name='redeltaall',
+ name='re-delta-all',
type=optimisation,
description=_('deltas within internal storage will always be '
'recalculated without reusing prior deltas; this will '
@@ -396,12 +409,12 @@
'execution time')))
optimizations.append(improvement(
- name='redeltafulladd',
+ name='re-delta-fulladd',
type=optimisation,
description=_('every revision will be re-added as if it was new '
'content. It will go through the full storage '
'mechanism giving extensions a chance to process it '
- '(eg. lfs). This is similar to "redeltaall" but even '
+ '(eg. lfs). This is similar to "re-delta-all" but even '
'slower since more logic is involved.'),
upgrademessage=_('each revision will be added as new content to the '
'internal storage; this will likely drastically slow '
@@ -456,7 +469,7 @@
#reverse of "/".join(("data", path + ".i"))
return filelog.filelog(repo.svfs, path[5:-2])
-def _copyrevlogs(ui, srcrepo, dstrepo, tr, deltareuse, deltabothparents):
+def _copyrevlogs(ui, srcrepo, dstrepo, tr, deltareuse, forcedeltabothparents):
"""Copy revlogs between 2 repos."""
revcount = 0
srcsize = 0
@@ -578,7 +591,7 @@
ui.note(_('cloning %d revisions from %s\n') % (len(oldrl), unencoded))
oldrl.clone(tr, newrl, addrevisioncb=oncopiedrevision,
deltareuse=deltareuse,
- deltabothparents=deltabothparents)
+ forcedeltabothparents=forcedeltabothparents)
info = newrl.storageinfo(storedsize=True)
datasize = info['storedsize'] or 0
@@ -654,20 +667,20 @@
ui.write(_('(it is safe to interrupt this process any time before '
'data migration completes)\n'))
- if 'redeltaall' in actions:
+ if 're-delta-all' in actions:
deltareuse = revlog.revlog.DELTAREUSENEVER
- elif 'redeltaparent' in actions:
+ elif 're-delta-parent' in actions:
deltareuse = revlog.revlog.DELTAREUSESAMEREVS
- elif 'redeltamultibase' in actions:
+ elif 're-delta-multibase' in actions:
deltareuse = revlog.revlog.DELTAREUSESAMEREVS
- elif 'redeltafulladd' in actions:
+ elif 're-delta-fulladd' in actions:
deltareuse = revlog.revlog.DELTAREUSEFULLADD
else:
deltareuse = revlog.revlog.DELTAREUSEALWAYS
with dstrepo.transaction('upgrade') as tr:
_copyrevlogs(ui, srcrepo, dstrepo, tr, deltareuse,
- 'redeltamultibase' in actions)
+ 're-delta-multibase' in actions)
# Now copy other files in the store directory.
# The sorted() makes execution deterministic.
@@ -729,9 +742,11 @@
return backuppath
-def upgraderepo(ui, repo, run=False, optimize=None):
+def upgraderepo(ui, repo, run=False, optimize=None, backup=True):
"""Upgrade a repository in place."""
- optimize = set(optimize or [])
+ if optimize is None:
+ optimize = []
+ optimize = set(legacy_opts_map.get(o, o) for o in optimize)
repo = repo.unfiltered()
# Ensure the repository can be upgraded.
@@ -884,6 +899,10 @@
with dstrepo.wlock(), dstrepo.lock():
backuppath = _upgraderepo(ui, repo, dstrepo, newreqs,
upgradeactions)
+ if not (backup or backuppath is None):
+ ui.write(_('removing old repository content%s\n') % backuppath)
+ repo.vfs.rmtree(backuppath, forcibly=True)
+ backuppath = None
finally:
ui.write(_('removing temporary repository %s\n') % tmppath)
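
The optimization names now use dashes, with the legacy spellings still accepted via legacy_opts_map. A quick sketch of the normalization upgraderepo() performs (the `normalize` helper name is illustrative):

    legacy_opts_map = {
        'redeltaparent': 're-delta-parent',
        'redeltamultibase': 're-delta-multibase',
        'redeltaall': 're-delta-all',
        'redeltafulladd': 're-delta-fulladd',
    }

    def normalize(optimize):
        # old and new spellings may be mixed on the command line; they
        # all collapse to the new dashed form before comparison
        return set(legacy_opts_map.get(o, o) for o in optimize or [])

    assert normalize(['redeltaall', 're-delta-parent']) == {
        're-delta-all', 're-delta-parent'}
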
--- a/mercurial/util.py Wed Jan 09 20:00:35 2019 -0800
+++ b/mercurial/util.py Fri Jan 18 13:28:22 2019 -0500
@@ -1320,7 +1320,16 @@
self.insert(k, v)
def __delitem__(self, k):
- node = self._cache.pop(k)
+ self.pop(k)
+
+ def pop(self, k, default=_notset):
+ try:
+ node = self._cache.pop(k)
+ except KeyError:
+ if default is _notset:
+ raise
+ return default
+ value = node.value
self.totalcost -= node.cost
node.markempty()
@@ -1329,6 +1338,8 @@
self._movetohead(node)
self._head = node.next
+ return value
+
# Additional dict methods.
def get(self, k, default=None):
@@ -1337,6 +1348,20 @@
except KeyError:
return default
+ def peek(self, k, default=_notset):
+ """Get the specified item without moving it to the head
+
+ Unlike get(), this doesn't mutate the internal state. But be aware
+ that this doesn't mean peek() is thread-safe.
+ """
+ try:
+ node = self._cache[k]
+ return node.value
+ except KeyError:
+ if default is _notset:
+ raise
+ return default
+
def clear(self):
n = self._head
while n.key is not _notset:
@@ -1535,6 +1560,7 @@
def clearcachedproperty(obj, prop):
'''clear a cached property value, if one has been set'''
+ prop = pycompat.sysstr(prop)
if prop in obj.__dict__:
del obj.__dict__[prop]
@@ -2019,7 +2045,7 @@
function if needed.'''
return path.split(pycompat.ossep)
-def mktempcopy(name, emptyok=False, createmode=None):
+def mktempcopy(name, emptyok=False, createmode=None, enforcewritable=False):
"""Create a temporary file with the same contents from name
The permission bits are copied from the original file.
@@ -2035,7 +2061,8 @@
# Temporary files are created with mode 0600, which is usually not
# what we want. If the original file already exists, just copy
# its mode. Otherwise, manually obey umask.
- copymode(name, temp, createmode)
+ copymode(name, temp, createmode, enforcewritable)
+
if emptyok:
return temp
try:
@@ -2178,7 +2205,9 @@
def __init__(self, name, mode='w+b', createmode=None, checkambig=False):
self.__name = name # permanent name
self._tempname = mktempcopy(name, emptyok=('w' in mode),
- createmode=createmode)
+ createmode=createmode,
+ enforcewritable=('w' in mode))
+
self._fp = posixfile(self._tempname, mode)
self._checkambig = checkambig
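
lrucachedict grows pop() (with an optional default) and peek() alongside get(). The intended semantics, as a small usage sketch:

    from mercurial import util

    d = util.lrucachedict(4)
    d['a'] = 'va'
    d['b'] = 'vb'
    assert d.peek('a') == 'va'       # unlike get(), 'a' is not moved to head
    assert d.pop('a') == 'va'        # removes the node, returns its value
    assert d.pop('a', None) is None  # default suppresses the KeyError
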
--- a/mercurial/utils/procutil.py Wed Jan 09 20:00:35 2019 -0800
+++ b/mercurial/utils/procutil.py Fri Jan 18 13:28:22 2019 -0500
@@ -10,6 +10,7 @@
from __future__ import absolute_import
import contextlib
+import errno
import imp
import io
import os
@@ -240,7 +241,7 @@
_sethgexecutable(encoding.environ['EXECUTABLEPATH'])
else:
_sethgexecutable(pycompat.sysexecutable)
- elif (os.path.basename(
+ elif (not pycompat.iswindows and os.path.basename(
pycompat.fsencode(getattr(mainmod, '__file__', ''))) == 'hg'):
_sethgexecutable(pycompat.fsencode(mainmod.__file__))
else:
@@ -298,15 +299,6 @@
os.dup2(f.fileno(), uif.fileno())
f.close()
-@contextlib.contextmanager
-def protectedstdio(uin, uout):
- """Run code block with protected standard streams"""
- fin, fout = protectstdio(uin, uout)
- try:
- yield fin, fout
- finally:
- restorestdio(uin, uout, fin, fout)
-
def shellenviron(environ=None):
"""return environ with optional override, useful for shelling out"""
def py2shell(val):
@@ -436,7 +428,7 @@
signal.signal(signal.SIGCHLD, prevhandler)
@contextlib.contextmanager
-def uninterruptable(warn):
+def uninterruptible(warn):
"""Inhibit SIGINT handling on a region of code.
Note that if this is called in a non-main thread, it turns into a no-op.
@@ -467,3 +459,79 @@
signal.signal(signal.SIGINT, oldsiginthandler[0])
if shouldbail:
raise KeyboardInterrupt
+
+if pycompat.iswindows:
+ # no fork on Windows, but we can create a detached process
+ # https://msdn.microsoft.com/en-us/library/windows/desktop/ms684863.aspx
+ # No stdlib constant exists for this value
+ DETACHED_PROCESS = 0x00000008
+ # The following creation flags might create a console GUI window.
+ # Using subprocess.CREATE_NEW_CONSOLE might help.
+ # See https://phab.mercurial-scm.org/D1701 for discussion
+ _creationflags = DETACHED_PROCESS | subprocess.CREATE_NEW_PROCESS_GROUP
+
+ def runbgcommand(script, env, shell=False, stdout=None, stderr=None):
+ '''Spawn a command without waiting for it to finish.'''
+ # we can't use close_fds *and* redirect stdin. I'm not sure that we
+ # need to, because the detached process has no console connection.
+ subprocess.Popen(
+ tonativestr(script),
+ shell=shell, env=tonativeenv(env), close_fds=True,
+ creationflags=_creationflags, stdout=stdout,
+ stderr=stderr)
+else:
+ def runbgcommand(cmd, env, shell=False, stdout=None, stderr=None):
+ '''Spawn a command without waiting for it to finish.'''
+ # double-fork to completely detach from the parent process
+ # based on http://code.activestate.com/recipes/278731
+ pid = os.fork()
+ if pid:
+ # Parent process
+ (_pid, status) = os.waitpid(pid, 0)
+ if os.WIFEXITED(status):
+ returncode = os.WEXITSTATUS(status)
+ else:
+ returncode = -os.WTERMSIG(status)
+ if returncode != 0:
+ # The child process's return code is 0 on success, an errno
+ # value on failure, or 255 if we don't have a valid errno
+ # value.
+ #
+ # (It would be slightly nicer to return the full exception info
+ # over a pipe as the subprocess module does. For now it
+ # doesn't seem worth adding that complexity here, though.)
+ if returncode == 255:
+ returncode = errno.EINVAL
+ raise OSError(returncode, 'error running %r: %s' %
+ (cmd, os.strerror(returncode)))
+ return
+
+ returncode = 255
+ try:
+ # Start a new session
+ os.setsid()
+
+ stdin = open(os.devnull, 'r')
+ if stdout is None:
+ stdout = open(os.devnull, 'w')
+ if stderr is None:
+ stderr = open(os.devnull, 'w')
+
+ # connect stdin to devnull to make sure the subprocess can't
+ # muck up that stream for mercurial.
+ subprocess.Popen(
+ cmd, shell=shell, env=env, close_fds=True,
+ stdin=stdin, stdout=stdout, stderr=stderr)
+ returncode = 0
+ except EnvironmentError as ex:
+ returncode = (ex.errno & 0xff)
+ if returncode == 0:
+ # This shouldn't happen, but just in case make sure the
+ # return code is never 0 here.
+ returncode = 255
+ except Exception:
+ returncode = 255
+ finally:
+ # mission accomplished, this child needs to exit and not
+ # continue the hg process here.
+ os._exit(returncode)
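
On POSIX the helper double-forks: the intermediate child reports spawn failures back through its exit status (0 on success, an errno value, or 255 when no valid errno is available), which the parent converts into an OSError. A minimal call sketch (the command itself is arbitrary):

    from mercurial.utils import procutil

    # returns as soon as the command has been spawned (or failed to
    # spawn); a spawn failure surfaces as OSError raised in the parent
    procutil.runbgcommand(['sleep', '5'], procutil.shellenviron())
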
--- a/mercurial/utils/storageutil.py Wed Jan 09 20:00:35 2019 -0800
+++ b/mercurial/utils/storageutil.py Fri Jan 18 13:28:22 2019 -0500
@@ -22,6 +22,7 @@
error,
mdiff,
pycompat,
+ repository,
)
_nullhash = hashlib.sha1(nullid)
@@ -269,9 +270,8 @@
def emitrevisions(store, nodes, nodesorder, resultcls, deltaparentfn=None,
candeltafn=None, rawsizefn=None, revdifffn=None, flagsfn=None,
- sendfulltext=False,
- revisiondata=False, assumehaveparentrevisions=False,
- deltaprevious=False):
+ deltamode=repository.CG_DELTAMODE_STD,
+ revisiondata=False, assumehaveparentrevisions=False):
"""Generic implementation of ifiledata.emitrevisions().
Emitting revision data is subtly complex. This function attempts to
@@ -322,14 +322,17 @@
Callable receiving a revision number and returns the integer flags
value for it. If not defined, flags value will be 0.
- ``sendfulltext``
+ ``deltamode``
+ constraint on the deltas to be sent:
+ * CG_DELTAMODE_STD - normal mode, try to reuse storage deltas,
+ * CG_DELTAMODE_PREV - only delta against "prev",
+ * CG_DELTAMODE_P1 - only delta against the first parent,
+ * CG_DELTAMODE_FULL - only issue full snapshot.
+
Whether to send fulltext revisions instead of deltas, if allowed.
``nodesorder``
``revisiondata``
``assumehaveparentrevisions``
- ``deltaprevious``
- See ``ifiledata.emitrevisions()`` interface documentation.
"""
fnode = store.node
@@ -345,7 +348,7 @@
prevrev = None
- if deltaprevious or assumehaveparentrevisions:
+ if deltamode == repository.CG_DELTAMODE_PREV or assumehaveparentrevisions:
prevrev = store.parentrevs(revs[0])[0]
# Set of revs available to delta against.
@@ -364,12 +367,15 @@
deltaparentrev = nullrev
# Forced delta against previous mode.
- if deltaprevious:
+ if deltamode == repository.CG_DELTAMODE_PREV:
baserev = prevrev
# We're instructed to send fulltext. Honor that.
- elif sendfulltext:
+ elif deltamode == repository.CG_DELTAMODE_FULL:
baserev = nullrev
+ # We're instructed to use p1. Honor that.
+ elif deltamode == repository.CG_DELTAMODE_P1:
+ baserev = p1rev
# There is a delta in storage. We try to use that because it
# amounts to effectively copying data from storage and is
@@ -427,7 +433,8 @@
baserevisionsize = len(store.revision(baserev,
raw=True))
- elif baserev == nullrev and not deltaprevious:
+ elif (baserev == nullrev
+ and deltamode != repository.CG_DELTAMODE_PREV):
revision = store.revision(node, raw=True)
available.add(rev)
else:
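
The old sendfulltext/deltaprevious booleans collapse into the single deltamode argument. A toy model of the per-revision base choice, condensing the logic in emitrevisions() above (`choosebase` is a hypothetical helper; the constants are real, from mercurial/repository.py):

    from mercurial import repository

    def choosebase(deltamode, prevrev, p1rev, storagebase, nullrev=-1):
        if deltamode == repository.CG_DELTAMODE_PREV:
            return prevrev      # forced delta against "prev"
        if deltamode == repository.CG_DELTAMODE_FULL:
            return nullrev      # full snapshot
        if deltamode == repository.CG_DELTAMODE_P1:
            return p1rev        # forced delta against the first parent
        return storagebase      # CG_DELTAMODE_STD: reuse storage delta
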
--- a/mercurial/utils/stringutil.py Wed Jan 09 20:00:35 2019 -0800
+++ b/mercurial/utils/stringutil.py Fri Jan 18 13:28:22 2019 -0500
@@ -28,6 +28,7 @@
# which was part of Python 3.7.
_respecial = pycompat.bytestr(b'()[]{}?*+-|^$\\.&~# \t\n\r\v\f')
_regexescapemap = {ord(i): (b'\\' + i).decode('latin1') for i in _respecial}
+regexbytesescapemap = {i: (b'\\' + i) for i in _respecial}
def reescape(pat):
"""Drop-in replacement for re.escape."""
--- a/mercurial/vfs.py Wed Jan 09 20:00:35 2019 -0800
+++ b/mercurial/vfs.py Fri Jan 18 13:28:22 2019 -0500
@@ -46,6 +46,9 @@
'''Prevent instantiation; don't call this from subclasses.'''
raise NotImplementedError('attempted instantiating ' + str(type(self)))
+ def _auditpath(self, path, mode):
+ raise NotImplementedError
+
def tryread(self, path):
'''gracefully return an empty string for missing files'''
try:
@@ -196,6 +199,7 @@
checkambig=True only in limited cases (see also issue5418 and
issue5584 for detail).
"""
+ self._auditpath(dst, 'w')
srcpath = self.join(src)
dstpath = self.join(dst)
oldstat = checkambig and util.filestat.frompath(dstpath)
@@ -337,13 +341,24 @@
return
os.chmod(name, self.createmode & 0o666)
+ def _auditpath(self, path, mode):
+ if self._audit:
+ if os.path.isabs(path) and path.startswith(self.base):
+ path = os.path.relpath(path, self.base)
+ r = util.checkosfilename(path)
+ if r:
+ raise error.Abort("%s: %r" % (r, path))
+ self.audit(path, mode=mode)
+
def __call__(self, path, mode="r", atomictemp=False, notindexed=False,
- backgroundclose=False, checkambig=False, auditpath=True):
+ backgroundclose=False, checkambig=False, auditpath=True,
+ makeparentdirs=True):
'''Open ``path`` file, which is relative to vfs root.
- Newly created directories are marked as "not to be indexed by
- the content indexing service", if ``notindexed`` is specified
- for "write" mode access.
+ By default, parent directories are created as needed. Newly created
+ directories are marked as "not to be indexed by the content indexing
+ service", if ``notindexed`` is specified for "write" mode access.
+ Set ``makeparentdirs=False`` to not create directories implicitly.
If ``backgroundclose`` is passed, the file may be closed asynchronously.
It can only be used if the ``self.backgroundclosing()`` context manager
@@ -369,11 +384,7 @@
cases (see also issue5418 and issue5584 for detail).
'''
if auditpath:
- if self._audit:
- r = util.checkosfilename(path)
- if r:
- raise error.Abort("%s: %r" % (r, path))
- self.audit(path, mode=mode)
+ self._auditpath(path, mode)
f = self.join(path)
if "b" not in mode:
@@ -386,7 +397,8 @@
# to a directory. Let the posixfile() call below raise IOError.
if basename:
if atomictemp:
- util.makedirs(dirname, self.createmode, notindexed)
+ if makeparentdirs:
+ util.makedirs(dirname, self.createmode, notindexed)
return util.atomictempfile(f, mode, self.createmode,
checkambig=checkambig)
try:
@@ -404,7 +416,8 @@
if e.errno != errno.ENOENT:
raise
nlink = 0
- util.makedirs(dirname, self.createmode, notindexed)
+ if makeparentdirs:
+ util.makedirs(dirname, self.createmode, notindexed)
if nlink > 0:
if self._trustnlink is None:
self._trustnlink = nlink > 1 or util.checknlink(f)
@@ -456,10 +469,13 @@
opener = vfs
-class proxyvfs(object):
+class proxyvfs(abstractvfs):
def __init__(self, vfs):
self.vfs = vfs
+ def _auditpath(self, path, mode):
+ return self.vfs._auditpath(path, mode)
+
@property
def options(self):
return self.vfs.options
@@ -468,7 +484,7 @@
def options(self, value):
self.vfs.options = value
-class filtervfs(abstractvfs, proxyvfs):
+class filtervfs(proxyvfs, abstractvfs):
'''Wrapper vfs for filtering filenames with a function.'''
def __init__(self, vfs, filter):
@@ -486,7 +502,7 @@
filteropener = filtervfs
-class readonlyvfs(abstractvfs, proxyvfs):
+class readonlyvfs(proxyvfs):
'''Wrapper vfs preventing any writing.'''
def __init__(self, vfs):
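
Callers that already know the parent directory exists can now skip the implicit makedirs. A hypothetical call site (`repo` and the path are placeholders):

    # write a file without creating missing parent directories; the open
    # fails with ENOENT instead of silently materializing the path
    with repo.svfs(b'cache/somefile', b'w', makeparentdirs=False) as fp:
        fp.write(data)
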
--- a/mercurial/win32.py Wed Jan 09 20:00:35 2019 -0800
+++ b/mercurial/win32.py Fri Jan 18 13:28:22 2019 -0500
@@ -588,6 +588,9 @@
if not res:
raise ctypes.WinError()
+ _kernel32.CloseHandle(pi.hProcess)
+ _kernel32.CloseHandle(pi.hThread)
+
return pi.dwProcessId
def unlink(f):
--- a/mercurial/windows.py Wed Jan 09 20:00:35 2019 -0800
+++ b/mercurial/windows.py Fri Jan 18 13:28:22 2019 -0500
@@ -248,7 +248,7 @@
def setflags(f, l, x):
pass
-def copymode(src, dst, mode=None):
+def copymode(src, dst, mode=None, enforcewritable=False):
pass
def checkexec(path):
--- a/mercurial/wireprotoserver.py Wed Jan 09 20:00:35 2019 -0800
+++ b/mercurial/wireprotoserver.py Fri Jan 18 13:28:22 2019 -0500
@@ -24,7 +24,6 @@
from .utils import (
cborutil,
interfaceutil,
- procutil,
)
stringio = util.stringio
@@ -782,9 +781,7 @@
def __init__(self, ui, repo, logfh=None):
self._ui = ui
self._repo = repo
- self._fin, self._fout = procutil.protectstdio(ui.fin, ui.fout)
- # TODO: manage the redirection flag internally by ui
- ui._finoutredirected = (self._fin, self._fout) != (ui.fin, ui.fout)
+ self._fin, self._fout = ui.protectfinout()
# Log write I/O to stdout and stderr if configured.
if logfh:
@@ -795,8 +792,7 @@
def serve_forever(self):
self.serveuntil(threading.Event())
- procutil.restorestdio(self._ui.fin, self._ui.fout,
- self._fin, self._fout)
+ self._ui.restorefinout(self._fin, self._fout)
sys.exit(0)
def serveuntil(self, ev):
--- a/mercurial/wireprototypes.py Wed Jan 09 20:00:35 2019 -0800
+++ b/mercurial/wireprototypes.py Fri Jan 18 13:28:22 2019 -0500
@@ -162,6 +162,8 @@
'cg': 'boolean',
'cbattempted': 'boolean',
'stream': 'boolean',
+ 'includepats': 'csv',
+ 'excludepats': 'csv',
}
class baseprotocolhandler(interfaceutil.Interface):
--- a/mercurial/wireprotov1peer.py Wed Jan 09 20:00:35 2019 -0800
+++ b/mercurial/wireprotov1peer.py Fri Jan 18 13:28:22 2019 -0500
@@ -240,13 +240,16 @@
# Encoded arguments and future holding remote result.
try:
- encodedargs, fremote = next(batchable)
+ encargsorres, fremote = next(batchable)
except Exception:
pycompat.future_set_exception_info(f, sys.exc_info()[1:])
return
- requests.append((command, encodedargs))
- states.append((command, f, batchable, fremote))
+ if not fremote:
+ f.set_result(encargsorres)
+ else:
+ requests.append((command, encargsorres))
+ states.append((command, f, batchable, fremote))
if not requests:
return
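
A @batchable method can now answer locally by yielding a final result with a falsy future; the batcher resolves the caller's future immediately instead of queuing a request. A hedged sketch of such a method (the `lookup` name and `_lookupcache` attribute are hypothetical):

    @batchable
    def lookup(self, key):
        cached = self._lookupcache.get(key)
        if cached is not None:
            yield cached, None       # final result, no remote round trip
            return
        f = future()
        yield {'key': key}, f        # encoded args plus remote future
        yield f.value
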
--- a/mercurial/wireprotov2server.py Wed Jan 09 20:00:35 2019 -0800
+++ b/mercurial/wireprotov2server.py Fri Jan 18 13:28:22 2019 -0500
@@ -823,7 +823,7 @@
'%s key not present in changesetexplicitdepth revision '
'specifier', (key,))
- for rev in repo.revs(b'ancestors(%ln, %d)', spec[b'nodes'],
+ for rev in repo.revs(b'ancestors(%ln, %s)', spec[b'nodes'],
spec[b'depth'] - 1):
node = cl.node(rev)
@@ -984,9 +984,7 @@
return fl
-def emitfilerevisions(repo, path, revisions, fields):
- clnode = repo.changelog.node
-
+def emitfilerevisions(repo, path, revisions, linknodes, fields):
for revision in revisions:
d = {
b'node': revision.node,
@@ -996,13 +994,7 @@
d[b'parents'] = [revision.p1node, revision.p2node]
if b'linknode' in fields:
- # TODO by creating the filectx against a specific file revision
- # instead of changeset, linkrev() is always used. This is wrong for
- # cases where linkrev() may refer to a hidden changeset. We need an
- # API for performing linkrev adjustment that takes this into
- # account.
- fctx = repo.filectx(path, fileid=revision.node)
- d[b'linknode'] = clnode(fctx.introrev())
+ d[b'linknode'] = linknodes[revision.node]
followingmeta = []
followingdata = []
@@ -1045,7 +1037,7 @@
# Requested patterns could include files not in the local store. So
# filter those out.
- return matchmod.intersectmatchers(repo.narrowmatch(), matcher)
+ return repo.narrowmatch(matcher)
@wireprotocommand(
'filedata',
@@ -1086,6 +1078,9 @@
except FileAccessError as e:
raise error.WireprotoCommandError(e.msg, e.args)
+ clnode = repo.changelog.node
+ linknodes = {}
+
# Validate requested nodes.
for node in nodes:
try:
@@ -1094,6 +1089,14 @@
raise error.WireprotoCommandError('unknown file node: %s',
(hex(node),))
+ # TODO by creating the filectx against a specific file revision
+ # instead of a changeset, linkrev() is always used. This is wrong for
+ # cases where linkrev() may refer to a hidden changeset. But since this
+ # API doesn't know anything about changesets, we're not sure how to
+ # disambiguate the linknode. Perhaps we should delete this API?
+ fctx = repo.filectx(path, fileid=node)
+ linknodes[node] = clnode(fctx.introrev())
+
revisions = store.emitrevisions(nodes,
revisiondata=b'revision' in fields,
assumehaveparentrevisions=haveparents)
@@ -1102,7 +1105,7 @@
b'totalitems': len(nodes),
}
- for o in emitfilerevisions(repo, path, revisions, fields):
+ for o in emitfilerevisions(repo, path, revisions, linknodes, fields):
yield o
def filesdatacapabilities(repo, proto):
@@ -1153,46 +1156,38 @@
# changeset, it should probably be allowed to access files data for that
# changeset.
- cl = repo.changelog
outgoing = resolvenodes(repo, revisions)
filematcher = makefilematcher(repo, pathfilter)
- # Figure out what needs to be emitted.
- changedpaths = set()
- fnodes = collections.defaultdict(set)
+ # path -> {fnode: linknode}
+ fnodes = collections.defaultdict(dict)
+ # We collect the set of relevant file revisions by iterating the changeset
+ # revisions, either walking the set of files recorded in the changeset
+ # or walking the manifest at that revision. There is probably room for a
+ # storage-level API to request this data, as it can be expensive to compute
+ # and would benefit from caching or alternate storage beyond what revlogs
+ # provide.
for node in outgoing:
ctx = repo[node]
- changedpaths.update(ctx.files())
-
- changedpaths = sorted(p for p in changedpaths if filematcher(p))
+ mctx = ctx.manifestctx()
+ md = mctx.read()
- # If ancestors are known, we send file revisions having a linkrev in the
- # outgoing set of changeset revisions.
- if haveparents:
- outgoingclrevs = set(cl.rev(n) for n in outgoing)
-
- for path in changedpaths:
- try:
- store = getfilestore(repo, proto, path)
- except FileAccessError as e:
- raise error.WireprotoCommandError(e.msg, e.args)
+ if haveparents:
+ checkpaths = ctx.files()
+ else:
+ checkpaths = md.keys()
- for rev in store:
- linkrev = store.linkrev(rev)
-
- if linkrev in outgoingclrevs:
- fnodes[path].add(store.node(rev))
+ for path in checkpaths:
+ fnode = md[path]
- # If ancestors aren't known, we walk the manifests and send all
- # encountered file revisions.
- else:
- for node in outgoing:
- mctx = repo[node].manifestctx()
+ if path in fnodes and fnode in fnodes[path]:
+ continue
- for path, fnode in mctx.read().items():
- if filematcher(path):
- fnodes[path].add(fnode)
+ if not filematcher(path):
+ continue
+
+ fnodes[path].setdefault(fnode, node)
yield {
b'totalpaths': len(fnodes),
@@ -1210,11 +1205,11 @@
b'totalitems': len(filenodes),
}
- revisions = store.emitrevisions(filenodes,
+ revisions = store.emitrevisions(filenodes.keys(),
revisiondata=b'revision' in fields,
assumehaveparentrevisions=haveparents)
- for o in emitfilerevisions(repo, path, revisions, fields):
+ for o in emitfilerevisions(repo, path, revisions, filenodes, fields):
yield o
@wireprotocommand(
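
filesdata now collects, per path, a mapping of file node to the changeset node that first introduced it, so emitfilerevisions() can fill b'linknode' without per-revision filectx lookups. Roughly, with `walkfilenodes` as a hypothetical stand-in for the changeset/manifest walk above:

    import collections

    fnodes = collections.defaultdict(dict)   # path -> {fnode: linknode}
    for node in outgoing:
        for path, fnode in walkfilenodes(node):
            if not filematcher(path):
                continue
            # first changeset seen wins as this file revision's linknode
            fnodes[path].setdefault(fnode, node)
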
--- a/mercurial/worker.py Wed Jan 09 20:00:35 2019 -0800
+++ b/mercurial/worker.py Fri Jan 18 13:28:22 2019 -0500
@@ -250,10 +250,9 @@
def _windowsworker(ui, func, staticargs, args):
class Worker(threading.Thread):
- def __init__(self, taskqueue, resultqueue, func, staticargs,
- group=None, target=None, name=None, verbose=None):
- threading.Thread.__init__(self, group=group, target=target,
- name=name, verbose=verbose)
+ def __init__(self, taskqueue, resultqueue, func, staticargs, *args,
+ **kwargs):
+ threading.Thread.__init__(self, *args, **kwargs)
self._taskqueue = taskqueue
self._resultqueue = resultqueue
self._func = func
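
Python 3 dropped the `verbose` parameter from threading.Thread.__init__(), so naming it explicitly breaks there; forwarding *args/**kwargs keeps the subclass portable across versions. In miniature:

    import threading

    class Worker(threading.Thread):
        def __init__(self, taskqueue, *args, **kwargs):
            # pass everything else straight through to Thread, whatever
            # its signature is on this Python version
            threading.Thread.__init__(self, *args, **kwargs)
            self._taskqueue = taskqueue

    w = Worker(taskqueue=[], name='worker-0')
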
--- a/rust/Cargo.lock Wed Jan 09 20:00:35 2019 -0800
+++ b/rust/Cargo.lock Fri Jan 18 13:28:22 2019 -0500
@@ -1,19 +1,25 @@
[[package]]
name = "aho-corasick"
-version = "0.5.3"
+version = "0.6.9"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
- "memchr 0.1.11 (registry+https://github.com/rust-lang/crates.io-index)",
+ "memchr 2.1.2 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
+name = "cfg-if"
+version = "0.1.6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+
+[[package]]
name = "cpython"
-version = "0.1.0"
-source = "git+https://github.com/indygreg/rust-cpython.git?rev=c90d65cf84abfffce7ef54476bbfed56017a2f52#c90d65cf84abfffce7ef54476bbfed56017a2f52"
+version = "0.2.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
- "libc 0.2.35 (registry+https://github.com/rust-lang/crates.io-index)",
- "num-traits 0.1.41 (registry+https://github.com/rust-lang/crates.io-index)",
- "python27-sys 0.1.2 (git+https://github.com/indygreg/rust-cpython.git?rev=c90d65cf84abfffce7ef54476bbfed56017a2f52)",
+ "libc 0.2.45 (registry+https://github.com/rust-lang/crates.io-index)",
+ "num-traits 0.2.6 (registry+https://github.com/rust-lang/crates.io-index)",
+ "python27-sys 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)",
+ "python3-sys 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
@@ -21,12 +27,14 @@
version = "0.1.0"
[[package]]
-name = "hgcli"
+name = "hg-cpython"
version = "0.1.0"
dependencies = [
- "cpython 0.1.0 (git+https://github.com/indygreg/rust-cpython.git?rev=c90d65cf84abfffce7ef54476bbfed56017a2f52)",
- "libc 0.2.35 (registry+https://github.com/rust-lang/crates.io-index)",
- "python27-sys 0.1.2 (git+https://github.com/indygreg/rust-cpython.git?rev=c90d65cf84abfffce7ef54476bbfed56017a2f52)",
+ "cpython 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)",
+ "hg-core 0.1.0",
+ "libc 0.2.45 (registry+https://github.com/rust-lang/crates.io-index)",
+ "python27-sys 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)",
+ "python3-sys 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
@@ -34,106 +42,108 @@
version = "0.1.0"
dependencies = [
"hg-core 0.1.0",
- "libc 0.2.35 (registry+https://github.com/rust-lang/crates.io-index)",
+ "libc 0.2.45 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
-name = "kernel32-sys"
-version = "0.2.2"
+name = "lazy_static"
+version = "1.2.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-dependencies = [
- "winapi 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)",
- "winapi-build 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)",
-]
[[package]]
name = "libc"
-version = "0.2.35"
+version = "0.2.45"
source = "registry+https://github.com/rust-lang/crates.io-index"
[[package]]
name = "memchr"
-version = "0.1.11"
+version = "2.1.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
- "libc 0.2.35 (registry+https://github.com/rust-lang/crates.io-index)",
+ "cfg-if 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)",
+ "libc 0.2.45 (registry+https://github.com/rust-lang/crates.io-index)",
+ "version_check 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
name = "num-traits"
-version = "0.1.41"
+version = "0.2.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
[[package]]
name = "python27-sys"
-version = "0.1.2"
-source = "git+https://github.com/indygreg/rust-cpython.git?rev=c90d65cf84abfffce7ef54476bbfed56017a2f52#c90d65cf84abfffce7ef54476bbfed56017a2f52"
+version = "0.2.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
- "libc 0.2.35 (registry+https://github.com/rust-lang/crates.io-index)",
- "regex 0.1.80 (registry+https://github.com/rust-lang/crates.io-index)",
+ "libc 0.2.45 (registry+https://github.com/rust-lang/crates.io-index)",
+ "regex 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
+[[package]]
+name = "python3-sys"
+version = "0.2.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "libc 0.2.45 (registry+https://github.com/rust-lang/crates.io-index)",
+ "regex 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
name = "regex"
-version = "0.1.80"
+version = "1.1.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
- "aho-corasick 0.5.3 (registry+https://github.com/rust-lang/crates.io-index)",
- "memchr 0.1.11 (registry+https://github.com/rust-lang/crates.io-index)",
- "regex-syntax 0.3.9 (registry+https://github.com/rust-lang/crates.io-index)",
- "thread_local 0.2.7 (registry+https://github.com/rust-lang/crates.io-index)",
- "utf8-ranges 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)",
+ "aho-corasick 0.6.9 (registry+https://github.com/rust-lang/crates.io-index)",
+ "memchr 2.1.2 (registry+https://github.com/rust-lang/crates.io-index)",
+ "regex-syntax 0.6.4 (registry+https://github.com/rust-lang/crates.io-index)",
+ "thread_local 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)",
+ "utf8-ranges 1.0.2 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
name = "regex-syntax"
-version = "0.3.9"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-
-[[package]]
-name = "thread-id"
-version = "2.0.0"
+version = "0.6.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
- "kernel32-sys 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)",
- "libc 0.2.35 (registry+https://github.com/rust-lang/crates.io-index)",
+ "ucd-util 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
name = "thread_local"
-version = "0.2.7"
+version = "0.3.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
- "thread-id 2.0.0 (registry+https://github.com/rust-lang/crates.io-index)",
+ "lazy_static 1.2.0 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
-name = "utf8-ranges"
+name = "ucd-util"
version = "0.1.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
[[package]]
-name = "winapi"
-version = "0.2.8"
+name = "utf8-ranges"
+version = "1.0.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
[[package]]
-name = "winapi-build"
-version = "0.1.1"
+name = "version_check"
+version = "0.1.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
[metadata]
-"checksum aho-corasick 0.5.3 (registry+https://github.com/rust-lang/crates.io-index)" = "ca972c2ea5f742bfce5687b9aef75506a764f61d37f8f649047846a9686ddb66"
-"checksum cpython 0.1.0 (git+https://github.com/indygreg/rust-cpython.git?rev=c90d65cf84abfffce7ef54476bbfed56017a2f52)" = "<none>"
-"checksum kernel32-sys 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)" = "7507624b29483431c0ba2d82aece8ca6cdba9382bff4ddd0f7490560c056098d"
-"checksum libc 0.2.35 (registry+https://github.com/rust-lang/crates.io-index)" = "96264e9b293e95d25bfcbbf8a88ffd1aedc85b754eba8b7d78012f638ba220eb"
-"checksum memchr 0.1.11 (registry+https://github.com/rust-lang/crates.io-index)" = "d8b629fb514376c675b98c1421e80b151d3817ac42d7c667717d282761418d20"
-"checksum num-traits 0.1.41 (registry+https://github.com/rust-lang/crates.io-index)" = "cacfcab5eb48250ee7d0c7896b51a2c5eec99c1feea5f32025635f5ae4b00070"
-"checksum python27-sys 0.1.2 (git+https://github.com/indygreg/rust-cpython.git?rev=c90d65cf84abfffce7ef54476bbfed56017a2f52)" = "<none>"
-"checksum regex 0.1.80 (registry+https://github.com/rust-lang/crates.io-index)" = "4fd4ace6a8cf7860714a2c2280d6c1f7e6a413486c13298bbc86fd3da019402f"
-"checksum regex-syntax 0.3.9 (registry+https://github.com/rust-lang/crates.io-index)" = "f9ec002c35e86791825ed294b50008eea9ddfc8def4420124fbc6b08db834957"
-"checksum thread-id 2.0.0 (registry+https://github.com/rust-lang/crates.io-index)" = "a9539db560102d1cef46b8b78ce737ff0bb64e7e18d35b2a5688f7d097d0ff03"
-"checksum thread_local 0.2.7 (registry+https://github.com/rust-lang/crates.io-index)" = "8576dbbfcaef9641452d5cf0df9b0e7eeab7694956dd33bb61515fb8f18cfdd5"
-"checksum utf8-ranges 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)" = "a1ca13c08c41c9c3e04224ed9ff80461d97e121589ff27c753a16cb10830ae0f"
-"checksum winapi 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)" = "167dc9d6949a9b857f3451275e911c3f44255842c1f7a76f33c55103a909087a"
-"checksum winapi-build 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "2d315eee3b34aca4797b2da6b13ed88266e6d612562a0c46390af8299fc699bc"
+"checksum aho-corasick 0.6.9 (registry+https://github.com/rust-lang/crates.io-index)" = "1e9a933f4e58658d7b12defcf96dc5c720f20832deebe3e0a19efd3b6aaeeb9e"
+"checksum cfg-if 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)" = "082bb9b28e00d3c9d39cc03e64ce4cea0f1bb9b3fde493f0cbc008472d22bdf4"
+"checksum cpython 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)" = "b489034e723e7f5109fecd19b719e664f89ef925be785885252469e9822fa940"
+"checksum lazy_static 1.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "a374c89b9db55895453a74c1e38861d9deec0b01b405a82516e9d5de4820dea1"
+"checksum libc 0.2.45 (registry+https://github.com/rust-lang/crates.io-index)" = "2d2857ec59fadc0773853c664d2d18e7198e83883e7060b63c924cb077bd5c74"
+"checksum memchr 2.1.2 (registry+https://github.com/rust-lang/crates.io-index)" = "db4c41318937f6e76648f42826b1d9ade5c09cafb5aef7e351240a70f39206e9"
+"checksum num-traits 0.2.6 (registry+https://github.com/rust-lang/crates.io-index)" = "0b3a5d7cc97d6d30d8b9bc8fa19bf45349ffe46241e8816f50f62f6d6aaabee1"
+"checksum python27-sys 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)" = "56114c37d4dca82526d74009df7782a28c871ac9d36b19d4cb9e67672258527e"
+"checksum python3-sys 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)" = "61e4aac43f833fd637e429506cb2ac9d7df672c4b68f2eaaa163649b7fdc0444"
+"checksum regex 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "37e7cbbd370869ce2e8dff25c7018702d10b21a20ef7135316f8daecd6c25b7f"
+"checksum regex-syntax 0.6.4 (registry+https://github.com/rust-lang/crates.io-index)" = "4e47a2ed29da7a9e1960e1639e7a982e6edc6d49be308a3b02daf511504a16d1"
+"checksum thread_local 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)" = "c6b53e329000edc2b34dbe8545fd20e55a333362d0a321909685a19bd28c3f1b"
+"checksum ucd-util 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)" = "535c204ee4d8434478593480b8f86ab45ec9aae0e83c568ca81abf0fd0e88f86"
+"checksum utf8-ranges 1.0.2 (registry+https://github.com/rust-lang/crates.io-index)" = "796f7e48bef87609f7ade7e06495a87d5cd06c7866e6a5cbfceffc558a243737"
+"checksum version_check 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)" = "914b1a6776c4c929a602fafd8bc742e06365d4bcbe48c30f9cca5824f70dc9dd"
--- a/rust/Cargo.toml Wed Jan 09 20:00:35 2019 -0800
+++ b/rust/Cargo.toml Fri Jan 18 13:28:22 2019 -0500
@@ -1,3 +1,3 @@
[workspace]
-members = ["hgcli", "hg-core", "hg-direct-ffi"]
-exclude = ["chg"]
+members = ["hg-core", "hg-direct-ffi", "hg-cpython"]
+exclude = ["chg", "hgcli"]
--- a/rust/hg-core/src/ancestors.rs Wed Jan 09 20:00:35 2019 -0800
+++ b/rust/hg-core/src/ancestors.rs Fri Jan 18 13:28:22 2019 -0500
@@ -8,7 +8,9 @@
//! Rust versions of generic DAG ancestors algorithms for Mercurial
use super::{Graph, GraphError, Revision, NULL_REVISION};
+use std::cmp::max;
use std::collections::{BinaryHeap, HashSet};
+use crate::dagops;
/// Iterator over the ancestors of a given list of revisions
/// This is a generic type, defined and implemented for any Graph, so that
@@ -24,20 +26,31 @@
stoprev: Revision,
}
+/// Lazy ancestors set, backed by AncestorsIterator
+pub struct LazyAncestors<G: Graph + Clone> {
+ graph: G,
+ containsiter: AncestorsIterator<G>,
+ initrevs: Vec<Revision>,
+ stoprev: Revision,
+ inclusive: bool,
+}
+
+pub struct MissingAncestors<G: Graph> {
+ graph: G,
+ bases: HashSet<Revision>,
+}
+
impl<G: Graph> AncestorsIterator<G> {
/// Constructor.
///
/// if `inclusive` is true, then the init revisions are emitted in
/// particular, otherwise iteration starts from their parents.
- pub fn new<I>(
+ pub fn new(
graph: G,
- initrevs: I,
+ initrevs: impl IntoIterator<Item = Revision>,
stoprev: Revision,
inclusive: bool,
- ) -> Result<Self, GraphError>
- where
- I: IntoIterator<Item = Revision>,
- {
+ ) -> Result<Self, GraphError> {
let filtered_initrevs = initrevs.into_iter().filter(|&r| r >= stoprev);
if inclusive {
let visit: BinaryHeap<Revision> = filtered_initrevs.collect();
@@ -57,7 +70,9 @@
};
this.seen.insert(NULL_REVISION);
for rev in filtered_initrevs {
- this.conditionally_push_parents(rev)?;
+ for parent in this.graph.parents(rev)?.iter().cloned() {
+ this.conditionally_push_rev(parent);
+ }
}
Ok(this)
}
@@ -70,73 +85,309 @@
}
}
- #[inline]
- fn conditionally_push_parents(
- &mut self,
- rev: Revision,
- ) -> Result<(), GraphError> {
- let parents = self.graph.parents(rev)?;
- self.conditionally_push_rev(parents.0);
- self.conditionally_push_rev(parents.1);
- Ok(())
- }
-
/// Consumes partially the iterator to tell if the given target
/// revision
/// is in the ancestors it emits.
/// This is meant for iterators actually dedicated to that kind of
/// purpose
- pub fn contains(&mut self, target: Revision) -> bool {
+ pub fn contains(&mut self, target: Revision) -> Result<bool, GraphError> {
if self.seen.contains(&target) && target != NULL_REVISION {
- return true;
+ return Ok(true);
}
- for rev in self {
+ for item in self {
+ let rev = item?;
if rev == target {
- return true;
+ return Ok(true);
}
if rev < target {
- return false;
+ return Ok(false);
}
}
- false
+ Ok(false)
+ }
+
+ pub fn peek(&self) -> Option<Revision> {
+ self.visit.peek().map(|&r| r)
+ }
+
+ /// Tell whether the iterator is over an empty set
+ ///
+ /// The result does not depend on whether the iterator has been consumed
+ /// or not.
+ /// This is mostly meant for iterators backing a lazy ancestors set
+ pub fn is_empty(&self) -> bool {
+ if self.visit.len() > 0 {
+ return false;
+ }
+ if self.seen.len() > 1 {
+ return false;
+ }
+ // at this point, the seen set is at most a singleton.
+ // If not `self.inclusive`, it's still possible that it has only
+ // the null revision
+ self.seen.is_empty() || self.seen.contains(&NULL_REVISION)
}
}
-/// Main implementation.
+/// Main implementation for the iterator
///
/// The algorithm is the same as in `_lazyancestorsiter()` from `ancestors.py`
/// with a few non crucial differences:
///
/// - there's no filtering of invalid parent revisions. Actually, it should be
/// consistent and more efficient to filter them from the end caller.
-/// - we don't use the equivalent of `heapq.heapreplace()`, but we should, for
-/// the same reasons (using `peek_mut`)
-/// - we don't have the optimization for adjacent revs (case where p1 == rev-1)
+/// - we don't have the optimization for adjacent revisions (i.e., the case
+/// where `p1 == rev - 1`), because it amounts to updating the first element of
+/// the heap without sifting, which Rust's BinaryHeap doesn't let us do.
/// - we save a few pushes by comparing with `stoprev` before pushing
-///
-/// Error treatment:
-/// We swallow the possible GraphError of conditionally_push_parents() to
-/// respect the Iterator trait in a simple manner: never emitting parents
-/// for the returned revision. We finds this good enough for now, because:
-///
-/// - there's a good chance that invalid revisionss are fed from the start,
-/// and `new()` doesn't swallow the error result.
-/// - this is probably what the Python implementation produces anyway, due
-/// to filtering at each step, and Python code is currently the only
-/// concrete caller we target, so we shouldn't need a finer error treatment
-/// for the time being.
impl<G: Graph> Iterator for AncestorsIterator<G> {
- type Item = Revision;
+ type Item = Result<Revision, GraphError>;
- fn next(&mut self) -> Option<Revision> {
- let current = match self.visit.pop() {
+ fn next(&mut self) -> Option<Self::Item> {
+ let current = match self.visit.peek() {
None => {
return None;
}
- Some(i) => i,
+ Some(c) => *c,
+ };
+ let [p1, p2] = match self.graph.parents(current) {
+ Ok(ps) => ps,
+ Err(e) => return Some(Err(e)),
+ };
+ if p1 < self.stoprev || self.seen.contains(&p1) {
+ self.visit.pop();
+ } else {
+ *(self.visit.peek_mut().unwrap()) = p1;
+ self.seen.insert(p1);
};
- self.conditionally_push_parents(current).unwrap_or(());
- Some(current)
+
+ self.conditionally_push_rev(p2);
+ Some(Ok(current))
+ }
+}
+
+impl<G: Graph + Clone> LazyAncestors<G> {
+ pub fn new(
+ graph: G,
+ initrevs: impl IntoIterator<Item = Revision>,
+ stoprev: Revision,
+ inclusive: bool,
+ ) -> Result<Self, GraphError> {
+ let v: Vec<Revision> = initrevs.into_iter().collect();
+ Ok(LazyAncestors {
+ graph: graph.clone(),
+ containsiter: AncestorsIterator::new(
+ graph,
+ v.iter().cloned(),
+ stoprev,
+ inclusive,
+ )?,
+ initrevs: v,
+ stoprev: stoprev,
+ inclusive: inclusive,
+ })
+ }
+
+ pub fn contains(&mut self, rev: Revision) -> Result<bool, GraphError> {
+ self.containsiter.contains(rev)
+ }
+
+ pub fn is_empty(&self) -> bool {
+ self.containsiter.is_empty()
+ }
+
+ pub fn iter(&self) -> AncestorsIterator<G> {
+ // the arguments being the same as for self.containsiter, we know
+ // for sure that the AncestorsIterator constructor can't fail
+ AncestorsIterator::new(
+ self.graph.clone(),
+ self.initrevs.iter().cloned(),
+ self.stoprev,
+ self.inclusive,
+ )
+ .unwrap()
+ }
+}
+
+impl<G: Graph> MissingAncestors<G> {
+ pub fn new(graph: G, bases: impl IntoIterator<Item = Revision>) -> Self {
+ let mut bases: HashSet<Revision> = bases.into_iter().collect();
+ if bases.is_empty() {
+ bases.insert(NULL_REVISION);
+ }
+ MissingAncestors { graph, bases }
+ }
+
+ pub fn has_bases(&self) -> bool {
+ self.bases.iter().any(|&b| b != NULL_REVISION)
+ }
+
+ /// Return a reference to current bases.
+ ///
+ /// This is useful in unit tests, but setdiscovery.py also reads
+ /// the bases attribute of an ancestor.missingancestors instance.
+ pub fn get_bases<'a>(&'a self) -> &'a HashSet<Revision> {
+ &self.bases
+ }
+
+ /// Computes the relative heads of current bases.
+ ///
+ /// The object is still usable after this.
+ pub fn bases_heads(&self) -> Result<HashSet<Revision>, GraphError> {
+ dagops::heads(&self.graph, self.bases.iter())
+ }
+
+ /// Consumes the object and returns the relative heads of its bases.
+ pub fn into_bases_heads(mut self) -> Result<HashSet<Revision>, GraphError> {
+ dagops::retain_heads(&self.graph, &mut self.bases)?;
+ Ok(self.bases)
+ }
+
+ pub fn add_bases(
+ &mut self,
+ new_bases: impl IntoIterator<Item = Revision>,
+ ) {
+ self.bases.extend(new_bases);
+ }
+
+ /// Remove all ancestors of self.bases from the revs set (in place)
+ pub fn remove_ancestors_from(
+ &mut self,
+ revs: &mut HashSet<Revision>,
+ ) -> Result<(), GraphError> {
+ revs.retain(|r| !self.bases.contains(r));
+ // the null revision is always an ancestor
+ revs.remove(&NULL_REVISION);
+ if revs.is_empty() {
+ return Ok(());
+ }
+ // anything in revs > start is definitely not an ancestor of bases
+ // revs <= start need to be investigated
+ // TODO optim: if a missingancestors is to be used several times,
+ // we shouldn't need to iterate over bases each time
+ let start = match self.bases.iter().cloned().max() {
+ Some(m) => m,
+ None => {
+ // bases is empty (shouldn't happen, but let's be safe)
+ return Ok(());
+ }
+ };
+ // whatever happens, we'll keep at least keepcount of them
+ // knowing this gives us an earlier stop condition than
+ // going all the way to the root
+ let keepcount = revs.iter().filter(|r| **r > start).count();
+
+ let mut curr = start;
+ while curr != NULL_REVISION && revs.len() > keepcount {
+ if self.bases.contains(&curr) {
+ revs.remove(&curr);
+ self.add_parents(curr)?;
+ }
+ curr -= 1;
+ }
+ Ok(())
+ }
+
+ /// Add rev's parents to self.bases
+ #[inline]
+ fn add_parents(&mut self, rev: Revision) -> Result<(), GraphError> {
+ // No need to bother the set with inserting NULL_REVISION over and
+ // over
+ for p in self.graph.parents(rev)?.iter().cloned() {
+ if p != NULL_REVISION {
+ self.bases.insert(p);
+ }
+ }
+ Ok(())
+ }
+
+ /// Return all the ancestors of revs that are not ancestors of self.bases
+ ///
+ /// This may include elements from revs.
+ ///
+ /// Equivalent to the revset (::revs - ::self.bases). Revs are returned in
+ /// revision number order, which is a topological order.
+ pub fn missing_ancestors(
+ &mut self,
+ revs: impl IntoIterator<Item = Revision>,
+ ) -> Result<Vec<Revision>, GraphError> {
+ // just for convenience and comparison with the Python version
+ let bases_visit = &mut self.bases;
+ let mut revs: HashSet<Revision> = revs
+ .into_iter()
+ .filter(|r| !bases_visit.contains(r))
+ .collect();
+ let revs_visit = &mut revs;
+ let mut both_visit: HashSet<Revision> =
+ revs_visit.intersection(&bases_visit).cloned().collect();
+ if revs_visit.is_empty() {
+ return Ok(Vec::new());
+ }
+
+ let max_bases =
+ bases_visit.iter().cloned().max().unwrap_or(NULL_REVISION);
+ let max_revs =
+ revs_visit.iter().cloned().max().unwrap_or(NULL_REVISION);
+ let start = max(max_bases, max_revs);
+
+ // TODO heuristics for with_capacity()?
+ let mut missing: Vec<Revision> = Vec::new();
+ for curr in (0..=start).rev() {
+ if revs_visit.is_empty() {
+ break;
+ }
+ if both_visit.contains(&curr) {
+ // curr's parents might have made it into revs_visit through
+ // another path
+ // TODO optim: Rust's HashSet.remove returns a boolean telling
+ // whether it happened. This would spare us one set lookup
+ both_visit.remove(&curr);
+ for p in self.graph.parents(curr)?.iter().cloned() {
+ if p == NULL_REVISION {
+ continue;
+ }
+ revs_visit.remove(&p);
+ bases_visit.insert(p);
+ both_visit.insert(p);
+ }
+ } else if revs_visit.remove(&curr) {
+ missing.push(curr);
+ for p in self.graph.parents(curr)?.iter().cloned() {
+ if p == NULL_REVISION {
+ continue;
+ }
+ if bases_visit.contains(&p) || both_visit.contains(&p) {
+ // p is an ancestor of revs_visit, and is implicitly
+ // in bases_visit, which means p is ::revs & ::bases.
+ // TODO optim: hence if bothvisit, we look up twice
+ revs_visit.remove(&p);
+ bases_visit.insert(p);
+ both_visit.insert(p);
+ } else {
+ // visit later
+ revs_visit.insert(p);
+ }
+ }
+ } else if bases_visit.contains(&curr) {
+ for p in self.graph.parents(curr)?.iter().cloned() {
+ if p == NULL_REVISION {
+ continue;
+ }
+ if revs_visit.contains(&p) || both_visit.contains(&p) {
+ // p is an ancestor of bases_visit, and is implicitly
+ // in revs_visit, which means p is ::revs & ::bases.
+ // TODO optim: hence if p is in both_visit, we look it up twice
+ revs_visit.remove(&p);
+ bases_visit.insert(p);
+ both_visit.insert(p);
+ } else {
+ bases_visit.insert(p);
+ }
+ }
+ }
+ }
+ missing.reverse();
+ Ok(missing)
}
}
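A minimal sketch of how `remove_ancestors_from` and `missing_ancestors` behave together, assuming the test-only `VecGraph` from `rust/hg-core/src/testing.rs` were importable outside of tests (the three-revision linear graph is made up for the illustration):

```rust
use std::collections::HashSet;

use hg::testing::VecGraph; // test-only in this patch; assumed importable
use hg::{MissingAncestors, Revision, NULL_REVISION};

fn main() {
    // linear graph 0 <- 1 <- 2, made up for the example
    let graph: VecGraph = vec![
        [NULL_REVISION, NULL_REVISION],
        [0, NULL_REVISION],
        [1, NULL_REVISION],
    ];
    let mut missing = MissingAncestors::new(graph, vec![1]);

    // ::[2] - ::[1] is just [2], in increasing revision order
    assert_eq!(missing.missing_ancestors(vec![2]).unwrap(), vec![2]);

    // remove_ancestors_from() prunes ::bases in place: 0 and 1 go away
    let mut revs: HashSet<Revision> = [0, 1, 2].iter().cloned().collect();
    missing.remove_ancestors_from(&mut revs).unwrap();
    assert_eq!(revs, [2].iter().cloned().collect::<HashSet<_>>());
}
```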
@@ -144,35 +395,8 @@
mod tests {
use super::*;
-
- #[derive(Clone, Debug)]
- struct Stub;
-
- /// This is the same as the dict from test-ancestors.py
- impl Graph for Stub {
- fn parents(
- &self,
- rev: Revision,
- ) -> Result<(Revision, Revision), GraphError> {
- match rev {
- 0 => Ok((-1, -1)),
- 1 => Ok((0, -1)),
- 2 => Ok((1, -1)),
- 3 => Ok((1, -1)),
- 4 => Ok((2, -1)),
- 5 => Ok((4, -1)),
- 6 => Ok((4, -1)),
- 7 => Ok((4, -1)),
- 8 => Ok((-1, -1)),
- 9 => Ok((6, 7)),
- 10 => Ok((5, -1)),
- 11 => Ok((3, 7)),
- 12 => Ok((9, -1)),
- 13 => Ok((8, -1)),
- r => Err(GraphError::ParentOutOfRange(r)),
- }
- }
- }
+ use crate::testing::{SampleGraph, VecGraph};
+ use std::iter::FromIterator;
fn list_ancestors<G: Graph>(
graph: G,
@@ -182,6 +406,7 @@
) -> Vec<Revision> {
AncestorsIterator::new(graph, initrevs, stoprev, inclusive)
.unwrap()
+ .map(|res| res.unwrap())
.collect()
}
@@ -189,25 +414,37 @@
/// Same tests as test-ancestor.py, without membership
/// (see also test-ancestor.py.out)
fn test_list_ancestor() {
- assert_eq!(list_ancestors(Stub, vec![], 0, false), vec![]);
+ assert_eq!(list_ancestors(SampleGraph, vec![], 0, false), vec![]);
assert_eq!(
- list_ancestors(Stub, vec![11, 13], 0, false),
+ list_ancestors(SampleGraph, vec![11, 13], 0, false),
vec![8, 7, 4, 3, 2, 1, 0]
);
- assert_eq!(list_ancestors(Stub, vec![1, 3], 0, false), vec![1, 0]);
assert_eq!(
- list_ancestors(Stub, vec![11, 13], 0, true),
+ list_ancestors(SampleGraph, vec![1, 3], 0, false),
+ vec![1, 0]
+ );
+ assert_eq!(
+ list_ancestors(SampleGraph, vec![11, 13], 0, true),
vec![13, 11, 8, 7, 4, 3, 2, 1, 0]
);
- assert_eq!(list_ancestors(Stub, vec![11, 13], 6, false), vec![8, 7]);
assert_eq!(
- list_ancestors(Stub, vec![11, 13], 6, true),
+ list_ancestors(SampleGraph, vec![11, 13], 6, false),
+ vec![8, 7]
+ );
+ assert_eq!(
+ list_ancestors(SampleGraph, vec![11, 13], 6, true),
vec![13, 11, 8, 7]
);
- assert_eq!(list_ancestors(Stub, vec![11, 13], 11, true), vec![13, 11]);
- assert_eq!(list_ancestors(Stub, vec![11, 13], 12, true), vec![13]);
+ assert_eq!(
+ list_ancestors(SampleGraph, vec![11, 13], 11, true),
+ vec![13, 11]
+ );
assert_eq!(
- list_ancestors(Stub, vec![10, 1], 0, true),
+ list_ancestors(SampleGraph, vec![11, 13], 12, true),
+ vec![13]
+ );
+ assert_eq!(
+ list_ancestors(SampleGraph, vec![10, 1], 0, true),
vec![10, 5, 4, 2, 1, 0]
);
}
@@ -219,32 +456,64 @@
/// For instance, run tests/test-obsolete-checkheads.t
fn test_nullrev_input() {
let mut iter =
- AncestorsIterator::new(Stub, vec![-1], 0, false).unwrap();
+ AncestorsIterator::new(SampleGraph, vec![-1], 0, false).unwrap();
assert_eq!(iter.next(), None)
}
#[test]
fn test_contains() {
let mut lazy =
- AncestorsIterator::new(Stub, vec![10, 1], 0, true).unwrap();
- assert!(lazy.contains(1));
- assert!(!lazy.contains(3));
+ AncestorsIterator::new(SampleGraph, vec![10, 1], 0, true).unwrap();
+ assert!(lazy.contains(1).unwrap());
+ assert!(!lazy.contains(3).unwrap());
let mut lazy =
- AncestorsIterator::new(Stub, vec![0], 0, false).unwrap();
- assert!(!lazy.contains(NULL_REVISION));
+ AncestorsIterator::new(SampleGraph, vec![0], 0, false).unwrap();
+ assert!(!lazy.contains(NULL_REVISION).unwrap());
+ }
+
+ #[test]
+ fn test_peek() {
+ let mut iter =
+ AncestorsIterator::new(SampleGraph, vec![10], 0, true).unwrap();
+ // peek() gives us the next value
+ assert_eq!(iter.peek(), Some(10));
+ // but it's not been consumed
+ assert_eq!(iter.next(), Some(Ok(10)));
+ // and iteration resumes normally
+ assert_eq!(iter.next(), Some(Ok(5)));
+
+ // let's drain the iterator to test peek() at the end
+ while iter.next().is_some() {}
+ assert_eq!(iter.peek(), None);
+ }
+
+ #[test]
+ fn test_empty() {
+ let mut iter =
+ AncestorsIterator::new(SampleGraph, vec![10], 0, true).unwrap();
+ assert!(!iter.is_empty());
+ while iter.next().is_some() {}
+ assert!(!iter.is_empty());
+
+ let iter =
+ AncestorsIterator::new(SampleGraph, vec![], 0, true).unwrap();
+ assert!(iter.is_empty());
+
+ // case where iter.seen == {NULL_REVISION}
+ let iter =
+ AncestorsIterator::new(SampleGraph, vec![0], 0, false).unwrap();
+ assert!(iter.is_empty());
}
/// A corrupted Graph, supporting error handling tests
+ #[derive(Clone, Debug)]
struct Corrupted;
impl Graph for Corrupted {
- fn parents(
- &self,
- rev: Revision,
- ) -> Result<(Revision, Revision), GraphError> {
+ fn parents(&self, rev: Revision) -> Result<[Revision; 2], GraphError> {
match rev {
- 1 => Ok((0, -1)),
+ 1 => Ok([0, -1]),
r => Err(GraphError::ParentOutOfRange(r)),
}
}
@@ -253,7 +522,7 @@
#[test]
fn test_initrev_out_of_range() {
// inclusive=false looks up initrev's parents right away
- match AncestorsIterator::new(Stub, vec![25], 0, false) {
+ match AncestorsIterator::new(SampleGraph, vec![25], 0, false) {
Ok(_) => panic!("Should have been ParentOutOfRange"),
Err(e) => assert_eq!(e, GraphError::ParentOutOfRange(25)),
}
@@ -264,7 +533,241 @@
// inclusive=false looks up initrev's parents right away
let mut iter =
AncestorsIterator::new(Corrupted, vec![1], 0, false).unwrap();
- assert_eq!(iter.next(), Some(0));
- assert_eq!(iter.next(), None);
+ assert_eq!(iter.next(), Some(Err(GraphError::ParentOutOfRange(0))));
+ }
+
+ #[test]
+ fn test_lazy_iter_contains() {
+ let mut lazy =
+ LazyAncestors::new(SampleGraph, vec![11, 13], 0, false).unwrap();
+
+ let revs: Vec<Revision> = lazy.iter().map(|r| r.unwrap()).collect();
+ // compare with iterator tests on the same initial revisions
+ assert_eq!(revs, vec![8, 7, 4, 3, 2, 1, 0]);
+
+ // contains() results are correct, unaffected by the fact that
+ // we consumed entirely an iterator out of lazy
+ assert_eq!(lazy.contains(2), Ok(true));
+ assert_eq!(lazy.contains(9), Ok(false));
+ }
+
+ #[test]
+ fn test_lazy_contains_iter() {
+ let mut lazy =
+ LazyAncestors::new(SampleGraph, vec![11, 13], 0, false).unwrap(); // reminder: [8, 7, 4, 3, 2, 1, 0]
+
+ assert_eq!(lazy.contains(2), Ok(true));
+ assert_eq!(lazy.contains(6), Ok(false));
+
+ // after consumption of 2 by the inner iterator, results stay
+ // consistent
+ assert_eq!(lazy.contains(2), Ok(true));
+ assert_eq!(lazy.contains(5), Ok(false));
+
+ // iter() still gives us a fresh iterator
+ let revs: Vec<Revision> = lazy.iter().map(|r| r.unwrap()).collect();
+ assert_eq!(revs, vec![8, 7, 4, 3, 2, 1, 0]);
+ }
+
+ #[test]
+ /// Test constructor, add/get bases and heads
+ fn test_missing_bases() -> Result<(), GraphError> {
+ let mut missing_ancestors =
+ MissingAncestors::new(SampleGraph, [5, 3, 1, 3].iter().cloned());
+ let mut as_vec: Vec<Revision> =
+ missing_ancestors.get_bases().iter().cloned().collect();
+ as_vec.sort();
+ assert_eq!(as_vec, [1, 3, 5]);
+
+ missing_ancestors.add_bases([3, 7, 8].iter().cloned());
+ as_vec = missing_ancestors.get_bases().iter().cloned().collect();
+ as_vec.sort();
+ assert_eq!(as_vec, [1, 3, 5, 7, 8]);
+
+ as_vec = missing_ancestors.bases_heads()?.iter().cloned().collect();
+ as_vec.sort();
+ assert_eq!(as_vec, [3, 5, 7, 8]);
+ Ok(())
+ }
+
+ fn assert_missing_remove(
+ bases: &[Revision],
+ revs: &[Revision],
+ expected: &[Revision],
+ ) {
+ let mut missing_ancestors =
+ MissingAncestors::new(SampleGraph, bases.iter().cloned());
+ let mut revset: HashSet<Revision> = revs.iter().cloned().collect();
+ missing_ancestors
+ .remove_ancestors_from(&mut revset)
+ .unwrap();
+ let mut as_vec: Vec<Revision> = revset.into_iter().collect();
+ as_vec.sort();
+ assert_eq!(as_vec.as_slice(), expected);
+ }
+
+ #[test]
+ fn test_missing_remove() {
+ assert_missing_remove(
+ &[1, 2, 3, 4, 7],
+ Vec::from_iter(1..10).as_slice(),
+ &[5, 6, 8, 9],
+ );
+ assert_missing_remove(&[10], &[11, 12, 13, 14], &[11, 12, 13, 14]);
+ assert_missing_remove(&[7], &[1, 2, 3, 4, 5], &[3, 5]);
+ }
+
+ fn assert_missing_ancestors(
+ bases: &[Revision],
+ revs: &[Revision],
+ expected: &[Revision],
+ ) {
+ let mut missing_ancestors =
+ MissingAncestors::new(SampleGraph, bases.iter().cloned());
+ let missing = missing_ancestors
+ .missing_ancestors(revs.iter().cloned())
+ .unwrap();
+ assert_eq!(missing.as_slice(), expected);
+ }
+
+ #[test]
+ fn test_missing_ancestors() {
+ // examples taken from test-ancestors.py by having it run
+ // on the same graph (both naive and fast Python algs)
+ assert_missing_ancestors(&[10], &[11], &[3, 7, 11]);
+ assert_missing_ancestors(&[11], &[10], &[5, 10]);
+ assert_missing_ancestors(&[7], &[9, 11], &[3, 6, 9, 11]);
}
+
+ /// An interesting case found by a random generator similar to
+ /// the one in test-ancestor.py. An early version of Rust MissingAncestors
+ /// failed on this case, yet none of the integration tests of the whole
+ /// suite caught it.
+ #[test]
+ fn test_remove_ancestors_from_case1() {
+ let graph: VecGraph = vec![
+ [NULL_REVISION, NULL_REVISION],
+ [0, NULL_REVISION],
+ [1, 0],
+ [2, 1],
+ [3, NULL_REVISION],
+ [4, NULL_REVISION],
+ [5, 1],
+ [2, NULL_REVISION],
+ [7, NULL_REVISION],
+ [8, NULL_REVISION],
+ [9, NULL_REVISION],
+ [10, 1],
+ [3, NULL_REVISION],
+ [12, NULL_REVISION],
+ [13, NULL_REVISION],
+ [14, NULL_REVISION],
+ [4, NULL_REVISION],
+ [16, NULL_REVISION],
+ [17, NULL_REVISION],
+ [18, NULL_REVISION],
+ [19, 11],
+ [20, NULL_REVISION],
+ [21, NULL_REVISION],
+ [22, NULL_REVISION],
+ [23, NULL_REVISION],
+ [2, NULL_REVISION],
+ [3, NULL_REVISION],
+ [26, 24],
+ [27, NULL_REVISION],
+ [28, NULL_REVISION],
+ [12, NULL_REVISION],
+ [1, NULL_REVISION],
+ [1, 9],
+ [32, NULL_REVISION],
+ [33, NULL_REVISION],
+ [34, 31],
+ [35, NULL_REVISION],
+ [36, 26],
+ [37, NULL_REVISION],
+ [38, NULL_REVISION],
+ [39, NULL_REVISION],
+ [40, NULL_REVISION],
+ [41, NULL_REVISION],
+ [42, 26],
+ [0, NULL_REVISION],
+ [44, NULL_REVISION],
+ [45, 4],
+ [40, NULL_REVISION],
+ [47, NULL_REVISION],
+ [36, 0],
+ [49, NULL_REVISION],
+ [NULL_REVISION, NULL_REVISION],
+ [51, NULL_REVISION],
+ [52, NULL_REVISION],
+ [53, NULL_REVISION],
+ [14, NULL_REVISION],
+ [55, NULL_REVISION],
+ [15, NULL_REVISION],
+ [23, NULL_REVISION],
+ [58, NULL_REVISION],
+ [59, NULL_REVISION],
+ [2, NULL_REVISION],
+ [61, 59],
+ [62, NULL_REVISION],
+ [63, NULL_REVISION],
+ [NULL_REVISION, NULL_REVISION],
+ [65, NULL_REVISION],
+ [66, NULL_REVISION],
+ [67, NULL_REVISION],
+ [68, NULL_REVISION],
+ [37, 28],
+ [69, 25],
+ [71, NULL_REVISION],
+ [72, NULL_REVISION],
+ [50, 2],
+ [74, NULL_REVISION],
+ [12, NULL_REVISION],
+ [18, NULL_REVISION],
+ [77, NULL_REVISION],
+ [78, NULL_REVISION],
+ [79, NULL_REVISION],
+ [43, 33],
+ [81, NULL_REVISION],
+ [82, NULL_REVISION],
+ [83, NULL_REVISION],
+ [84, 45],
+ [85, NULL_REVISION],
+ [86, NULL_REVISION],
+ [NULL_REVISION, NULL_REVISION],
+ [88, NULL_REVISION],
+ [NULL_REVISION, NULL_REVISION],
+ [76, 83],
+ [44, NULL_REVISION],
+ [92, NULL_REVISION],
+ [93, NULL_REVISION],
+ [9, NULL_REVISION],
+ [95, 67],
+ [96, NULL_REVISION],
+ [97, NULL_REVISION],
+ [NULL_REVISION, NULL_REVISION],
+ ];
+ let problem_rev = 28 as Revision;
+ let problem_base = 70 as Revision;
+ // making the problem obvious: problem_rev is a parent of problem_base
+ assert_eq!(graph.parents(problem_base).unwrap()[1], problem_rev);
+
+ let mut missing_ancestors: MissingAncestors<VecGraph> =
+ MissingAncestors::new(
+ graph,
+ [60, 26, 70, 3, 96, 19, 98, 49, 97, 47, 1, 6]
+ .iter()
+ .cloned(),
+ );
+ assert!(missing_ancestors.bases.contains(&problem_base));
+
+ let mut revs: HashSet<Revision> =
+ [4, 12, 41, 28, 68, 38, 1, 30, 56, 44]
+ .iter()
+ .cloned()
+ .collect();
+ missing_ancestors.remove_ancestors_from(&mut revs).unwrap();
+ assert!(!revs.contains(&problem_rev));
+ }
+
}
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/rust/hg-core/src/dagops.rs Fri Jan 18 13:28:22 2019 -0500
@@ -0,0 +1,136 @@
+// dagops.rs
+//
+// Copyright 2019 Georges Racinet <georges.racinet@octobus.net>
+//
+// This software may be used and distributed according to the terms of the
+// GNU General Public License version 2 or any later version.
+
+//! Miscellaneous DAG operations
+//!
+//! # Terminology
+//! - By *relative heads* of a collection of revision numbers (`Revision`),
+//! we mean those revisions that have no children among the collection.
+//! - Similarly, by *relative roots* of a collection of `Revision`, we mean
+//! those whose parents, if any, don't belong to the collection.
+use super::{Graph, GraphError, Revision, NULL_REVISION};
+use std::collections::HashSet;
+
+fn remove_parents(
+ graph: &impl Graph,
+ rev: Revision,
+ set: &mut HashSet<Revision>,
+) -> Result<(), GraphError> {
+ for parent in graph.parents(rev)?.iter() {
+ if *parent != NULL_REVISION {
+ set.remove(parent);
+ }
+ }
+ Ok(())
+}
+
+/// Relative heads out of some revisions, passed as an iterator.
+///
+/// These heads are defined as those revisions that have no children
+/// among those emitted by the iterator.
+///
+/// # Performance notes
+/// Internally, this clones the iterator, and builds a `HashSet` out of it.
+///
+/// This function takes an `Iterator` instead of `impl IntoIterator` to
+/// guarantee that cloning the iterator doesn't result in cloning the full
+/// construct it comes from.
+pub fn heads<'a>(
+ graph: &impl Graph,
+ iter_revs: impl Clone + Iterator<Item = &'a Revision>,
+) -> Result<HashSet<Revision>, GraphError> {
+ let mut heads: HashSet<Revision> = iter_revs.clone().cloned().collect();
+ heads.remove(&NULL_REVISION);
+ for rev in iter_revs {
+ remove_parents(graph, *rev, &mut heads)?;
+ }
+ Ok(heads)
+}
+
+/// Retain in `revs` only its relative heads.
+///
+/// This is an in-place operation, so that control of the incoming
+/// set is left to the caller.
+/// - a direct Python binding would probably need to build its own `HashSet`
+/// from an incoming iterable, even if its sole purpose is to extract the
+/// heads.
+/// - a Rust caller can decide whether cloning beforehand is appropriate
+///
+/// # Performance notes
+/// Internally, this function will store a full copy of `revs` in a `Vec`.
+pub fn retain_heads(
+ graph: &impl Graph,
+ revs: &mut HashSet<Revision>,
+) -> Result<(), GraphError> {
+ revs.remove(&NULL_REVISION);
+ // we need to construct an iterable copy of revs to avoid iterating
+ // while mutating
+ let as_vec: Vec<Revision> = revs.iter().cloned().collect();
+ for rev in as_vec {
+ remove_parents(graph, rev, revs)?;
+ }
+ Ok(())
+}
+
+#[cfg(test)]
+mod tests {
+
+ use super::*;
+ use crate::testing::SampleGraph;
+
+ /// Apply `retain_heads()` to the given slice and return as a sorted `Vec`
+ fn retain_heads_sorted(
+ graph: &impl Graph,
+ revs: &[Revision],
+ ) -> Result<Vec<Revision>, GraphError> {
+ let mut revs: HashSet<Revision> = revs.iter().cloned().collect();
+ retain_heads(graph, &mut revs)?;
+ let mut as_vec: Vec<Revision> = revs.iter().cloned().collect();
+ as_vec.sort();
+ Ok(as_vec)
+ }
+
+ #[test]
+ fn test_retain_heads() -> Result<(), GraphError> {
+ assert_eq!(retain_heads_sorted(&SampleGraph, &[4, 5, 6])?, vec![5, 6]);
+ assert_eq!(
+ retain_heads_sorted(&SampleGraph, &[4, 1, 6, 12, 0])?,
+ vec![1, 6, 12]
+ );
+ assert_eq!(
+ retain_heads_sorted(&SampleGraph, &[1, 2, 3, 4, 5, 6, 7, 8, 9])?,
+ vec![3, 5, 8, 9]
+ );
+ Ok(())
+ }
+
+ /// Apply `heads()` to the given slice and return as a sorted `Vec`
+ fn heads_sorted(
+ graph: &impl Graph,
+ revs: &[Revision],
+ ) -> Result<Vec<Revision>, GraphError> {
+ let heads = heads(graph, revs.iter())?;
+ let mut as_vec: Vec<Revision> = heads.iter().cloned().collect();
+ as_vec.sort();
+ Ok(as_vec)
+ }
+
+ #[test]
+ fn test_heads() -> Result<(), GraphError> {
+ assert_eq!(heads_sorted(&SampleGraph, &[4, 5, 6])?, vec![5, 6]);
+ assert_eq!(
+ heads_sorted(&SampleGraph, &[4, 1, 6, 12, 0])?,
+ vec![1, 6, 12]
+ );
+ assert_eq!(
+ heads_sorted(&SampleGraph, &[1, 2, 3, 4, 5, 6, 7, 8, 9])?,
+ vec![3, 5, 8, 9]
+ );
+ Ok(())
+ }
+
+}
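The terminology section above defines *relative roots*, but the module only implements heads so far. As an illustration of the symmetry, a hypothetical `roots` counterpart (not part of this patch) could be sketched along the same lines:

```rust
use std::collections::HashSet;

use hg::{Graph, GraphError, Revision, NULL_REVISION};

/// Hypothetical counterpart of `heads`: the relative roots of `revs`,
/// i.e. those revisions with no parent inside the collection.
pub fn roots<G: Graph>(
    graph: &G,
    revs: &HashSet<Revision>,
) -> Result<HashSet<Revision>, GraphError> {
    let mut roots = HashSet::new();
    for &rev in revs {
        if rev == NULL_REVISION {
            continue;
        }
        // a relative root has no parent belonging to the collection
        if graph
            .parents(rev)?
            .iter()
            .all(|p| *p == NULL_REVISION || !revs.contains(p))
        {
            roots.insert(rev);
        }
    }
    Ok(roots)
}
```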
--- a/rust/hg-core/src/lib.rs Wed Jan 09 20:00:35 2019 -0800
+++ b/rust/hg-core/src/lib.rs Fri Jan 18 13:28:22 2019 -0500
@@ -3,7 +3,10 @@
// This software may be used and distributed according to the terms of the
// GNU General Public License version 2 or any later version.
mod ancestors;
-pub use ancestors::AncestorsIterator;
+pub mod dagops;
+pub use ancestors::{AncestorsIterator, LazyAncestors, MissingAncestors};
+#[cfg(test)]
+pub mod testing;
/// Mercurial revision numbers
///
@@ -15,7 +18,10 @@
/// The simplest expression of what we need of Mercurial DAGs.
pub trait Graph {
- fn parents(&self, Revision) -> Result<(Revision, Revision), GraphError>;
+ /// Return the two parents of the given `Revision`.
+ ///
+ /// Each of the parents can be independently `NULL_REVISION`
+ fn parents(&self, Revision) -> Result<[Revision; 2], GraphError>;
}
#[derive(Clone, Debug, PartialEq)]
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/rust/hg-core/src/testing.rs Fri Jan 18 13:28:22 2019 -0500
@@ -0,0 +1,72 @@
+// testing.rs
+//
+// Copyright 2018 Georges Racinet <georges.racinet@octobus.net>
+//
+// This software may be used and distributed according to the terms of the
+// GNU General Public License version 2 or any later version.
+
+use crate::{Graph, GraphError, Revision, NULL_REVISION};
+
+/// A stub `Graph`, same as the one from `test-ancestor.py`
+///
+/// o 13
+/// |
+/// | o 12
+/// | |
+/// | | o 11
+/// | | |\
+/// | | | | o 10
+/// | | | | |
+/// | o---+ | 9
+/// | | | | |
+/// o | | | | 8
+/// / / / /
+/// | | o | 7
+/// | | | |
+/// o---+ | 6
+/// / / /
+/// | | o 5
+/// | |/
+/// | o 4
+/// | |
+/// o | 3
+/// | |
+/// | o 2
+/// |/
+/// o 1
+/// |
+/// o 0
+#[derive(Clone, Debug)]
+pub struct SampleGraph;
+
+impl Graph for SampleGraph {
+ fn parents(&self, rev: Revision) -> Result<[Revision; 2], GraphError> {
+ match rev {
+ 0 => Ok([NULL_REVISION, NULL_REVISION]),
+ 1 => Ok([0, NULL_REVISION]),
+ 2 => Ok([1, NULL_REVISION]),
+ 3 => Ok([1, NULL_REVISION]),
+ 4 => Ok([2, NULL_REVISION]),
+ 5 => Ok([4, NULL_REVISION]),
+ 6 => Ok([4, NULL_REVISION]),
+ 7 => Ok([4, NULL_REVISION]),
+ 8 => Ok([NULL_REVISION, NULL_REVISION]),
+ 9 => Ok([6, 7]),
+ 10 => Ok([5, NULL_REVISION]),
+ 11 => Ok([3, 7]),
+ 12 => Ok([9, NULL_REVISION]),
+ 13 => Ok([8, NULL_REVISION]),
+ r => Err(GraphError::ParentOutOfRange(r)),
+ }
+ }
+}
+
+// A Graph represented by a vector whose indices are revisions and whose
+// values are the parents of those revisions
+pub type VecGraph = Vec<[Revision; 2]>;
+
+impl Graph for VecGraph {
+ fn parents(&self, rev: Revision) -> Result<[Revision; 2], GraphError> {
+ Ok(self[rev as usize])
+ }
+}
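As an illustration of the `VecGraph` encoding (indices are revisions, values their parents; the tiny merge DAG below is made up for the example):

```rust
use hg::testing::VecGraph; // test-only module; assumed importable here
use hg::{Graph, NULL_REVISION};

fn main() {
    // revs 0 and 1 are roots, rev 2 merges them
    let graph: VecGraph = vec![
        [NULL_REVISION, NULL_REVISION], // parents of rev 0
        [NULL_REVISION, NULL_REVISION], // parents of rev 1
        [0, 1],                         // rev 2 is a merge of 0 and 1
    ];
    assert_eq!(graph.parents(2).unwrap(), [0, 1]);
    assert_eq!(graph.parents(0).unwrap(), [NULL_REVISION, NULL_REVISION]);
}
```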
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/rust/hg-cpython/Cargo.toml Fri Jan 18 13:28:22 2019 -0500
@@ -0,0 +1,34 @@
+[package]
+name = "hg-cpython"
+version = "0.1.0"
+authors = ["Georges Racinet <gracinet@anybox.fr>"]
+
+[lib]
+name='rusthg'
+crate-type = ["cdylib"]
+
+[features]
+default = ["python27"]
+
+python27 = ["cpython/python27-sys",
+ "cpython/extension-module-2-7",
+ "python27-sys",
+ ]
+
+python3 = ["python3-sys", "cpython/python3-sys", "cpython/extension-module"]
+
+[dependencies]
+hg-core = { path = "../hg-core" }
+libc = '*'
+
+[dependencies.cpython]
+version = "*"
+default-features = false
+
+[dependencies.python27-sys]
+version = "0.2.1"
+optional = true
+
+[dependencies.python3-sys]
+version = "0.2.1"
+optional = true
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/rust/hg-cpython/rustfmt.toml Fri Jan 18 13:28:22 2019 -0500
@@ -0,0 +1,3 @@
+max_width = 79
+wrap_comments = true
+error_on_line_overflow = true
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/rust/hg-cpython/src/ancestors.rs Fri Jan 18 13:28:22 2019 -0500
@@ -0,0 +1,238 @@
+// ancestors.rs
+//
+// Copyright 2018 Georges Racinet <gracinet@anybox.fr>
+//
+// This software may be used and distributed according to the terms of the
+// GNU General Public License version 2 or any later version.
+
+//! Bindings for the `hg::ancestors` module provided by the
+//! `hg-core` crate. From Python, this will be seen as `rustext.ancestor`
+//! and can be used as a replacement for the pure `ancestor` Python module.
+//!
+//! # Classes visible from Python:
+//! - [`LazyAncestors`] is the Rust implementation of
+//! `mercurial.ancestor.lazyancestors`. The only difference is that it is
+//! instantiated with a C `parsers.index` instance instead of a parents
+//! function.
+//!
+//! - [`MissingAncestors`] is the Rust implementation of
+//! `mercurial.ancestor.incrementalmissingancestors`.
+//!
+//! API differences:
+//! + it is instantiated with a C `parsers.index`
+//! instance instead of a parents function.
+//! + `MissingAncestors.bases` is a method returning a tuple instead of
+//! a set-valued attribute. We could return a Python set easily if our
+//! [PySet PR](https://github.com/dgrunwald/rust-cpython/pull/165)
+//! is accepted.
+//!
+//! - [`AncestorsIterator`] is the Rust counterpart of the
+//! `ancestor._lazyancestorsiter` Python generator. From Python, instances of
+//! this should be mainly obtained by calling `iter()` on a [`LazyAncestors`]
+//! instance.
+//!
+//! [`LazyAncestors`]: struct.LazyAncestors.html
+//! [`MissingAncestors`]: struct.MissingAncestors.html
+//! [`AncestorsIterator`]: struct.AncestorsIterator.html
+use crate::conversion::rev_pyiter_collect;
+use cindex::Index;
+use cpython::{
+ ObjectProtocol, PyClone, PyDict, PyList, PyModule, PyObject, PyResult,
+ PyTuple, Python, PythonObject, ToPyObject,
+};
+use exceptions::GraphError;
+use hg::Revision;
+use hg::{
+ AncestorsIterator as CoreIterator, LazyAncestors as CoreLazy,
+ MissingAncestors as CoreMissing,
+};
+use std::cell::RefCell;
+use std::collections::HashSet;
+
+py_class!(pub class AncestorsIterator |py| {
+ data inner: RefCell<Box<CoreIterator<Index>>>;
+
+ def __next__(&self) -> PyResult<Option<Revision>> {
+ match self.inner(py).borrow_mut().next() {
+ Some(Err(e)) => Err(GraphError::pynew(py, e)),
+ None => Ok(None),
+ Some(Ok(r)) => Ok(Some(r)),
+ }
+ }
+
+ def __contains__(&self, rev: Revision) -> PyResult<bool> {
+ self.inner(py).borrow_mut().contains(rev)
+ .map_err(|e| GraphError::pynew(py, e))
+ }
+
+ def __iter__(&self) -> PyResult<Self> {
+ Ok(self.clone_ref(py))
+ }
+
+ def __new__(_cls, index: PyObject, initrevs: PyObject, stoprev: Revision,
+ inclusive: bool) -> PyResult<AncestorsIterator> {
+ let initvec: Vec<Revision> = rev_pyiter_collect(py, &initrevs)?;
+ let ait = CoreIterator::new(
+ Index::new(py, index)?,
+ initvec,
+ stoprev,
+ inclusive,
+ )
+ .map_err(|e| GraphError::pynew(py, e))?;
+ AncestorsIterator::from_inner(py, ait)
+ }
+
+});
+
+impl AncestorsIterator {
+ pub fn from_inner(py: Python, ait: CoreIterator<Index>) -> PyResult<Self> {
+ Self::create_instance(py, RefCell::new(Box::new(ait)))
+ }
+}
+
+/// Copy and convert a `HashSet<Revision>` into a Python set
+///
+/// This will probably become useless once `PySet` support lands in
+/// `rust-cpython`.
+///
+/// This builds a Python tuple, then calls Python's "set()" on it
+fn py_set(py: Python, set: &HashSet<Revision>) -> PyResult<PyObject> {
+ let as_vec: Vec<PyObject> = set
+ .iter()
+ .map(|rev| rev.to_py_object(py).into_object())
+ .collect();
+ let as_pytuple = PyTuple::new(py, as_vec.as_slice());
+
+ let locals = PyDict::new(py);
+ locals.set_item(py, "obj", as_pytuple.to_py_object(py))?;
+ py.eval("set(obj)", None, Some(&locals))
+}
+
+py_class!(pub class LazyAncestors |py| {
+ data inner: RefCell<Box<CoreLazy<Index>>>;
+
+ def __contains__(&self, rev: Revision) -> PyResult<bool> {
+ self.inner(py)
+ .borrow_mut()
+ .contains(rev)
+ .map_err(|e| GraphError::pynew(py, e))
+ }
+
+ def __iter__(&self) -> PyResult<AncestorsIterator> {
+ AncestorsIterator::from_inner(py, self.inner(py).borrow().iter())
+ }
+
+ def __bool__(&self) -> PyResult<bool> {
+ Ok(!self.inner(py).borrow().is_empty())
+ }
+
+ def __new__(_cls, index: PyObject, initrevs: PyObject, stoprev: Revision,
+ inclusive: bool) -> PyResult<Self> {
+ let initvec: Vec<Revision> = rev_pyiter_collect(py, &initrevs)?;
+
+ let lazy =
+ CoreLazy::new(Index::new(py, index)?, initvec, stoprev, inclusive)
+ .map_err(|e| GraphError::pynew(py, e))?;
+
+ Self::create_instance(py, RefCell::new(Box::new(lazy)))
+ }
+
+});
+
+py_class!(pub class MissingAncestors |py| {
+ data inner: RefCell<Box<CoreMissing<Index>>>;
+
+ def __new__(_cls, index: PyObject, bases: PyObject) -> PyResult<MissingAncestors> {
+ let bases_vec: Vec<Revision> = rev_pyiter_collect(py, &bases)?;
+ let inner = CoreMissing::new(Index::new(py, index)?, bases_vec);
+ MissingAncestors::create_instance(py, RefCell::new(Box::new(inner)))
+ }
+
+ def hasbases(&self) -> PyResult<bool> {
+ Ok(self.inner(py).borrow().has_bases())
+ }
+
+ def addbases(&self, bases: PyObject) -> PyResult<PyObject> {
+ let mut inner = self.inner(py).borrow_mut();
+ let bases_vec: Vec<Revision> = rev_pyiter_collect(py, &bases)?;
+ inner.add_bases(bases_vec);
+ // the cpython doc has examples with PyResult<()>, but that errors
+ // with "the trait `cpython::ToPyObject` is not implemented for `()`",
+ // so let's return an explicit None
+ Ok(py.None())
+ }
+
+ def bases(&self) -> PyResult<PyObject> {
+ py_set(py, self.inner(py).borrow().get_bases())
+ }
+
+ def basesheads(&self) -> PyResult<PyObject> {
+ let inner = self.inner(py).borrow();
+ py_set(py, &inner.bases_heads().map_err(|e| GraphError::pynew(py, e))?)
+ }
+
+ def removeancestorsfrom(&self, revs: PyObject) -> PyResult<PyObject> {
+ let mut inner = self.inner(py).borrow_mut();
+ // this is very lame: we convert to a Rust set, update it in place
+ // and then convert back to Python, only to have Python remove the
+ // excess (thankfully, Python is happy with a list or even an iterator)
+ // Possible leads to improve this:
+ // - have CoreMissing instead emit the revisions to discard
+ // - define a trait for sets of revisions in the core and implement
+ // it for a Python set rewrapped with the GIL marker
+ let mut revs_pyset: HashSet<Revision> = rev_pyiter_collect(py, &revs)?;
+ inner.remove_ancestors_from(&mut revs_pyset)
+ .map_err(|e| GraphError::pynew(py, e))?;
+
+ // convert as Python list
+ let mut remaining_pyint_vec: Vec<PyObject> = Vec::with_capacity(
+ revs_pyset.len());
+ for rev in revs_pyset {
+ remaining_pyint_vec.push(rev.to_py_object(py).into_object());
+ }
+ let remaining_pylist = PyList::new(py, remaining_pyint_vec.as_slice());
+ revs.call_method(py, "intersection_update", (remaining_pylist, ), None)
+ }
+
+ def missingancestors(&self, revs: PyObject) -> PyResult<PyList> {
+ let mut inner = self.inner(py).borrow_mut();
+ let revs_vec: Vec<Revision> = rev_pyiter_collect(py, &revs)?;
+ let missing_vec = match inner.missing_ancestors(revs_vec) {
+ Ok(missing) => missing,
+ Err(e) => {
+ return Err(GraphError::pynew(py, e));
+ }
+ };
+ // convert as Python list
+ let mut missing_pyint_vec: Vec<PyObject> = Vec::with_capacity(
+ missing_vec.len());
+ for rev in missing_vec {
+ missing_pyint_vec.push(rev.to_py_object(py).into_object());
+ }
+ Ok(PyList::new(py, missing_pyint_vec.as_slice()))
+ }
+});
+
+/// Create the module, with __package__ given from parent
+pub fn init_module(py: Python, package: &str) -> PyResult<PyModule> {
+ let dotted_name = &format!("{}.ancestor", package);
+ let m = PyModule::new(py, dotted_name)?;
+ m.add(py, "__package__", package)?;
+ m.add(
+ py,
+ "__doc__",
+ "Generic DAG ancestor algorithms - Rust implementation",
+ )?;
+ m.add_class::<AncestorsIterator>(py)?;
+ m.add_class::<LazyAncestors>(py)?;
+ m.add_class::<MissingAncestors>(py)?;
+
+ let sys = PyModule::import(py, "sys")?;
+ let sys_modules: PyDict = sys.get(py, "modules")?.extract(py)?;
+ sys_modules.set_item(py, dotted_name, &m)?;
+ // Example C code (see pyexpat.c and import.c) will "give away the
+ // reference", but we won't because it will be consumed once the
+ // Rust PyObject is dropped.
+ Ok(m)
+}
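Each `py_class!` above stores its core object in a `RefCell<Box<...>>` because the macro only hands out shared references to data members; interior mutability is what restores `&mut` access under the GIL. A minimal pure-Rust sketch of that borrow pattern (the names are made up):

```rust
use std::cell::RefCell;

struct Wrapper {
    // shared handle, mutable contents: what py_class! data members need
    inner: RefCell<Box<Vec<i32>>>,
}

fn main() {
    let w = Wrapper {
        inner: RefCell::new(Box::new(vec![1, 2])),
    };
    // borrow_mut() yields &mut through a shared reference, enforcing the
    // borrow rules at runtime (panic on conflict) instead of compile time
    w.inner.borrow_mut().push(3);
    assert_eq!(w.inner.borrow().len(), 3);
}
```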
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/rust/hg-cpython/src/cindex.rs Fri Jan 18 13:28:22 2019 -0500
@@ -0,0 +1,130 @@
+// cindex.rs
+//
+// Copyright 2018 Georges Racinet <gracinet@anybox.fr>
+//
+// This software may be used and distributed according to the terms of the
+// GNU General Public License version 2 or any later version.
+
+//! Bindings to use the Index defined by the parsers C extension
+//!
+//! Ideally, we should use an Index entirely implemented in Rust,
+//! but this will take some time to get there.
+#[cfg(feature = "python27")]
+extern crate python27_sys as python_sys;
+#[cfg(feature = "python3")]
+extern crate python3_sys as python_sys;
+
+use self::python_sys::PyCapsule_Import;
+use cpython::{PyClone, PyErr, PyObject, PyResult, Python};
+use hg::{Graph, GraphError, Revision};
+use libc::c_int;
+use std::ffi::CStr;
+use std::mem::transmute;
+
+type IndexParentsFn = unsafe extern "C" fn(
+ index: *mut python_sys::PyObject,
+ rev: c_int,
+ ps: *mut [c_int; 2],
+) -> c_int;
+
+/// A `Graph` backed by objects and functions from revlog.c
+///
+/// This implementation of the `Graph` trait relies on (pointers to)
+/// - the C index object (`index` member)
+/// - the `index_get_parents()` function (`parents` member)
+///
+/// # Safety
+///
+/// The C index itself is mutable, and this Rust exposition is **not
+/// protected by the GIL**, meaning that this construct isn't safe with respect
+/// to Python threads.
+///
+/// All callers of this `Index` must acquire the GIL and must not release it
+/// while working.
+///
+/// # TODO find a solution to make it GIL-safe again.
+///
+/// This is non-trivial, and can wait until we have a clearer picture with
+/// more Rust Mercurial constructs.
+///
+/// One possibility would be to have a `GILProtectedIndex` wrapper enclosing
+/// a `Python<'p>` marker and have it be the one implementing the
+/// `Graph` trait, but this would mean the `Graph` implementor would be
+/// likely to change between subsequent method invocations of the `hg-core`
+/// objects (a serious change to the `hg-core` API):
+/// either exposing ways to mutate the `Graph`, or making it a non-persistent
+/// parameter in the relevant methods that need one.
+///
+/// Another possibility would be to introduce an abstract lock handle into
+/// the core API, that would be tied to `GILGuard` / `Python<'p>`
+/// in the case of the `cpython` crate bindings yet could leave room for other
+/// mechanisms in other contexts.
+pub struct Index {
+ index: PyObject,
+ parents: IndexParentsFn,
+}
+
+impl Index {
+ pub fn new(py: Python, index: PyObject) -> PyResult<Self> {
+ Ok(Index {
+ index: index,
+ parents: decapsule_parents_fn(py)?,
+ })
+ }
+}
+
+impl Clone for Index {
+ fn clone(&self) -> Self {
+ let guard = Python::acquire_gil();
+ Index {
+ index: self.index.clone_ref(guard.python()),
+ parents: self.parents.clone(),
+ }
+ }
+}
+
+impl Graph for Index {
+ /// wrap a call to the C extern parents function
+ fn parents(&self, rev: Revision) -> Result<[Revision; 2], GraphError> {
+ let mut res: [c_int; 2] = [0; 2];
+ let code = unsafe {
+ (self.parents)(
+ self.index.as_ptr(),
+ rev as c_int,
+ &mut res as *mut [c_int; 2],
+ )
+ };
+ match code {
+ 0 => Ok(res),
+ _ => Err(GraphError::ParentOutOfRange(rev)),
+ }
+ }
+}
+
+/// Return the `index_get_parents` function of the parsers C Extension module.
+///
+/// A pointer to the function is stored in the `parsers` module as a
+/// standard [Python capsule](https://docs.python.org/2/c-api/capsule.html).
+///
+/// This function retrieves the capsule and casts the function pointer
+///
+/// Casting function pointers is one of the rare legitimate use cases of
+/// `mem::transmute()` (see
+/// https://doc.rust-lang.org/std/mem/fn.transmute.html).
+/// It is inappropriate for architectures where
+/// function and data pointer sizes differ (so-called "Harvard
+/// architectures"), but these are nowadays mostly DSPs
+/// and microcontrollers, hence out of our scope.
+fn decapsule_parents_fn(py: Python) -> PyResult<IndexParentsFn> {
+ unsafe {
+ let caps_name = CStr::from_bytes_with_nul_unchecked(
+ b"mercurial.cext.parsers.index_get_parents_CAPI\0",
+ );
+ let from_caps = PyCapsule_Import(caps_name.as_ptr(), 0);
+ if from_caps.is_null() {
+ return Err(PyErr::fetch(py));
+ }
+ Ok(transmute(from_caps))
+ }
+}
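For readers unfamiliar with the capsule pattern, here is a self-contained sketch of the opaque-pointer-to-function-pointer cast that `decapsule_parents_fn` performs; the `add` function merely stands in for the capsule contents and is purely illustrative:

```rust
use std::ffi::c_void;
use std::mem::transmute;

type BinOp = unsafe extern "C" fn(i32, i32) -> i32;

unsafe extern "C" fn add(a: i32, b: i32) -> i32 {
    a + b
}

fn main() {
    // stand-in for what PyCapsule_Import returns: an opaque pointer
    let raw: *const c_void = add as *const c_void;
    // the rare legitimate transmute: opaque pointer -> function pointer
    let f: BinOp = unsafe { transmute(raw) };
    assert_eq!(unsafe { f(2, 3) }, 5);
}
```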
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/rust/hg-cpython/src/conversion.rs Fri Jan 18 13:28:22 2019 -0500
@@ -0,0 +1,28 @@
+// conversion.rs
+//
+// Copyright 2019 Georges Racinet <georges.racinet@octobus.net>
+//
+// This software may be used and distributed according to the terms of the
+// GNU General Public License version 2 or any later version.
+
+//! Conversion utilities for the Python bindings of the `hg-core` crate,
+//! shared by the modules of the `rustext` package.
+
+use cpython::{ObjectProtocol, PyObject, PyResult, Python};
+use hg::Revision;
+use std::iter::FromIterator;
+
+/// Utility function to convert a Python iterable into various collections
+///
+/// We need this in particular to feed to various methods of inner objects
+/// with `impl IntoIterator<Item=Revision>` arguments, because
+/// a `PyErr` can arise at each step of iteration, whereas these methods
+/// expect iterables over `Revision`, not over some `Result<Revision, PyErr>`
+pub fn rev_pyiter_collect<C>(py: Python, revs: &PyObject) -> PyResult<C>
+where
+ C: FromIterator<Revision>,
+{
+ revs.iter(py)?
+ .map(|r| r.and_then(|o| o.extract::<Revision>(py)))
+ .collect()
+}
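The reason one helper can feed `Vec`, `HashSet`, or any other collection is Rust's `FromIterator` implementation for `Result`, which short-circuits on the first error. A pure-Rust analogue, with no Python involved and made-up names:

```rust
use std::collections::HashSet;
use std::iter::FromIterator;

// fallible items, like those produced when iterating a Python object
fn collect_revs<C: FromIterator<i32>>(
    it: impl Iterator<Item = Result<i32, String>>,
) -> Result<C, String> {
    // collect() over Result items stops at the first Err
    it.collect()
}

fn main() -> Result<(), String> {
    let input = vec![Ok(1), Ok(2), Ok(3)];
    let as_vec: Vec<i32> = collect_revs(input.clone().into_iter())?;
    let as_set: HashSet<i32> = collect_revs(input.into_iter())?;
    assert_eq!(as_vec, vec![1, 2, 3]);
    assert!(as_set.contains(&2));
    Ok(())
}
```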
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/rust/hg-cpython/src/exceptions.rs Fri Jan 18 13:28:22 2019 -0500
@@ -0,0 +1,27 @@
+// exceptions.rs
+//
+// Copyright 2018 Georges Racinet <gracinet@anybox.fr>
+//
+// This software may be used and distributed according to the terms of the
+// GNU General Public License version 2 or any later version.
+
+//! Bindings for Rust errors
+//!
+//! [`GraphError`] exposes `hg::GraphError` as a subclass of `ValueError`
+//!
+//! [`GraphError`]: struct.GraphError.html
+use cpython::exc::ValueError;
+use cpython::{PyErr, Python};
+use hg;
+
+py_exception!(rustext, GraphError, ValueError);
+
+impl GraphError {
+ pub fn pynew(py: Python, inner: hg::GraphError) -> PyErr {
+ match inner {
+ hg::GraphError::ParentOutOfRange(r) => {
+ GraphError::new(py, ("ParentOutOfRange", r))
+ }
+ }
+ }
+}
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/rust/hg-cpython/src/lib.rs Fri Jan 18 13:28:22 2019 -0500
@@ -0,0 +1,43 @@
+// lib.rs
+//
+// Copyright 2018 Georges Racinet <gracinet@anybox.fr>
+//
+// This software may be used and distributed according to the terms of the
+// GNU General Public License version 2 or any later version.
+
+//! Python bindings of `hg-core` objects using the `cpython` crate.
+//! Once compiled, the resulting single shared library object can be placed in
+//! the `mercurial` package directly as `rustext.so` or `rustext.dll`.
+//! It holds several modules, so that from the point of view of Python,
+//! it behaves as the `cext` package.
+//!
+//! Example:
+//!
+//! ```text
+//! >>> from mercurial.rustext import ancestor
+//! >>> ancestor.__doc__
+//! 'Generic DAG ancestor algorithms - Rust implementation'
+//! ```
+
+#[macro_use]
+extern crate cpython;
+extern crate hg;
+extern crate libc;
+
+pub mod ancestors;
+mod cindex;
+mod conversion;
+pub mod exceptions;
+
+py_module_initializer!(rustext, initrustext, PyInit_rustext, |py, m| {
+ m.add(
+ py,
+ "__doc__",
+ "Mercurial core concepts - Rust implementation",
+ )?;
+
+ let dotted_name: String = m.get(py, "__name__")?.extract(py)?;
+ m.add(py, "ancestor", ancestors::init_module(py, &dotted_name)?)?;
+ m.add(py, "GraphError", py.get_type::<exceptions::GraphError>())?;
+ Ok(())
+});
--- a/rust/hg-direct-ffi/src/ancestors.rs Wed Jan 09 20:00:35 2019 -0800
+++ b/rust/hg-direct-ffi/src/ancestors.rs Fri Jan 18 13:28:22 2019 -0500
@@ -16,9 +16,14 @@
use std::slice;
type IndexPtr = *mut c_void;
-type IndexParentsFn =
- unsafe extern "C" fn(index: IndexPtr, rev: ssize_t, ps: *mut [c_int; 2], max_rev: c_int)
- -> c_int;
+
+extern "C" {
+ fn HgRevlogIndex_GetParents(
+ op: IndexPtr,
+ rev: c_int,
+ parents: *mut [c_int; 2],
+ ) -> c_int;
+}
/// A Graph backed up by objects and functions from revlog.c
///
@@ -27,26 +32,24 @@
/// - the `index_get_parents()` function (`parents` member)
pub struct Index {
index: IndexPtr,
- parents: IndexParentsFn,
}
impl Index {
- pub fn new(index: IndexPtr, parents: IndexParentsFn) -> Self {
+ pub fn new(index: IndexPtr) -> Self {
Index {
index: index,
- parents: parents,
}
}
}
impl Graph for Index {
/// wrap a call to the C extern parents function
- fn parents(&self, rev: Revision) -> Result<(Revision, Revision), GraphError> {
+ fn parents(&self, rev: Revision) -> Result<[Revision; 2], GraphError> {
let mut res: [c_int; 2] = [0; 2];
let code =
- unsafe { (self.parents)(self.index, rev as ssize_t, &mut res as *mut [c_int; 2], rev) };
+ unsafe { HgRevlogIndex_GetParents(self.index, rev, &mut res as *mut [c_int; 2]) };
match code {
- 0 => Ok((res[0], res[1])),
+ 0 => Ok(res),
_ => Err(GraphError::ParentOutOfRange(rev)),
}
}
@@ -59,7 +62,6 @@
#[no_mangle]
pub extern "C" fn rustlazyancestors_init(
index: IndexPtr,
- parents: IndexParentsFn,
initrevslen: ssize_t,
initrevs: *mut c_long,
stoprev: c_long,
@@ -68,7 +70,7 @@
assert!(initrevslen >= 0);
unsafe {
raw_init(
- Index::new(index, parents),
+ Index::new(index),
initrevslen as usize,
initrevs,
stoprev,
@@ -137,7 +139,11 @@
#[inline]
fn raw_next<G: Graph>(raw: *mut AncestorsIterator<G>) -> c_long {
let as_ref = unsafe { &mut *raw };
- as_ref.next().unwrap_or(NULL_REVISION) as c_long
+ let rev = match as_ref.next() {
+ Some(Ok(rev)) => rev,
+ Some(Err(_)) | None => NULL_REVISION,
+ };
+ rev as c_long
}
#[no_mangle]
@@ -155,10 +161,10 @@
target: c_long,
) -> c_int {
let as_ref = unsafe { &mut *raw };
- if as_ref.contains(target as Revision) {
- return 1;
+ match as_ref.contains(target as Revision) {
+ Ok(r) => r as c_int,
+ Err(_) => -1,
}
- 0
}
#[cfg(test)]
@@ -170,10 +176,10 @@
struct Stub;
impl Graph for Stub {
- fn parents(&self, r: Revision) -> Result<(Revision, Revision), GraphError> {
+ fn parents(&self, r: Revision) -> Result<[Revision; 2], GraphError> {
match r {
25 => Err(GraphError::ParentOutOfRange(25)),
- _ => Ok((1, 2)),
+ _ => Ok([1, 2]),
}
}
}
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/rust/hgcli/Cargo.lock Fri Jan 18 13:28:22 2019 -0500
@@ -0,0 +1,136 @@
+[[package]]
+name = "aho-corasick"
+version = "0.5.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "memchr 0.1.11 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
+[[package]]
+name = "cpython"
+version = "0.1.0"
+source = "git+https://github.com/indygreg/rust-cpython.git?rev=c90d65cf84abfffce7ef54476bbfed56017a2f52#c90d65cf84abfffce7ef54476bbfed56017a2f52"
+dependencies = [
+ "libc 0.2.45 (registry+https://github.com/rust-lang/crates.io-index)",
+ "num-traits 0.1.43 (registry+https://github.com/rust-lang/crates.io-index)",
+ "python27-sys 0.1.2 (git+https://github.com/indygreg/rust-cpython.git?rev=c90d65cf84abfffce7ef54476bbfed56017a2f52)",
+]
+
+[[package]]
+name = "hgcli"
+version = "0.1.0"
+dependencies = [
+ "cpython 0.1.0 (git+https://github.com/indygreg/rust-cpython.git?rev=c90d65cf84abfffce7ef54476bbfed56017a2f52)",
+ "libc 0.2.45 (registry+https://github.com/rust-lang/crates.io-index)",
+ "python27-sys 0.1.2 (git+https://github.com/indygreg/rust-cpython.git?rev=c90d65cf84abfffce7ef54476bbfed56017a2f52)",
+]
+
+[[package]]
+name = "kernel32-sys"
+version = "0.2.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "winapi 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)",
+ "winapi-build 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
+[[package]]
+name = "libc"
+version = "0.2.45"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+
+[[package]]
+name = "memchr"
+version = "0.1.11"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "libc 0.2.45 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
+[[package]]
+name = "num-traits"
+version = "0.1.43"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "num-traits 0.2.6 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
+[[package]]
+name = "num-traits"
+version = "0.2.6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+
+[[package]]
+name = "python27-sys"
+version = "0.1.2"
+source = "git+https://github.com/indygreg/rust-cpython.git?rev=c90d65cf84abfffce7ef54476bbfed56017a2f52#c90d65cf84abfffce7ef54476bbfed56017a2f52"
+dependencies = [
+ "libc 0.2.45 (registry+https://github.com/rust-lang/crates.io-index)",
+ "regex 0.1.80 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
+[[package]]
+name = "regex"
+version = "0.1.80"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "aho-corasick 0.5.3 (registry+https://github.com/rust-lang/crates.io-index)",
+ "memchr 0.1.11 (registry+https://github.com/rust-lang/crates.io-index)",
+ "regex-syntax 0.3.9 (registry+https://github.com/rust-lang/crates.io-index)",
+ "thread_local 0.2.7 (registry+https://github.com/rust-lang/crates.io-index)",
+ "utf8-ranges 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
+[[package]]
+name = "regex-syntax"
+version = "0.3.9"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+
+[[package]]
+name = "thread-id"
+version = "2.0.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "kernel32-sys 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)",
+ "libc 0.2.45 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
+[[package]]
+name = "thread_local"
+version = "0.2.7"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "thread-id 2.0.0 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
+[[package]]
+name = "utf8-ranges"
+version = "0.1.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+
+[[package]]
+name = "winapi"
+version = "0.2.8"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+
+[[package]]
+name = "winapi-build"
+version = "0.1.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+
+[metadata]
+"checksum aho-corasick 0.5.3 (registry+https://github.com/rust-lang/crates.io-index)" = "ca972c2ea5f742bfce5687b9aef75506a764f61d37f8f649047846a9686ddb66"
+"checksum cpython 0.1.0 (git+https://github.com/indygreg/rust-cpython.git?rev=c90d65cf84abfffce7ef54476bbfed56017a2f52)" = "<none>"
+"checksum kernel32-sys 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)" = "7507624b29483431c0ba2d82aece8ca6cdba9382bff4ddd0f7490560c056098d"
+"checksum libc 0.2.45 (registry+https://github.com/rust-lang/crates.io-index)" = "2d2857ec59fadc0773853c664d2d18e7198e83883e7060b63c924cb077bd5c74"
+"checksum memchr 0.1.11 (registry+https://github.com/rust-lang/crates.io-index)" = "d8b629fb514376c675b98c1421e80b151d3817ac42d7c667717d282761418d20"
+"checksum num-traits 0.1.43 (registry+https://github.com/rust-lang/crates.io-index)" = "92e5113e9fd4cc14ded8e499429f396a20f98c772a47cc8622a736e1ec843c31"
+"checksum num-traits 0.2.6 (registry+https://github.com/rust-lang/crates.io-index)" = "0b3a5d7cc97d6d30d8b9bc8fa19bf45349ffe46241e8816f50f62f6d6aaabee1"
+"checksum python27-sys 0.1.2 (git+https://github.com/indygreg/rust-cpython.git?rev=c90d65cf84abfffce7ef54476bbfed56017a2f52)" = "<none>"
+"checksum regex 0.1.80 (registry+https://github.com/rust-lang/crates.io-index)" = "4fd4ace6a8cf7860714a2c2280d6c1f7e6a413486c13298bbc86fd3da019402f"
+"checksum regex-syntax 0.3.9 (registry+https://github.com/rust-lang/crates.io-index)" = "f9ec002c35e86791825ed294b50008eea9ddfc8def4420124fbc6b08db834957"
+"checksum thread-id 2.0.0 (registry+https://github.com/rust-lang/crates.io-index)" = "a9539db560102d1cef46b8b78ce737ff0bb64e7e18d35b2a5688f7d097d0ff03"
+"checksum thread_local 0.2.7 (registry+https://github.com/rust-lang/crates.io-index)" = "8576dbbfcaef9641452d5cf0df9b0e7eeab7694956dd33bb61515fb8f18cfdd5"
+"checksum utf8-ranges 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)" = "a1ca13c08c41c9c3e04224ed9ff80461d97e121589ff27c753a16cb10830ae0f"
+"checksum winapi 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)" = "167dc9d6949a9b857f3451275e911c3f44255842c1f7a76f33c55103a909087a"
+"checksum winapi-build 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "2d315eee3b34aca4797b2da6b13ed88266e6d612562a0c46390af8299fc699bc"
--- a/setup.py Wed Jan 09 20:00:35 2019 -0800
+++ b/setup.py Fri Jan 18 13:28:22 2019 -0500
@@ -132,9 +132,14 @@
ispypy = "PyPy" in sys.version
-iswithrustextensions = 'HGWITHRUSTEXT' in os.environ
+hgrustext = os.environ.get('HGWITHRUSTEXT')
+# TODO record it for proper rebuild upon changes
+# (see mercurial/__modulepolicy__.py)
+if hgrustext != 'cpython' and hgrustext is not None:
+ hgrustext = 'direct-ffi'
import ctypes
+import errno
import stat, subprocess, time
import re
import shutil
@@ -289,14 +294,17 @@
hgenv['LANGUAGE'] = 'C'
hgcmd = ['hg']
# Run a simple "hg log" command just to see if using hg from the user's
- # path works and can successfully interact with this repository.
+ # path works and can successfully interact with this repository. Windows
+ # gives precedence to hg.exe in the current directory, so fall back to the
+ # python invocation of local hg, where pythonXY.dll can always be found.
check_cmd = ['log', '-r.', '-Ttest']
- try:
- retcode, out, err = runcmd(hgcmd + check_cmd, hgenv)
- except EnvironmentError:
- retcode = -1
- if retcode == 0 and not filterhgerr(err):
- return hgcommand(hgcmd, hgenv)
+ if os.name != 'nt':
+ try:
+ retcode, out, err = runcmd(hgcmd + check_cmd, hgenv)
+ except EnvironmentError:
+ retcode = -1
+ if retcode == 0 and not filterhgerr(err):
+ return hgcommand(hgcmd, hgenv)
# Fall back to trying the local hg installation.
hgenv = localhgenv()
@@ -457,11 +465,18 @@
return build_ext.initialize_options(self)
def build_extensions(self):
+ ruststandalones = [e for e in self.extensions
+ if isinstance(e, RustStandaloneExtension)]
+ self.extensions = [e for e in self.extensions
+ if e not in ruststandalones]
# Filter out zstd if disabled via argument.
if not self.zstd:
self.extensions = [e for e in self.extensions
if e.name != 'mercurial.zstd']
+ for rustext in ruststandalones:
+ rustext.build('' if self.inplace else self.build_lib)
+
return build_ext.build_extensions(self)
def build_extension(self, ext):
@@ -831,8 +846,6 @@
'mercurial.pure',
'mercurial.thirdparty',
'mercurial.thirdparty.attr',
- 'mercurial.thirdparty.cbor',
- 'mercurial.thirdparty.cbor.cbor2',
'mercurial.thirdparty.zope',
'mercurial.thirdparty.zope.interface',
'mercurial.utils',
@@ -844,6 +857,7 @@
'hgext.infinitepush',
'hgext.highlight',
'hgext.largefiles', 'hgext.lfs', 'hgext.narrow',
+ 'hgext.remotefilelog',
'hgext.zeroconf', 'hgext3rd',
'hgdemandimport']
if sys.version_info[0] == 2:
@@ -897,21 +911,22 @@
'mercurial/thirdparty/xdiff/xutils.h',
]
+class RustCompilationError(CCompilerError):
+ """Exception class for Rust compilation errors."""
+
class RustExtension(Extension):
- """A C Extension, conditionnally enhanced with Rust code.
-
- if iswithrustextensions is False, does nothing else than plain Extension
+ """Base classes for concrete Rust Extension classes.
"""
rusttargetdir = os.path.join('rust', 'target', 'release')
- def __init__(self, mpath, sources, rustlibname, subcrate, **kw):
+ def __init__(self, mpath, sources, rustlibname, subcrate,
+ py3_features=None, **kw):
Extension.__init__(self, mpath, sources, **kw)
- if not iswithrustextensions:
+ if hgrustext is None:
return
srcdir = self.rustsrcdir = os.path.join('rust', subcrate)
- self.libraries.append(rustlibname)
- self.extra_compile_args.append('-DWITH_RUST')
+ self.py3_features = py3_features
# adding Rust source and control files to depends so that the extension
# gets rebuilt if they've changed
@@ -925,7 +940,7 @@
if os.path.splitext(fname)[1] == '.rs')
def rustbuild(self):
- if not iswithrustextensions:
+ if hgrustext is None:
return
env = os.environ.copy()
if 'HGTEST_RESTOREENV' in env:
@@ -941,10 +956,59 @@
import pwd
env['HOME'] = pwd.getpwuid(os.getuid()).pw_dir
- subprocess.check_call(['cargo', 'build', '-vv', '--release'],
- env=env, cwd=self.rustsrcdir)
+ cargocmd = ['cargo', 'build', '-vv', '--release']
+ if sys.version_info[0] == 3 and self.py3_features is not None:
+ cargocmd.extend(('--features', self.py3_features,
+ '--no-default-features'))
+ try:
+ subprocess.check_call(cargocmd, env=env, cwd=self.rustsrcdir)
+ except OSError as exc:
+ if exc.errno == errno.ENOENT:
+ raise RustCompilationError("Cargo not found")
+ elif exc.errno == errno.EACCES:
+ raise RustCompilationError(
+ "Cargo found, but permisssion to execute it is denied")
+ else:
+ raise
+ except subprocess.CalledProcessError:
+ raise RustCompilationError(
+ "Cargo failed. Working directory: %r, "
+ "command: %r, environment: %r" % (self.rustsrcdir, cmd, env))
+
+class RustEnhancedExtension(RustExtension):
+ """A C Extension, conditionally enhanced with Rust code.
+
+ If the HGWITHRUSTEXT environment variable is set to anything other
+ than 'cpython', the Rust sources get compiled and linked within the
+ C target shared library object.
+ """
+
+ def __init__(self, mpath, sources, rustlibname, subcrate, **kw):
+ RustExtension.__init__(self, mpath, sources, rustlibname, subcrate,
+ **kw)
+ if hgrustext != 'direct-ffi':
+ return
+ self.extra_compile_args.append('-DWITH_RUST')
+ self.libraries.append(rustlibname)
self.library_dirs.append(self.rusttargetdir)
+class RustStandaloneExtension(RustExtension):
+
+ def __init__(self, pydottedname, rustcrate, dylibname, **kw):
+ RustExtension.__init__(self, pydottedname, [], dylibname, rustcrate,
+ **kw)
+ self.dylibname = dylibname
+
+ def build(self, target_dir):
+ self.rustbuild()
+ target = [target_dir]
+ target.extend(self.name.split('.'))
+ ext = '.so' # TODO Unix only
+ target[-1] += ext
+ shutil.copy2(os.path.join(self.rusttargetdir, self.dylibname + ext),
+ os.path.join(*target))
+
+
extmodules = [
Extension('mercurial.cext.base85', ['mercurial/cext/base85.c'],
include_dirs=common_include_dirs,
@@ -957,19 +1021,20 @@
'mercurial/cext/mpatch.c'],
include_dirs=common_include_dirs,
depends=common_depends),
- RustExtension('mercurial.cext.parsers', ['mercurial/cext/charencode.c',
- 'mercurial/cext/dirs.c',
- 'mercurial/cext/manifest.c',
- 'mercurial/cext/parsers.c',
- 'mercurial/cext/pathencode.c',
- 'mercurial/cext/revlog.c'],
- 'hgdirectffi',
- 'hg-direct-ffi',
- include_dirs=common_include_dirs,
- depends=common_depends + ['mercurial/cext/charencode.h',
- 'mercurial/rust/src/lib.rs',
- 'mercurial/rust/src/ancestors.rs',
- 'mercurial/rust/src/cpython.rs']),
+ RustEnhancedExtension(
+ 'mercurial.cext.parsers', ['mercurial/cext/charencode.c',
+ 'mercurial/cext/dirs.c',
+ 'mercurial/cext/manifest.c',
+ 'mercurial/cext/parsers.c',
+ 'mercurial/cext/pathencode.c',
+ 'mercurial/cext/revlog.c'],
+ 'hgdirectffi',
+ 'hg-direct-ffi',
+ include_dirs=common_include_dirs,
+ depends=common_depends + ['mercurial/cext/charencode.h',
+ 'mercurial/cext/revlog.h',
+ 'rust/hg-core/src/ancestors.rs',
+ 'rust/hg-core/src/lib.rs']),
Extension('mercurial.cext.osutil', ['mercurial/cext/osutil.c'],
include_dirs=common_include_dirs,
extra_compile_args=osutil_cflags,
@@ -983,6 +1048,13 @@
['hgext/fsmonitor/pywatchman/bser.c']),
]
+if hgrustext == 'cpython':
+ extmodules.append(
+ RustStandaloneExtension('mercurial.rustext', 'hg-cpython', 'librusthg',
+ py3_features='python3')
+ )
+
+
sys.path.insert(0, 'contrib/python-zstandard')
import setup_zstd
extmodules.append(setup_zstd.get_c_extension(
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/tests/.balto.toml Fri Jan 18 13:28:22 2019 -0500
@@ -0,0 +1,13 @@
+# Balto (https://bitbucket.org/lothiraldan/balto/src) is a test orchestrator
+# which is compatible with all test runners that can emit the LITF
+# (https://github.com/lothiraldan/litf) test format.
+
+# The plugin for the Mercurial test runner is mercurial-litf
+# (https://pypi.org/project/mercurial-litf/). Make sure to follow the
+# installation and configuration instructions here:
+# https://bitbucket.org/lothiraldan/mercurial_litf/src/default/
+
+# You can launch Balto with `balto /path/to/mercurial/tests/`
+
+name = "Mercurial Test Suite"
+tool = "mercurial"
--- a/tests/blackbox-readonly-dispatch.py Wed Jan 09 20:00:35 2019 -0800
+++ b/tests/blackbox-readonly-dispatch.py Fri Jan 18 13:28:22 2019 -0500
@@ -2,6 +2,7 @@
import os
from mercurial import (
dispatch,
+ extensions,
ui as uimod,
)
@@ -11,6 +12,7 @@
Prints command and result value, but does not handle quoting.
"""
ui = uimod.ui.load()
+ extensions.populateui(ui)
ui.status(b"running: %s\n" % cmd)
req = dispatch.request(cmd.split(), ui)
result = dispatch.dispatch(req)
--- a/tests/check-gendoc Wed Jan 09 20:00:35 2019 -0800
+++ b/tests/check-gendoc Fri Jan 18 13:28:22 2019 -0500
@@ -4,8 +4,8 @@
echo ".. -*- coding: utf-8 -*-" > gendoc.txt
echo "" >> gendoc.txt
-LANGUAGE=$1 python "$TESTDIR/../doc/gendoc.py" >> gendoc.txt 2> /dev/null || exit
+LANGUAGE=$1 "$PYTHON" "$TESTDIR/../doc/gendoc.py" >> gendoc.txt 2> /dev/null || exit
echo "checking for parse errors"
-python "$TESTDIR/../doc/docchecker" gendoc.txt
-python "$TESTDIR/../doc/runrst" html gendoc.txt /dev/null
+"$PYTHON" "$TESTDIR/../doc/docchecker" gendoc.txt
+"$PYTHON" "$TESTDIR/../doc/runrst" html gendoc.txt /dev/null
--- a/tests/common-pattern.py Wed Jan 09 20:00:35 2019 -0800
+++ b/tests/common-pattern.py Fri Jan 18 13:28:22 2019 -0500
@@ -143,6 +143,12 @@
br'Only one usage of each socket address'
br' \(protocol/network address/port\) is normally permitted',
),
+ br'$EADDRNOTAVAIL$': (
+ # strerror()
+ br'Cannot assign requested address',
+
+ # FormatMessage(WSAEADDRNOTAVAIL)
+ )
}
for replace, msgs in _errors.items():
--- a/tests/hghave.py Wed Jan 09 20:00:35 2019 -0800
+++ b/tests/hghave.py Fri Jan 18 13:28:22 2019 -0500
@@ -16,6 +16,16 @@
"false": (lambda: False, "nail clipper"),
}
+try:
+ import msvcrt
+ msvcrt.setmode(sys.stdout.fileno(), os.O_BINARY)
+ msvcrt.setmode(sys.stderr.fileno(), os.O_BINARY)
+except ImportError:
+ pass
+
+stdout = getattr(sys.stdout, 'buffer', sys.stdout)
+stderr = getattr(sys.stderr, 'buffer', sys.stderr)
+
if sys.version_info[0] >= 3:
def _bytespath(p):
if p is None:
@@ -90,11 +100,12 @@
result = checkfeatures(features)
for missing in result['missing']:
- sys.stderr.write('skipped: unknown feature: %s\n' % missing)
+ stderr.write(('skipped: unknown feature: %s\n'
+ % missing).encode('utf-8'))
for msg in result['skipped']:
- sys.stderr.write('skipped: %s\n' % msg)
+ stderr.write(('skipped: %s\n' % msg).encode('utf-8'))
for msg in result['error']:
- sys.stderr.write('%s\n' % msg)
+ stderr.write(('%s\n' % msg).encode('utf-8'))
if result['missing']:
sys.exit(2)
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/tests/ls-l.py Fri Jan 18 13:28:22 2019 -0500
@@ -0,0 +1,37 @@
+#!/usr/bin/env python
+
+# like ls -l, but do not print date, user, or non-common mode bits, to avoid
+# using globs in tests.
+from __future__ import absolute_import, print_function
+
+import os
+import stat
+import sys
+
+def modestr(st):
+ mode = st.st_mode
+ result = ''
+ if mode & stat.S_IFDIR:
+ result += 'd'
+ else:
+ result += '-'
+ for owner in ['USR', 'GRP', 'OTH']:
+ for action in ['R', 'W', 'X']:
+ if mode & getattr(stat, 'S_I%s%s' % (action, owner)):
+ result += action.lower()
+ else:
+ result += '-'
+ return result
+
+def sizestr(st):
+ if st.st_mode & stat.S_IFREG:
+ return '%7d' % st.st_size
+ else:
+ # do not show size for non-regular files
+ return ' ' * 7
+
+os.chdir((sys.argv[1:] + ['.'])[0])
+
+for name in sorted(os.listdir('.')):
+ st = os.stat(name)
+ print('%s %s %s' % (modestr(st), sizestr(st), name))
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/tests/mockmakedate.py Fri Jan 18 13:28:22 2019 -0500
@@ -0,0 +1,21 @@
+# mock out util.makedate() to supply testable values
+
+from __future__ import absolute_import
+
+import os
+
+from mercurial import pycompat
+from mercurial.utils import dateutil
+
+def mockmakedate():
+ filename = os.path.join(os.environ['TESTTMP'], 'testtime')
+ try:
+ with open(filename, 'rb') as timef:
+ time = float(timef.read()) + 1
+ except IOError:
+ time = 0.0
+ with open(filename, 'wb') as timef:
+ timef.write(pycompat.bytestr(time))
+ return (time, 0)
+
+dateutil.makedate = mockmakedate
--- a/tests/narrow-library.sh Wed Jan 09 20:00:35 2019 -0800
+++ b/tests/narrow-library.sh Fri Jan 18 13:28:22 2019 -0500
@@ -2,7 +2,7 @@
[extensions]
narrow=
[ui]
-ssh=python "$TESTDIR/dummyssh"
+ssh=python "$RUNTESTDIR/dummyssh"
[experimental]
changegroup3 = True
EOF
--- a/tests/notcapable Wed Jan 09 20:00:35 2019 -0800
+++ b/tests/notcapable Fri Jan 18 13:28:22 2019 -0500
@@ -7,7 +7,7 @@
cat > notcapable-$CAP.py << EOF
from mercurial import extensions, localrepo, repository
-def extsetup():
+def extsetup(ui):
extensions.wrapfunction(repository.peer, 'capable', wrapcapable)
extensions.wrapfunction(localrepo.localrepository, 'peer', wrappeer)
def wrapcapable(orig, self, name, *args, **kwargs):
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/tests/phabricator/phabsend-create-public.json Fri Jan 18 13:28:22 2019 -0500
@@ -0,0 +1,957 @@
+{
+ "interactions": [
+ {
+ "response": {
+ "headers": {
+ "content-type": [
+ "application/json"
+ ],
+ "date": [
+ "Thu, 10 Jan 2019 04:08:24 GMT"
+ ],
+ "x-content-type-options": [
+ "nosniff"
+ ],
+ "cache-control": [
+ "no-store"
+ ],
+ "server": [
+ "Apache/2.4.10 (Debian)"
+ ],
+ "x-xss-protection": [
+ "1; mode=block"
+ ],
+ "x-frame-options": [
+ "Deny"
+ ],
+ "expires": [
+ "Sat, 01 Jan 2000 00:00:00 GMT"
+ ],
+ "set-cookie": [
+ "phsid=A%2F5faozuxaekgxbyfcc43jvrcmbr5fscbki46mvcvl; expires=Tue, 09-Jan-2024 04:08:24 GMT; Max-Age=157680000; path=/; domain=phab.mercurial-scm.org; secure; httponly"
+ ],
+ "transfer-encoding": [
+ "chunked"
+ ],
+ "strict-transport-security": [
+ "max-age=0; includeSubdomains; preload"
+ ]
+ },
+ "status": {
+ "message": "OK",
+ "code": 200
+ },
+ "body": {
+ "string": "{\"result\":{\"data\":[{\"id\":2,\"type\":\"REPO\",\"phid\":\"PHID-REPO-bvunnehri4u2isyr7bc3\",\"fields\":{\"name\":\"Mercurial\",\"vcs\":\"hg\",\"callsign\":\"HG\",\"shortName\":null,\"status\":\"active\",\"isImporting\":false,\"spacePHID\":null,\"dateCreated\":1498761653,\"dateModified\":1500403184,\"policy\":{\"view\":\"public\",\"edit\":\"admin\",\"diffusion.push\":\"users\"}},\"attachments\":{}}],\"maps\":{},\"query\":{\"queryKey\":null},\"cursor\":{\"limit\":100,\"after\":null,\"before\":null,\"order\":null}},\"error_code\":null,\"error_info\":null}"
+ }
+ },
+ "request": {
+ "method": "POST",
+ "uri": "https://phab.mercurial-scm.org//api/diffusion.repository.search",
+ "headers": {
+ "content-length": [
+ "79"
+ ],
+ "accept": [
+ "application/mercurial-0.1"
+ ],
+ "content-type": [
+ "application/x-www-form-urlencoded"
+ ],
+ "user-agent": [
+ "mercurial/proto-1.0 (Mercurial 4.8.2+682-e2cf04a597cc+20190109)"
+ ],
+ "host": [
+ "phab.mercurial-scm.org"
+ ]
+ },
+ "body": "constraints%5Bcallsigns%5D%5B0%5D=HG&api.token=cli-hahayouwish"
+ }
+ },
+ {
+ "response": {
+ "headers": {
+ "content-type": [
+ "application/json"
+ ],
+ "date": [
+ "Thu, 10 Jan 2019 04:08:25 GMT"
+ ],
+ "x-content-type-options": [
+ "nosniff"
+ ],
+ "cache-control": [
+ "no-store"
+ ],
+ "server": [
+ "Apache/2.4.10 (Debian)"
+ ],
+ "x-xss-protection": [
+ "1; mode=block"
+ ],
+ "x-frame-options": [
+ "Deny"
+ ],
+ "expires": [
+ "Sat, 01 Jan 2000 00:00:00 GMT"
+ ],
+ "set-cookie": [
+ "phsid=A%2Fkb72422mbpyuyoultl4hkizat6qscjgrl5hi6k2n; expires=Tue, 09-Jan-2024 04:08:25 GMT; Max-Age=157680000; path=/; domain=phab.mercurial-scm.org; secure; httponly"
+ ],
+ "transfer-encoding": [
+ "chunked"
+ ],
+ "strict-transport-security": [
+ "max-age=0; includeSubdomains; preload"
+ ]
+ },
+ "status": {
+ "message": "OK",
+ "code": 200
+ },
+ "body": {
+ "string": "{\"result\":{\"id\":13121,\"phid\":\"PHID-DIFF-xrku5f3mlveqr3hhj6a7\",\"uri\":\"https:\\/\\/phab.mercurial-scm.org\\/differential\\/diff\\/13121\\/\"},\"error_code\":null,\"error_info\":null}"
+ }
+ },
+ "request": {
+ "method": "POST",
+ "uri": "https://phab.mercurial-scm.org//api/differential.createrawdiff",
+ "headers": {
+ "content-length": [
+ "220"
+ ],
+ "accept": [
+ "application/mercurial-0.1"
+ ],
+ "content-type": [
+ "application/x-www-form-urlencoded"
+ ],
+ "user-agent": [
+ "mercurial/proto-1.0 (Mercurial 4.8.2+682-e2cf04a597cc+20190109)"
+ ],
+ "host": [
+ "phab.mercurial-scm.org"
+ ]
+ },
+ "body": "repositoryPHID=PHID-REPO-bvunnehri4u2isyr7bc3&diff=diff+--git+a%2Fbeta+b%2Fbeta%0A---+a%2Fbeta%0A%2B%2B%2B+b%2Fbeta%0A%40%40+-1%2C1+%2B1%2C1+%40%40%0A-beta%0A%2Bpublic+change%0A&api.token=cli-hahayouwish"
+ }
+ },
+ {
+ "response": {
+ "headers": {
+ "content-type": [
+ "application/json"
+ ],
+ "date": [
+ "Thu, 10 Jan 2019 04:08:25 GMT"
+ ],
+ "x-content-type-options": [
+ "nosniff"
+ ],
+ "cache-control": [
+ "no-store"
+ ],
+ "server": [
+ "Apache/2.4.10 (Debian)"
+ ],
+ "x-xss-protection": [
+ "1; mode=block"
+ ],
+ "x-frame-options": [
+ "Deny"
+ ],
+ "expires": [
+ "Sat, 01 Jan 2000 00:00:00 GMT"
+ ],
+ "set-cookie": [
+ "phsid=A%2Fpyr677mjsjvlsn3wwzl2iignpppablawwz7dn5ap; expires=Tue, 09-Jan-2024 04:08:25 GMT; Max-Age=157680000; path=/; domain=phab.mercurial-scm.org; secure; httponly"
+ ],
+ "transfer-encoding": [
+ "chunked"
+ ],
+ "strict-transport-security": [
+ "max-age=0; includeSubdomains; preload"
+ ]
+ },
+ "status": {
+ "message": "OK",
+ "code": 200
+ },
+ "body": {
+ "string": "{\"result\":null,\"error_code\":null,\"error_info\":null}"
+ }
+ },
+ "request": {
+ "method": "POST",
+ "uri": "https://phab.mercurial-scm.org//api/differential.setdiffproperty",
+ "headers": {
+ "content-length": [
+ "264"
+ ],
+ "accept": [
+ "application/mercurial-0.1"
+ ],
+ "content-type": [
+ "application/x-www-form-urlencoded"
+ ],
+ "user-agent": [
+ "mercurial/proto-1.0 (Mercurial 4.8.2+682-e2cf04a597cc+20190109)"
+ ],
+ "host": [
+ "phab.mercurial-scm.org"
+ ]
+ },
+ "body": "name=hg%3Ameta&api.token=cli-hahayouwish&data=%7B%22date%22%3A+%220+0%22%2C+%22user%22%3A+%22test%22%2C+%22node%22%3A+%22540a21d3fbeb7c56cafe726bba6cd9fdcc94f29c%22%2C+%22parent%22%3A+%22c2b605ada280b38c38031b5d31622869c72b0d8d%22%7D&diff_id=13121"
+ }
+ },
+ {
+ "response": {
+ "headers": {
+ "content-type": [
+ "application/json"
+ ],
+ "date": [
+ "Thu, 10 Jan 2019 04:08:26 GMT"
+ ],
+ "x-content-type-options": [
+ "nosniff"
+ ],
+ "cache-control": [
+ "no-store"
+ ],
+ "server": [
+ "Apache/2.4.10 (Debian)"
+ ],
+ "x-xss-protection": [
+ "1; mode=block"
+ ],
+ "x-frame-options": [
+ "Deny"
+ ],
+ "expires": [
+ "Sat, 01 Jan 2000 00:00:00 GMT"
+ ],
+ "set-cookie": [
+ "phsid=A%2Fegvbvujn6hykhurzyjtaq4xduxl6sz7gavenbcou; expires=Tue, 09-Jan-2024 04:08:26 GMT; Max-Age=157680000; path=/; domain=phab.mercurial-scm.org; secure; httponly"
+ ],
+ "transfer-encoding": [
+ "chunked"
+ ],
+ "strict-transport-security": [
+ "max-age=0; includeSubdomains; preload"
+ ]
+ },
+ "status": {
+ "message": "OK",
+ "code": 200
+ },
+ "body": {
+ "string": "{\"result\":null,\"error_code\":null,\"error_info\":null}"
+ }
+ },
+ "request": {
+ "method": "POST",
+ "uri": "https://phab.mercurial-scm.org//api/differential.setdiffproperty",
+ "headers": {
+ "content-length": [
+ "227"
+ ],
+ "accept": [
+ "application/mercurial-0.1"
+ ],
+ "content-type": [
+ "application/x-www-form-urlencoded"
+ ],
+ "user-agent": [
+ "mercurial/proto-1.0 (Mercurial 4.8.2+682-e2cf04a597cc+20190109)"
+ ],
+ "host": [
+ "phab.mercurial-scm.org"
+ ]
+ },
+ "body": "name=local%3Acommits&api.token=cli-hahayouwish&data=%7B%22540a21d3fbeb7c56cafe726bba6cd9fdcc94f29c%22%3A+%7B%22author%22%3A+%22test%22%2C+%22authorEmail%22%3A+%22test%22%2C+%22time%22%3A+0.0%7D%7D&diff_id=13121"
+ }
+ },
+ {
+ "response": {
+ "headers": {
+ "content-type": [
+ "application/json"
+ ],
+ "date": [
+ "Thu, 10 Jan 2019 04:08:26 GMT"
+ ],
+ "x-content-type-options": [
+ "nosniff"
+ ],
+ "cache-control": [
+ "no-store"
+ ],
+ "server": [
+ "Apache/2.4.10 (Debian)"
+ ],
+ "x-xss-protection": [
+ "1; mode=block"
+ ],
+ "x-frame-options": [
+ "Deny"
+ ],
+ "expires": [
+ "Sat, 01 Jan 2000 00:00:00 GMT"
+ ],
+ "set-cookie": [
+ "phsid=A%2Flbjzqvie4g24kmhnqws2bwhmeiijd3qvvkd22isg; expires=Tue, 09-Jan-2024 04:08:27 GMT; Max-Age=157680000; path=/; domain=phab.mercurial-scm.org; secure; httponly"
+ ],
+ "transfer-encoding": [
+ "chunked"
+ ],
+ "strict-transport-security": [
+ "max-age=0; includeSubdomains; preload"
+ ]
+ },
+ "status": {
+ "message": "OK",
+ "code": 200
+ },
+ "body": {
+ "string": "{\"result\":{\"errors\":[],\"fields\":{\"title\":\"create public change for phabricator testing\"},\"revisionIDFieldInfo\":{\"value\":null,\"validDomain\":\"https:\\/\\/phab.mercurial-scm.org\"}},\"error_code\":null,\"error_info\":null}"
+ }
+ },
+ "request": {
+ "method": "POST",
+ "uri": "https://phab.mercurial-scm.org//api/differential.parsecommitmessage",
+ "headers": {
+ "content-length": [
+ "94"
+ ],
+ "accept": [
+ "application/mercurial-0.1"
+ ],
+ "content-type": [
+ "application/x-www-form-urlencoded"
+ ],
+ "user-agent": [
+ "mercurial/proto-1.0 (Mercurial 4.8.2+682-e2cf04a597cc+20190109)"
+ ],
+ "host": [
+ "phab.mercurial-scm.org"
+ ]
+ },
+ "body": "corpus=create+public+change+for+phabricator+testing&api.token=cli-hahayouwish"
+ }
+ },
+ {
+ "response": {
+ "headers": {
+ "content-type": [
+ "application/json"
+ ],
+ "date": [
+ "Thu, 10 Jan 2019 04:08:27 GMT"
+ ],
+ "x-content-type-options": [
+ "nosniff"
+ ],
+ "cache-control": [
+ "no-store"
+ ],
+ "server": [
+ "Apache/2.4.10 (Debian)"
+ ],
+ "x-xss-protection": [
+ "1; mode=block"
+ ],
+ "x-frame-options": [
+ "Deny"
+ ],
+ "expires": [
+ "Sat, 01 Jan 2000 00:00:00 GMT"
+ ],
+ "set-cookie": [
+ "phsid=A%2Fkclyjmm2warvrxwksppx3qxupj4f72ejvxuavrn5; expires=Tue, 09-Jan-2024 04:08:27 GMT; Max-Age=157680000; path=/; domain=phab.mercurial-scm.org; secure; httponly"
+ ],
+ "transfer-encoding": [
+ "chunked"
+ ],
+ "strict-transport-security": [
+ "max-age=0; includeSubdomains; preload"
+ ]
+ },
+ "status": {
+ "message": "OK",
+ "code": 200
+ },
+ "body": {
+ "string": "{\"result\":{\"object\":{\"id\":5544,\"phid\":\"PHID-DREV-bwugldlyieuwzrk76xzy\"},\"transactions\":[{\"phid\":\"PHID-XACT-DREV-wojlvnhodzdoqh6\"},{\"phid\":\"PHID-XACT-DREV-ju3bw7rltmmwpbf\"},{\"phid\":\"PHID-XACT-DREV-2hwwi7dagftdp6q\"},{\"phid\":\"PHID-XACT-DREV-zfsyu5o7wkqzh6s\"},{\"phid\":\"PHID-XACT-DREV-srrkwmheqn6gssk\"}]},\"error_code\":null,\"error_info\":null}"
+ }
+ },
+ "request": {
+ "method": "POST",
+ "uri": "https://phab.mercurial-scm.org//api/differential.revision.edit",
+ "headers": {
+ "content-length": [
+ "253"
+ ],
+ "accept": [
+ "application/mercurial-0.1"
+ ],
+ "content-type": [
+ "application/x-www-form-urlencoded"
+ ],
+ "user-agent": [
+ "mercurial/proto-1.0 (Mercurial 4.8.2+682-e2cf04a597cc+20190109)"
+ ],
+ "host": [
+ "phab.mercurial-scm.org"
+ ]
+ },
+ "body": "transactions%5B0%5D%5Btype%5D=update&transactions%5B0%5D%5Bvalue%5D=PHID-DIFF-xrku5f3mlveqr3hhj6a7&transactions%5B1%5D%5Btype%5D=title&transactions%5B1%5D%5Bvalue%5D=create+public+change+for+phabricator+testing&api.token=cli-hahayouwish"
+ }
+ },
+ {
+ "response": {
+ "headers": {
+ "content-type": [
+ "application/json"
+ ],
+ "date": [
+ "Thu, 10 Jan 2019 04:08:28 GMT"
+ ],
+ "x-content-type-options": [
+ "nosniff"
+ ],
+ "cache-control": [
+ "no-store"
+ ],
+ "server": [
+ "Apache/2.4.10 (Debian)"
+ ],
+ "x-xss-protection": [
+ "1; mode=block"
+ ],
+ "x-frame-options": [
+ "Deny"
+ ],
+ "expires": [
+ "Sat, 01 Jan 2000 00:00:00 GMT"
+ ],
+ "set-cookie": [
+ "phsid=A%2Fbw4ordbzl7d4hcgyyxnoawhrfhycrvvkk6arnz5p; expires=Tue, 09-Jan-2024 04:08:28 GMT; Max-Age=157680000; path=/; domain=phab.mercurial-scm.org; secure; httponly"
+ ],
+ "transfer-encoding": [
+ "chunked"
+ ],
+ "strict-transport-security": [
+ "max-age=0; includeSubdomains; preload"
+ ]
+ },
+ "status": {
+ "message": "OK",
+ "code": 200
+ },
+ "body": {
+ "string": "{\"result\":{\"id\":13122,\"phid\":\"PHID-DIFF-iksauhhfhmxfjijyqxji\",\"uri\":\"https:\\/\\/phab.mercurial-scm.org\\/differential\\/diff\\/13122\\/\"},\"error_code\":null,\"error_info\":null}"
+ }
+ },
+ "request": {
+ "method": "POST",
+ "uri": "https://phab.mercurial-scm.org//api/differential.createrawdiff",
+ "headers": {
+ "content-length": [
+ "232"
+ ],
+ "accept": [
+ "application/mercurial-0.1"
+ ],
+ "content-type": [
+ "application/x-www-form-urlencoded"
+ ],
+ "user-agent": [
+ "mercurial/proto-1.0 (Mercurial 4.8.2+682-e2cf04a597cc+20190109)"
+ ],
+ "host": [
+ "phab.mercurial-scm.org"
+ ]
+ },
+ "body": "repositoryPHID=PHID-REPO-bvunnehri4u2isyr7bc3&diff=diff+--git+a%2Falpha+b%2Falpha%0A---+a%2Falpha%0A%2B%2B%2B+b%2Falpha%0A%40%40+-1%2C2+%2B1%2C1+%40%40%0A-alpha%0A-more%0A%2Bdraft+change%0A&api.token=cli-hahayouwish"
+ }
+ },
+ {
+ "response": {
+ "headers": {
+ "content-type": [
+ "application/json"
+ ],
+ "date": [
+ "Thu, 10 Jan 2019 04:08:29 GMT"
+ ],
+ "x-content-type-options": [
+ "nosniff"
+ ],
+ "cache-control": [
+ "no-store"
+ ],
+ "server": [
+ "Apache/2.4.10 (Debian)"
+ ],
+ "x-xss-protection": [
+ "1; mode=block"
+ ],
+ "x-frame-options": [
+ "Deny"
+ ],
+ "expires": [
+ "Sat, 01 Jan 2000 00:00:00 GMT"
+ ],
+ "set-cookie": [
+ "phsid=A%2Fgt3wmrrlkmpdhyaj5rsesxcwbabhpjlhoa6matcg; expires=Tue, 09-Jan-2024 04:08:29 GMT; Max-Age=157680000; path=/; domain=phab.mercurial-scm.org; secure; httponly"
+ ],
+ "transfer-encoding": [
+ "chunked"
+ ],
+ "strict-transport-security": [
+ "max-age=0; includeSubdomains; preload"
+ ]
+ },
+ "status": {
+ "message": "OK",
+ "code": 200
+ },
+ "body": {
+ "string": "{\"result\":null,\"error_code\":null,\"error_info\":null}"
+ }
+ },
+ "request": {
+ "method": "POST",
+ "uri": "https://phab.mercurial-scm.org//api/differential.setdiffproperty",
+ "headers": {
+ "content-length": [
+ "264"
+ ],
+ "accept": [
+ "application/mercurial-0.1"
+ ],
+ "content-type": [
+ "application/x-www-form-urlencoded"
+ ],
+ "user-agent": [
+ "mercurial/proto-1.0 (Mercurial 4.8.2+682-e2cf04a597cc+20190109)"
+ ],
+ "host": [
+ "phab.mercurial-scm.org"
+ ]
+ },
+ "body": "name=hg%3Ameta&api.token=cli-hahayouwish&data=%7B%22date%22%3A+%220+0%22%2C+%22user%22%3A+%22test%22%2C+%22node%22%3A+%226bca752686cd24e603094ef55574655c0017723a%22%2C+%22parent%22%3A+%22540a21d3fbeb7c56cafe726bba6cd9fdcc94f29c%22%7D&diff_id=13122"
+ }
+ },
+ {
+ "response": {
+ "headers": {
+ "content-type": [
+ "application/json"
+ ],
+ "date": [
+ "Thu, 10 Jan 2019 04:08:29 GMT"
+ ],
+ "x-content-type-options": [
+ "nosniff"
+ ],
+ "cache-control": [
+ "no-store"
+ ],
+ "server": [
+ "Apache/2.4.10 (Debian)"
+ ],
+ "x-xss-protection": [
+ "1; mode=block"
+ ],
+ "x-frame-options": [
+ "Deny"
+ ],
+ "expires": [
+ "Sat, 01 Jan 2000 00:00:00 GMT"
+ ],
+ "set-cookie": [
+ "phsid=A%2Fntcsqzh6pptdkfnebvmck6l3y3rrwxzotvsq4phl; expires=Tue, 09-Jan-2024 04:08:29 GMT; Max-Age=157680000; path=/; domain=phab.mercurial-scm.org; secure; httponly"
+ ],
+ "transfer-encoding": [
+ "chunked"
+ ],
+ "strict-transport-security": [
+ "max-age=0; includeSubdomains; preload"
+ ]
+ },
+ "status": {
+ "message": "OK",
+ "code": 200
+ },
+ "body": {
+ "string": "{\"result\":null,\"error_code\":null,\"error_info\":null}"
+ }
+ },
+ "request": {
+ "method": "POST",
+ "uri": "https://phab.mercurial-scm.org//api/differential.setdiffproperty",
+ "headers": {
+ "content-length": [
+ "227"
+ ],
+ "accept": [
+ "application/mercurial-0.1"
+ ],
+ "content-type": [
+ "application/x-www-form-urlencoded"
+ ],
+ "user-agent": [
+ "mercurial/proto-1.0 (Mercurial 4.8.2+682-e2cf04a597cc+20190109)"
+ ],
+ "host": [
+ "phab.mercurial-scm.org"
+ ]
+ },
+ "body": "name=local%3Acommits&api.token=cli-hahayouwish&data=%7B%226bca752686cd24e603094ef55574655c0017723a%22%3A+%7B%22author%22%3A+%22test%22%2C+%22authorEmail%22%3A+%22test%22%2C+%22time%22%3A+0.0%7D%7D&diff_id=13122"
+ }
+ },
+ {
+ "response": {
+ "headers": {
+ "content-type": [
+ "application/json"
+ ],
+ "date": [
+ "Thu, 10 Jan 2019 04:08:30 GMT"
+ ],
+ "x-content-type-options": [
+ "nosniff"
+ ],
+ "cache-control": [
+ "no-store"
+ ],
+ "server": [
+ "Apache/2.4.10 (Debian)"
+ ],
+ "x-xss-protection": [
+ "1; mode=block"
+ ],
+ "x-frame-options": [
+ "Deny"
+ ],
+ "expires": [
+ "Sat, 01 Jan 2000 00:00:00 GMT"
+ ],
+ "set-cookie": [
+ "phsid=A%2Fgturi5p5fz64q26mztdrzjldzynp62pp7opcxsnm; expires=Tue, 09-Jan-2024 04:08:30 GMT; Max-Age=157680000; path=/; domain=phab.mercurial-scm.org; secure; httponly"
+ ],
+ "transfer-encoding": [
+ "chunked"
+ ],
+ "strict-transport-security": [
+ "max-age=0; includeSubdomains; preload"
+ ]
+ },
+ "status": {
+ "message": "OK",
+ "code": 200
+ },
+ "body": {
+ "string": "{\"result\":{\"errors\":[],\"fields\":{\"title\":\"create draft change for phabricator testing\"},\"revisionIDFieldInfo\":{\"value\":null,\"validDomain\":\"https:\\/\\/phab.mercurial-scm.org\"}},\"error_code\":null,\"error_info\":null}"
+ }
+ },
+ "request": {
+ "method": "POST",
+ "uri": "https://phab.mercurial-scm.org//api/differential.parsecommitmessage",
+ "headers": {
+ "content-length": [
+ "93"
+ ],
+ "accept": [
+ "application/mercurial-0.1"
+ ],
+ "content-type": [
+ "application/x-www-form-urlencoded"
+ ],
+ "user-agent": [
+ "mercurial/proto-1.0 (Mercurial 4.8.2+682-e2cf04a597cc+20190109)"
+ ],
+ "host": [
+ "phab.mercurial-scm.org"
+ ]
+ },
+ "body": "corpus=create+draft+change+for+phabricator+testing&api.token=cli-hahayouwish"
+ }
+ },
+ {
+ "response": {
+ "headers": {
+ "content-type": [
+ "application/json"
+ ],
+ "date": [
+ "Thu, 10 Jan 2019 04:08:31 GMT"
+ ],
+ "x-content-type-options": [
+ "nosniff"
+ ],
+ "cache-control": [
+ "no-store"
+ ],
+ "server": [
+ "Apache/2.4.10 (Debian)"
+ ],
+ "x-xss-protection": [
+ "1; mode=block"
+ ],
+ "x-frame-options": [
+ "Deny"
+ ],
+ "expires": [
+ "Sat, 01 Jan 2000 00:00:00 GMT"
+ ],
+ "set-cookie": [
+ "phsid=A%2F4vyvyabatbn7y5bhav6nthgdt4mm6oeh6ybvnrl5; expires=Tue, 09-Jan-2024 04:08:31 GMT; Max-Age=157680000; path=/; domain=phab.mercurial-scm.org; secure; httponly"
+ ],
+ "transfer-encoding": [
+ "chunked"
+ ],
+ "strict-transport-security": [
+ "max-age=0; includeSubdomains; preload"
+ ]
+ },
+ "status": {
+ "message": "OK",
+ "code": 200
+ },
+ "body": {
+ "string": "{\"result\":{\"object\":{\"id\":5545,\"phid\":\"PHID-DREV-ga6i6vbmatvd2fszrr2o\"},\"transactions\":[{\"phid\":\"PHID-XACT-DREV-epqu5uekkf4ig67\"},{\"phid\":\"PHID-XACT-DREV-y3t5z573bwbqv7e\"},{\"phid\":\"PHID-XACT-DREV-dmjvlq7wngqgwxv\"},{\"phid\":\"PHID-XACT-DREV-rkm576j6wvji3ye\"},{\"phid\":\"PHID-XACT-DREV-mb7ttr44lno6j2w\"},{\"phid\":\"PHID-XACT-DREV-ma747d2dkzk3eun\"},{\"phid\":\"PHID-XACT-DREV-3u7lqg7mwxrix5w\"},{\"phid\":\"PHID-XACT-DREV-r33n73dqn7doz7b\"}]},\"error_code\":null,\"error_info\":null}"
+ }
+ },
+ "request": {
+ "method": "POST",
+ "uri": "https://phab.mercurial-scm.org//api/differential.revision.edit",
+ "headers": {
+ "content-length": [
+ "409"
+ ],
+ "accept": [
+ "application/mercurial-0.1"
+ ],
+ "content-type": [
+ "application/x-www-form-urlencoded"
+ ],
+ "user-agent": [
+ "mercurial/proto-1.0 (Mercurial 4.8.2+682-e2cf04a597cc+20190109)"
+ ],
+ "host": [
+ "phab.mercurial-scm.org"
+ ]
+ },
+ "body": "transactions%5B0%5D%5Btype%5D=update&transactions%5B0%5D%5Bvalue%5D=PHID-DIFF-iksauhhfhmxfjijyqxji&transactions%5B1%5D%5Btype%5D=summary&transactions%5B1%5D%5Bvalue%5D=Depends+on+D5544&transactions%5B2%5D%5Btype%5D=summary&transactions%5B2%5D%5Bvalue%5D=+&transactions%5B3%5D%5Btype%5D=title&transactions%5B3%5D%5Bvalue%5D=create+draft+change+for+phabricator+testing&api.token=cli-hahayouwish"
+ }
+ },
+ {
+ "response": {
+ "headers": {
+ "content-type": [
+ "application/json"
+ ],
+ "date": [
+ "Thu, 10 Jan 2019 04:08:32 GMT"
+ ],
+ "x-content-type-options": [
+ "nosniff"
+ ],
+ "cache-control": [
+ "no-store"
+ ],
+ "server": [
+ "Apache/2.4.10 (Debian)"
+ ],
+ "x-xss-protection": [
+ "1; mode=block"
+ ],
+ "x-frame-options": [
+ "Deny"
+ ],
+ "expires": [
+ "Sat, 01 Jan 2000 00:00:00 GMT"
+ ],
+ "set-cookie": [
+ "phsid=A%2Fvd66cz7uxztfwfapgqrlmfmoj7szo5wvwk7vqc2u; expires=Tue, 09-Jan-2024 04:08:32 GMT; Max-Age=157680000; path=/; domain=phab.mercurial-scm.org; secure; httponly"
+ ],
+ "transfer-encoding": [
+ "chunked"
+ ],
+ "strict-transport-security": [
+ "max-age=0; includeSubdomains; preload"
+ ]
+ },
+ "status": {
+ "message": "OK",
+ "code": 200
+ },
+ "body": {
+ "string": "{\"result\":[{\"id\":\"5545\",\"phid\":\"PHID-DREV-ga6i6vbmatvd2fszrr2o\",\"title\":\"create draft change for phabricator testing\",\"uri\":\"https:\\/\\/phab.mercurial-scm.org\\/D5545\",\"dateCreated\":\"1547093311\",\"dateModified\":\"1547093311\",\"authorPHID\":\"PHID-USER-tzhaient733lwrlbcag5\",\"status\":\"0\",\"statusName\":\"Needs Review\",\"properties\":[],\"branch\":null,\"summary\":\" \",\"testPlan\":\"\",\"lineCount\":\"3\",\"activeDiffPHID\":\"PHID-DIFF-iksauhhfhmxfjijyqxji\",\"diffs\":[\"13122\"],\"commits\":[],\"reviewers\":{\"PHID-PROJ-3dvcxzznrjru2xmmses3\":\"PHID-PROJ-3dvcxzznrjru2xmmses3\"},\"ccs\":[\"PHID-USER-q42dn7cc3donqriafhjx\"],\"hashes\":[],\"auxiliary\":{\"phabricator:projects\":[],\"phabricator:depends-on\":[\"PHID-DREV-bwugldlyieuwzrk76xzy\"]},\"repositoryPHID\":\"PHID-REPO-bvunnehri4u2isyr7bc3\",\"sourcePath\":null},{\"id\":\"5544\",\"phid\":\"PHID-DREV-bwugldlyieuwzrk76xzy\",\"title\":\"create public change for phabricator testing\",\"uri\":\"https:\\/\\/phab.mercurial-scm.org\\/D5544\",\"dateCreated\":\"1547093307\",\"dateModified\":\"1547093311\",\"authorPHID\":\"PHID-USER-tzhaient733lwrlbcag5\",\"status\":\"0\",\"statusName\":\"Needs Review\",\"properties\":[],\"branch\":null,\"summary\":\"\",\"testPlan\":\"\",\"lineCount\":\"2\",\"activeDiffPHID\":\"PHID-DIFF-xrku5f3mlveqr3hhj6a7\",\"diffs\":[\"13121\"],\"commits\":[],\"reviewers\":{\"PHID-PROJ-3dvcxzznrjru2xmmses3\":\"PHID-PROJ-3dvcxzznrjru2xmmses3\"},\"ccs\":[\"PHID-USER-q42dn7cc3donqriafhjx\"],\"hashes\":[],\"auxiliary\":{\"phabricator:projects\":[],\"phabricator:depends-on\":[]},\"repositoryPHID\":\"PHID-REPO-bvunnehri4u2isyr7bc3\",\"sourcePath\":null}],\"error_code\":null,\"error_info\":null}"
+ }
+ },
+ "request": {
+ "method": "POST",
+ "uri": "https://phab.mercurial-scm.org//api/differential.query",
+ "headers": {
+ "content-length": [
+ "74"
+ ],
+ "accept": [
+ "application/mercurial-0.1"
+ ],
+ "content-type": [
+ "application/x-www-form-urlencoded"
+ ],
+ "user-agent": [
+ "mercurial/proto-1.0 (Mercurial 4.8.2+682-e2cf04a597cc+20190109)"
+ ],
+ "host": [
+ "phab.mercurial-scm.org"
+ ]
+ },
+ "body": "ids%5B0%5D=5544&ids%5B1%5D=5545&api.token=cli-hahayouwish"
+ }
+ },
+ {
+ "response": {
+ "headers": {
+ "content-type": [
+ "application/json"
+ ],
+ "date": [
+ "Thu, 10 Jan 2019 04:08:32 GMT"
+ ],
+ "x-content-type-options": [
+ "nosniff"
+ ],
+ "cache-control": [
+ "no-store"
+ ],
+ "server": [
+ "Apache/2.4.10 (Debian)"
+ ],
+ "x-xss-protection": [
+ "1; mode=block"
+ ],
+ "x-frame-options": [
+ "Deny"
+ ],
+ "expires": [
+ "Sat, 01 Jan 2000 00:00:00 GMT"
+ ],
+ "set-cookie": [
+ "phsid=A%2Fbqbv2blmnjqe3a5qkpewf5wghxqwcuewjbgfrtq7; expires=Tue, 09-Jan-2024 04:08:32 GMT; Max-Age=157680000; path=/; domain=phab.mercurial-scm.org; secure; httponly"
+ ],
+ "transfer-encoding": [
+ "chunked"
+ ],
+ "strict-transport-security": [
+ "max-age=0; includeSubdomains; preload"
+ ]
+ },
+ "status": {
+ "message": "OK",
+ "code": 200
+ },
+ "body": {
+ "string": "{\"result\":null,\"error_code\":null,\"error_info\":null}"
+ }
+ },
+ "request": {
+ "method": "POST",
+ "uri": "https://phab.mercurial-scm.org//api/differential.setdiffproperty",
+ "headers": {
+ "content-length": [
+ "264"
+ ],
+ "accept": [
+ "application/mercurial-0.1"
+ ],
+ "content-type": [
+ "application/x-www-form-urlencoded"
+ ],
+ "user-agent": [
+ "mercurial/proto-1.0 (Mercurial 4.8.2+682-e2cf04a597cc+20190109)"
+ ],
+ "host": [
+ "phab.mercurial-scm.org"
+ ]
+ },
+ "body": "name=hg%3Ameta&api.token=cli-hahayouwish&data=%7B%22date%22%3A+%220+0%22%2C+%22user%22%3A+%22test%22%2C+%22node%22%3A+%22620a50fd6ed958bbee178052de67acc31dcac66e%22%2C+%22parent%22%3A+%22540a21d3fbeb7c56cafe726bba6cd9fdcc94f29c%22%7D&diff_id=13122"
+ }
+ },
+ {
+ "response": {
+ "headers": {
+ "content-type": [
+ "application/json"
+ ],
+ "date": [
+ "Thu, 10 Jan 2019 04:08:33 GMT"
+ ],
+ "x-content-type-options": [
+ "nosniff"
+ ],
+ "cache-control": [
+ "no-store"
+ ],
+ "server": [
+ "Apache/2.4.10 (Debian)"
+ ],
+ "x-xss-protection": [
+ "1; mode=block"
+ ],
+ "x-frame-options": [
+ "Deny"
+ ],
+ "expires": [
+ "Sat, 01 Jan 2000 00:00:00 GMT"
+ ],
+ "set-cookie": [
+ "phsid=A%2Fic7sfd33zs7c44ojloujnoicm3roxnre45glurgz; expires=Tue, 09-Jan-2024 04:08:33 GMT; Max-Age=157680000; path=/; domain=phab.mercurial-scm.org; secure; httponly"
+ ],
+ "transfer-encoding": [
+ "chunked"
+ ],
+ "strict-transport-security": [
+ "max-age=0; includeSubdomains; preload"
+ ]
+ },
+ "status": {
+ "message": "OK",
+ "code": 200
+ },
+ "body": {
+ "string": "{\"result\":null,\"error_code\":null,\"error_info\":null}"
+ }
+ },
+ "request": {
+ "method": "POST",
+ "uri": "https://phab.mercurial-scm.org//api/differential.setdiffproperty",
+ "headers": {
+ "content-length": [
+ "227"
+ ],
+ "accept": [
+ "application/mercurial-0.1"
+ ],
+ "content-type": [
+ "application/x-www-form-urlencoded"
+ ],
+ "user-agent": [
+ "mercurial/proto-1.0 (Mercurial 4.8.2+682-e2cf04a597cc+20190109)"
+ ],
+ "host": [
+ "phab.mercurial-scm.org"
+ ]
+ },
+ "body": "name=local%3Acommits&api.token=cli-hahayouwish&data=%7B%22620a50fd6ed958bbee178052de67acc31dcac66e%22%3A+%7B%22author%22%3A+%22test%22%2C+%22authorEmail%22%3A+%22test%22%2C+%22time%22%3A+0.0%7D%7D&diff_id=13122"
+ }
+ }
+ ],
+ "version": 1
+}
\ No newline at end of file
--- a/tests/printenv.py Wed Jan 09 20:00:35 2019 -0800
+++ b/tests/printenv.py Fri Jan 18 13:28:22 2019 -0500
@@ -13,6 +13,7 @@
# the file will be opened in append mode.
#
from __future__ import absolute_import
+import argparse
import os
import sys
@@ -24,15 +25,30 @@
except ImportError:
pass
-exitcode = 0
-out = sys.stdout
-out = getattr(out, 'buffer', out)
+parser = argparse.ArgumentParser()
+parser.add_argument("name", help="the hook name, used for display")
+parser.add_argument(
+ "exitcode",
+ nargs="?",
+ default=0,
+ type=int,
+ help="the exit code for the hook",
+)
+parser.add_argument(
+ "out", nargs="?", default=None, help="where to write the output"
+)
+parser.add_argument(
+ "--line",
+ action="store_true",
+ help="print environment variables one per line instead of on a single line",
+)
+args = parser.parse_args()
-name = sys.argv[1]
-if len(sys.argv) > 2:
- exitcode = int(sys.argv[2])
- if len(sys.argv) > 3:
- out = open(sys.argv[3], "ab")
+if args.out is None:
+ out = sys.stdout
+ out = getattr(out, "buffer", out)
+else:
+ out = open(args.out, "ab")
# variables with empty values may not exist on all platforms, filter
# them now for portability sake.
@@ -40,15 +56,24 @@
if k.startswith("HG_") and v]
env.sort()
-out.write(b"%s hook: " % name.encode('ascii'))
+out.write(b"%s hook: " % args.name.encode('ascii'))
if os.name == 'nt':
filter = lambda x: x.replace('\\', '/')
else:
filter = lambda x: x
+
vars = [b"%s=%s" % (k.encode('ascii'), filter(v).encode('ascii'))
for k, v in env]
-out.write(b" ".join(vars))
+
+# Print the variables to out
+if not args.line:
+ out.write(b" ".join(vars))
+else:
+ for var in vars:
+ out.write(var)
+ out.write(b"\n")
+
out.write(b"\n")
out.close()
-sys.exit(exitcode)
+sys.exit(args.exitcode)
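
(For clarity, a short sketch of the two output shapes the new --line flag
selects between; the hook variables here are hypothetical:

  import sys

  out = getattr(sys.stdout, 'buffer', sys.stdout)
  hookvars = [b'HG_NODE=0000', b'HG_SOURCE=push']
  # default: everything on a single line
  out.write(b' '.join(hookvars) + b'\n')
  # --line: one variable per line, followed by the usual trailing newline
  for v in hookvars:
      out.write(v + b'\n')
  out.write(b'\n')
)
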
--- a/tests/pullext.py Wed Jan 09 20:00:35 2019 -0800
+++ b/tests/pullext.py Fri Jan 18 13:28:22 2019 -0500
@@ -32,19 +32,19 @@
features.add(repository.NARROW_REQUIREMENT)
def extsetup(ui):
- entry = extensions.wrapcommand(commands.table, 'clone', clonecommand)
+ entry = extensions.wrapcommand(commands.table, b'clone', clonecommand)
- hasinclude = any(x[1] == 'include' for x in entry[1])
- hasdepth = any(x[1] == 'depth' for x in entry[1])
+ hasinclude = any(x[1] == b'include' for x in entry[1])
+ hasdepth = any(x[1] == b'depth' for x in entry[1])
if not hasinclude:
- entry[1].append(('', 'include', [],
- _('pattern of file/directory to clone')))
- entry[1].append(('', 'exclude', [],
- _('pattern of file/directory to not clone')))
+ entry[1].append((b'', b'include', [],
+ _(b'pattern of file/directory to clone')))
+ entry[1].append((b'', b'exclude', [],
+ _(b'pattern of file/directory to not clone')))
if not hasdepth:
- entry[1].append(('', 'depth', '',
- _('ancestry depth of changesets to fetch')))
+ entry[1].append((b'', b'depth', b'',
+ _(b'ancestry depth of changesets to fetch')))
localrepo.featuresetupfuncs.add(featuresetup)
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/tests/remotefilelog-getflogheads.py Fri Jan 18 13:28:22 2019 -0500
@@ -0,0 +1,31 @@
+from __future__ import absolute_import
+
+from mercurial.i18n import _
+from mercurial import (
+ hg,
+ registrar,
+)
+
+cmdtable = {}
+command = registrar.command(cmdtable)
+
+@command(b'getflogheads',
+ [],
+ b'path')
+def getflogheads(ui, repo, path):
+ """
+ Extension printing a remotefilelog's heads
+
+ Used for testing purposes
+ """
+
+ dest = repo.ui.expandpath(b'default')
+ peer = hg.peer(repo, {}, dest)
+
+ flogheads = peer.x_rfl_getflogheads(path)
+
+ if flogheads:
+ for head in flogheads:
+ ui.write(head + b'\n')
+ else:
+ ui.write(_(b'EMPTY\n'))
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/tests/remotefilelog-library.sh Fri Jan 18 13:28:22 2019 -0500
@@ -0,0 +1,75 @@
+CACHEDIR=$PWD/hgcache
+cat >> $HGRCPATH <<EOF
+[remotefilelog]
+cachepath=$CACHEDIR
+debug=True
+[extensions]
+remotefilelog=
+rebase=
+strip=
+[ui]
+ssh=python "$TESTDIR/dummyssh"
+[server]
+preferuncompressed=True
+[experimental]
+changegroup3=True
+[rebase]
+singletransaction=True
+EOF
+
+hgcloneshallow() {
+ local orig
+ local dest
+ orig=$1
+ shift
+ dest=$1
+ shift
+ hg clone --shallow --config remotefilelog.reponame=master $orig $dest $@
+ cat >> $dest/.hg/hgrc <<EOF
+[remotefilelog]
+reponame=master
+[phases]
+publish=False
+EOF
+}
+
+hgcloneshallowlfs() {
+ local orig
+ local dest
+ local lfsdir
+ orig=$1
+ shift
+ dest=$1
+ shift
+ lfsdir=$1
+ shift
+ hg clone --shallow --config "extensions.lfs=" --config "lfs.url=$lfsdir" --config remotefilelog.reponame=master $orig $dest $@
+ cat >> $dest/.hg/hgrc <<EOF
+[extensions]
+lfs=
+[lfs]
+url=$lfsdir
+[remotefilelog]
+reponame=master
+[phases]
+publish=False
+EOF
+}
+
+clearcache() {
+ rm -rf $CACHEDIR/*
+}
+
+mkcommit() {
+ echo "$1" > "$1"
+ hg add "$1"
+ hg ci -m "$1"
+}
+
+ls_l() {
+ $PYTHON $TESTDIR/ls-l.py "$@"
+}
+
+identifyrflcaps() {
+ xargs -n 1 echo | egrep '(remotefilelog|getflogheads|getfile)' | sort
+}
--- a/tests/run-tests.py Wed Jan 09 20:00:35 2019 -0800
+++ b/tests/run-tests.py Fri Jan 18 13:28:22 2019 -0500
@@ -482,6 +482,7 @@
parser.error('--with-hg must specify an executable hg script')
if os.path.basename(options.with_hg) not in [b'hg', b'hg.exe']:
sys.stderr.write('warning: --with-hg should specify an hg script\n')
+ sys.stderr.flush()
if options.local:
testdir = os.path.dirname(_bytespath(canonpath(sys.argv[0])))
reporootdir = os.path.dirname(testdir)
@@ -1095,14 +1096,17 @@
b'daemon.pids'))
env["HGEDITOR"] = ('"' + sys.executable + '"'
+ ' -c "import sys; sys.exit(0)"')
- env["HGMERGE"] = "internal:merge"
env["HGUSER"] = "test"
env["HGENCODING"] = "ascii"
env["HGENCODINGMODE"] = "strict"
env["HGHOSTNAME"] = "test-hostname"
env['HGIPV6'] = str(int(self._useipv6))
- if 'HGCATAPULTSERVERPIPE' not in env:
- env['HGCATAPULTSERVERPIPE'] = os.devnull
+ # See contrib/catapipe.py for how to use this functionality.
+ if 'HGTESTCATAPULTSERVERPIPE' not in env:
+ # If we don't have HGTESTCATAPULTSERVERPIPE explicitly set, pull the
+ # non-test one in as a default, otherwise set to devnull
+ env['HGTESTCATAPULTSERVERPIPE'] = \
+ env.get('HGCATAPULTSERVERPIPE', os.devnull)
extraextensions = []
for opt in self._extraconfigopts:
@@ -1119,6 +1123,12 @@
# IP addresses.
env['LOCALIP'] = _strpath(self._localip())
+ # This has the same effect as Py_LegacyWindowsStdioFlag in exewrapper.c,
+ # but this is needed for testing python instances like dummyssh,
+ # dummysmtpd.py, and dumbhttp.py.
+ if PYTHON3 and os.name == 'nt':
+ env['PYTHONLEGACYWINDOWSSTDIO'] = '1'
+
# Reset some environment variables to well-known values so that
# the tests produce repeatable output.
env['LANG'] = env['LC_ALL'] = env['LANGUAGE'] = 'C'
@@ -1127,9 +1137,24 @@
env['COLUMNS'] = '80'
env['TERM'] = 'xterm'
- for k in ('HG HGPROF CDPATH GREP_OPTIONS http_proxy no_proxy ' +
- 'HGPLAIN HGPLAINEXCEPT EDITOR VISUAL PAGER ' +
- 'NO_PROXY CHGDEBUG').split():
+ dropped = [
+ 'CDPATH',
+ 'CHGDEBUG',
+ 'EDITOR',
+ 'GREP_OPTIONS',
+ 'HG',
+ 'HGMERGE',
+ 'HGPLAIN',
+ 'HGPLAINEXCEPT',
+ 'HGPROF',
+ 'http_proxy',
+ 'no_proxy',
+ 'NO_PROXY',
+ 'PAGER',
+ 'VISUAL',
+ ]
+
+ for k in dropped:
if k in env:
del env[k]
@@ -1149,6 +1174,7 @@
hgrc.write(b'[ui]\n')
hgrc.write(b'slash = True\n')
hgrc.write(b'interactive = False\n')
+ hgrc.write(b'merge = internal:merge\n')
hgrc.write(b'mergemarkers = detailed\n')
hgrc.write(b'promptecho = True\n')
hgrc.write(b'[defaults]\n')
@@ -1379,24 +1405,32 @@
script.append(b'%s %d 0\n' % (salt, line))
else:
script.append(b'echo %s %d $?\n' % (salt, line))
- active = []
+ activetrace = []
session = str(uuid.uuid4())
if PYTHON3:
session = session.encode('ascii')
- def toggletrace(cmd):
+ hgcatapult = os.getenv('HGTESTCATAPULTSERVERPIPE') or \
+ os.getenv('HGCATAPULTSERVERPIPE')
+ def toggletrace(cmd=None):
+ if not hgcatapult or hgcatapult == os.devnull:
+ return
+
+ if activetrace:
+ script.append(
+ b'echo END %s %s >> "$HGTESTCATAPULTSERVERPIPE"\n' % (
+ session, activetrace[0]))
+ if cmd is None:
+ return
+
if isinstance(cmd, str):
quoted = shellquote(cmd.strip())
else:
quoted = shellquote(cmd.strip().decode('utf8')).encode('utf8')
quoted = quoted.replace(b'\\', b'\\\\')
- if active:
- script.append(
- b'echo END %s %s >> "$HGCATAPULTSERVERPIPE"\n' % (
- session, active[0]))
- script.append(
- b'echo START %s %s >> "$HGCATAPULTSERVERPIPE"\n' % (
- session, quoted))
- active[0:] = [quoted]
+ script.append(
+ b'echo START %s %s >> "$HGTESTCATAPULTSERVERPIPE"\n' % (
+ session, quoted))
+ activetrace[0:] = [quoted]
script = []
@@ -1425,7 +1459,6 @@
if os.getenv('MSYSTEM'):
script.append(b'alias pwd="pwd -W"\n')
- hgcatapult = os.getenv('HGCATAPULTSERVERPIPE')
if hgcatapult and hgcatapult != os.devnull:
# Kludge: use a while loop to keep the pipe from getting
# closed by our echo commands. The still-running file gets
@@ -1433,18 +1466,19 @@
# loop to exit and closes the pipe. Sigh.
script.append(
b'rtendtracing() {\n'
- b' echo END %(session)s %(name)s >> $HGCATAPULTSERVERPIPE\n'
+ b' echo END %(session)s %(name)s >> %(catapult)s\n'
b' rm -f "$TESTTMP/.still-running"\n'
b'}\n'
b'trap "rtendtracing" 0\n'
b'touch "$TESTTMP/.still-running"\n'
b'while [ -f "$TESTTMP/.still-running" ]; do sleep 1; done '
- b'> $HGCATAPULTSERVERPIPE &\n'
+ b'> %(catapult)s &\n'
b'HGCATAPULTSESSION=%(session)s ; export HGCATAPULTSESSION\n'
- b'echo START %(session)s %(name)s >> $HGCATAPULTSERVERPIPE\n'
+ b'echo START %(session)s %(name)s >> %(catapult)s\n'
% {
'name': self.name,
'session': session,
+ 'catapult': hgcatapult,
}
)
@@ -1537,6 +1571,9 @@
if skipping is not None:
after.setdefault(pos, []).append(' !!! missing #endif\n')
addsalt(n + 1, False)
+ # Need to end any current per-command trace
+ if activetrace:
+ toggletrace()
return salt, script, after, expected
def _processoutput(self, exitcode, output, salt, after, expected):
@@ -2544,17 +2581,18 @@
os.umask(oldmask)
def _run(self, testdescs):
+ testdir = getcwdb()
self._testdir = osenvironb[b'TESTDIR'] = getcwdb()
# assume all tests in same folder for now
if testdescs:
pathname = os.path.dirname(testdescs[0]['path'])
if pathname:
- osenvironb[b'TESTDIR'] = os.path.join(osenvironb[b'TESTDIR'],
- pathname)
+ testdir = os.path.join(testdir, pathname)
+ self._testdir = osenvironb[b'TESTDIR'] = testdir
if self.options.outputdir:
self._outputdir = canonpath(_bytespath(self.options.outputdir))
else:
- self._outputdir = self._testdir
+ self._outputdir = getcwdb()
if testdescs and pathname:
self._outputdir = os.path.join(self._outputdir, pathname)
previoustimes = {}
@@ -2626,6 +2664,13 @@
self._tmpbindir = self._bindir
self._pythondir = os.path.join(self._installdir, b"lib", b"python")
+ # Force the use of hg.exe instead of relying on MSYS to recognize that hg
+ # is a Python script and feed it to python.exe. Legacy stdio is
+ # force-enabled by hg.exe, and this is a more realistic way to launch hg
+ # anyway.
+ if os.name == 'nt' and not self._hgcommand.endswith(b'.exe'):
+ self._hgcommand += b'.exe'
+
# set CHGHG, then replace "hg" command by "chg"
chgbindir = self._bindir
if self.options.chg or self.options.with_chg:
@@ -2742,7 +2787,8 @@
expanded_args.append(arg)
args = expanded_args
- testcasepattern = re.compile(br'([\w-]+\.t|py)(#([a-zA-Z0-9_\-\.#]+))')
+ testcasepattern = re.compile(
+ br'([\w-]+\.t|py)(?:#([a-zA-Z0-9_\-\.#]+))')
tests = []
for t in args:
case = []
@@ -2750,9 +2796,10 @@
if not (os.path.basename(t).startswith(b'test-')
and (t.endswith(b'.py') or t.endswith(b'.t'))):
- m = testcasepattern.match(t)
+ m = testcasepattern.match(os.path.basename(t))
if m is not None:
- t, _, casestr = m.groups()
+ t_basename, casestr = m.groups()
+ t = os.path.join(os.path.dirname(t), t_basename)
if casestr:
case = casestr.split(b'#')
else:
@@ -2813,8 +2860,9 @@
testdescs = orig
tests = [self._gettest(d, i) for i, d in enumerate(testdescs)]
-
- jobs = min(len(tests), self.options.jobs)
+ num_tests = len(tests) * self.options.runs_per_test
+
+ jobs = min(num_tests, self.options.jobs)
failed = False
kws = self.options.keywords
@@ -2851,7 +2899,7 @@
self._installchg()
log('running %d tests using %d parallel processes' % (
- len(tests), jobs))
+ num_tests, jobs))
result = runner.run(suite)
@@ -2902,7 +2950,7 @@
testcls = cls
break
- refpath = os.path.join(self._testdir, path)
+ refpath = os.path.join(getcwdb(), path)
tmpdir = os.path.join(self._hgtmp, b'child%d' % count)
# extra keyword parameters. 'case' is used by .t tests
@@ -3005,7 +3053,7 @@
# least on Windows for now, deal with .pydistutils.cfg bugs
# when they happen.
nohome = b''
- cmd = (b'%(exe)s setup.py %(pure)s clean --all'
+ cmd = (b'"%(exe)s" setup.py %(pure)s clean --all'
b' build %(compiler)s --build-base="%(base)s"'
b' install --force --prefix="%(prefix)s"'
b' --install-lib="%(libdir)s"'
@@ -3028,7 +3076,7 @@
makedirs(self._bindir)
vlog("# Running", cmd)
- if os.system(_strpath(cmd)) == 0:
+ if subprocess.call(_strpath(cmd), shell=True) == 0:
if not self.options.verbose:
try:
os.remove(installerrs)
@@ -3107,15 +3155,15 @@
if self._hgpath is not None:
return self._hgpath
- cmd = b'%s -c "import mercurial; print (mercurial.__path__[0])"'
+ cmd = b'"%s" -c "import mercurial; print (mercurial.__path__[0])"'
cmd = cmd % PYTHON
if PYTHON3:
cmd = _strpath(cmd)
- pipe = os.popen(cmd)
- try:
- self._hgpath = _bytespath(pipe.read().strip())
- finally:
- pipe.close()
+
+ p = subprocess.Popen(cmd, stdout=subprocess.PIPE, shell=True)
+ out, err = p.communicate()
+
+ self._hgpath = out.strip()
return self._hgpath
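
(The switch from os.popen() to subprocess above pairs with quoting the
interpreter path, so paths containing spaces survive shell=True on Windows.
A minimal sketch of the pattern with a harmless command:

  import subprocess
  import sys

  # quoting "%s" keeps an interpreter path with spaces intact
  cmd = '"%s" -c "import sys; print(sys.prefix)"' % sys.executable
  p = subprocess.Popen(cmd, stdout=subprocess.PIPE, shell=True)
  out, _ = p.communicate()
  print(out.strip())
)
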
--- a/tests/seq.py Wed Jan 09 20:00:35 2019 -0800
+++ b/tests/seq.py Fri Jan 18 13:28:22 2019 -0500
@@ -8,8 +8,17 @@
# seq START STEP STOP [START, STOP] stepping by STEP
from __future__ import absolute_import, print_function
+import os
import sys
+try:
+ import msvcrt
+ msvcrt.setmode(sys.stdin.fileno(), os.O_BINARY)
+ msvcrt.setmode(sys.stdout.fileno(), os.O_BINARY)
+ msvcrt.setmode(sys.stderr.fileno(), os.O_BINARY)
+except ImportError:
+ pass
+
if sys.version_info[0] >= 3:
xrange = range
--- a/tests/simplestorerepo.py Wed Jan 09 20:00:35 2019 -0800
+++ b/tests/simplestorerepo.py Fri Jan 18 13:28:22 2019 -0500
@@ -23,7 +23,6 @@
)
from mercurial.thirdparty import (
attr,
- cbor,
)
from mercurial import (
ancestor,
@@ -39,6 +38,7 @@
verify,
)
from mercurial.utils import (
+ cborutil,
interfaceutil,
storageutil,
)
@@ -66,17 +66,24 @@
pass
@interfaceutil.implementer(repository.irevisiondelta)
-@attr.s(slots=True, frozen=True)
+@attr.s(slots=True)
class simplestorerevisiondelta(object):
node = attr.ib()
p1node = attr.ib()
p2node = attr.ib()
basenode = attr.ib()
- linknode = attr.ib()
flags = attr.ib()
baserevisionsize = attr.ib()
revision = attr.ib()
delta = attr.ib()
+ linknode = attr.ib(default=None)
+
+@interfaceutil.implementer(repository.iverifyproblem)
+@attr.s(frozen=True)
+class simplefilestoreproblem(object):
+ warning = attr.ib(default=None)
+ error = attr.ib(default=None)
+ node = attr.ib(default=None)
@interfaceutil.implementer(repository.ifilestorage)
class filestorage(object):
@@ -99,7 +106,7 @@
indexdata = self._svfs.tryread(self._indexpath)
if indexdata:
- indexdata = cbor.loads(indexdata)
+ indexdata = cborutil.decodeall(indexdata)
self._indexdata = indexdata or []
self._indexbynode = {}
@@ -192,6 +199,13 @@
return self._indexbyrev[rev][b'node']
+ def hasnode(self, node):
+ validatenode(node)
+ return node in self._indexbynode
+
+ def censorrevision(self, tr, censornode, tombstone=b''):
+ raise NotImplementedError('TODO')
+
def lookup(self, node):
if isinstance(node, int):
return self.node(node)
@@ -290,7 +304,11 @@
raise simplestoreerror(_("integrity check failed on %s") %
self._path)
- def revision(self, node, raw=False):
+ def revision(self, nodeorrev, raw=False):
+ if isinstance(nodeorrev, int):
+ node = self.node(nodeorrev)
+ else:
+ node = nodeorrev
validatenode(node)
if node == nullid:
@@ -409,6 +427,44 @@
return [b'/'.join((self._storepath, f)) for f in entries]
+ def storageinfo(self, exclusivefiles=False, sharedfiles=False,
+ revisionscount=False, trackedsize=False,
+ storedsize=False):
+ # TODO do a real implementation of this
+ return {
+ 'exclusivefiles': [],
+ 'sharedfiles': [],
+ 'revisionscount': len(self),
+ 'trackedsize': 0,
+ 'storedsize': None,
+ }
+
+ def verifyintegrity(self, state):
+ state['skipread'] = set()
+ for rev in self:
+ node = self.node(rev)
+ try:
+ self.revision(node)
+ except Exception as e:
+ yield simplefilestoreproblem(
+ error='unpacking %s: %s' % (node, e),
+ node=node)
+ state['skipread'].add(node)
+
+ def emitrevisions(self, nodes, nodesorder=None, revisiondata=False,
+ assumehaveparentrevisions=False,
+ deltamode=repository.CG_DELTAMODE_STD):
+ # TODO this will probably break on some ordering options.
+ nodes = [n for n in nodes if n != nullid]
+ if not nodes:
+ return
+ for delta in storageutil.emitrevisions(
+ self, nodes, nodesorder, simplestorerevisiondelta,
+ revisiondata=revisiondata,
+ assumehaveparentrevisions=assumehaveparentrevisions,
+ deltamode=deltamode):
+ yield delta
+
def add(self, text, meta, transaction, linkrev, p1, p2):
if meta or text.startswith(b'\1\n'):
text = storageutil.packmeta(meta, text)
@@ -457,7 +513,8 @@
def _reflectindexupdate(self):
self._refreshindex()
- self._svfs.write(self._indexpath, cbor.dumps(self._indexdata))
+ self._svfs.write(self._indexpath,
+ ''.join(cborutil.streamencode(self._indexdata)))
def addgroup(self, deltas, linkmapper, transaction, addrevisioncb=None,
maybemissingparents=False):
@@ -489,15 +546,26 @@
if addrevisioncb:
addrevisioncb(self, node)
+ return nodes
- return nodes
+ def _headrevs(self):
+ # Assume all revisions are heads by default.
+ revishead = {rev: True for rev in self._indexbyrev}
+
+ for rev, entry in self._indexbyrev.items():
+ # Unset head flag for all seen parents.
+ revishead[self.rev(entry[b'p1'])] = False
+ revishead[self.rev(entry[b'p2'])] = False
+
+ return [rev for rev, ishead in sorted(revishead.items())
+ if ishead]
def heads(self, start=None, stop=None):
# This is copied from revlog.py.
if start is None and stop is None:
if not len(self):
return [nullid]
- return [self.node(r) for r in self.headrevs()]
+ return [self.node(r) for r in self._headrevs()]
if start is None:
start = nullid
@@ -537,41 +605,9 @@
return c
def getstrippoint(self, minlink):
-
- # This is largely a copy of revlog.getstrippoint().
- brokenrevs = set()
- strippoint = len(self)
-
- heads = {}
- futurelargelinkrevs = set()
- for head in self.heads():
- headlinkrev = self.linkrev(self.rev(head))
- heads[head] = headlinkrev
- if headlinkrev >= minlink:
- futurelargelinkrevs.add(headlinkrev)
-
- # This algorithm involves walking down the rev graph, starting at the
- # heads. Since the revs are topologically sorted according to linkrev,
- # once all head linkrevs are below the minlink, we know there are
- # no more revs that could have a linkrev greater than minlink.
- # So we can stop walking.
- while futurelargelinkrevs:
- strippoint -= 1
- linkrev = heads.pop(strippoint)
-
- if linkrev < minlink:
- brokenrevs.add(strippoint)
- else:
- futurelargelinkrevs.remove(linkrev)
-
- for p in self.parentrevs(strippoint):
- if p != nullrev:
- plinkrev = self.linkrev(p)
- heads[p] = plinkrev
- if plinkrev >= minlink:
- futurelargelinkrevs.add(plinkrev)
-
- return strippoint, brokenrevs
+ return storageutil.resolvestripinfo(
+ minlink, len(self) - 1, self._headrevs(), self.linkrev,
+ self.parentrevs)
def strip(self, minlink, transaction):
if not len(self):
@@ -631,9 +667,9 @@
def featuresetup(ui, supported):
supported.add(REQUIREMENT)
-def newreporequirements(orig, ui):
+def newreporequirements(orig, ui, createopts):
"""Modifies default requirements for new repos to use the simple store."""
- requirements = orig(ui)
+ requirements = orig(ui, createopts)
# These requirements are only used to affect creation of the store
# object. We have our own store. So we can remove them.
@@ -665,5 +701,5 @@
extensions.wrapfunction(localrepo, 'newreporequirements',
newreporequirements)
- extensions.wrapfunction(store, 'store', makestore)
+ extensions.wrapfunction(localrepo, 'makestore', makestore)
extensions.wrapfunction(verify.verifier, '__init__', verifierinit)
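
(The _headrevs() helper added above works in a single pass: assume every
revision is a head, then clear the flag on each revision's parents. A
standalone sketch over a plain parent map; the graph itself is hypothetical:

  # rev -> (p1, p2); -1 means "no parent"
  parents = {0: (-1, -1), 1: (0, -1), 2: (1, -1), 3: (1, -1)}

  ishead = {rev: True for rev in parents}
  for rev, (p1, p2) in parents.items():
      for p in (p1, p2):
          if p in ishead:
              ishead[p] = False

  print(sorted(r for r, h in ishead.items() if h))  # [2, 3]

This is linear in the number of revisions, which is acceptable for the
simple store since it already keeps its whole index in memory.)
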
--- a/tests/test-absorb.t Wed Jan 09 20:00:35 2019 -0800
+++ b/tests/test-absorb.t Fri Jan 18 13:28:22 2019 -0500
@@ -129,6 +129,20 @@
nothing applied
[1]
+The prompt is not given if there are no changes to be applied, even if there
+are some changes that won't be applied:
+
+ $ hg absorb
+ showing changes for a
+ @@ -0,2 +0,1 @@
+ -2b
+ -4d
+ +1
+
+ 0 changesets affected
+ nothing applied
+ [1]
+
Insertions:
$ cat > a << EOF
--- a/tests/test-alias.t Wed Jan 09 20:00:35 2019 -0800
+++ b/tests/test-alias.t Fri Jan 18 13:28:22 2019 -0500
@@ -68,17 +68,17 @@
help
$ hg help -c | grep myinit
- myinit This is my documented alias for init.
+ myinit This is my documented alias for init.
$ hg help -c | grep mycommit
- mycommit This is my alias with only doc.
+ mycommit This is my alias with only doc.
$ hg help -c | grep cleanstatus
- cleanstatus show changed files in the working directory
+ [1]
$ hg help -c | grep lognull
- lognull Logs the null rev
+ lognull Logs the null rev
$ hg help -c | grep dln
- dln Logs the null rev
+ [1]
$ hg help -c | grep recursivedoc
- recursivedoc Logs the null rev in debug mode
+ recursivedoc Logs the null rev in debug mode
$ hg help myinit
hg myinit [OPTIONS] [BLA] [BLE]
@@ -602,7 +602,7 @@
help for a shell alias
$ hg help -c | grep rebate
- rebate This is my alias which just prints something.
+ rebate This is my alias which just prints something.
$ hg help rebate
hg rebate [MYARGS]
@@ -623,9 +623,9 @@
>>> with open('.hg/hgrc', 'ab') as f:
... f.write(b'[alias]\n'
... b'invaliddoc = log\n'
- ... b'invaliddoc:doc = \xc0\n'
+ ... b'invaliddoc:doc = \xc3\xa9\n'
... b'invalidhelp = log\n'
- ... b'invalidhelp:help = \xc0\n') and None
+ ... b'invalidhelp:help = \xc3\xa9\n') and None
$ hg help invaliddoc
non-ASCII character in alias definition 'invaliddoc:doc'
$ hg help invalidhelp
--- a/tests/test-amend.t Wed Jan 09 20:00:35 2019 -0800
+++ b/tests/test-amend.t Fri Jan 18 13:28:22 2019 -0500
@@ -332,9 +332,9 @@
? missing_content2_content3-untracked
? missing_missing_content3-untracked
-==========================================
-Test history-editing-backup config option|
-==========================================
+=================================
+Test backup-bundle config option|
+=================================
$ hg init $TESTTMP/repo4
$ cd $TESTTMP/repo4
$ echo a>a
@@ -346,22 +346,106 @@
#if obsstore-off
$ hg amend
saved backup bundle to $TESTTMP/repo4/.hg/strip-backup/95e899acf2ce-f11cb050-amend.hg
-When history-editing-backup config option is set:
+When backup-bundle config option is set:
$ cat << EOF >> $HGRCPATH
- > [ui]
- > history-editing-backup = False
+ > [rewrite]
+ > backup-bundle = False
> EOF
$ echo fixed > b
$ hg amend
#else
$ hg amend
-When history-editing-backup config option is set:
+When backup-bundle config option is set:
$ cat << EOF >> $HGRCPATH
- > [ui]
- > history-editing-backup = False
+ > [rewrite]
+ > backup-bundle = False
> EOF
$ echo fixed > b
$ hg amend
#endif
+====================================
+Test update-timestamp config option|
+====================================
+
+ $ cat >> $HGRCPATH << EOF
+ > [extensions]
+ > amend=
+ > mockmakedate = $TESTDIR/mockmakedate.py
+ > EOF
+
+ $ hg init $TESTTMP/repo5
+ $ cd $TESTTMP/repo5
+ $ cat <<'EOF' >> .hg/hgrc
+ > [ui]
+ > logtemplate = 'user: {user}
+ > date: {date|date}
+ > summary: {desc|firstline}\n'
+ > EOF
+
+ $ echo a>a
+ $ hg ci -Am 'commit 1'
+ adding a
+
+When update-timestamp is False
+
+ $ hg amend --date '1997-1-1 0:1'
+ $ hg log --limit 1
+ user: test
+ date: Wed Jan 01 00:01:00 1997 +0000
+ summary: commit 1
+
+When update-timestamp is True and there is no change other than the date
+
+ $ hg amend --config rewrite.update-timestamp=True
+ nothing changed
+ [1]
+ $ hg log --limit 1
+ user: test
+ date: Wed Jan 01 00:01:00 1997 +0000
+ summary: commit 1
+
+When update-timestamp is True and there is a change other than the date
+ $ hg amend --user foobar --config rewrite.update-timestamp=True
+ $ hg log --limit 1
+ user: foobar
+ date: Thu Jan 01 00:00:02 1970 +0000
+ summary: commit 1
+
+When date option is applicable and update-timestamp is True
+ $ hg amend --date '1998-1-1 0:1' --config rewrite.update-timestamp=True
+ $ hg log --limit 1
+ user: foobar
+ date: Thu Jan 01 00:01:00 1998 +0000
+ summary: commit 1
+
+Unlike rewrite.update-timestamp, -D/--currentdate always updates the timestamp
+
+ $ hg amend -D
+ $ hg log --limit 1
+ user: foobar
+ date: Thu Jan 01 00:00:04 1970 +0000
+ summary: commit 1
+
+ $ hg amend -D --config rewrite.update-timestamp=True
+ $ hg log --limit 1
+ user: foobar
+ date: Thu Jan 01 00:00:05 1970 +0000
+ summary: commit 1
+
+rewrite.update-timestamp can be negated by --no-currentdate
+
+ $ hg amend --config rewrite.update-timestamp=True --no-currentdate -u baz
+ $ hg log --limit 1
+ user: baz
+ date: Thu Jan 01 00:00:05 1970 +0000
+ summary: commit 1
+
+Bad combination of date options:
+
+ $ hg amend -D --date '0 0'
+ abort: --date and --currentdate are mutually exclusive
+ [255]
+
+ $ cd ..
--- a/tests/test-ancestor.py Wed Jan 09 20:00:35 2019 -0800
+++ b/tests/test-ancestor.py Fri Jan 18 13:28:22 2019 -0500
@@ -182,6 +182,64 @@
5: [4, -1], 6: [4, -1], 7: [4, -1], 8: [-1, -1], 9: [6, 7],
10: [5, -1], 11: [3, 7], 12: [9, -1], 13: [8, -1]}
+def test_missingancestors_explicit():
+ """A few explicit cases, easier to check for catching errors in refactors.
+
+ The bigger graph at the end has been produced by the random generator
+ above, and we have some evidence that the other tests don't cover it.
+ """
+ for i, (bases, revs) in enumerate((({1, 2, 3, 4, 7}, set(xrange(10))),
+ ({10}, set({11, 12, 13, 14})),
+ ({7}, set({1, 2, 3, 4, 5})),
+ )):
+ print("%% removeancestorsfrom(), example %d" % (i + 1))
+ missanc = ancestor.incrementalmissingancestors(graph.get, bases)
+ missanc.removeancestorsfrom(revs)
+ print("remaining (sorted): %s" % sorted(list(revs)))
+
+ for i, (bases, revs) in enumerate((({10}, {11}),
+ ({11}, {10}),
+ ({7}, {9, 11}),
+ )):
+ print("%% missingancestors(), example %d" % (i + 1))
+ missanc = ancestor.incrementalmissingancestors(graph.get, bases)
+ print("return %s" % missanc.missingancestors(revs))
+
+ print("% removeancestorsfrom(), bigger graph")
+ vecgraph = [
+ [-1, -1], [0, -1], [1, 0], [2, 1], [3, -1], [4, -1], [5, 1],
+ [2, -1], [7, -1], [8, -1], [9, -1], [10, 1], [3, -1], [12, -1],
+ [13, -1], [14, -1], [4, -1], [16, -1], [17, -1], [18, -1],
+ [19, 11], [20, -1], [21, -1], [22, -1], [23, -1], [2, -1],
+ [3, -1], [26, 24], [27, -1], [28, -1], [12, -1], [1, -1], [1, 9],
+ [32, -1], [33, -1], [34, 31], [35, -1], [36, 26], [37, -1],
+ [38, -1], [39, -1], [40, -1], [41, -1], [42, 26], [0, -1],
+ [44, -1], [45, 4], [40, -1], [47, -1], [36, 0], [49, -1],
+ [-1, -1], [51, -1], [52, -1], [53, -1], [14, -1],
+ [55, -1], [15, -1], [23, -1], [58, -1], [59, -1], [2, -1],
+ [61, 59], [62, -1], [63, -1], [-1, -1], [65, -1],
+ [66, -1], [67, -1], [68, -1], [37, 28], [69, 25],
+ [71, -1], [72, -1], [50, 2], [74, -1], [12, -1],
+ [18, -1], [77, -1], [78, -1], [79, -1], [43, 33],
+ [81, -1], [82, -1], [83, -1], [84, 45], [85, -1],
+ [86, -1], [-1, -1], [88, -1], [-1, -1], [76, 83], [44, -1],
+ [92, -1], [93, -1], [9, -1], [95, 67], [96, -1], [97, -1],
+ [-1, -1]]
+ problem_rev = 28
+ problem_base = 70
+ # problem_rev is a parent of problem_base, but a faulty implementation
+ # could forget to remove it.
+ bases = {60, 26, 70, 3, 96, 19, 98, 49, 97, 47, 1, 6}
+ if problem_rev not in vecgraph[problem_base] or problem_base not in bases:
+ print("Conditions have changed")
+ missanc = ancestor.incrementalmissingancestors(vecgraph.__getitem__, bases)
+ revs = {4, 12, 41, 28, 68, 38, 1, 30, 56, 44}
+ missanc.removeancestorsfrom(revs)
+ if 28 in revs:
+ print("Failed!")
+ else:
+ print("Ok")
+
def genlazyancestors(revs, stoprev=0, inclusive=False):
print(("%% lazy ancestor set for %s, stoprev = %s, inclusive = %s" %
(revs, stoprev, inclusive)))
@@ -276,6 +334,7 @@
seed = long(time.time() * 1000)
rng = random.Random(seed)
+ test_missingancestors_explicit()
test_missingancestors(seed, rng)
test_lazyancestors()
test_gca()
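
(For reference, the property these explicit cases pin down is easy to state
in plain Python: the missing ancestors of revs, given bases, are the
ancestors of revs, inclusive, that are not ancestors of any base. A sketch
using the same parent-map convention as the test's graph:

  def ancestors(pfunc, revs):
      # revs plus all transitive parents, ignoring -1 (no parent)
      seen, stack = set(), list(revs)
      while stack:
          r = stack.pop()
          if r >= 0 and r not in seen:
              seen.add(r)
              stack.extend(pfunc(r))
      return seen

  graph = {0: [-1, -1], 1: [0, -1], 2: [1, -1], 3: [1, -1]}
  print(sorted(ancestors(graph.get, {3}) - ancestors(graph.get, {2})))  # [3]

removeancestorsfrom() is the in-place variant: it strips from revs anything
that is an ancestor of the bases, including the bases themselves.)
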
--- a/tests/test-ancestor.py.out Wed Jan 09 20:00:35 2019 -0800
+++ b/tests/test-ancestor.py.out Fri Jan 18 13:28:22 2019 -0500
@@ -1,3 +1,17 @@
+% removeancestorsfrom(), example 1
+remaining (sorted): [5, 6, 8, 9]
+% removeancestorsfrom(), example 2
+remaining (sorted): [11, 12, 13, 14]
+% removeancestorsfrom(), example 3
+remaining (sorted): [3, 5]
+% missingancestors(), example 1
+return [3, 7, 11]
+% missingancestors(), example 2
+return [5, 10]
+% missingancestors(), example 3
+return [3, 6, 9, 11]
+% removeancestorsfrom(), bigger graph
+Ok
% lazy ancestor set for [], stoprev = 0, inclusive = False
membership: []
iteration: []
--- a/tests/test-bad-extension.t Wed Jan 09 20:00:35 2019 -0800
+++ b/tests/test-bad-extension.t Fri Jan 18 13:28:22 2019 -0500
@@ -1,3 +1,7 @@
+ $ filterlog () {
+ > sed -e 's!^[0-9/]* [0-9:]* ([0-9]*)>!YYYY/MM/DD HH:MM:SS (PID)>!'
+ > }
+
ensure that failing ui.atexit handlers report sensibly
$ cat > $TESTTMP/bailatexit.py <<EOF
@@ -82,29 +86,30 @@
$ (hg help help --traceback --debug --config devel.debug.extensions=yes 2>&1) \
> | grep -v '^ ' \
- > | egrep 'extension..[^p]|^Exception|Traceback|ImportError|not import|ModuleNotFound'
- debug.extensions: loading extensions
- debug.extensions: - processing 5 entries
- debug.extensions: - loading extension: 'gpg'
- debug.extensions: > 'gpg' extension loaded in * (glob)
- debug.extensions: - validating extension tables: 'gpg'
- debug.extensions: - invoking registered callbacks: 'gpg'
- debug.extensions: > callbacks completed in * (glob)
- debug.extensions: - loading extension: 'badext'
+ > | filterlog \
+ > | egrep 'extension..[^p]|^Exception|Traceback|ImportError|^YYYY|not import|ModuleNotFound'
+ YYYY/MM/DD HH:MM:SS (PID)> loading extensions
+ YYYY/MM/DD HH:MM:SS (PID)> - processing 5 entries
+ YYYY/MM/DD HH:MM:SS (PID)> - loading extension: gpg
+ YYYY/MM/DD HH:MM:SS (PID)> > gpg extension loaded in * (glob)
+ YYYY/MM/DD HH:MM:SS (PID)> - validating extension tables: gpg
+ YYYY/MM/DD HH:MM:SS (PID)> - invoking registered callbacks: gpg
+ YYYY/MM/DD HH:MM:SS (PID)> > callbacks completed in * (glob)
+ YYYY/MM/DD HH:MM:SS (PID)> - loading extension: badext
*** failed to import extension badext from $TESTTMP/badext.py: bit bucket overflow
Traceback (most recent call last):
Exception: bit bucket overflow
- debug.extensions: - loading extension: 'baddocext'
- debug.extensions: > 'baddocext' extension loaded in * (glob)
- debug.extensions: - validating extension tables: 'baddocext'
- debug.extensions: - invoking registered callbacks: 'baddocext'
- debug.extensions: > callbacks completed in * (glob)
- debug.extensions: - loading extension: 'badext2'
- debug.extensions: - could not import hgext.badext2 (No module named *badext2*): trying hgext3rd.badext2 (glob)
+ YYYY/MM/DD HH:MM:SS (PID)> - loading extension: baddocext
+ YYYY/MM/DD HH:MM:SS (PID)> > baddocext extension loaded in * (glob)
+ YYYY/MM/DD HH:MM:SS (PID)> - validating extension tables: baddocext
+ YYYY/MM/DD HH:MM:SS (PID)> - invoking registered callbacks: baddocext
+ YYYY/MM/DD HH:MM:SS (PID)> > callbacks completed in * (glob)
+ YYYY/MM/DD HH:MM:SS (PID)> - loading extension: badext2
+ YYYY/MM/DD HH:MM:SS (PID)> - could not import hgext.badext2 (No module named *badext2*): trying hgext3rd.badext2 (glob)
Traceback (most recent call last):
ImportError: No module named badext2 (no-py3 !)
ModuleNotFoundError: No module named 'hgext.badext2' (py3 !)
- debug.extensions: - could not import hgext3rd.badext2 (No module named *badext2*): trying badext2 (glob)
+ YYYY/MM/DD HH:MM:SS (PID)> - could not import hgext3rd.badext2 (No module named *badext2*): trying badext2 (glob)
Traceback (most recent call last):
ImportError: No module named badext2 (no-py3 !)
ModuleNotFoundError: No module named 'hgext.badext2' (py3 !)
@@ -118,27 +123,27 @@
Traceback (most recent call last): (py3 !)
ModuleNotFoundError: No module named 'badext2' (py3 !)
ImportError: No module named badext2 (no-py3 !)
- debug.extensions: > loaded 2 extensions, total time * (glob)
- debug.extensions: - loading configtable attributes
- debug.extensions: - executing uisetup hooks
- debug.extensions: - running uisetup for 'gpg'
- debug.extensions: > uisetup for 'gpg' took * (glob)
- debug.extensions: - running uisetup for 'baddocext'
- debug.extensions: > uisetup for 'baddocext' took * (glob)
- debug.extensions: > all uisetup took * (glob)
- debug.extensions: - executing extsetup hooks
- debug.extensions: - running extsetup for 'gpg'
- debug.extensions: > extsetup for 'gpg' took * (glob)
- debug.extensions: - running extsetup for 'baddocext'
- debug.extensions: > extsetup for 'baddocext' took * (glob)
- debug.extensions: > all extsetup took * (glob)
- debug.extensions: - executing remaining aftercallbacks
- debug.extensions: > remaining aftercallbacks completed in * (glob)
- debug.extensions: - loading extension registration objects
- debug.extensions: > extension registration object loading took * (glob)
- debug.extensions: > extension baddocext take a total of * to load (glob)
- debug.extensions: > extension gpg take a total of * to load (glob)
- debug.extensions: extension loading complete
+ YYYY/MM/DD HH:MM:SS (PID)> > loaded 2 extensions, total time * (glob)
+ YYYY/MM/DD HH:MM:SS (PID)> - loading configtable attributes
+ YYYY/MM/DD HH:MM:SS (PID)> - executing uisetup hooks
+ YYYY/MM/DD HH:MM:SS (PID)> - running uisetup for gpg
+ YYYY/MM/DD HH:MM:SS (PID)> > uisetup for gpg took * (glob)
+ YYYY/MM/DD HH:MM:SS (PID)> - running uisetup for baddocext
+ YYYY/MM/DD HH:MM:SS (PID)> > uisetup for baddocext took * (glob)
+ YYYY/MM/DD HH:MM:SS (PID)> > all uisetup took * (glob)
+ YYYY/MM/DD HH:MM:SS (PID)> - executing extsetup hooks
+ YYYY/MM/DD HH:MM:SS (PID)> - running extsetup for gpg
+ YYYY/MM/DD HH:MM:SS (PID)> > extsetup for gpg took * (glob)
+ YYYY/MM/DD HH:MM:SS (PID)> - running extsetup for baddocext
+ YYYY/MM/DD HH:MM:SS (PID)> > extsetup for baddocext took * (glob)
+ YYYY/MM/DD HH:MM:SS (PID)> > all extsetup took * (glob)
+ YYYY/MM/DD HH:MM:SS (PID)> - executing remaining aftercallbacks
+ YYYY/MM/DD HH:MM:SS (PID)> > remaining aftercallbacks completed in * (glob)
+ YYYY/MM/DD HH:MM:SS (PID)> - loading extension registration objects
+ YYYY/MM/DD HH:MM:SS (PID)> > extension registration object loading took * (glob)
+ YYYY/MM/DD HH:MM:SS (PID)> > extension baddocext take a total of * to load (glob)
+ YYYY/MM/DD HH:MM:SS (PID)> > extension gpg take a total of * to load (glob)
+ YYYY/MM/DD HH:MM:SS (PID)> extension loading complete
#endif
confirm that there's no crash when an extension's documentation is bad
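
The filterlog() helper added at the top of this test normalizes the new
timestamped log prefix so the expected output stays stable across runs. As a
rough Python equivalent of the sed expression (a sketch for reference, not
part of the patch):

    import re

    # Collapse "2019/01/18 13:28:22 (12345)>" prefixes into the fixed
    # "YYYY/MM/DD HH:MM:SS (PID)>" token used in the expected output.
    _prefix = re.compile(r'^[0-9/]* [0-9:]* \([0-9]*\)>')

    def filterlog(line):
        return _prefix.sub('YYYY/MM/DD HH:MM:SS (PID)>', line)

    assert (filterlog('2019/01/18 13:28:22 (12345)> loading extensions')
            == 'YYYY/MM/DD HH:MM:SS (PID)> loading extensions')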
--- a/tests/test-basic.t Wed Jan 09 20:00:35 2019 -0800
+++ b/tests/test-basic.t Fri Jan 18 13:28:22 2019 -0500
@@ -9,6 +9,7 @@
lfs.usercache=$TESTTMP/.cache/lfs
ui.slash=True
ui.interactive=False
+ ui.merge=internal:merge
ui.mergemarkers=detailed
ui.promptecho=True
web.address=localhost
@@ -101,3 +102,118 @@
At the end...
$ cd ..
+
+Status message redirection:
+
+ $ hg init empty
+
+ status messages are sent to stdout by default:
+
+ $ hg outgoing -R t empty -Tjson 2>/dev/null
+ comparing with empty
+ searching for changes
+ [
+ {
+ "bookmarks": [],
+ "branch": "default",
+ "date": [0, 0],
+ "desc": "test",
+ "node": "acb14030fe0a21b60322c440ad2d20cf7685a376",
+ "parents": ["0000000000000000000000000000000000000000"],
+ "phase": "draft",
+ "rev": 0,
+ "tags": ["tip"],
+ "user": "test"
+ }
+ ]
+
+ which can be configured to send to stderr, so the output wouldn't be
+ interleaved:
+
+ $ cat <<'EOF' >> "$HGRCPATH"
+ > [ui]
+ > message-output = stderr
+ > EOF
+ $ hg outgoing -R t empty -Tjson 2>/dev/null
+ [
+ {
+ "bookmarks": [],
+ "branch": "default",
+ "date": [0, 0],
+ "desc": "test",
+ "node": "acb14030fe0a21b60322c440ad2d20cf7685a376",
+ "parents": ["0000000000000000000000000000000000000000"],
+ "phase": "draft",
+ "rev": 0,
+ "tags": ["tip"],
+ "user": "test"
+ }
+ ]
+ $ hg outgoing -R t empty -Tjson >/dev/null
+ comparing with empty
+ searching for changes
+
+ this option should be turned off by HGPLAIN= since it may break scripting use:
+
+ $ HGPLAIN= hg outgoing -R t empty -Tjson 2>/dev/null
+ comparing with empty
+ searching for changes
+ [
+ {
+ "bookmarks": [],
+ "branch": "default",
+ "date": [0, 0],
+ "desc": "test",
+ "node": "acb14030fe0a21b60322c440ad2d20cf7685a376",
+ "parents": ["0000000000000000000000000000000000000000"],
+ "phase": "draft",
+ "rev": 0,
+ "tags": ["tip"],
+ "user": "test"
+ }
+ ]
+
+ but still overridden by --config:
+
+ $ HGPLAIN= hg outgoing -R t empty -Tjson --config ui.message-output=stderr \
+ > 2>/dev/null
+ [
+ {
+ "bookmarks": [],
+ "branch": "default",
+ "date": [0, 0],
+ "desc": "test",
+ "node": "acb14030fe0a21b60322c440ad2d20cf7685a376",
+ "parents": ["0000000000000000000000000000000000000000"],
+ "phase": "draft",
+ "rev": 0,
+ "tags": ["tip"],
+ "user": "test"
+ }
+ ]
+
+Invalid ui.message-output option:
+
+ $ hg log -R t --config ui.message-output=bad
+ abort: invalid ui.message-output destination: bad
+ [255]
+
+Underlying message streams should be updated when ui.fout/ferr are set:
+
+ $ cat <<'EOF' > capui.py
+ > from mercurial import pycompat, registrar
+ > cmdtable = {}
+ > command = registrar.command(cmdtable)
+ > @command(b'capui', norepo=True)
+ > def capui(ui):
+ > out = ui.fout
+ > ui.fout = pycompat.bytesio()
+ > ui.status(b'status\n')
+ > ui.ferr = pycompat.bytesio()
+ > ui.warn(b'warn\n')
+ > out.write(b'stdout: %s' % ui.fout.getvalue())
+ > out.write(b'stderr: %s' % ui.ferr.getvalue())
+ > EOF
+ $ hg --config extensions.capui=capui.py --config ui.message-output=stdio capui
+ stdout: status
+ stderr: warn
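
The new ui.message-output=stderr option is aimed at scripting: status text
moves to stderr, leaving stdout as pure template output. A hedged sketch of
that usage (assumes the 't' and 'empty' repositories from the test above and
an hg binary on PATH):

    import json
    import subprocess

    # "comparing with ..." and "searching for changes" go to stderr, so
    # stdout can be fed straight to the JSON parser.
    proc = subprocess.run(
        ['hg', 'outgoing', '-R', 't', 'empty', '-Tjson',
         '--config', 'ui.message-output=stderr'],
        capture_output=True, text=True)
    if proc.stdout.strip():
        for entry in json.loads(proc.stdout):
            print(entry['rev'], entry['desc'])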
--- a/tests/test-blackbox.t Wed Jan 09 20:00:35 2019 -0800
+++ b/tests/test-blackbox.t Fri Jan 18 13:28:22 2019 -0500
@@ -22,6 +22,9 @@
> [alias]
> confuse = log --limit 3
> so-confusing = confuse --style compact
+ > [blackbox]
+ > track = backupbundle, branchcache, command, commandalias, commandexception,
+ > commandfinish, debug, exthook, incoming, pythonhook, tagscache
> EOF
$ hg init blackboxtest
@@ -82,6 +85,16 @@
1970/01/01 00:00:00 bob @0000000000000000000000000000000000000000 (5000)> so-confusing exited 0 after * seconds (glob)
1970/01/01 00:00:00 bob @0000000000000000000000000000000000000000 (5000)> blackbox
+custom date format
+ $ rm ./.hg/blackbox.log
+ $ hg --config blackbox.date-format='%Y-%m-%d @ %H:%M:%S' \
+ > --config devel.default-date='1334347993 0' --traceback status
+ A a
+ $ hg blackbox
+ 2012-04-13 @ 20:13:13 bob @0000000000000000000000000000000000000000 (5000)> --config *blackbox.date-format=%Y-%m-%d @ %H:%M:%S* --config *devel.default-date=1334347993 0* --traceback status (glob)
+ 2012-04-13 @ 20:13:13 bob @0000000000000000000000000000000000000000 (5000)> --config *blackbox.date-format=%Y-%m-%d @ %H:%M:%S* --config *devel.default-date=1334347993 0* --traceback status exited 0 after * seconds (glob)
+ 1970/01/01 00:00:00 bob @0000000000000000000000000000000000000000 (5000)> blackbox
+
incoming change tracking
create two heads to verify that we only see one change in the log later
@@ -317,6 +330,30 @@
cleanup
$ cd ..
+Test missing log directory, which shouldn't be created automatically
+
+ $ cat <<'EOF' > closeremove.py
+ > def reposetup(ui, repo):
+ > class rmrepo(repo.__class__):
+ > def close(self):
+ > super(rmrepo, self).close()
+ > self.ui.debug(b'removing %s\n' % self.vfs.base)
+ > self.vfs.rmtree()
+ > repo.__class__ = rmrepo
+ > EOF
+
+ $ hg init gone
+ $ cd gone
+ $ cat <<'EOF' > .hg/hgrc
+ > [extensions]
+ > closeremove = ../closeremove.py
+ > EOF
+ $ hg log --debug
+ removing $TESTTMP/gone/.hg
+ warning: cannot write to blackbox.log: $ENOENT$ (no-windows !)
+ warning: cannot write to blackbox.log: $TESTTMP/gone/.hg/blackbox.log: $ENOTDIR$ (windows !)
+ $ cd ..
+
#if chg
when using chg, blackbox.log should get rotated correctly
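
The blackbox.date-format value exercised above is an ordinary strftime()
pattern, and devel.default-date pins the clock so the rendered timestamp is
deterministic. A quick check of the expected string (a sketch):

    import time

    # devel.default-date='1334347993 0' means 1334347993 seconds since the
    # epoch at UTC; blackbox renders it with the configured pattern.
    fmt = '%Y-%m-%d @ %H:%M:%S'
    print(time.strftime(fmt, time.gmtime(1334347993)))
    # -> 2012-04-13 @ 20:13:13, matching the blackbox output above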
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/tests/test-bookflow.t Fri Jan 18 13:28:22 2019 -0500
@@ -0,0 +1,292 @@
+initialize
+ $ make_changes() {
+ > d=`pwd`
+ > [ ! -z $1 ] && cd $1
+ > echo "test `basename \`pwd\``" >> test
+ > hg commit -Am"${2:-test}"
+ > r=$?
+ > cd $d
+ > return $r
+ > }
+ $ ls -1a
+ .
+ ..
+ $ hg init a
+ $ cd a
+ $ echo 'test' > test; hg commit -Am'test'
+ adding test
+
+clone to b
+
+ $ mkdir ../b
+ $ cd ../b
+ $ hg clone ../a .
+ updating to branch default
+ 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
+ $ echo "[extensions]" >> .hg/hgrc
+ $ echo "bookflow=" >> .hg/hgrc
+ $ hg branch X
+ abort: creating named branches is disabled and you should use bookmarks
+ (see 'hg help bookflow')
+ [255]
+ $ hg bookmark X
+ $ hg bookmarks
+ * X 0:* (glob)
+ $ hg bookmark X
+ abort: bookmark X already exists, to move use the --rev option
+ [255]
+ $ make_changes
+ $ hg push ../a -q
+
+ $ hg bookmarks
+ \* X 1:* (glob)
+
+change a
+ $ cd ../a
+ $ hg up
+ 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
+ $ echo 'test' >> test; hg commit -Am'test'
+
+
+pull in b
+ $ cd ../b
+ $ hg pull -u
+ pulling from $TESTTMP/a
+ searching for changes
+ adding changesets
+ adding manifests
+ adding file changes
+ added 1 changesets with 1 changes to 1 files
+ new changesets * (glob)
+ 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
+ (leaving bookmark X)
+ $ hg status
+ $ hg bookmarks
+ X 1:* (glob)
+
+check protection of @ bookmark
+ $ hg bookmark @
+ $ hg bookmarks
+ \* @ 2:* (glob)
+ X 1:* (glob)
+ $ make_changes
+ abort: cannot commit, bookmark @ is protected
+ [255]
+
+ $ hg status
+ M test
+ $ hg bookmarks
+ \* @ 2:* (glob)
+ X 1:* (glob)
+
+ $ hg --config bookflow.protect= commit -Am"Updated test"
+
+ $ hg bookmarks
+ \* @ 3:* (glob)
+ X 1:* (glob)
+
+check requirement for an active bookmark
+ $ hg bookmark -i
+ $ hg bookmarks
+ @ 3:* (glob)
+ X 1:* (glob)
+ $ make_changes
+ abort: cannot commit without an active bookmark
+ [255]
+ $ hg revert test
+ $ rm test.orig
+ $ hg status
+
+
+make the bookmark move by updating it on a, and then pulling
+# add a commit to a
+ $ cd ../a
+ $ hg bookmark X
+ $ hg bookmarks
+ \* X 2:* (glob)
+ $ make_changes
+ $ hg bookmarks
+ * X 3:81af7977fdb9
+
+# go back to b, and check out X
+ $ cd ../b
+ $ hg up X
+ 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
+ (activating bookmark X)
+ $ hg bookmarks
+ @ 3:* (glob)
+ \* X 1:* (glob)
+
+# pull, this should move the bookmark forward, because it was changed remotely
+ $ hg pull -u | grep "updating to active bookmark X"
+ updating to active bookmark X
+
+ $ hg bookmarks
+ @ 3:* (glob)
+ * X 4:81af7977fdb9
+
+the bookmark should not move if it diverged from remote
+ $ hg -R ../a status
+ $ hg -R ../b status
+ $ make_changes ../a
+ $ make_changes ../b
+ $ hg -R ../a status
+ $ hg -R ../b status
+ $ hg -R ../a bookmarks
+ * X 4:238292f60a57
+ $ hg -R ../b bookmarks
+ @ 3:* (glob)
+ * X 5:096f7e86892d
+ $ cd ../b
+ $ # make sure we cannot push after bookmarks diverged
+ $ hg push -B X | grep abort
+ abort: push creates new remote head * with bookmark 'X'! (glob)
+ (pull and merge or see 'hg help push' for details about pushing new heads)
+ [1]
+ $ hg pull -u | grep divergent
+ divergent bookmark X stored as X@default
+ 1 other divergent bookmarks for "X"
+ $ hg bookmarks
+ @ 3:* (glob)
+ * X 5:096f7e86892d
+ X@default 6:238292f60a57
+ $ hg id -in
+ 096f7e86892d 5
+ $ make_changes
+ $ hg status
+ $ hg bookmarks
+ @ 3:* (glob)
+ * X 7:227f941aeb07
+ X@default 6:238292f60a57
+
+now merge with the remote bookmark
+ $ hg merge X@default --tool :local -q
+ $ hg status
+ M test
+ $ hg commit -m"Merged with X@default"
+ $ hg bookmarks
+ @ 3:* (glob)
+ * X 8:26fed9bb3219
+ $ hg push -B X | grep bookmark
+ pushing to $TESTTMP/a (?)
+ updating bookmark X
+ $ cd ../a
+ $ hg up -q
+ $ hg bookmarks
+ * X 7:26fed9bb3219
+
+test hg pull when there is more than one descendant
+ $ cd ../a
+ $ hg bookmark Z
+ $ hg bookmark Y
+ $ make_changes . YY
+ $ hg up Z -q
+ $ make_changes . ZZ
+ created new head
+ $ hg bookmarks
+ X 7:26fed9bb3219
+ Y 8:131e663dbd2a
+ * Z 9:b74a4149df25
+ $ hg log -r 'p1(Y)' -r 'p1(Z)' -T '{rev}\n' # prove that Y and Z share the same parent
+ 7
+ $ hg log -r 'Y%Z' -T '{rev}\n' # revs in Y but not in Z
+ 8
+ $ hg log -r 'Z%Y' -T '{rev}\n' # revs in Z but not in Y
+ 9
+ $ cd ../b
+ $ hg pull -uq
+ $ hg id
+ b74a4149df25 tip Z
+ $ hg bookmarks | grep \* # no active bookmark
+ [1]
+
+
+test shelving
+ $ cd ../a
+ $ echo anotherfile > anotherfile # this change should not conflict
+ $ hg add anotherfile
+ $ hg commit -m"Change in a"
+ $ cd ../b
+ $ hg up Z | grep Z
+ (activating bookmark Z)
+ $ hg book | grep \* # make sure a bookmark is active
+ \* Z 10:* (glob)
+ $ echo "test b" >> test
+ $ hg diff --stat
+ test | 1 +
+ 1 files changed, 1 insertions(+), 0 deletions(-)
+ $ hg --config extensions.shelve= shelve
+ shelved as Z
+ 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
+ $ hg pull -uq
+ $ hg --trace --config extensions.shelve= unshelve
+ unshelving change 'Z'
+ rebasing shelved changes
+ $ hg diff --stat
+ test | 1 +
+ 1 files changed, 1 insertions(+), 0 deletions(-)
+
+
+make the bookmark move by updating it on a, and then pulling with a local change
+# add a commit to a
+ $ cd ../a
+ $ hg up -C X |fgrep "activating bookmark X"
+ (activating bookmark X)
+# go back to b, and check out X
+ $ cd ../b
+ $ hg up -C X |fgrep "activating bookmark X"
+ (activating bookmark X)
+# update and push from a
+ $ make_changes ../a
+ created new head
+ $ echo "more" >> test
+ $ hg pull -u 2>&1 | fgrep -v TESTTMP| fgrep -v "searching for changes" | fgrep -v adding
+ pulling from $TESTTMP/a
+ added 1 changesets with 0 changes to 0 files (+1 heads)
+ updating bookmark X
+ new changesets * (glob)
+ updating to active bookmark X
+ merging test
+ warning: conflicts while merging test! (edit, then use 'hg resolve --mark')
+ 0 files updated, 0 files merged, 0 files removed, 1 files unresolved
+ use 'hg resolve' to retry unresolved file merges
+ $ hg update -Cq
+ $ rm test.orig
+
+make sure that commits aren't possible if the working directory is not pointing to the active bookmark
+ $ hg -R ../a status
+ $ hg -R ../b status
+ $ hg -R ../a id -i
+ 36a6e592ec06
+ $ hg -R ../a book | grep X
+ \* X \d+:36a6e592ec06 (re)
+ $ hg -R ../b id -i
+ 36a6e592ec06
+ $ hg -R ../b book | grep X
+ \* X \d+:36a6e592ec06 (re)
+ $ make_changes ../a
+ $ hg -R ../a book | grep X
+ \* X \d+:f73a71c992b8 (re)
+ $ cd ../b
+ $ hg pull 2>&1 | grep -v add | grep -v pulling | grep -v searching | grep -v changeset
+ updating bookmark X
+ (run 'hg update' to get a working copy)
+ working directory out of sync with active bookmark, run 'hg up X'
+ $ hg id -i # we're still on the old commit
+ 36a6e592ec06
+ $ hg book | grep X # while the bookmark moved
+ \* X \d+:f73a71c992b8 (re)
+ $ make_changes
+ abort: cannot commit, working directory out of sync with active bookmark
+ (run 'hg up X')
+ [255]
+ $ hg up -Cq -r . # cleanup local changes
+ $ hg status
+ $ hg id -i # we're still on the old commit
+ 36a6e592ec06
+ $ hg up X -q
+ $ hg id -i # now we're on X
+ f73a71c992b8
+ $ hg book | grep X
+ \* X \d+:f73a71c992b8 (re)
+
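
The make_changes() helper above packs a lot into nested backquotes; for
readers, a rough Python rendering of what it does (hypothetical, not part of
the test):

    import os
    import subprocess

    # Append a line naming the repository to "test", commit it, and return
    # the commit's exit status, mirroring the shell helper.
    def make_changes(repo=None, message='test'):
        cwd = os.path.abspath(repo or os.getcwd())
        with open(os.path.join(cwd, 'test'), 'a') as f:
            f.write('test %s\n' % os.path.basename(cwd))
        return subprocess.call(['hg', 'commit', '-Am', message], cwd=cwd)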
--- a/tests/test-bookmarks-pushpull.t Wed Jan 09 20:00:35 2019 -0800
+++ b/tests/test-bookmarks-pushpull.t Fri Jan 18 13:28:22 2019 -0500
@@ -609,13 +609,12 @@
> # call.
> listkeys.makecommit= sh $TESTTMP/listkeys_makecommit.sh
> EOF
-
-(new config need server restart)
-
- $ killdaemons.py
- $ hg serve -R ../pull-race -p $HGPORT -d --pid-file=../pull-race.pid -E main-error.log
- $ cat ../pull-race.pid >> $DAEMON_PIDS
-
+ $ restart_server() {
+ > "$TESTDIR/killdaemons.py" $DAEMON_PIDS
+ > hg serve -R ../pull-race -p $HGPORT -d --pid-file=../pull-race.pid -E main-error.log
+ > cat ../pull-race.pid >> $DAEMON_PIDS
+ > }
+ $ restart_server # new config needs a server restart
$ hg -R $TESTTMP/pull-race book
@ 1:0d2164f0ce0d
X 1:0d2164f0ce0d
@@ -640,6 +639,54 @@
* Y 5:35d1ef0a8d1b
Z 1:0d2164f0ce0d
+Update a bookmark right after the initial lookup -r (issue4700)
+
+ $ echo c7 > ../pull-race/f3 # to be committed during the race
+ $ cat <<EOF > ../lookuphook.py
+ > """small extensions adding a hook after wireprotocol lookup to test race"""
+ > import functools
+ > from mercurial import wireprotov1server, wireprotov2server
+ >
+ > def wrappedlookup(orig, repo, *args, **kwargs):
+ > ret = orig(repo, *args, **kwargs)
+ > repo.hook(b'lookup')
+ > return ret
+ > for table in [wireprotov1server.commands, wireprotov2server.COMMANDS]:
+ > table[b'lookup'].func = functools.partial(wrappedlookup, table[b'lookup'].func)
+ > EOF
+ $ cat <<EOF > ../pull-race/.hg/hgrc
+ > [extensions]
+ > lookuphook=$TESTTMP/lookuphook.py
+ > [hooks]
+ > lookup.makecommit= sh $TESTTMP/listkeys_makecommit.sh
+ > EOF
+ $ restart_server # new config needs a server restart
+ $ hg -R $TESTTMP/pull-race book
+ @ 1:0d2164f0ce0d
+ X 1:0d2164f0ce0d
+ * Y 6:0d60821d2197
+ Z 1:0d2164f0ce0d
+ $ hg pull -r Y
+ pulling from http://localhost:$HGPORT/
+ searching for changes
+ adding changesets
+ adding manifests
+ adding file changes
+ added 1 changesets with 1 changes to 1 files
+ updating bookmark Y
+ new changesets 0d60821d2197 (1 drafts)
+ (run 'hg update' to get a working copy)
+ $ hg book
+ @ 1:0d2164f0ce0d
+ X 1:0d2164f0ce0d
+ * Y 6:0d60821d2197
+ Z 1:0d2164f0ce0d
+ $ hg -R $TESTTMP/pull-race book
+ @ 1:0d2164f0ce0d
+ X 1:0d2164f0ce0d
+ * Y 7:714424d9e8b8
+ Z 1:0d2164f0ce0d
+
(done with this section of the test)
$ killdaemons.py
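
The lookuphook extension above widens the issue4700 race window by firing a
hook right after the wire-protocol lookup returns. Its wrapping pattern,
reduced to a standalone sketch with invented names:

    # Replace a command-table entry so the original handler runs first and
    # a callback fires afterwards; the extension does the same with
    # functools.partial on the wireprotov1server/wireprotov2server tables.
    def wraplookup(table, hookfn):
        orig = table['lookup']
        def wrapped(*args, **kwargs):
            ret = orig(*args, **kwargs)
            hookfn()  # repo.hook(b'lookup') in the real extension
            return ret
        table['lookup'] = wrapped

    commands = {'lookup': lambda rev: 'node-for-%s' % rev}
    wraplookup(commands, lambda: print('lookup hook fired'))
    print(commands['lookup']('Y'))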
--- a/tests/test-bookmarks.t Wed Jan 09 20:00:35 2019 -0800
+++ b/tests/test-bookmarks.t Fri Jan 18 13:28:22 2019 -0500
@@ -202,8 +202,6 @@
abort: bookmark 'unknown' does not exist!
[255]
$ hg log -r 'bookmark("re:unknown")'
- abort: no bookmarks exist that match 'unknown'!
- [255]
$ hg log -r 'present(bookmark("literal:unknown"))'
$ hg log -r 'present(bookmark("re:unknown"))'
--- a/tests/test-branch-change.t Wed Jan 09 20:00:35 2019 -0800
+++ b/tests/test-branch-change.t Fri Jan 18 13:28:22 2019 -0500
@@ -308,24 +308,116 @@
o 18:204d2769eca2 Added a
stable ()
-Testing on merge
+Changing branch of a merge commit
- $ hg merge -r 26
- 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
+ $ hg branch -q ghi
+ $ echo f > f
+ $ hg ci -qAm 'Added f'
+ $ hg up -q 27
+ $ hg branch -q jkl
+ $ echo g > g
+ $ hg ci -qAm 'Added g'
+ $ hg glog -r 'heads(:)'
+ @ 29:6bc1c6c2c9da Added g
+ | jkl ()
+ ~
+ o 28:2f1019bd29d2 Added f
+ | ghi (b1)
+ ~
+
+ $ hg branch -q default
+ $ hg merge -r 28
+ 4 files updated, 0 files merged, 0 files removed, 0 files unresolved
(branch merge, don't forget to commit)
-
$ hg branch -r . abcd
abort: outstanding uncommitted merge
[255]
+
$ hg ci -m "Merge commit"
- $ hg branch -r '(.^)::' def
- abort: cannot change branch of a merge commit
+ $ hg glog -r 'parents(.)::'
+ @ 30:4d56e6b1eb6b Merge commit
+ |\ default ()
+ | o 29:6bc1c6c2c9da Added g
+ | | jkl ()
+ | ~
+ o 28:2f1019bd29d2 Added f
+ | ghi (b1)
+ ~
+
+ $ hg branch -r . ghi
+ 0 files updated, 0 files merged, 4 files removed, 0 files unresolved
+ changed branch on 1 changesets
+ $ hg branch -r . jkl
+ changed branch on 1 changesets
+ $ hg branch -r . default
+ changed branch on 1 changesets
+ $ hg branch -r . stable
+ abort: a branch of the same name already exists
[255]
Changing branch on public changeset
- $ hg phase -r 27 -p
- $ hg branch -r 27 def
+ $ hg phase -r . -p
+ $ hg branch -r . def
abort: cannot change branch of public changesets
(see 'hg help phases' for details)
[255]
+
+Merge commit with conflicts, with and without evolution
+
+ $ mklozenge() {
+ > echo foo > a
+ > hg ci -qAm foo
+ > echo bar > a
+ > hg ci -qm bar
+ > hg up -q '.^'
+ > echo baz > a
+ > hg ci -qm baz
+ > hg merge -q -t :local
+ > echo neither > a
+ > hg ci -qm neither
+ > }
+
+ $ cd ..
+ $ hg init merge-with-evolution
+ $ cd merge-with-evolution
+ $ mklozenge
+
+ $ hg branch -r '(.^)::' abc
+ changed branch on 2 changesets
+ $ hg glog
+ @ 5:c07fa8b34d54 neither
+ |\ abc ()
+ | o 4:f2aa51777cc9 baz
+ | | abc ()
+ o | 1:2e33c4f0856b bar
+ |/ default ()
+ o 0:91cfb6004abf foo
+ default ()
+ $ hg cat a
+ neither
+
+ $ cd ..
+ $ hg init merge-without-evolution
+ $ cd merge-without-evolution
+ $ mklozenge
+ $ cat > .hg/hgrc << EOF
+ > [experimental]
+ > evolution = no
+ > evolution.allowunstable = no
+ > EOF
+
+ $ hg branch -r '(.^)::' abc
+ changed branch on 2 changesets
+ saved backup bundle to $TESTTMP/merge-without-evolution/.hg/strip-backup/9a3a2af368f4-8db1a361-branch-change.hg
+ $ hg glog
+ @ 3:c07fa8b34d54 neither
+ |\ abc ()
+ | o 2:f2aa51777cc9 baz
+ | | abc ()
+ o | 1:2e33c4f0856b bar
+ |/ default ()
+ o 0:91cfb6004abf foo
+ default ()
+ $ hg cat a
+ neither
--- a/tests/test-branches.t Wed Jan 09 20:00:35 2019 -0800
+++ b/tests/test-branches.t Fri Jan 18 13:28:22 2019 -0500
@@ -157,6 +157,18 @@
summary: Adding b branch
+---- going to test branch listing by rev
+ $ hg branches -r0
+ default 0:19709c5a4e75 (inactive)
+ $ hg branches -qr0
+ default
+---- now more than one rev
+ $ hg branches -r2:5
+ b 4:aee39cd168d0
+ a 5:d8cbc61dbaa6 (inactive)
+ $ hg branches -qr2:5
+ b
+ a
---- going to test branch closing
$ hg branches
--- a/tests/test-bundle.t Wed Jan 09 20:00:35 2019 -0800
+++ b/tests/test-bundle.t Fri Jan 18 13:28:22 2019 -0500
@@ -212,6 +212,7 @@
cache
requires
store
+ wcache
Pull ../full.hg into empty (with hook)
@@ -274,17 +275,17 @@
$ hg -R test debugcreatestreamclonebundle packed.hg
writing 2664 bytes for 6 files
- bundle requirements: generaldelta, revlogv1
+ bundle requirements: generaldelta, revlogv1, sparserevlog
$ f -B 64 --size --sha1 --hexdump packed.hg
- packed.hg: size=2827, sha1=9d14cb90c66a21462d915ab33656f38b9deed686
+ packed.hg: size=2840, sha1=12bf3eee3eb8a04c503ce2d29b48f0135c7edff5
0000: 48 47 53 31 55 4e 00 00 00 00 00 00 00 06 00 00 |HGS1UN..........|
- 0010: 00 00 00 00 0a 68 00 16 67 65 6e 65 72 61 6c 64 |.....h..generald|
- 0020: 65 6c 74 61 2c 72 65 76 6c 6f 67 76 31 00 64 61 |elta,revlogv1.da|
- 0030: 74 61 2f 61 64 69 66 66 65 72 65 6e 74 66 69 6c |ta/adifferentfil|
+ 0010: 00 00 00 00 0a 68 00 23 67 65 6e 65 72 61 6c 64 |.....h.#generald|
+ 0020: 65 6c 74 61 2c 72 65 76 6c 6f 67 76 31 2c 73 70 |elta,revlogv1,sp|
+ 0030: 61 72 73 65 72 65 76 6c 6f 67 00 64 61 74 61 2f |arserevlog.data/|
$ hg debugbundle --spec packed.hg
- none-packed1;requirements%3Dgeneraldelta%2Crevlogv1
+ none-packed1;requirements%3Dgeneraldelta%2Crevlogv1%2Csparserevlog
generaldelta requirement is not listed in stream clone bundles unless used
@@ -319,7 +320,7 @@
$ hg -R testsecret debugcreatestreamclonebundle packedsecret.hg
(warning: stream clone bundle will contain secret revisions)
writing 301 bytes for 3 files
- bundle requirements: generaldelta, revlogv1
+ bundle requirements: generaldelta, revlogv1, sparserevlog
Unpacking packed1 bundles with "hg unbundle" isn't allowed
@@ -900,3 +901,12 @@
$ hg update -R ../update2bundled.hg -r 0
0 files updated, 0 files merged, 2 files removed, 0 files unresolved
#endif
+
+Test the option that creates a slim bundle
+
+ $ hg bundle -a --config devel.bundle.delta=p1 ./slim.hg
+ 3 changesets found
+
+Test the option that creates a full, no-delta bundle
+ $ hg bundle -a --config devel.bundle.delta=full ./full.hg
+ 3 changesets found
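
As an aside on the expected output above: the requirements embedded in the
packed1 bundle spec are percent-encoded, and decoding them makes the new
sparserevlog entry visible (a sketch):

    from urllib.parse import unquote

    spec = 'none-packed1;requirements%3Dgeneraldelta%2Crevlogv1%2Csparserevlog'
    comp, params = spec.split(';', 1)
    key, value = unquote(params).split('=', 1)
    print(comp, key, value.split(','))
    # -> none-packed1 requirements ['generaldelta', 'revlogv1', 'sparserevlog']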
--- a/tests/test-cbor.py Wed Jan 09 20:00:35 2019 -0800
+++ b/tests/test-cbor.py Fri Jan 18 13:28:22 2019 -0500
@@ -1,10 +1,19 @@
from __future__ import absolute_import
+import os
+import sys
import unittest
-from mercurial.thirdparty import (
- cbor,
-)
+# TODO migrate to canned cbor test strings and stop using thirdparty.cbor
+tpp = os.path.normpath(os.path.join(os.path.dirname(__file__),
+ '..', 'mercurial', 'thirdparty'))
+if not os.path.exists(tpp):
+ # skip, not in a repo
+ sys.exit(80)
+sys.path[0:0] = [tpp]
+import cbor
+del sys.path[0]
+
from mercurial.utils import (
cborutil,
)
--- a/tests/test-check-help.t Wed Jan 09 20:00:35 2019 -0800
+++ b/tests/test-check-help.t Fri Jan 18 13:28:22 2019 -0500
@@ -10,13 +10,14 @@
> import msvcrt
> import os
> msvcrt.setmode(sys.stdout.fileno(), os.O_BINARY)
+ > stdout = getattr(sys.stdout, 'buffer', sys.stdout)
> topics = set()
> topicre = re.compile(br':hg:`help ([a-z0-9\-.]+)`')
> for fname in sys.argv:
> with open(fname, 'rb') as f:
> topics.update(m.group(1) for m in topicre.finditer(f.read()))
> for s in sorted(topics):
- > print(s)
+ > stdout.write(b'%s\n' % s)
> EOF
$ cd "$TESTDIR"/..
--- a/tests/test-check-interfaces.py Wed Jan 09 20:00:35 2019 -0800
+++ b/tests/test-check-interfaces.py Fri Jan 18 13:28:22 2019 -0500
@@ -40,8 +40,12 @@
wireprotov2server,
)
-rootdir = pycompat.fsencode(
- os.path.normpath(os.path.join(os.path.dirname(__file__), '..')))
+testdir = os.path.dirname(__file__)
+rootdir = pycompat.fsencode(os.path.normpath(os.path.join(testdir, '..')))
+
+sys.path[0:0] = [testdir]
+import simplestorerepo
+del sys.path[0]
def checkzobject(o, allowextra=False):
"""Verify an object with a zope interface."""
@@ -177,12 +181,19 @@
ziverify.verifyClass(repository.imanifestlog, manifest.manifestlog)
ziverify.verifyClass(repository.imanifeststorage, manifest.manifestrevlog)
+ ziverify.verifyClass(repository.irevisiondelta,
+ simplestorerepo.simplestorerevisiondelta)
+ ziverify.verifyClass(repository.ifilestorage, simplestorerepo.filestorage)
+ ziverify.verifyClass(repository.iverifyproblem,
+ simplestorerepo.simplefilestoreproblem)
+
vfs = vfsmod.vfs(b'.')
fl = filelog.filelog(vfs, b'dummy.i')
checkzobject(fl, allowextra=True)
# Conforms to imanifestlog.
- ml = manifest.manifestlog(vfs, repo, manifest.manifestrevlog(repo.svfs))
+ ml = manifest.manifestlog(vfs, repo, manifest.manifestrevlog(repo.svfs),
+ repo.narrowmatch())
checkzobject(ml)
checkzobject(repo.manifestlog)
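
The added ziverify.verifyClass() calls assert that simplestorerepo's classes
honor the declared storage interfaces. A self-contained illustration of what
such a check does (assumes zope.interface is installed; the interface and
class names here are invented for the example):

    from zope.interface import Interface, implementer
    from zope.interface import verify as ziverify

    class ifilestorage(Interface):
        def read(node):
            """return the data stored for ``node``"""

    @implementer(ifilestorage)
    class filestorage(object):
        def read(self, node):
            return b''

    # Raises zope.interface.exceptions.BrokenImplementation if a declared
    # attribute is missing from the implementation.
    ziverify.verifyClass(ifilestorage, filestorage)
    print('interface contract satisfied')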
--- a/tests/test-check-module-imports.t Wed Jan 09 20:00:35 2019 -0800
+++ b/tests/test-check-module-imports.t Fri Jan 18 13:28:22 2019 -0500
@@ -23,6 +23,7 @@
> -X contrib/packaging/hg-docker \
> -X contrib/python-zstandard/ \
> -X contrib/win32/hgwebdir_wsgi.py \
+ > -X contrib/perf-utils/perf-revlog-write-plot.py \
> -X doc/gendoc.py \
> -X doc/hgmanpage.py \
> -X i18n/posplit \
--- a/tests/test-chg.t Wed Jan 09 20:00:35 2019 -0800
+++ b/tests/test-chg.t Fri Jan 18 13:28:22 2019 -0500
@@ -1,7 +1,24 @@
#require chg
+ $ mkdir log
+ $ cp $HGRCPATH $HGRCPATH.unconfigured
+ $ cat <<'EOF' >> $HGRCPATH
+ > [cmdserver]
+ > log = $TESTTMP/log/server.log
+ > max-log-files = 1
+ > max-log-size = 10 kB
+ > EOF
$ cp $HGRCPATH $HGRCPATH.orig
+ $ filterlog () {
+ > sed -e 's!^[0-9/]* [0-9:]* ([0-9]*)>!YYYY/MM/DD HH:MM:SS (PID)>!' \
+ > -e 's!\(setprocname\|received fds\|setenv\): .*!\1: ...!' \
+ > -e 's!\(confighash\|mtimehash\) = [0-9a-f]*!\1 = ...!g' \
+ > -e 's!\(in \)[0-9.]*s\b!\1 ...s!g' \
+ > -e 's!\(pid\)=[0-9]*!\1=...!g' \
+ > -e 's!\(/server-\)[0-9a-f]*!\1...!g'
+ > }
+
init repo
$ chg init foo
@@ -201,5 +218,114 @@
shut down servers and restore environment:
$ rm -R chgsock
+ $ sleep 2
$ CHGSOCKNAME=$OLDCHGSOCKNAME
$ cd ..
+
+check that server events are recorded:
+
+ $ ls log
+ server.log
+ server.log.1
+
+print only the last 10 lines, since we aren't sure how many records are
+preserved:
+
+ $ cat log/server.log.1 log/server.log | tail -10 | filterlog
+ YYYY/MM/DD HH:MM:SS (PID)> forked worker process (pid=...)
+ YYYY/MM/DD HH:MM:SS (PID)> setprocname: ...
+ YYYY/MM/DD HH:MM:SS (PID)> received fds: ...
+ YYYY/MM/DD HH:MM:SS (PID)> chdir to '$TESTTMP/extreload'
+ YYYY/MM/DD HH:MM:SS (PID)> setumask 18
+ YYYY/MM/DD HH:MM:SS (PID)> setenv: ...
+ YYYY/MM/DD HH:MM:SS (PID)> confighash = ... mtimehash = ...
+ YYYY/MM/DD HH:MM:SS (PID)> validate: []
+ YYYY/MM/DD HH:MM:SS (PID)> worker process exited (pid=...)
+ YYYY/MM/DD HH:MM:SS (PID)> $TESTTMP/extreload/chgsock/server-... is not owned, exiting.
+
+repository cache
+----------------
+
+ $ rm log/server.log*
+ $ cp $HGRCPATH.unconfigured $HGRCPATH
+ $ cat <<'EOF' >> $HGRCPATH
+ > [cmdserver]
+ > log = $TESTTMP/log/server.log
+ > max-repo-cache = 1
+ > track-log = command, repocache
+ > EOF
+
+isolate socket directory for stable result:
+
+ $ OLDCHGSOCKNAME=$CHGSOCKNAME
+ $ mkdir chgsock
+ $ CHGSOCKNAME=`pwd`/chgsock/server
+
+create empty repo and cache it:
+
+ $ hg init cached
+ $ hg id -R cached
+ 000000000000 tip
+ $ sleep 1
+
+modify repo (and cache will be invalidated):
+
+ $ touch cached/a
+ $ hg ci -R cached -Am 'add a'
+ adding a
+ $ sleep 1
+
+read cached repo:
+
+ $ hg log -R cached
+ changeset: 0:ac82d8b1f7c4
+ tag: tip
+ user: test
+ date: Thu Jan 01 00:00:00 1970 +0000
+ summary: add a
+
+ $ sleep 1
+
+discard cached from LRU cache:
+
+ $ hg clone cached cached2
+ updating to branch default
+ 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
+ $ hg id -R cached2
+ ac82d8b1f7c4 tip
+ $ sleep 1
+
+read uncached repo:
+
+ $ hg log -R cached
+ changeset: 0:ac82d8b1f7c4
+ tag: tip
+ user: test
+ date: Thu Jan 01 00:00:00 1970 +0000
+ summary: add a
+
+ $ sleep 1
+
+shut down servers and restore environment:
+
+ $ rm -R chgsock
+ $ sleep 2
+ $ CHGSOCKNAME=$OLDCHGSOCKNAME
+
+check server log:
+
+ $ cat log/server.log | filterlog
+ YYYY/MM/DD HH:MM:SS (PID)> init cached
+ YYYY/MM/DD HH:MM:SS (PID)> id -R cached
+ YYYY/MM/DD HH:MM:SS (PID)> loaded repo into cache: $TESTTMP/cached (in ...s)
+ YYYY/MM/DD HH:MM:SS (PID)> repo from cache: $TESTTMP/cached
+ YYYY/MM/DD HH:MM:SS (PID)> ci -R cached -Am 'add a'
+ YYYY/MM/DD HH:MM:SS (PID)> loaded repo into cache: $TESTTMP/cached (in ...s)
+ YYYY/MM/DD HH:MM:SS (PID)> repo from cache: $TESTTMP/cached
+ YYYY/MM/DD HH:MM:SS (PID)> log -R cached
+ YYYY/MM/DD HH:MM:SS (PID)> loaded repo into cache: $TESTTMP/cached (in ...s)
+ YYYY/MM/DD HH:MM:SS (PID)> clone cached cached2
+ YYYY/MM/DD HH:MM:SS (PID)> id -R cached2
+ YYYY/MM/DD HH:MM:SS (PID)> loaded repo into cache: $TESTTMP/cached2 (in ...s)
+ YYYY/MM/DD HH:MM:SS (PID)> log -R cached
+ YYYY/MM/DD HH:MM:SS (PID)> loaded repo into cache: $TESTTMP/cached (in ...s)
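
What the log above demonstrates: with cmdserver.max-repo-cache=1, loading a
second repository evicts the first from the server's cache, so the final
"log -R cached" has to reload it. A toy model of that LRU policy (a sketch,
not the server's implementation):

    from collections import OrderedDict

    class repocache(OrderedDict):
        """Tiny LRU cache keyed by repository path."""
        def __init__(self, maxsize=1):
            super().__init__()
            self.maxsize = maxsize

        def put(self, path, repo):
            self[path] = repo
            self.move_to_end(path)
            while len(self) > self.maxsize:
                self.popitem(last=False)  # drop least recently used

    cache = repocache(maxsize=1)
    cache.put('$TESTTMP/cached', object())
    cache.put('$TESTTMP/cached2', object())  # evicts "cached"
    print('$TESTTMP/cached' in cache)        # False: next use reloads it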
--- a/tests/test-clone-uncompressed.t Wed Jan 09 20:00:35 2019 -0800
+++ b/tests/test-clone-uncompressed.t Fri Jan 18 13:28:22 2019 -0500
@@ -184,8 +184,8 @@
#if stream-bundle2
$ hg clone --stream -U http://localhost:$HGPORT clone1
streaming all changes
- 1030 files to transfer, 96.4 KB of data
- transferred 96.4 KB in * seconds (* */sec) (glob)
+ 1030 files to transfer, 96.5 KB of data
+ transferred 96.5 KB in * seconds (* */sec) (glob)
$ ls -1 clone1/.hg/cache
branch2-served
@@ -201,23 +201,23 @@
$ f --size --hex --bytes 256 body
- body: size=112230
+ body: size=112262
0000: 04 6e 6f 6e 65 48 47 32 30 00 00 00 00 00 00 00 |.noneHG20.......|
- 0010: 70 07 53 54 52 45 41 4d 32 00 00 00 00 03 00 09 |p.STREAM2.......|
- 0020: 05 09 04 0c 35 62 79 74 65 63 6f 75 6e 74 39 38 |....5bytecount98|
- 0030: 37 35 38 66 69 6c 65 63 6f 75 6e 74 31 30 33 30 |758filecount1030|
+ 0010: 7f 07 53 54 52 45 41 4d 32 00 00 00 00 03 00 09 |..STREAM2.......|
+ 0020: 05 09 04 0c 44 62 79 74 65 63 6f 75 6e 74 39 38 |....Dbytecount98|
+ 0030: 37 37 35 66 69 6c 65 63 6f 75 6e 74 31 30 33 30 |775filecount1030|
0040: 72 65 71 75 69 72 65 6d 65 6e 74 73 64 6f 74 65 |requirementsdote|
0050: 6e 63 6f 64 65 25 32 43 66 6e 63 61 63 68 65 25 |ncode%2Cfncache%|
0060: 32 43 67 65 6e 65 72 61 6c 64 65 6c 74 61 25 32 |2Cgeneraldelta%2|
- 0070: 43 72 65 76 6c 6f 67 76 31 25 32 43 73 74 6f 72 |Crevlogv1%2Cstor|
- 0080: 65 00 00 80 00 73 08 42 64 61 74 61 2f 30 2e 69 |e....s.Bdata/0.i|
- 0090: 00 03 00 01 00 00 00 00 00 00 00 02 00 00 00 01 |................|
- 00a0: 00 00 00 00 00 00 00 01 ff ff ff ff ff ff ff ff |................|
- 00b0: 80 29 63 a0 49 d3 23 87 bf ce fe 56 67 92 67 2c |.)c.I.#....Vg.g,|
- 00c0: 69 d1 ec 39 00 00 00 00 00 00 00 00 00 00 00 00 |i..9............|
- 00d0: 75 30 73 08 42 64 61 74 61 2f 31 2e 69 00 03 00 |u0s.Bdata/1.i...|
- 00e0: 01 00 00 00 00 00 00 00 02 00 00 00 01 00 00 00 |................|
- 00f0: 00 00 00 00 01 ff ff ff ff ff ff ff ff f9 76 da |..............v.|
+ 0070: 43 72 65 76 6c 6f 67 76 31 25 32 43 73 70 61 72 |Crevlogv1%2Cspar|
+ 0080: 73 65 72 65 76 6c 6f 67 25 32 43 73 74 6f 72 65 |serevlog%2Cstore|
+ 0090: 00 00 80 00 73 08 42 64 61 74 61 2f 30 2e 69 00 |....s.Bdata/0.i.|
+ 00a0: 03 00 01 00 00 00 00 00 00 00 02 00 00 00 01 00 |................|
+ 00b0: 00 00 00 00 00 00 01 ff ff ff ff ff ff ff ff 80 |................|
+ 00c0: 29 63 a0 49 d3 23 87 bf ce fe 56 67 92 67 2c 69 |)c.I.#....Vg.g,i|
+ 00d0: d1 ec 39 00 00 00 00 00 00 00 00 00 00 00 00 75 |..9............u|
+ 00e0: 30 73 08 42 64 61 74 61 2f 31 2e 69 00 03 00 01 |0s.Bdata/1.i....|
+ 00f0: 00 00 00 00 00 00 00 02 00 00 00 01 00 00 00 00 |................|
--uncompressed is an alias to --stream
@@ -232,8 +232,8 @@
#if stream-bundle2
$ hg clone --uncompressed -U http://localhost:$HGPORT clone1-uncompressed
streaming all changes
- 1030 files to transfer, 96.4 KB of data
- transferred 96.4 KB in * seconds (* */sec) (glob)
+ 1030 files to transfer, 96.5 KB of data
+ transferred 96.5 KB in * seconds (* */sec) (glob)
#endif
Clone with background file closing enabled
@@ -274,12 +274,12 @@
bundle2-input-bundle: with-transaction
bundle2-input-part: "stream2" (params: 3 mandatory) supported
applying stream bundle
- 1030 files to transfer, 96.4 KB of data
+ 1030 files to transfer, 96.5 KB of data
starting 4 threads for background file closing
starting 4 threads for background file closing
updating the branch cache
- transferred 96.4 KB in * seconds (* */sec) (glob)
- bundle2-input-part: total payload size 112077
+ transferred 96.5 KB in * seconds (* */sec) (glob)
+ bundle2-input-part: total payload size 112094
bundle2-input-part: "listkeys" (params: 1 mandatory) supported
bundle2-input-bundle: 1 parts total
checking for updated bookmarks
@@ -318,8 +318,8 @@
#if stream-bundle2
$ hg clone --stream -U http://localhost:$HGPORT secret-allowed
streaming all changes
- 1030 files to transfer, 96.4 KB of data
- transferred 96.4 KB in * seconds (* */sec) (glob)
+ 1030 files to transfer, 96.5 KB of data
+ transferred 96.5 KB in * seconds (* */sec) (glob)
#endif
$ killdaemons.py
--- a/tests/test-clone.t Wed Jan 09 20:00:35 2019 -0800
+++ b/tests/test-clone.t Fri Jan 18 13:28:22 2019 -0500
@@ -43,10 +43,6 @@
default 10:a7949464abda
$ ls .hg/cache
branch2-served
- checkisexec (execbit !)
- checklink (symlink !)
- checklink-target (symlink !)
- checknoexec (execbit !)
manifestfulltextcache (reporevlogstore !)
rbc-names-v1
rbc-revs-v1
@@ -62,9 +58,6 @@
$ ls .hg/cache
branch2-served
- checkisexec (execbit !)
- checklink (symlink !)
- checklink-target (symlink !)
rbc-names-v1
rbc-revs-v1
@@ -574,6 +567,7 @@
> from mercurial import extensions, hg, ui as uimod
> myui = uimod.ui.load()
> extensions.loadall(myui)
+ > extensions.populateui(myui)
> repo = hg.repository(myui, b'a')
> hg.clone(myui, {}, repo, dest=b"ua", branch=[b"stable",])
> EOF
--- a/tests/test-clonebundles.t Wed Jan 09 20:00:35 2019 -0800
+++ b/tests/test-clonebundles.t Fri Jan 18 13:28:22 2019 -0500
@@ -64,7 +64,7 @@
$ echo "http://localhost:$HGPORT1/bundle.hg" > server/.hg/clonebundles.manifest
$ hg clone http://localhost:$HGPORT server-not-runner
applying clone bundle from http://localhost:$HGPORT1/bundle.hg
- error fetching bundle: (.* refused.*|Protocol not supported|(.* )?Cannot assign requested address) (re)
+ error fetching bundle: (.* refused.*|Protocol not supported|(.* )?\$EADDRNOTAVAIL\$) (re)
abort: error applying bundle
(if this error persists, consider contacting the server operator or disable clone bundles via "--config ui.clonebundles=false")
[255]
@@ -295,7 +295,7 @@
$ hg -R server debugcreatestreamclonebundle packed.hg
writing 613 bytes for 4 files
- bundle requirements: generaldelta, revlogv1
+ bundle requirements: generaldelta, revlogv1, sparserevlog
No bundle spec should work
--- a/tests/test-commandserver.t Wed Jan 09 20:00:35 2019 -0800
+++ b/tests/test-commandserver.t Fri Jan 18 13:28:22 2019 -0500
@@ -211,6 +211,7 @@
lfs.usercache=$TESTTMP/.cache/lfs
ui.slash=True
ui.interactive=False
+ ui.merge=internal:merge
ui.mergemarkers=detailed
ui.foo=bar
ui.nontty=true
@@ -221,6 +222,7 @@
*** runcommand -R foo showconfig ui defaults
ui.slash=True
ui.interactive=False
+ ui.merge=internal:merge
ui.mergemarkers=detailed
ui.nontty=true
#endif
@@ -605,7 +607,7 @@
*** runcommand qqueue --active
foo
- $ cat <<EOF > dbgui.py
+ $ cat <<'EOF' > ../dbgui.py
> import os
> import sys
> from mercurial import commands, registrar
@@ -613,10 +615,14 @@
> command = registrar.command(cmdtable)
> @command(b"debuggetpass", norepo=True)
> def debuggetpass(ui):
- > ui.write(b"%s\\n" % ui.getpass())
+ > ui.write(b"%s\n" % ui.getpass())
> @command(b"debugprompt", norepo=True)
> def debugprompt(ui):
- > ui.write(b"%s\\n" % ui.prompt(b"prompt:"))
+ > ui.write(b"%s\n" % ui.prompt(b"prompt:"))
+ > @command(b"debugpromptchoice", norepo=True)
+ > def debugpromptchoice(ui):
+ > msg = b"promptchoice (y/n)? $$ &Yes $$ &No"
+ > ui.write(b"%d\n" % ui.promptchoice(msg))
> @command(b"debugreadstdin", norepo=True)
> def debugreadstdin(ui):
> ui.write(b"read: %r\n" % sys.stdin.read(1))
@@ -628,7 +634,7 @@
> EOF
$ cat <<EOF >> .hg/hgrc
> [extensions]
- > dbgui = dbgui.py
+ > dbgui = ../dbgui.py
> EOF
>>> from hgclient import check, readchannel, runcommand, stringio
@@ -722,6 +728,70 @@
$ cd ..
+structured message channel:
+
+ $ cat <<'EOF' >> repo2/.hg/hgrc
+ > [ui]
+ > # server --config should precede repository option
+ > message-output = stdio
+ > EOF
+
+ >>> from hgclient import bprint, checkwith, readchannel, runcommand
+ >>> @checkwith(extraargs=[b'--config', b'ui.message-output=channel',
+ ... b'--config', b'cmdserver.message-encodings=foo cbor'])
+ ... def verify(server):
+ ... _ch, data = readchannel(server)
+ ... bprint(data)
+ ... runcommand(server, [b'-R', b'repo2', b'verify'])
+ capabilities: getencoding runcommand
+ encoding: ascii
+ message-encoding: cbor
+ pid: * (glob)
+ pgid: * (glob) (no-windows !)
+ *** runcommand -R repo2 verify
+ message: '\xa2DdataTchecking changesets\nDtypeFstatus'
+ message: '\xa6Ditem@Cpos\xf6EtopicHcheckingEtotal\xf6DtypeHprogressDunit@'
+ message: '\xa2DdataSchecking manifests\nDtypeFstatus'
+ message: '\xa6Ditem@Cpos\xf6EtopicHcheckingEtotal\xf6DtypeHprogressDunit@'
+ message: '\xa2DdataX0crosschecking files in changesets and manifests\nDtypeFstatus'
+ message: '\xa6Ditem@Cpos\xf6EtopicMcrosscheckingEtotal\xf6DtypeHprogressDunit@'
+ message: '\xa2DdataOchecking files\nDtypeFstatus'
+ message: '\xa6Ditem@Cpos\xf6EtopicHcheckingEtotal\xf6DtypeHprogressDunit@'
+ message: '\xa2DdataX/checked 0 changesets with 0 changes to 0 files\nDtypeFstatus'
+
+ >>> from hgclient import checkwith, readchannel, runcommand, stringio
+ >>> @checkwith(extraargs=[b'--config', b'ui.message-output=channel',
+ ... b'--config', b'cmdserver.message-encodings=cbor',
+ ... b'--config', b'extensions.dbgui=dbgui.py'])
+ ... def prompt(server):
+ ... readchannel(server)
+ ... interactive = [b'--config', b'ui.interactive=True']
+ ... runcommand(server, [b'debuggetpass'] + interactive,
+ ... input=stringio(b'1234\n'))
+ ... runcommand(server, [b'debugprompt'] + interactive,
+ ... input=stringio(b'5678\n'))
+ ... runcommand(server, [b'debugpromptchoice'] + interactive,
+ ... input=stringio(b'n\n'))
+ *** runcommand debuggetpass --config ui.interactive=True
+ message: '\xa3DdataJpassword: Hpassword\xf5DtypeFprompt'
+ 1234
+ *** runcommand debugprompt --config ui.interactive=True
+ message: '\xa3DdataGprompt:GdefaultAyDtypeFprompt'
+ 5678
+ *** runcommand debugpromptchoice --config ui.interactive=True
+ message: '\xa4Gchoices\x82\x82AyCYes\x82AnBNoDdataTpromptchoice (y/n)? GdefaultAyDtypeFprompt'
+ 1
+
+bad message encoding:
+
+ $ hg serve --cmdserver pipe --config ui.message-output=channel
+ abort: no supported message encodings:
+ [255]
+ $ hg serve --cmdserver pipe --config ui.message-output=channel \
+ > --config cmdserver.message-encodings='foo bar'
+ abort: no supported message encodings: foo bar
+ [255]
+
unix domain socket:
$ cd repo
@@ -776,9 +846,18 @@
if server crashed before hello, traceback will be sent to 'e' channel as
last ditch:
+ $ cat <<'EOF' > ../earlycrasher.py
+ > from mercurial import commandserver, extensions
+ > def _serverequest(orig, ui, repo, conn, createcmdserver, prereposetups):
+ > def createcmdserver(*args, **kwargs):
+ > raise Exception('crash')
+ > return orig(ui, repo, conn, createcmdserver, prereposetups)
+ > def extsetup(ui):
+ > extensions.wrapfunction(commandserver, b'_serverequest', _serverequest)
+ > EOF
$ cat <<EOF >> .hg/hgrc
- > [cmdserver]
- > log = inexistent/path.log
+ > [extensions]
+ > earlycrasher = ../earlycrasher.py
> EOF
>>> from hgclient import bprint, check, readchannel, unixserver
>>> server = unixserver(b'.hg/server.sock', b'.hg/server.log')
@@ -793,13 +872,13 @@
... break
>>> check(earlycrash, server.connect)
e, 'Traceback (most recent call last):\n'
- e, "(IOError|FileNotFoundError): .*" (re)
+ e, 'Exception: crash\n'
>>> server.shutdown()
$ cat .hg/server.log | grep -v '^ '
listening at .hg/server.sock
Traceback (most recent call last):
- (IOError|FileNotFoundError): .* (re)
+ Exception: crash
killed!
#endif
#if no-unix-socket
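
The message: lines above are CBOR-encoded maps sent over the new message
channel. Decoding the first prompt message shows the structure (assumes a
Mercurial checkout on the import path; cborutil.decodeall returns a list of
top-level values):

    from mercurial.utils import cborutil

    # b'\xa3' opens a three-entry map; 0x44/0x4a/0x48/0x46 introduce short
    # byte strings, and 0xf5 is CBOR "true".
    msg = b'\xa3DdataJpassword: Hpassword\xf5DtypeFprompt'
    print(cborutil.decodeall(msg))
    # -> [{b'data': b'password: ', b'password': True, b'type': b'prompt'}]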
--- a/tests/test-completion.t Wed Jan 09 20:00:35 2019 -0800
+++ b/tests/test-completion.t Fri Jan 18 13:28:22 2019 -0500
@@ -238,7 +238,7 @@
bisect: reset, good, bad, skip, extend, command, noupdate
bookmarks: force, rev, delete, rename, inactive, list, template
branch: force, clean, rev
- branches: active, closed, template
+ branches: active, closed, rev, template
bundle: force, rev, branch, base, all, type, ssh, remotecmd, insecure
cat: output, rev, decode, include, exclude, template
clone: noupdate, updaterev, rev, branch, pull, uncompressed, stream, ssh, remotecmd, insecure
@@ -299,7 +299,7 @@
debuguigetpass: prompt
debuguiprompt: prompt
debugupdatecaches:
- debugupgraderepo: optimize, run
+ debugupgraderepo: optimize, run, backup
debugwalk: include, exclude
debugwhyunstable:
debugwireargs: three, four, five, ssh, remotecmd, insecure
@@ -308,7 +308,7 @@
export: bookmark, output, switch-parent, rev, text, git, binary, nodates, template
files: rev, print0, include, exclude, template, subrepos
forget: interactive, include, exclude, dry-run
- graft: rev, continue, stop, abort, edit, log, no-commit, force, currentdate, currentuser, date, user, tool, dry-run
+ graft: rev, base, continue, stop, abort, edit, log, no-commit, force, currentdate, currentuser, date, user, tool, dry-run
grep: print0, all, diff, text, follow, ignore-case, files-with-matches, line-number, rev, all-files, user, date, template, include, exclude
heads: rev, topo, active, closed, style, template
help: extension, command, keyword, system
@@ -325,7 +325,7 @@
paths: template
phase: public, draft, secret, force, rev
pull: update, force, rev, bookmark, branch, ssh, remotecmd, insecure
- push: force, rev, bookmark, branch, new-branch, pushvars, ssh, remotecmd, insecure
+ push: force, rev, bookmark, branch, new-branch, pushvars, publish, ssh, remotecmd, insecure
recover:
remove: after, force, subrepos, include, exclude, dry-run
rename: after, force, include, exclude, dry-run
--- a/tests/test-contrib-perf.t Wed Jan 09 20:00:35 2019 -0800
+++ b/tests/test-contrib-perf.t Fri Jan 18 13:28:22 2019 -0500
@@ -57,6 +57,9 @@
benchmark the update of a branchmap
perfbranchmapload
benchmark reading the branchmap
+ perfbranchmapupdate
+ benchmark branchmap update from for <base> revs to <target>
+ revs
perfbundleread
Benchmark reading of bundle files.
perfcca (no help text available)
@@ -76,6 +79,9 @@
(no help text available)
perfdirstatewrite
(no help text available)
+ perfdiscovery
+ benchmark discovery between local repo and the peer at given
+ path
perffncacheencode
(no help text available)
perffncacheload
@@ -83,6 +89,10 @@
perffncachewrite
(no help text available)
perfheads (no help text available)
+ perfhelper-pathcopies
+ find statistic about potential parameters for the
+ 'perftracecopies'
+ perfignore benchmark operation related to computing ignore
perfindex (no help text available)
perflinelogedits
(no help text available)
@@ -101,10 +111,11 @@
(no help text available)
perfparents (no help text available)
perfpathcopies
- (no help text available)
+ benchmark the copy tracing logic
perfphases benchmark phasesets computation
perfphasesremote
benchmark time needed to analyse phases of the remote server
+ perfprogress printing of progress bars
perfrawfiles (no help text available)
perfrevlogchunks
Benchmark operations on revlog chunks.
@@ -114,6 +125,8 @@
Benchmark obtaining a revlog revision.
perfrevlogrevisions
Benchmark reading a series of revisions from a revlog.
+ perfrevlogwrite
+ Benchmark writing a series of revisions to a revlog.
perfrevrange (no help text available)
perfrevset benchmark the execution time of a revset
perfstartup (no help text available)
@@ -138,8 +151,12 @@
$ hg perfunidiff --alldata 1
$ hg perfbookmarks
$ hg perfbranchmap
+ $ hg perfbranchmapload
+ $ hg perfbranchmapupdate --base "not tip" --target "tip"
+ benchmark of branchmap with 3 revisions with 1 new ones
$ hg perfcca
$ hg perfchangegroupchangelog
+ $ hg perfchangegroupchangelog --cgversion 01
$ hg perfchangeset 2
$ hg perfctxfiles 2
$ hg perfdiffwd
@@ -159,6 +176,7 @@
fncache already up to date
#endif
$ hg perfheads
+ $ hg perfignore
$ hg perfindex
$ hg perflinelogedits -n 1
$ hg perfloadmarkers
@@ -174,6 +192,7 @@
$ hg perfmoonwalk
$ hg perfnodelookup 2
$ hg perfpathcopies 1 2
+ $ hg perfprogress --total 1000
$ hg perfrawfiles 2
$ hg perfrevlogindex -c
#if reporevlogstore
@@ -190,6 +209,7 @@
$ hg perfvolatilesets
$ hg perfwalk
$ hg perfparents
+ $ hg perfdiscovery -q .
test actual output
------------------
@@ -265,4 +285,16 @@
contrib/perf.py:\d+: (re)
> from mercurial import (
import newer module separately in try clause for early Mercurial
+ contrib/perf.py:\d+: (re)
+ > origindexpath = orig.opener.join(orig.indexfile)
+ use getvfs()/getsvfs() for early Mercurial
+ contrib/perf.py:\d+: (re)
+ > origdatapath = orig.opener.join(orig.datafile)
+ use getvfs()/getsvfs() for early Mercurial
+ contrib/perf.py:\d+: (re)
+ > vfs = vfsmod.vfs(tmpdir)
+ use getvfs()/getsvfs() for early Mercurial
+ contrib/perf.py:\d+: (re)
+ > vfs.options = getattr(orig.opener, 'options', None)
+ use getvfs()/getsvfs() for early Mercurial
[1]
--- a/tests/test-convert-filemap.t Wed Jan 09 20:00:35 2019 -0800
+++ b/tests/test-convert-filemap.t Fri Jan 18 13:28:22 2019 -0500
@@ -435,6 +435,32 @@
|
o 0 "addb" files: b
+Include directives drop empty commits, but other directives don't
+
+ $ cat > branchpruning/exclude_filemap <<EOF
+ > exclude a
+ > EOF
+ $ hg convert --filemap branchpruning/exclude_filemap branchpruning branchpruning-hg-exclude
+ initializing destination branchpruning-hg-exclude repository
+ scanning source...
+ sorting...
+ converting...
+ 5 adda
+ 4 closefoo
+ 3 emptybranch
+ 2 closeempty
+ 1 addb
+ 0 closedefault
+
+ $ glog -R branchpruning-hg-exclude
+ _ 3 "closedefault" files:
+ |
+ o 2 "addb" files: b
+
+ _ 1 "closeempty" files:
+ |
+ o 0 "emptybranch" files:
+
Test rebuilding of map with unknown revisions in shamap - it used to crash
@@ -451,7 +477,7 @@
run hg source pre-conversion action
run hg sink pre-conversion action
scanning source...
- scanning: 1 revisions
+ scanning: 1/7 revisions (14.29%)
sorting...
converting...
0 merging something
--- a/tests/test-convert-git.t Wed Jan 09 20:00:35 2019 -0800
+++ b/tests/test-convert-git.t Fri Jan 18 13:28:22 2019 -0500
@@ -750,7 +750,7 @@
test missing .gitmodules
$ git submodule add ../git-repo4 >/dev/null 2>/dev/null
- $ git checkout HEAD .gitmodules
+ $ git checkout HEAD -- .gitmodules
$ git rm .gitmodules
rm '.gitmodules'
$ git commit -q -m "remove .gitmodules" .gitmodules
--- a/tests/test-custom-filters.t Wed Jan 09 20:00:35 2019 -0800
+++ b/tests/test-custom-filters.t Fri Jan 18 13:28:22 2019 -0500
@@ -27,6 +27,7 @@
> .hgignore
> prefix.py
> prefix.pyc
+ > __pycache__/
> EOF
$ cat > stuff.txt <<EOF
--- a/tests/test-debugcommands.t Wed Jan 09 20:00:35 2019 -0800
+++ b/tests/test-debugcommands.t Fri Jan 18 13:28:22 2019 -0500
@@ -195,10 +195,10 @@
#if reporevlogstore no-pure
$ hg debugdeltachain -m
- rev chain# chainlen prev delta size rawsize chainsize ratio lindist extradist extraratio
- 0 1 1 -1 base 44 43 44 1.02326 44 0 0.00000
- 1 2 1 -1 base 0 0 0 0.00000 0 0 0.00000
- 2 3 1 -1 base 44 43 44 1.02326 44 0 0.00000
+ rev chain# chainlen prev delta size rawsize chainsize ratio lindist extradist extraratio readsize largestblk rddensity srchunks
+ 0 1 1 -1 base 44 43 44 1.02326 44 0 0.00000 44 44 1.00000 1
+ 1 2 1 -1 base 0 0 0 0.00000 0 0 0.00000 0 0 1.00000 1
+ 2 3 1 -1 base 44 43 44 1.02326 44 0 0.00000 44 44 1.00000 1
$ hg debugdeltachain -m -T '{rev} {chainid} {chainlen}\n'
0 1 1
@@ -217,9 +217,13 @@
"deltatype": "base",
"extradist": 0,
"extraratio": 0.0,
+ "largestblock": 44,
"lindist": 44,
"prevrev": -1,
+ "readdensity": 1.0,
+ "readsize": 44,
"rev": 0,
+ "srchunks": 1,
"uncompsize": 43
},
{
@@ -231,9 +235,13 @@
"deltatype": "base",
"extradist": 0,
"extraratio": 0,
+ "largestblock": 0,
"lindist": 0,
"prevrev": -1,
+ "readdensity": 1,
+ "readsize": 0,
"rev": 1,
+ "srchunks": 1,
"uncompsize": 0
},
{
@@ -246,9 +254,13 @@
"deltatype": "base",
"extradist": 0,
"extraratio": 0.0,
+ "largestblock": 44,
"lindist": 44,
"prevrev": -1,
+ "readdensity": 1.0,
+ "readsize": 44,
"rev": 2,
+ "srchunks": 1,
"uncompsize": 43
}
]
@@ -631,8 +643,8 @@
devel-peer-request: pairs: 81 bytes
sending hello command
sending between command
- remote: 427
- remote: capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
+ remote: 440
+ remote: capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
remote: 1
devel-peer-request: protocaps
devel-peer-request: caps: * bytes (glob)
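
The four columns added to debugdeltachain describe read efficiency: roughly,
readsize is the total bytes read to reconstruct the revision, largestblk the
largest contiguous read, rddensity the share of read bytes belonging to the
chain, and srchunks the number of read chunks. The density arithmetic for
rev 0 above, spelled out:

    # 44 bytes of chain data retrieved by reading 44 bytes: every byte
    # read is useful, so the density is 1.0.
    chainsize, readsize = 44, 44
    readdensity = float(chainsize) / readsize if readsize else 1.0
    print(readdensity)  # 1.0, as shown in the rddensity column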
--- a/tests/test-devel-warnings.t Wed Jan 09 20:00:35 2019 -0800
+++ b/tests/test-devel-warnings.t Fri Jan 18 13:28:22 2019 -0500
@@ -83,6 +83,8 @@
> blackbox=
> [devel]
> all-warnings=1
+ > [blackbox]
+ > track = command, commandexception, commandfinish, develwarn
> EOF
$ hg init lock-checker
--- a/tests/test-dirstate-race.t Wed Jan 09 20:00:35 2019 -0800
+++ b/tests/test-dirstate-race.t Fri Jan 18 13:28:22 2019 -0500
@@ -53,7 +53,7 @@
> context,
> extensions,
> )
- > def extsetup():
+ > def extsetup(ui):
> extensions.wrapfunction(context.workingctx, '_checklookup', overridechecklookup)
> def overridechecklookup(orig, self, files):
> # make an update that changes the dirstate from underneath
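
The one-line fix above matters because Mercurial passes the ui object to
extension setup hooks: extsetup(ui) is the supported signature, and the
zero-argument form only works through a compatibility fallback. Minimal
shape of a conforming hook:

    # A minimal extsetup accepting the ui object Mercurial passes in.
    def extsetup(ui):
        ui.debug(b'dirstate race extension loaded\n')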
--- a/tests/test-doctest.py Wed Jan 09 20:00:35 2019 -0800
+++ b/tests/test-doctest.py Fri Jan 18 13:28:22 2019 -0500
@@ -61,6 +61,7 @@
testmod('mercurial.parser')
testmod('mercurial.pycompat')
testmod('mercurial.revlog')
+testmod('mercurial.revlogutils.deltas')
testmod('mercurial.revsetlang')
testmod('mercurial.smartset')
testmod('mercurial.store')
--- a/tests/test-duplicateoptions.py Wed Jan 09 20:00:35 2019 -0800
+++ b/tests/test-duplicateoptions.py Fri Jan 18 13:28:22 2019 -0500
@@ -29,6 +29,7 @@
u = uimod.ui.load()
extensions.loadall(u)
+extensions.populateui(u)
globalshort = set()
globallong = set()
--- a/tests/test-empty.t Wed Jan 09 20:00:35 2019 -0800
+++ b/tests/test-empty.t Fri Jan 18 13:28:22 2019 -0500
@@ -20,8 +20,10 @@
$ ls .hg
00changelog.i
+ cache
requires
store
+ wcache
Should be empty:
--- a/tests/test-extension-timing.t Wed Jan 09 20:00:35 2019 -0800
+++ b/tests/test-extension-timing.t Fri Jan 18 13:28:22 2019 -0500
@@ -41,54 +41,58 @@
$ echo '[extensions]' >> $HGRCPATH
$ echo "foobar = $abspath" >> $HGRCPATH
+ $ filterlog () {
+ > sed -e 's!^[0-9/]* [0-9:]* ([0-9]*)>!YYYY/MM/DD HH:MM:SS (PID)>!'
+ > }
+
Test extension setup timings
- $ hg foo --traceback --config devel.debug.extensions=yes --debug 2>&1
- debug.extensions: loading extensions
- debug.extensions: - processing 1 entries
- debug.extensions: - loading extension: 'foobar'
- debug.extensions: > 'foobar' extension loaded in * (glob)
- debug.extensions: - validating extension tables: 'foobar'
- debug.extensions: - invoking registered callbacks: 'foobar'
- debug.extensions: > callbacks completed in * (glob)
- debug.extensions: > loaded 1 extensions, total time * (glob)
- debug.extensions: - loading configtable attributes
- debug.extensions: - executing uisetup hooks
- debug.extensions: - running uisetup for 'foobar'
+ $ hg foo --traceback --config devel.debug.extensions=yes --debug 2>&1 | filterlog
+ YYYY/MM/DD HH:MM:SS (PID)> loading extensions
+ YYYY/MM/DD HH:MM:SS (PID)> - processing 1 entries
+ YYYY/MM/DD HH:MM:SS (PID)> - loading extension: foobar
+ YYYY/MM/DD HH:MM:SS (PID)> > foobar extension loaded in * (glob)
+ YYYY/MM/DD HH:MM:SS (PID)> - validating extension tables: foobar
+ YYYY/MM/DD HH:MM:SS (PID)> - invoking registered callbacks: foobar
+ YYYY/MM/DD HH:MM:SS (PID)> > callbacks completed in * (glob)
+ YYYY/MM/DD HH:MM:SS (PID)> > loaded 1 extensions, total time * (glob)
+ YYYY/MM/DD HH:MM:SS (PID)> - loading configtable attributes
+ YYYY/MM/DD HH:MM:SS (PID)> - executing uisetup hooks
+ YYYY/MM/DD HH:MM:SS (PID)> - running uisetup for foobar
uisetup called [debug]
uisetup called
uisetup called [status]
- debug.extensions: > uisetup for 'foobar' took * (glob)
- debug.extensions: > all uisetup took * (glob)
- debug.extensions: - executing extsetup hooks
- debug.extensions: - running extsetup for 'foobar'
- debug.extensions: > extsetup for 'foobar' took * (glob)
- debug.extensions: > all extsetup took * (glob)
- debug.extensions: - executing remaining aftercallbacks
- debug.extensions: > remaining aftercallbacks completed in * (glob)
- debug.extensions: - loading extension registration objects
- debug.extensions: > extension registration object loading took * (glob)
- debug.extensions: > extension foobar take a total of * to load (glob)
- debug.extensions: extension loading complete
- debug.extensions: loading additional extensions
- debug.extensions: - processing 1 entries
- debug.extensions: > loaded 0 extensions, total time * (glob)
- debug.extensions: - loading configtable attributes
- debug.extensions: - executing uisetup hooks
- debug.extensions: > all uisetup took * (glob)
- debug.extensions: - executing extsetup hooks
- debug.extensions: > all extsetup took * (glob)
- debug.extensions: - executing remaining aftercallbacks
- debug.extensions: > remaining aftercallbacks completed in * (glob)
- debug.extensions: - loading extension registration objects
- debug.extensions: > extension registration object loading took * (glob)
- debug.extensions: extension loading complete
- debug.extensions: - executing reposetup hooks
- debug.extensions: - running reposetup for foobar
+ YYYY/MM/DD HH:MM:SS (PID)> > uisetup for foobar took * (glob)
+ YYYY/MM/DD HH:MM:SS (PID)> > all uisetup took * (glob)
+ YYYY/MM/DD HH:MM:SS (PID)> - executing extsetup hooks
+ YYYY/MM/DD HH:MM:SS (PID)> - running extsetup for foobar
+ YYYY/MM/DD HH:MM:SS (PID)> > extsetup for foobar took * (glob)
+ YYYY/MM/DD HH:MM:SS (PID)> > all extsetup took * (glob)
+ YYYY/MM/DD HH:MM:SS (PID)> - executing remaining aftercallbacks
+ YYYY/MM/DD HH:MM:SS (PID)> > remaining aftercallbacks completed in * (glob)
+ YYYY/MM/DD HH:MM:SS (PID)> - loading extension registration objects
+ YYYY/MM/DD HH:MM:SS (PID)> > extension registration object loading took * (glob)
+ YYYY/MM/DD HH:MM:SS (PID)> > extension foobar take a total of * to load (glob)
+ YYYY/MM/DD HH:MM:SS (PID)> extension loading complete
+ YYYY/MM/DD HH:MM:SS (PID)> loading additional extensions
+ YYYY/MM/DD HH:MM:SS (PID)> - processing 1 entries
+ YYYY/MM/DD HH:MM:SS (PID)> > loaded 0 extensions, total time * (glob)
+ YYYY/MM/DD HH:MM:SS (PID)> - loading configtable attributes
+ YYYY/MM/DD HH:MM:SS (PID)> - executing uisetup hooks
+ YYYY/MM/DD HH:MM:SS (PID)> > all uisetup took * (glob)
+ YYYY/MM/DD HH:MM:SS (PID)> - executing extsetup hooks
+ YYYY/MM/DD HH:MM:SS (PID)> > all extsetup took * (glob)
+ YYYY/MM/DD HH:MM:SS (PID)> - executing remaining aftercallbacks
+ YYYY/MM/DD HH:MM:SS (PID)> > remaining aftercallbacks completed in * (glob)
+ YYYY/MM/DD HH:MM:SS (PID)> - loading extension registration objects
+ YYYY/MM/DD HH:MM:SS (PID)> > extension registration object loading took * (glob)
+ YYYY/MM/DD HH:MM:SS (PID)> extension loading complete
+ YYYY/MM/DD HH:MM:SS (PID)> - executing reposetup hooks
+ YYYY/MM/DD HH:MM:SS (PID)> - running reposetup for foobar
reposetup called for a
ui == repo.ui
- debug.extensions: > reposetup for 'foobar' took * (glob)
- debug.extensions: > all reposetup took * (glob)
+ YYYY/MM/DD HH:MM:SS (PID)> > reposetup for foobar took * (glob)
+ YYYY/MM/DD HH:MM:SS (PID)> > all reposetup took * (glob)
Foo
$ cd ..
--- a/tests/test-extension.t Wed Jan 09 20:00:35 2019 -0800
+++ b/tests/test-extension.t Fri Jan 18 13:28:22 2019 -0500
@@ -13,27 +13,38 @@
$ cat > foobar.py <<EOF
> import os
- > from mercurial import commands, registrar
- > cmdtable = {}
- > command = registrar.command(cmdtable)
- > configtable = {}
- > configitem = registrar.configitem(configtable)
- > configitem(b'tests', b'foo', default=b"Foo")
- > def uisetup(ui):
+ > from mercurial import commands, exthelper, registrar
+ >
+ > eh = exthelper.exthelper()
+ > eh.configitem(b'tests', b'foo', default=b"Foo")
+ >
+ > uisetup = eh.finaluisetup
+ > uipopulate = eh.finaluipopulate
+ > reposetup = eh.finalreposetup
+ > cmdtable = eh.cmdtable
+ > configtable = eh.configtable
+ >
+ > @eh.uisetup
+ > def _uisetup(ui):
> ui.debug(b"uisetup called [debug]\\n")
> ui.write(b"uisetup called\\n")
> ui.status(b"uisetup called [status]\\n")
> ui.flush()
- > def reposetup(ui, repo):
+ > @eh.uipopulate
+ > def _uipopulate(ui):
+ > ui._populatecnt = getattr(ui, "_populatecnt", 0) + 1
+ > ui.write(b"uipopulate called (%d times)\n" % ui._populatecnt)
+ > @eh.reposetup
+ > def _reposetup(ui, repo):
> ui.write(b"reposetup called for %s\\n" % os.path.basename(repo.root))
> ui.write(b"ui %s= repo.ui\\n" % (ui == repo.ui and b"=" or b"!"))
> ui.flush()
- > @command(b'foo', [], b'hg foo')
+ > @eh.command(b'foo', [], b'hg foo')
> def foo(ui, *args, **kwargs):
> foo = ui.config(b'tests', b'foo')
> ui.write(foo)
> ui.write(b"\\n")
- > @command(b'bar', [], b'hg bar', norepo=True)
+ > @eh.command(b'bar', [], b'hg bar', norepo=True)
> def bar(ui, *args, **kwargs):
> ui.write(b"Bar\\n")
> EOF
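
The exthelper class used above aggregates the setup hooks and registration
tables that extensions previously wired up by hand. A minimal standalone
sketch of the same pattern (extension, command, and config names are
illustrative, not part of this change):

  # myext.py - skeleton of an exthelper-based extension
  from mercurial import exthelper

  eh = exthelper.exthelper()

  # expose the aggregated entry points under the names Mercurial looks for
  uisetup = eh.finaluisetup
  uipopulate = eh.finaluipopulate
  reposetup = eh.finalreposetup
  cmdtable = eh.cmdtable
  configtable = eh.configtable

  eh.configitem(b'myext', b'greeting', default=b'hello')

  @eh.command(b'greet', [], b'hg greet')
  def greet(ui, repo):
      # reads the config item registered above and prints it
      ui.write(ui.config(b'myext', b'greeting') + b'\n')
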
@@ -54,13 +65,26 @@
$ hg foo
uisetup called
uisetup called [status]
+ uipopulate called (1 times)
+ uipopulate called (1 times)
+ uipopulate called (1 times)
reposetup called for a
ui == repo.ui
+ uipopulate called (1 times) (chg !)
+ uipopulate called (1 times) (chg !)
+ uipopulate called (1 times) (chg !)
+ uipopulate called (1 times) (chg !)
+ uipopulate called (1 times) (chg !)
reposetup called for a (chg !)
ui == repo.ui (chg !)
Foo
$ hg foo --quiet
uisetup called (no-chg !)
+ uipopulate called (1 times)
+ uipopulate called (1 times)
+ uipopulate called (1 times) (chg !)
+ uipopulate called (1 times) (chg !)
+ uipopulate called (1 times) (chg !)
reposetup called for a (chg !)
ui == repo.ui
Foo
@@ -68,6 +92,11 @@
uisetup called [debug] (no-chg !)
uisetup called (no-chg !)
uisetup called [status] (no-chg !)
+ uipopulate called (1 times)
+ uipopulate called (1 times)
+ uipopulate called (1 times) (chg !)
+ uipopulate called (1 times) (chg !)
+ uipopulate called (1 times) (chg !)
reposetup called for a (chg !)
ui == repo.ui
Foo
@@ -76,8 +105,12 @@
$ hg clone a b
uisetup called (no-chg !)
uisetup called [status] (no-chg !)
+ uipopulate called (1 times)
+ uipopulate called (1 times) (chg !)
+ uipopulate called (1 times) (chg !)
reposetup called for a
ui == repo.ui
+ uipopulate called (1 times)
reposetup called for b
ui == repo.ui
updating to branch default
@@ -86,6 +119,8 @@
$ hg bar
uisetup called (no-chg !)
uisetup called [status] (no-chg !)
+ uipopulate called (1 times)
+ uipopulate called (1 times) (chg !)
Bar
$ echo 'foobar = !' >> $HGRCPATH
@@ -96,8 +131,16 @@
$ hg foo
uisetup called
uisetup called [status]
+ uipopulate called (1 times)
+ uipopulate called (1 times)
+ uipopulate called (1 times)
reposetup called for a
ui == repo.ui
+ uipopulate called (1 times) (chg !)
+ uipopulate called (1 times) (chg !)
+ uipopulate called (1 times) (chg !)
+ uipopulate called (1 times) (chg !)
+ uipopulate called (1 times) (chg !)
reposetup called for a (chg !)
ui == repo.ui (chg !)
Foo
@@ -108,27 +151,39 @@
$ cat > foo.py <<EOF
> from __future__ import print_function
> import os
+ > from mercurial import exthelper
> name = os.path.basename(__file__).rsplit('.', 1)[0]
> print("1) %s imported" % name, flush=True)
- > def uisetup(ui):
+ > eh = exthelper.exthelper()
+ > @eh.uisetup
+ > def _uisetup(ui):
> print("2) %s uisetup" % name, flush=True)
- > def extsetup():
+ > @eh.extsetup
+ > def _extsetup(ui):
> print("3) %s extsetup" % name, flush=True)
- > def reposetup(ui, repo):
- > print("4) %s reposetup" % name, flush=True)
+ > @eh.uipopulate
+ > def _uipopulate(ui):
+ > print("4) %s uipopulate" % name, flush=True)
+ > @eh.reposetup
+ > def _reposetup(ui, repo):
+ > print("5) %s reposetup" % name, flush=True)
+ >
+ > extsetup = eh.finalextsetup
+ > reposetup = eh.finalreposetup
+ > uipopulate = eh.finaluipopulate
+ > uisetup = eh.finaluisetup
+ > revsetpredicate = eh.revsetpredicate
>
> bytesname = name.encode('utf-8')
> # custom predicate to check registration of functions at loading
> from mercurial import (
- > registrar,
> smartset,
> )
- > revsetpredicate = registrar.revsetpredicate()
- > @revsetpredicate(bytesname, safe=True) # safe=True for query via hgweb
+ > @eh.revsetpredicate(bytesname, safe=True) # safe=True for query via hgweb
> def custompredicate(repo, subset, x):
> return smartset.baseset([r for r in subset if r in {0}])
> EOF
- $ $PYTHON $TESTTMP/unflush.py foo.py
+ $ "$PYTHON" $TESTTMP/unflush.py foo.py
$ cp foo.py bar.py
$ echo 'foo = foo.py' >> $HGRCPATH
@@ -143,8 +198,14 @@
2) bar uisetup
3) foo extsetup
3) bar extsetup
- 4) foo reposetup
- 4) bar reposetup
+ 4) foo uipopulate
+ 4) bar uipopulate
+ 4) foo uipopulate
+ 4) bar uipopulate
+ 4) foo uipopulate
+ 4) bar uipopulate
+ 5) foo reposetup
+ 5) bar reposetup
0:c24b9ac61126
Check hgweb's load order of extensions and registration of functions
@@ -167,8 +228,12 @@
2) bar uisetup
3) foo extsetup
3) bar extsetup
- 4) foo reposetup
- 4) bar reposetup
+ 4) foo uipopulate
+ 4) bar uipopulate
+ 4) foo uipopulate
+ 4) bar uipopulate
+ 5) foo reposetup
+ 5) bar reposetup
(check that revset predicate foo() and bar() are available)
@@ -214,10 +279,10 @@
> NO_CHECK_EOF
$ cat > loadabs.py <<NO_CHECK_EOF
> import mod.ambigabs as ambigabs
- > def extsetup():
+ > def extsetup(ui):
> print('ambigabs.s=%s' % ambigabs.s, flush=True)
> NO_CHECK_EOF
- $ $PYTHON $TESTTMP/unflush.py loadabs.py
+ $ "$PYTHON" $TESTTMP/unflush.py loadabs.py
$ (PYTHONPATH=${PYTHONPATH}${PATHSEP}${TESTTMP}/libroot; hg --config extensions.loadabs=loadabs.py root)
ambigabs.s=libroot/ambig.py
$TESTTMP/a
@@ -230,10 +295,10 @@
> NO_CHECK_EOF
$ cat > loadrel.py <<NO_CHECK_EOF
> import mod.ambigrel as ambigrel
- > def extsetup():
+ > def extsetup(ui):
> print('ambigrel.s=%s' % ambigrel.s, flush=True)
> NO_CHECK_EOF
- $ $PYTHON $TESTTMP/unflush.py loadrel.py
+ $ "$PYTHON" $TESTTMP/unflush.py loadrel.py
$ (PYTHONPATH=${PYTHONPATH}${PATHSEP}${TESTTMP}/libroot; hg --config extensions.loadrel=loadrel.py root)
ambigrel.s=libroot/mod/ambig.py
$TESTTMP/a
@@ -1510,7 +1575,7 @@
> minimumhgversion = b'3.6'
> EOF
$ hg --config extensions.minversion=minversion1.py version
- (third party extension minversion requires version 3.6 or newer of Mercurial; disabling)
+ (third party extension minversion requires version 3.6 or newer of Mercurial (current: 3.5.2); disabling)
Mercurial Distributed SCM (version 3.5.2)
(see https://mercurial-scm.org for more information)
@@ -1524,7 +1589,7 @@
> minimumhgversion = b'3.7'
> EOF
$ hg --config extensions.minversion=minversion2.py version 2>&1 | egrep '\(third'
- (third party extension minversion requires version 3.7 or newer of Mercurial; disabling)
+ (third party extension minversion requires version 3.7 or newer of Mercurial (current: 3.6); disabling)
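
For reference, the version gate is driven by a single module-level attribute
in the extension; a minimal sketch (version string illustrative):

  # in the extension module: hg older than this disables the extension
  # with the "(current: ...)" hint shown above
  minimumhgversion = b'3.7'
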
Can load version that is only off by a point release
@@ -1778,7 +1843,7 @@
> def ext(*args, **opts):
> print(opts[b'opt'], flush=True)
> EOF
- $ $PYTHON $TESTTMP/unflush.py $TESTTMP/test_unicode_default_value.py
+ $ "$PYTHON" $TESTTMP/unflush.py $TESTTMP/test_unicode_default_value.py
$ cat > $TESTTMP/opt-unicode-default/.hg/hgrc << EOF
> [extensions]
> test_unicode_default_value = $TESTTMP/test_unicode_default_value.py
--- a/tests/test-extensions-afterloaded.t Wed Jan 09 20:00:35 2019 -0800
+++ b/tests/test-extensions-afterloaded.t Fri Jan 18 13:28:22 2019 -0500
@@ -87,7 +87,7 @@
$ echo "foo = $basepath/foo.py" >> .hg/hgrc
$ echo "bar = $basepath/minvers.py" >> .hg/hgrc
$ hg log -r. -T'{rev}\n'
- (third party extension bar requires version 9999.9999 or newer of Mercurial; disabling)
+ (third party extension bar requires version 9999.9999 or newer of Mercurial (current: *); disabling) (glob)
foo.uisetup
foo: bar loaded: False
0
@@ -107,7 +107,7 @@
$ echo "bar = $basepath/minvers.py" >> .hg/hgrc
$ echo "foo = $basepath/foo.py" >> .hg/hgrc
$ hg log -r. -T'{rev}\n'
- (third party extension bar requires version 9999.9999 or newer of Mercurial; disabling)
+ (third party extension bar requires version 9999.9999 or newer of Mercurial (current: *); disabling) (glob)
foo.uisetup
foo: bar loaded: False
0
--- a/tests/test-fastannotate-revmap.py Wed Jan 09 20:00:35 2019 -0800
+++ b/tests/test-fastannotate-revmap.py Fri Jan 18 13:28:22 2019 -0500
@@ -14,7 +14,7 @@
xrange = range
def genhsh(i):
- return chr(i) + b'\0' * 19
+ return pycompat.bytechr(i) + b'\0' * 19
def gettemppath():
fd, path = tempfile.mkstemp()
@@ -35,7 +35,8 @@
ensure(rm.rev2hsh(i) is None)
ensure(rm.hsh2rev(b'\0' * 20) is None)
- paths = ['', 'a', None, 'b', 'b', 'c', 'c', None, 'a', 'b', 'a', 'a']
+ paths = [
+ b'', b'a', None, b'b', b'b', b'c', b'c', None, b'a', b'b', b'a', b'a']
for i in xrange(1, 5):
ensure(rm.append(genhsh(i), sidebranch=(i & 1), path=paths[i]) == i)
@@ -88,7 +89,7 @@
path = gettemppath()
# incorrect header
- with open(path, 'w') as f:
+ with open(path, 'wb') as f:
f.write(b'NOT A VALID HEADER')
try:
revmap.revmap(path)
@@ -106,8 +107,8 @@
# corrupt the file by appending a byte
size = os.stat(path).st_size
- with open(path, 'a') as f:
- f.write('\xff')
+ with open(path, 'ab') as f:
+ f.write(b'\xff')
try:
revmap.revmap(path)
ensure(False)
@@ -116,7 +117,7 @@
# corrupt the file by removing the last byte
ensure(size > 0)
- with open(path, 'w') as f:
+ with open(path, 'wb') as f:
f.truncate(size - 1)
try:
revmap.revmap(path)
@@ -130,7 +131,8 @@
path = gettemppath()
rm = revmap.revmap(path)
for i in xrange(1, 10):
- ensure(rm.append(genhsh(i), sidebranch=(i & 1), path=str(i // 3)) == i)
+ ensure(rm.append(genhsh(i),
+ sidebranch=(i & 1), path=(b'%d' % (i // 3))) == i)
rm.flush()
# copy rm to rm2
@@ -174,10 +176,10 @@
# "contains" checks paths
rm = revmap.revmap()
for i in xrange(1, 5):
- ensure(rm.append(genhsh(i), path=str(i // 2)) == i)
+ ensure(rm.append(genhsh(i), path=(b'%d' % (i // 2))) == i)
for i in xrange(1, 5):
- ensure(fakefctx(genhsh(i), path=str(i // 2)) in rm)
- ensure(fakefctx(genhsh(i), path='a') not in rm)
+ ensure(fakefctx(genhsh(i), path=(b'%d' % (i // 2))) in rm)
+ ensure(fakefctx(genhsh(i), path=b'a') not in rm)
def testlastnode():
path = gettemppath()
@@ -186,7 +188,7 @@
ensure(revmap.getlastnode(path) is None)
for i in xrange(1, 10):
hsh = genhsh(i)
- rm.append(hsh, path=str(i // 2), flush=True)
+ rm.append(hsh, path=(b'%d' % (i // 2)), flush=True)
ensure(revmap.getlastnode(path) == hsh)
rm2 = revmap.revmap(path)
ensure(rm2.rev2hsh(rm2.maxrev) == hsh)
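
The chr() -> pycompat.bytechr() change matters because on Python 3 chr()
returns a one-character str, which cannot be concatenated with the bytes
padding. A small sketch of what genhsh() now produces:

  from mercurial import pycompat

  # pycompat.bytechr(1) is b'\x01' on both Python 2 and 3
  node = pycompat.bytechr(1) + b'\0' * 19
  assert isinstance(node, bytes) and len(node) == 20
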
--- a/tests/test-filebranch.t Wed Jan 09 20:00:35 2019 -0800
+++ b/tests/test-filebranch.t Fri Jan 18 13:28:22 2019 -0500
@@ -6,7 +6,7 @@
> import sys, os
> print("merging for", os.path.basename(sys.argv[1]))
> EOF
- $ HGMERGE="$PYTHON ../merge"; export HGMERGE
+ $ HGMERGE="\"$PYTHON\" ../merge"; export HGMERGE
Creating base:
--- a/tests/test-filecache.py Wed Jan 09 20:00:35 2019 -0800
+++ b/tests/test-filecache.py Fri Jan 18 13:28:22 2019 -0500
@@ -177,7 +177,7 @@
def setbeforeget(repo):
os.remove('x')
os.remove('y')
- repo.cached = 'string set externally'
+ repo.__class__.cached.set(repo, 'string set externally')
repo.invalidate()
print("* neither file exists")
print(repo.cached)
@@ -188,7 +188,7 @@
print("* file x created")
print(repo.cached)
- repo.cached = 'string 2 set externally'
+ repo.__class__.cached.set(repo, 'string 2 set externally')
repo.invalidate()
print("* string set externally again")
print(repo.cached)
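
Plain attribute assignment no longer goes through the filecache protocol
here; the value has to be stored via the descriptor's set() method on the
class. A sketch of the updated pattern used in this test:

  # old style, no longer supported by filecache:
  #   repo.cached = 'value'
  # new style: call set() on the class-level filecache descriptor
  repo.__class__.cached.set(repo, 'value')
  repo.invalidate()  # re-check the cached value against the backing files
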
--- a/tests/test-fileset.t Wed Jan 09 20:00:35 2019 -0800
+++ b/tests/test-fileset.t Fri Jan 18 13:28:22 2019 -0500
@@ -19,18 +19,18 @@
$ fileset -v a1
(symbol 'a1')
* matcher:
- <patternmatcher patterns='(?:a1$)'>
+ <patternmatcher patterns='a1$'>
a1
$ fileset -v 'a*'
(symbol 'a*')
* matcher:
- <patternmatcher patterns='(?:a[^/]*$)'>
+ <patternmatcher patterns='a[^/]*$'>
a1
a2
$ fileset -v '"re:a\d"'
(string 're:a\\d')
* matcher:
- <patternmatcher patterns='(?:a\\d)'>
+ <patternmatcher patterns='a\\d'>
a1
a2
$ fileset -v '!re:"a\d"'
@@ -41,7 +41,7 @@
* matcher:
<predicatenmatcher
pred=<not
- <patternmatcher patterns='(?:a\\d)'>>>
+ <patternmatcher patterns='a\\d'>>>
b1
b2
$ fileset -v 'path:a1 or glob:b?'
@@ -53,7 +53,7 @@
(symbol 'glob')
(symbol 'b?')))
* matcher:
- <patternmatcher patterns='(?:a1(?:/|$)|b.$)'>
+ <patternmatcher patterns='a1(?:/|$)|b.$'>
a1
b1
b2
@@ -196,7 +196,7 @@
(string 'b'))))
* matcher:
<unionmatcher matchers=[
- <patternmatcher patterns='(?:a1$|a2$)'>,
+ <patternmatcher patterns='a1$|a2$'>,
<intersectionmatcher
m1=<predicatenmatcher pred=clean>,
m2=<predicatenmatcher pred=grep('b')>>]>
@@ -216,7 +216,7 @@
(symbol 'path')
(symbol 'b1')))
* matcher:
- <patternmatcher patterns='(?:a1$|a2$|b1(?:/|$))'>
+ <patternmatcher patterns='a1$|a2$|b1(?:/|$)'>
a1
a2
b1
@@ -237,7 +237,7 @@
(string 'b')))
* matcher:
<unionmatcher matchers=[
- <patternmatcher patterns='(?:a1$|b2$)'>,
+ <patternmatcher patterns='a1$|b2$'>,
<predicatenmatcher pred=grep('a')>,
<predicatenmatcher pred=grep('b')>]>
a1
@@ -254,8 +254,8 @@
(symbol 'a1'))
* matcher:
<differencematcher
- m1=<patternmatcher patterns='(?:a[^/]*$)'>,
- m2=<patternmatcher patterns='(?:a1$)'>>
+ m1=<patternmatcher patterns='a[^/]*$'>,
+ m2=<patternmatcher patterns='a1$'>>
a2
$ fileset -p optimized -s '!binary() and a*'
@@ -267,7 +267,7 @@
None))
* matcher:
<differencematcher
- m1=<patternmatcher patterns='(?:a[^/]*$)'>,
+ m1=<patternmatcher patterns='a[^/]*$'>,
m2=<predicatenmatcher pred=binary>>
a1
a2
@@ -286,8 +286,8 @@
(symbol 'a1'))
* matcher:
<differencematcher
- m1=<patternmatcher patterns='(?:a[^/]*$)'>,
- m2=<patternmatcher patterns='(?:a1$)'>>
+ m1=<patternmatcher patterns='a[^/]*$'>,
+ m2=<patternmatcher patterns='a1$'>>
a2
$ fileset -p analyzed -p optimized -s 'binary() - a*'
@@ -309,7 +309,7 @@
<intersectionmatcher
m1=<predicatenmatcher
pred=<not
- <patternmatcher patterns='(?:a[^/]*$)'>>>,
+ <patternmatcher patterns='a[^/]*$'>>>,
m2=<predicatenmatcher pred=binary>>
Test files status
@@ -551,7 +551,7 @@
None))
* matcher:
<intersectionmatcher
- m1=<patternmatcher patterns='(?:b[^/]*$)'>,
+ m1=<patternmatcher patterns='b[^/]*$'>,
m2=<predicatenmatcher pred=binary>>
bin
--- a/tests/test-fix-clang-format.t Wed Jan 09 20:00:35 2019 -0800
+++ b/tests/test-fix-clang-format.t Fri Jan 18 13:28:22 2019 -0500
@@ -11,7 +11,7 @@
> [fix]
> clang-format:command=clang-format --style=Google --assume-filename={rootpath}
> clang-format:linerange=--lines={first}:{last}
- > clang-format:fileset=set:**.cpp or **.hpp
+ > clang-format:pattern=set:**.cpp or **.hpp
> EOF
$ hg init repo
--- a/tests/test-fix-topology.t Wed Jan 09 20:00:35 2019 -0800
+++ b/tests/test-fix-topology.t Fri Jan 18 13:28:22 2019 -0500
@@ -23,7 +23,7 @@
> fix =
> [fix]
> uppercase-whole-file:command="$PYTHON" $UPPERCASEPY
- > uppercase-whole-file:fileset=set:**
+ > uppercase-whole-file:pattern=set:**
> EOF
This tests the only behavior that should really be affected by obsolescence, so
--- a/tests/test-fix.t Wed Jan 09 20:00:35 2019 -0800
+++ b/tests/test-fix.t Fri Jan 18 13:28:22 2019 -0500
@@ -66,10 +66,10 @@
> evolution.allowunstable=True
> [fix]
> uppercase-whole-file:command="$PYTHON" $UPPERCASEPY all
- > uppercase-whole-file:fileset=set:**.whole
+ > uppercase-whole-file:pattern=set:**.whole
> uppercase-changed-lines:command="$PYTHON" $UPPERCASEPY
> uppercase-changed-lines:linerange={first}-{last}
- > uppercase-changed-lines:fileset=set:**.changed
+ > uppercase-changed-lines:pattern=set:**.changed
> EOF
Help text for fix.
@@ -126,13 +126,15 @@
[fix]
clang-format:command=clang-format --assume-filename={rootpath}
clang-format:linerange=--lines={first}:{last}
- clang-format:fileset=set:**.cpp or **.hpp
+ clang-format:pattern=set:**.cpp or **.hpp
The :command suboption forms the first part of the shell command that will be
used to fix a file. The content of the file is passed on standard input, and
- the fixed file content is expected on standard output. If there is any output
- on standard error, the file will not be affected. Some values may be
- substituted into the command:
+ the fixed file content is expected on standard output. Any output on standard
+ error will be displayed as a warning. If the exit status is not zero, the file
+ will not be affected. A placeholder warning is displayed if there is a non-
+ zero exit status but no standard error output. Some values may be substituted
+ into the command:
{rootpath} The path of the file being fixed, relative to the repo root
{basename} The name of the file being fixed, without the directory path
@@ -145,15 +147,43 @@
{first} The 1-based line number of the first line in the modified range
{last} The 1-based line number of the last line in the modified range
- The :fileset suboption determines which files will be passed through each
- configured tool. See 'hg help fileset' for possible values. If there are file
- arguments to 'hg fix', the intersection of these filesets is used.
+ The :pattern suboption determines which files will be passed through each
+ configured tool. See 'hg help patterns' for possible values. If there are file
+ arguments to 'hg fix', the intersection of these patterns is used.
There is also a configurable limit for the maximum size of file that will be
processed by 'hg fix':
[fix]
- maxfilesize=2MB
+ maxfilesize = 2MB
+
+ Normally, execution of configured tools will continue after a failure
+ (indicated by a non-zero exit status). It can also be configured to abort
+ after the first such failure, so that no files will be affected if any tool
+ fails. This abort will also cause 'hg fix' to exit with a non-zero status:
+
+ [fix]
+ failure = abort
+
+ When multiple tools are configured to affect a file, they execute in an order
+ defined by the :priority suboption. The priority suboption has a default value
+ of zero for each tool. Tools are executed in order of descending priority. The
+ execution order of tools with equal priority is unspecified. For example, you
+ could use the 'sort' and 'head' utilities to keep only the 10 smallest numbers
+ in a text file by ensuring that 'sort' runs before 'head':
+
+ [fix]
+ sort:command = sort -n
+ head:command = head -n 10
+ sort:pattern = numbers.txt
+ head:pattern = numbers.txt
+ sort:priority = 2
+ head:priority = 1
+
+ To account for changes made by each tool, the line numbers used for
+ incremental formatting are recomputed before executing the next tool. So, each
+ tool may see different values for the arguments added by the :linerange
+ suboption.
list of commands:
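
The contract spelled out above (file content on stdin, fixed content on
stdout, stderr surfaced as warnings, non-zero exit leaving the file alone)
fits ordinary Unix filters. A hypothetical tool configuration honoring it
(tool name and pattern are illustrative):

  [fix]
  # strip trailing spaces; reads stdin, writes stdout, exits 0 on success
  rstrip:command = sed -e 's/ *$//'
  rstrip:pattern = set:**.py
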
@@ -361,7 +391,7 @@
$ hg --config "fix.fail:command=echo" \
> --config "fix.fail:linerange={first}:{last}" \
- > --config "fix.fail:fileset=foo.txt" \
+ > --config "fix.fail:pattern=foo.txt" \
> fix --working-dir
$ cat foo.txt
1:1 4:6 8:8
@@ -508,7 +538,9 @@
on stderr and nothing on stdout, which would cause us to clear the file,
except that they also exit with a non-zero code. We show the user which fixer
emitted the stderr, and which revision, but we assume that the fixer will print
-the filename if it is relevant (since the issue may be non-specific).
+the filename if it is relevant (since the issue may be non-specific). There is
+also a config to abort (without affecting any files whatsoever) if we see any
+tool with a non-zero exit status.
$ hg init showstderr
$ cd showstderr
@@ -516,35 +548,54 @@
$ printf "hello\n" > hello.txt
$ hg add
adding hello.txt
- $ cat > $TESTTMP/fail.sh <<'EOF'
+ $ cat > $TESTTMP/work.sh <<'EOF'
> printf 'HELLO\n'
- > printf "$@: some\nerror" >&2
+ > printf "$@: some\nerror that didn't stop the tool" >&2
> exit 0 # success despite the stderr output
> EOF
- $ hg --config "fix.fail:command=sh $TESTTMP/fail.sh {rootpath}" \
- > --config "fix.fail:fileset=hello.txt" \
+ $ hg --config "fix.work:command=sh $TESTTMP/work.sh {rootpath}" \
+ > --config "fix.work:pattern=hello.txt" \
> fix --working-dir
- [wdir] fail: hello.txt: some
- [wdir] fail: error
+ [wdir] work: hello.txt: some
+ [wdir] work: error that didn't stop the tool
$ cat hello.txt
HELLO
$ printf "goodbye\n" > hello.txt
- $ cat > $TESTTMP/work.sh <<'EOF'
+ $ printf "foo\n" > foo.whole
+ $ hg add
+ adding foo.whole
+ $ cat > $TESTTMP/fail.sh <<'EOF'
> printf 'GOODBYE\n'
- > printf "$@: some\nerror\n" >&2
+ > printf "$@: some\nerror that did stop the tool\n" >&2
> exit 42 # failure despite the stdout output
> EOF
- $ hg --config "fix.fail:command=sh $TESTTMP/work.sh {rootpath}" \
- > --config "fix.fail:fileset=hello.txt" \
+ $ hg --config "fix.fail:command=sh $TESTTMP/fail.sh {rootpath}" \
+ > --config "fix.fail:pattern=hello.txt" \
+ > --config "fix.failure=abort" \
> fix --working-dir
[wdir] fail: hello.txt: some
- [wdir] fail: error
+ [wdir] fail: error that did stop the tool
+ abort: no fixes will be applied
+ (use --config fix.failure=continue to apply any successful fixes anyway)
+ [255]
$ cat hello.txt
goodbye
+ $ cat foo.whole
+ foo
+
+ $ hg --config "fix.fail:command=sh $TESTTMP/fail.sh {rootpath}" \
+ > --config "fix.fail:pattern=hello.txt" \
+ > fix --working-dir
+ [wdir] fail: hello.txt: some
+ [wdir] fail: error that did stop the tool
+ $ cat hello.txt
+ goodbye
+ $ cat foo.whole
+ FOO
$ hg --config "fix.fail:command=exit 42" \
- > --config "fix.fail:fileset=hello.txt" \
+ > --config "fix.fail:pattern=hello.txt" \
> fix --working-dir
[wdir] fail: exited with status 42
@@ -842,24 +893,24 @@
$ printf "BAR\n" > bar.whole
$ hg commit -Aqm "add bar"
- $ hg log --graph --template '{node|shortest} {files}'
- @ bc05 bar.whole
+ $ hg log --graph --template '{rev} {files}'
+ @ 2 bar.whole
|
- o 4fd2 foo.whole
+ o 1 foo.whole
|
- o f9ac foo.whole
+ o 0 foo.whole
$ hg fix -r 0:2
- $ hg log --graph --template '{node|shortest} {files}'
- o b4e2 bar.whole
+ $ hg log --graph --template '{rev} {files}'
+ o 4 bar.whole
|
- o 59f4
+ o 3
|
- | @ bc05 bar.whole
+ | @ 2 bar.whole
| |
- | x 4fd2 foo.whole
+ | x 1 foo.whole
|/
- o f9ac foo.whole
+ o 0 foo.whole
$ cd ..
@@ -996,7 +1047,7 @@
adding foo/bar
$ hg --config "fix.fail:command=printf '%s\n' '{rootpath}' '{basename}'" \
> --config "fix.fail:linerange='{first}' '{last}'" \
- > --config "fix.fail:fileset=foo/bar" \
+ > --config "fix.fail:pattern=foo/bar" \
> fix --working-dir
$ cat foo/bar
foo/bar
@@ -1074,3 +1125,107 @@
FOO2
$ cd ..
+
+The :fileset suboption was a misnomer, so we renamed it to :pattern. We will
+still accept :fileset by itself as if it were :pattern, but this will issue a
+warning.
+
+ $ hg init filesetispattern
+ $ cd filesetispattern
+
+ $ printf "foo\n" > foo.whole
+ $ printf "first\nsecond\n" > bar.txt
+ $ hg add -q
+ $ hg fix -w --config fix.sometool:fileset=bar.txt \
+ > --config fix.sometool:command="sort -r"
+ the fix.tool:fileset config name is deprecated; please rename it to fix.tool:pattern
+
+ $ cat foo.whole
+ FOO
+ $ cat bar.txt
+ second
+ first
+
+ $ cd ..
+
+The execution order of tools can be controlled. This example doesn't work if
+you sort after truncating, so the config must define the correct order even
+though the tool definitions appear out of order (definition order might
+otherwise imply the wrong execution order, given the implementation of fix).
+The goal is to use multiple tools to select the lowest 5 numbers in the file.
+
+ $ hg init priorityexample
+ $ cd priorityexample
+
+ $ cat >> .hg/hgrc <<EOF
+ > [fix]
+ > head:command = head -n 5
+ > head:pattern = numbers.txt
+ > head:priority = 1
+ > sort:command = sort -n
+ > sort:pattern = numbers.txt
+ > sort:priority = 2
+ > EOF
+
+ $ printf "8\n2\n3\n6\n7\n4\n9\n5\n1\n0\n" > numbers.txt
+ $ hg add -q
+ $ hg fix -w
+ $ cat numbers.txt
+ 0
+ 1
+ 2
+ 3
+ 4
+
+And of course we should be able to break this by reversing the execution order.
+Test negative priorities while we're at it.
+
+ $ cat >> .hg/hgrc <<EOF
+ > [fix]
+ > head:priority = -1
+ > sort:priority = -2
+ > EOF
+ $ printf "8\n2\n3\n6\n7\n4\n9\n5\n1\n0\n" > numbers.txt
+ $ hg fix -w
+ $ cat numbers.txt
+ 2
+ 3
+ 6
+ 7
+ 8
+
+ $ cd ..
+
+It's possible for repeated applications of a fixer tool to create cycles in the
+generated content of a file. For example, two users with different versions of
+a code formatter might fight over the formatting when they run hg fix. In the
+absence of other changes, this means we could produce commits with the same
+hash in subsequent runs of hg fix. This is a problem unless we support
+obsolescence cycles well. We avoid this by adding an extra field to the
+successor which forces it to have a new hash. That's why this test creates
+three revisions instead of two.
+
+ $ hg init cyclictool
+ $ cd cyclictool
+
+ $ cat >> .hg/hgrc <<EOF
+ > [fix]
+ > swapletters:command = tr ab ba
+ > swapletters:pattern = foo
+ > EOF
+
+ $ echo ab > foo
+ $ hg commit -Aqm foo
+
+ $ hg fix -r 0
+ $ hg fix -r 1
+
+ $ hg cat -r 0 foo --hidden
+ ab
+ $ hg cat -r 1 foo --hidden
+ ba
+ $ hg cat -r 2 foo
+ ab
+
+ $ cd ..
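
The "extra field" mentioned above is what breaks the hash cycle: conceptually,
the successor is committed with an extras entry derived from its predecessor,
so even identical content yields a new node. A hypothetical sketch (the field
name here is an assumption, not necessarily what fix uses):

  # give the successor commit an extras entry tied to the old node, so the
  # new commit hashes differently even when the file content cycles back
  extra = dict(old_ctx.extra())
  extra[b'fix_source'] = old_ctx.hex()  # hypothetical field name
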
+
--- a/tests/test-fncache.t Wed Jan 09 20:00:35 2019 -0800
+++ b/tests/test-fncache.t Fri Jan 18 13:28:22 2019 -0500
@@ -88,9 +88,6 @@
.hg/00manifest.i
.hg/cache
.hg/cache/branch2-served
- .hg/cache/checkisexec (execbit !)
- .hg/cache/checklink (symlink !)
- .hg/cache/checklink-target (symlink !)
.hg/cache/manifestfulltextcache (reporevlogstore !)
.hg/cache/rbc-names-v1
.hg/cache/rbc-revs-v1
@@ -110,6 +107,10 @@
.hg/undo.desc
.hg/undo.dirstate
.hg/undo.phaseroots
+ .hg/wcache
+ .hg/wcache/checkisexec (execbit !)
+ .hg/wcache/checklink (symlink !)
+ .hg/wcache/checklink-target (symlink !)
$ cd ..
Non fncache repo:
@@ -125,9 +126,6 @@
.hg/00changelog.i
.hg/cache
.hg/cache/branch2-served
- .hg/cache/checkisexec (execbit !)
- .hg/cache/checklink (symlink !)
- .hg/cache/checklink-target (symlink !)
.hg/cache/manifestfulltextcache (reporevlogstore !)
.hg/cache/rbc-names-v1
.hg/cache/rbc-revs-v1
@@ -150,6 +148,10 @@
.hg/undo.branch
.hg/undo.desc
.hg/undo.dirstate
+ .hg/wcache
+ .hg/wcache/checkisexec (execbit !)
+ .hg/wcache/checklink (symlink !)
+ .hg/wcache/checklink-target (symlink !)
$ cd ..
Encoding of reserved / long paths in the store
--- a/tests/test-generaldelta.t Wed Jan 09 20:00:35 2019 -0800
+++ b/tests/test-generaldelta.t Fri Jan 18 13:28:22 2019 -0500
@@ -5,6 +5,11 @@
implementation of parentdelta: third manifest revision would be fully
inserted due to the big distance from its parent revision (zero).
+ $ cat << EOF >> $HGRCPATH
+ > [format]
+ > sparse-revlog = no
+ > EOF
+
$ hg init repo --config format.generaldelta=no --config format.usegeneraldelta=no
$ cd repo
$ echo foo > foo
--- a/tests/test-graft.t Wed Jan 09 20:00:35 2019 -0800
+++ b/tests/test-graft.t Fri Jan 18 13:28:22 2019 -0500
@@ -25,7 +25,7 @@
$ echo b > e
$ hg branch -q stable
$ hg ci -m5
- $ hg merge -q default --tool internal:local
+ $ hg merge -q default --tool internal:local # for conflicts in e, choose 5 and ignore 4
$ hg branch -q default
$ hg ci -m6
$ hg phase --public 3
@@ -46,8 +46,40 @@
|
o test@0.public: 0
+Test --base for grafting the merge of 4 from the perspective of 5, thus only getting the change to d
+
+ $ hg up -cqr 3
+ $ hg graft -r 6 --base 5
+ grafting 6:25a2b029d3ae "6" (tip)
+ merging e
+ $ hg st --change .
+ M d
+
+ $ hg -q strip . --config extensions.strip=
+
+Test --base for collapsing changesets 2 and 3, thus getting both b and c
+
+ $ hg up -cqr 0
+ $ hg graft -r 3 --base 1
+ grafting 3:4c60f11aa304 "3"
+ merging a and b to b
+ merging a and c to c
+ $ hg st --change .
+ A b
+ A c
+ R a
+
+ $ hg -q strip . --config extensions.strip=
+
+Specifying a child as the --base revision fails safely (perhaps slightly confusing, but consistent)
+
+ $ hg graft -r 2 --base 3
+ grafting 2:5c095ad7e90f "2"
+ note: graft of 2:5c095ad7e90f created no changes to commit
+
Can't continue without starting:
+ $ hg -q up -cr tip
$ hg rm -q e
$ hg graft --continue
abort: no graft in progress
@@ -80,6 +112,16 @@
skipping ancestor revision 1:5d205f8b35b6
[255]
+Conflicting date/user options:
+
+ $ hg up -q 0
+ $ hg graft -U --user foo 2
+ abort: --user and --currentuser are mutually exclusive
+ [255]
+ $ hg graft -D --date '0 0' 2
+ abort: --date and --currentdate are mutually exclusive
+ [255]
+
Can't graft with dirty wd:
$ hg up -q 0
--- a/tests/test-hardlinks.t Wed Jan 09 20:00:35 2019 -0800
+++ b/tests/test-hardlinks.t Fri Jan 18 13:28:22 2019 -0500
@@ -3,10 +3,10 @@
$ cat > nlinks.py <<EOF
> from __future__ import print_function
> import sys
- > from mercurial import util
+ > from mercurial import pycompat, util
> for f in sorted(sys.stdin.readlines()):
> f = f[:-1]
- > print(util.nlinks(f), f)
+ > print(util.nlinks(pycompat.fsencode(f)), f)
> EOF
$ nlinksdir()
@@ -230,7 +230,7 @@
the symlink should be followed or not. It does behave differently on Linux and
BSD. Just remove it so the test passes on both platforms.
- $ rm -f r4/.hg/cache/checklink
+ $ rm -f r4/.hg/wcache/checklink
r4 has hardlinks in the working dir (not just inside .hg):
@@ -239,9 +239,6 @@
2 r4/.hg/branch
2 r4/.hg/cache/branch2-base
2 r4/.hg/cache/branch2-served
- 2 r4/.hg/cache/checkisexec (execbit !)
- ? r4/.hg/cache/checklink-target (glob) (symlink !)
- 2 r4/.hg/cache/checknoexec (execbit !)
2 r4/.hg/cache/manifestfulltextcache (reporevlogstore !)
2 r4/.hg/cache/rbc-names-v1
2 r4/.hg/cache/rbc-revs-v1
@@ -268,6 +265,9 @@
2 r4/.hg/undo.branch
2 r4/.hg/undo.desc
[24] r4/\.hg/undo\.dirstate (re)
+ 2 r4/.hg/wcache/checkisexec (execbit !)
+ 2 r4/.hg/wcache/checklink-target (symlink !)
+ 2 r4/.hg/wcache/checknoexec (execbit !)
2 r4/d1/data1
2 r4/d1/f2
2 r4/f1
@@ -290,9 +290,6 @@
1 r4/.hg/branch
2 r4/.hg/cache/branch2-base
2 r4/.hg/cache/branch2-served
- 2 r4/.hg/cache/checkisexec (execbit !)
- 2 r4/.hg/cache/checklink-target (symlink !)
- 2 r4/.hg/cache/checknoexec (execbit !)
2 r4/.hg/cache/manifestfulltextcache (reporevlogstore !)
2 r4/.hg/cache/rbc-names-v1
2 r4/.hg/cache/rbc-revs-v1
@@ -319,6 +316,9 @@
2 r4/.hg/undo.branch
2 r4/.hg/undo.desc
[24] r4/\.hg/undo\.dirstate (re)
+ 2 r4/.hg/wcache/checkisexec (execbit !)
+ 2 r4/.hg/wcache/checklink-target (symlink !)
+ 2 r4/.hg/wcache/checknoexec (execbit !)
2 r4/d1/data1
2 r4/d1/f2
1 r4/f1
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/tests/test-help-hide.t Fri Jan 18 13:28:22 2019 -0500
@@ -0,0 +1,255 @@
+Test hiding some commands (which also happens to hide an entire category).
+
+ $ hg --config help.hidden-command.clone=true \
+ > --config help.hidden-command.init=true help
+ Mercurial Distributed SCM
+
+ list of commands:
+
+ Remote repository management:
+
+ incoming show new changesets found in source
+ outgoing show changesets not found in the destination
+ paths show aliases for remote repositories
+ pull pull changes from the specified source
+ push push changes to the specified destination
+ serve start stand-alone webserver
+
+ Change creation:
+
+ commit commit the specified files or all outstanding changes
+
+ Change manipulation:
+
+ backout reverse effect of earlier changeset
+ graft copy changes from other branches onto the current branch
+ merge merge another revision into working directory
+
+ Change organization:
+
+ bookmarks create a new bookmark or list existing bookmarks
+ branch set or show the current branch name
+ branches list repository named branches
+ phase set or show the current phase name
+ tag add one or more tags for the current or given revision
+ tags list repository tags
+
+ File content management:
+
+ annotate show changeset information by line for each file
+ cat output the current or given revision of files
+ copy mark files as copied for the next commit
+ diff diff repository (or selected files)
+ grep search revision history for a pattern in specified files
+
+ Change navigation:
+
+ bisect subdivision search of changesets
+ heads show branch heads
+ identify identify the working directory or specified revision
+ log show revision history of entire repository or files
+
+ Working directory management:
+
+ add add the specified files on the next commit
+ addremove add all new files, delete all missing files
+ files list tracked files
+ forget forget the specified files on the next commit
+ remove remove the specified files on the next commit
+ rename rename files; equivalent of copy + remove
+ resolve redo merges or set/view the merge status of files
+ revert restore files to their checkout state
+ root print the root (top) of the current working directory
+ status show changed files in the working directory
+ summary summarize working directory state
+ update update working directory (or switch revisions)
+
+ Change import/export:
+
+ archive create an unversioned archive of a repository revision
+ bundle create a bundle file
+ export dump the header and diffs for one or more changesets
+ import import an ordered set of patches
+ unbundle apply one or more bundle files
+
+ Repository maintenance:
+
+ manifest output the current or given revision of the project manifest
+ recover roll back an interrupted transaction
+ verify verify the integrity of the repository
+
+ Help:
+
+ config show combined config settings from all hgrc files
+ help show help for a given topic or a help overview
+ version output version and copyright information
+
+ additional help topics:
+
+ Mercurial identifiers:
+
+ filesets Specifying File Sets
+ hgignore Syntax for Mercurial Ignore Files
+ patterns File Name Patterns
+ revisions Specifying Revisions
+ urls URL Paths
+
+ Mercurial output:
+
+ color Colorizing Outputs
+ dates Date Formats
+ diffs Diff Formats
+ templating Template Usage
+
+ Mercurial configuration:
+
+ config Configuration Files
+ environment Environment Variables
+ extensions Using Additional Features
+ flags Command-line flags
+ hgweb Configuring hgweb
+ merge-tools Merge Tools
+ pager Pager Support
+
+ Concepts:
+
+ bundlespec Bundle File Formats
+ glossary Glossary
+ phases Working with Phases
+ subrepos Subrepositories
+
+ Miscellaneous:
+
+ deprecated Deprecated Features
+ internals Technical implementation topics
+ scripting Using Mercurial from scripts and automation
+
+ (use 'hg help -v' to show built-in aliases and global options)
+
+Test hiding some topics.
+
+ $ hg --config help.hidden-topic.deprecated=true \
+ > --config help.hidden-topic.internals=true \
+ > --config help.hidden-topic.scripting=true help
+ Mercurial Distributed SCM
+
+ list of commands:
+
+ Repository creation:
+
+ clone make a copy of an existing repository
+ init create a new repository in the given directory
+
+ Remote repository management:
+
+ incoming show new changesets found in source
+ outgoing show changesets not found in the destination
+ paths show aliases for remote repositories
+ pull pull changes from the specified source
+ push push changes to the specified destination
+ serve start stand-alone webserver
+
+ Change creation:
+
+ commit commit the specified files or all outstanding changes
+
+ Change manipulation:
+
+ backout reverse effect of earlier changeset
+ graft copy changes from other branches onto the current branch
+ merge merge another revision into working directory
+
+ Change organization:
+
+ bookmarks create a new bookmark or list existing bookmarks
+ branch set or show the current branch name
+ branches list repository named branches
+ phase set or show the current phase name
+ tag add one or more tags for the current or given revision
+ tags list repository tags
+
+ File content management:
+
+ annotate show changeset information by line for each file
+ cat output the current or given revision of files
+ copy mark files as copied for the next commit
+ diff diff repository (or selected files)
+ grep search revision history for a pattern in specified files
+
+ Change navigation:
+
+ bisect subdivision search of changesets
+ heads show branch heads
+ identify identify the working directory or specified revision
+ log show revision history of entire repository or files
+
+ Working directory management:
+
+ add add the specified files on the next commit
+ addremove add all new files, delete all missing files
+ files list tracked files
+ forget forget the specified files on the next commit
+ remove remove the specified files on the next commit
+ rename rename files; equivalent of copy + remove
+ resolve redo merges or set/view the merge status of files
+ revert restore files to their checkout state
+ root print the root (top) of the current working directory
+ status show changed files in the working directory
+ summary summarize working directory state
+ update update working directory (or switch revisions)
+
+ Change import/export:
+
+ archive create an unversioned archive of a repository revision
+ bundle create a bundle file
+ export dump the header and diffs for one or more changesets
+ import import an ordered set of patches
+ unbundle apply one or more bundle files
+
+ Repository maintenance:
+
+ manifest output the current or given revision of the project manifest
+ recover roll back an interrupted transaction
+ verify verify the integrity of the repository
+
+ Help:
+
+ config show combined config settings from all hgrc files
+ help show help for a given topic or a help overview
+ version output version and copyright information
+
+ additional help topics:
+
+ Mercurial identifiers:
+
+ filesets Specifying File Sets
+ hgignore Syntax for Mercurial Ignore Files
+ patterns File Name Patterns
+ revisions Specifying Revisions
+ urls URL Paths
+
+ Mercurial output:
+
+ color Colorizing Outputs
+ dates Date Formats
+ diffs Diff Formats
+ templating Template Usage
+
+ Mercurial configuration:
+
+ config Configuration Files
+ environment Environment Variables
+ extensions Using Additional Features
+ flags Command-line flags
+ hgweb Configuring hgweb
+ merge-tools Merge Tools
+ pager Pager Support
+
+ Concepts:
+
+ bundlespec Bundle File Formats
+ glossary Glossary
+ phases Working with Phases
+ subrepos Subrepositories
+
+ (use 'hg help -v' to show built-in aliases and global options)
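
Outside of tests, the same hiding can live in an hgrc; the keys exercised
above translate directly:

  [help]
  hidden-command.clone = true
  hidden-command.init = true
  hidden-topic.internals = true
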
--- a/tests/test-help.t Wed Jan 09 20:00:35 2019 -0800
+++ b/tests/test-help.t Fri Jan 18 13:28:22 2019 -0500
@@ -809,6 +809,8 @@
> [(b'', b'longdesc', 3, b'x'*67),
> (b'n', b'', None, b'normal desc'),
> (b'', b'newline', b'', b'line1\nline2'),
+ > (b'', b'default-off', False, b'enable X'),
+ > (b'', b'default-on', True, b'enable Y'),
> (b'', b'callableopt', func, b'adds foo'),
> (b'', b'customopt', customopt(''), b'adds bar'),
> (b'', b'customopt-withdefault', customopt('foo'), b'adds bar')],
@@ -820,9 +822,17 @@
> def nohelp(ui, *args, **kwargs):
> pass
>
+ > @command(b'hashelp', [], b'hg hashelp', norepo=True)
+ > def hashelp(ui, *args, **kwargs):
+ > """Extension command's help"""
+ > pass
+ >
> def uisetup(ui):
> ui.setconfig(b'alias', b'shellalias', b'!echo hi', b'helpext')
> ui.setconfig(b'alias', b'hgalias', b'summary', b'helpext')
+ > ui.setconfig(b'alias', b'hgalias:doc', b'My doc', b'helpext')
+ > ui.setconfig(b'alias', b'hgalias:category', b'navigation', b'helpext')
+ > ui.setconfig(b'alias', b'hgaliasnodoc', b'summary', b'helpext')
>
> EOF
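
The new alias suboptions exercised via ui.setconfig() above map one-to-one
onto hgrc settings; the equivalent configuration would be:

  [alias]
  hgalias = summary
  hgalias:doc = My doc
  hgalias:category = navigation
  hgaliasnodoc = summary
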
$ echo '[extensions]' >> $HGRCPATH
@@ -830,11 +840,28 @@
Test for aliases
+ $ hg help | grep hgalias
+ hgalias My doc
+
$ hg help hgalias
hg hgalias [--remote]
alias for: hg summary
+ My doc
+
+ defined by: helpext
+
+ options:
+
+ --remote check for push and pull
+
+ (some details hidden, use --verbose to show complete help)
+ $ hg help hgaliasnodoc
+ hg hgaliasnodoc [--remote]
+
+ alias for: hg summary
+
summarize working directory state
This generates a brief summary of the working directory state, including
@@ -878,12 +905,27 @@
xxxxxxxxxxxxxxxxxxxxxxx (default: 3)
-n -- normal desc
--newline VALUE line1 line2
+ --default-off enable X
+ --[no-]default-on enable Y (default: on)
--callableopt VALUE adds foo
--customopt VALUE adds bar
--customopt-withdefault VALUE adds bar (default: foo)
(some details hidden, use --verbose to show complete help)
+Test that default list of commands includes extension commands that have help,
+but not those that don't, except in verbose mode, when a keyword is passed, or
+when help about the extension is requested.
+
+#if no-extraextensions
+
+ $ hg help | grep hashelp
+ hashelp Extension command's help
+ $ hg help | grep nohelp
+ [1]
+ $ hg help -v | grep nohelp
+ nohelp (no help text available)
+
$ hg help -k nohelp
Commands:
@@ -893,143 +935,15 @@
nohelp (no help text available)
-Test that default list of commands omits extension commands
-
-#if no-extraextensions
-
- $ hg help
- Mercurial Distributed SCM
+ $ hg help helpext
+ helpext extension - no help text available
list of commands:
- Repository creation:
-
- clone make a copy of an existing repository
- init create a new repository in the given directory
-
- Remote repository management:
-
- incoming show new changesets found in source
- outgoing show changesets not found in the destination
- paths show aliases for remote repositories
- pull pull changes from the specified source
- push push changes to the specified destination
- serve start stand-alone webserver
-
- Change creation:
-
- commit commit the specified files or all outstanding changes
-
- Change manipulation:
-
- backout reverse effect of earlier changeset
- graft copy changes from other branches onto the current branch
- merge merge another revision into working directory
-
- Change organization:
-
- bookmarks create a new bookmark or list existing bookmarks
- branch set or show the current branch name
- branches list repository named branches
- phase set or show the current phase name
- tag add one or more tags for the current or given revision
- tags list repository tags
-
- File content management:
-
- annotate show changeset information by line for each file
- cat output the current or given revision of files
- copy mark files as copied for the next commit
- diff diff repository (or selected files)
- grep search revision history for a pattern in specified files
-
- Change navigation:
-
- bisect subdivision search of changesets
- heads show branch heads
- identify identify the working directory or specified revision
- log show revision history of entire repository or files
-
- Working directory management:
-
- add add the specified files on the next commit
- addremove add all new files, delete all missing files
- files list tracked files
- forget forget the specified files on the next commit
- remove remove the specified files on the next commit
- rename rename files; equivalent of copy + remove
- resolve redo merges or set/view the merge status of files
- revert restore files to their checkout state
- root print the root (top) of the current working directory
- status show changed files in the working directory
- summary summarize working directory state
- update update working directory (or switch revisions)
-
- Change import/export:
-
- archive create an unversioned archive of a repository revision
- bundle create a bundle file
- export dump the header and diffs for one or more changesets
- import import an ordered set of patches
- unbundle apply one or more bundle files
-
- Repository maintenance:
-
- manifest output the current or given revision of the project manifest
- recover roll back an interrupted transaction
- verify verify the integrity of the repository
-
- Help:
-
- config show combined config settings from all hgrc files
- help show help for a given topic or a help overview
- version output version and copyright information
-
- enabled extensions:
-
- helpext (no help text available)
-
- additional help topics:
-
- Mercurial identifiers:
-
- filesets Specifying File Sets
- hgignore Syntax for Mercurial Ignore Files
- patterns File Name Patterns
- revisions Specifying Revisions
- urls URL Paths
-
- Mercurial output:
-
- color Colorizing Outputs
- dates Date Formats
- diffs Diff Formats
- templating Template Usage
-
- Mercurial configuration:
-
- config Configuration Files
- environment Environment Variables
- extensions Using Additional Features
- flags Command-line flags
- hgweb Configuring hgweb
- merge-tools Merge Tools
- pager Pager Support
-
- Concepts:
-
- bundlespec Bundle File Formats
- glossary Glossary
- phases Working with Phases
- subrepos Subrepositories
-
- Miscellaneous:
-
- deprecated Deprecated Features
- internals Technical implementation topics
- scripting Using Mercurial from scripts and automation
-
- (use 'hg help -v' to show built-in aliases and global options)
+ hashelp Extension command's help
+ nohelp (no help text available)
+
+ (use 'hg help -v helpext' to show built-in aliases and global options)
#endif
@@ -1156,6 +1070,7 @@
censor Censor
changegroups Changegroups
config Config Registrar
+ extensions Extension API
requirements Repository Requirements
revlogs Revision Logs
wireprotocol Wire Protocol
@@ -1381,18 +1296,6 @@
*empty chunk* at the end of each *delta group* denotes the boundary to the
next filelog sub-segment.
-Test list of commands with command with no help text
-
- $ hg help helpext
- helpext extension - no help text available
-
- list of commands:
-
- nohelp (no help text available)
-
- (use 'hg help -v helpext' to show built-in aliases and global options)
-
-
test advanced, deprecated and experimental options are hidden in command help
$ hg help debugoptADV
hg debugoptADV
@@ -2644,6 +2547,13 @@
search revision history for a pattern in specified files
</td></tr>
<tr><td>
+ <a href="/help/hashelp">
+ hashelp
+ </a>
+ </td><td>
+ Extension command's help
+ </td></tr>
+ <tr><td>
<a href="/help/heads">
heads
</a>
@@ -2662,6 +2572,13 @@
hgalias
</a>
</td><td>
+ My doc
+ </td></tr>
+ <tr><td>
+ <a href="/help/hgaliasnodoc">
+ hgaliasnodoc
+ </a>
+ </td><td>
summarize working directory state
</td></tr>
<tr><td>
@@ -3492,6 +3409,13 @@
Config Registrar
</td></tr>
<tr><td>
+ <a href="/help/internals.extensions">
+ extensions
+ </a>
+ </td><td>
+ Extension API
+ </td></tr>
+ <tr><td>
<a href="/help/internals.requirements">
requirements
</a>
--- a/tests/test-hgignore.t Wed Jan 09 20:00:35 2019 -0800
+++ b/tests/test-hgignore.t Fri Jan 18 13:28:22 2019 -0500
@@ -197,7 +197,7 @@
A b.o
$ hg debugignore
- <includematcher includes='(?:(?:|.*/)[^/]*(?:/|$))'>
+ <includematcher includes='(?:|.*/)[^/]*(?:/|$)'>
$ hg debugignore b.o
b.o is ignored
@@ -239,6 +239,17 @@
dir/c.o is ignored
(ignore rule in $TESTTMP/ignorerepo/.hgignore, line 2: 'dir/**/c.o') (glob)
+Check rooted globs
+
+ $ hg purge --all --config extensions.purge=
+ $ echo "syntax: rootglob" > .hgignore
+ $ echo "a/*.ext" >> .hgignore
+ $ for p in a b/a aa; do mkdir -p $p; touch $p/b.ext; done
+ $ hg status -A 'set:**.ext'
+ ? aa/b.ext
+ ? b/a/b.ext
+ I a/b.ext
+
Check using 'include:' in ignore file
$ hg purge --all --config extensions.purge=
@@ -257,10 +268,15 @@
Check recursive uses of 'include:'
$ echo "include:nested/ignore" >> otherignore
- $ mkdir nested
+ $ mkdir nested nested/more
$ echo "glob:*ignore" > nested/ignore
+ $ echo "rootglob:a" >> nested/ignore
+ $ touch a nested/a nested/more/a
$ hg status
A dir/b.o
+ ? nested/a
+ ? nested/more/a
+ $ rm a nested/a nested/more/a
$ cp otherignore goodignore
$ echo "include:badignore" >> otherignore
@@ -291,18 +307,26 @@
? dir1/file2
? dir2/file1
-Check including subincludes with regexs
+Check including subincludes with other patterns
$ echo "subinclude:dir1/.hgignore" >> .hgignore
+
+ $ mkdir dir1/subdir
+ $ touch dir1/subdir/file1
+ $ echo "rootglob:f?le1" > dir1/.hgignore
+ $ hg status
+ ? dir1/file2
+ ? dir1/subdir/file1
+ ? dir2/file1
+ $ rm dir1/subdir/file1
+
$ echo "regexp:f.le1" > dir1/.hgignore
-
$ hg status
? dir1/file2
? dir2/file1
Check multiple levels of sub-ignores
- $ mkdir dir1/subdir
$ touch dir1/subdir/subfile1 dir1/subdir/subfile3 dir1/subdir/subfile4
$ echo "subinclude:subdir/.hgignore" >> dir1/.hgignore
$ echo "glob:subfil*3" >> dir1/subdir/.hgignore
--- a/tests/test-hgweb-auth.py Wed Jan 09 20:00:35 2019 -0800
+++ b/tests/test-hgweb-auth.py Fri Jan 18 13:28:22 2019 -0500
@@ -104,6 +104,39 @@
'y.password': 'ypassword'},
urls=['http://y@example.org/foo/bar'])
+print('\n*** Test user matching with name in prefix\n')
+
+# prefix, username and URL have the same user
+test({'x.prefix': 'https://example.org/foo',
+ 'x.username': None,
+ 'x.password': 'xpassword',
+ 'y.prefix': 'http://y@example.org/foo',
+ 'y.username': 'y',
+ 'y.password': 'ypassword'},
+ urls=['http://y@example.org/foo'])
+# Prefix has a different user from username and URL
+test({'y.prefix': 'http://z@example.org/foo',
+ 'y.username': 'y',
+ 'y.password': 'ypassword'},
+ urls=['http://y@example.org/foo'])
+# Prefix has a different user from URL; no username
+test({'y.prefix': 'http://z@example.org/foo',
+ 'y.password': 'ypassword'},
+ urls=['http://y@example.org/foo'])
+# Prefix and URL have same user, but doesn't match username
+test({'y.prefix': 'http://y@example.org/foo',
+ 'y.username': 'z',
+ 'y.password': 'ypassword'},
+ urls=['http://y@example.org/foo'])
+# Prefix and URL have the same user; no username
+test({'y.prefix': 'http://y@example.org/foo',
+ 'y.password': 'ypassword'},
+ urls=['http://y@example.org/foo'])
+# Prefix user, but no URL user or username
+test({'y.prefix': 'http://y@example.org/foo',
+ 'y.password': 'ypassword'},
+ urls=['http://example.org/foo'])
+
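
The rule these cases pin down: when a user name is embedded in auth.prefix,
it must agree with the user in the URL and with any configured auth.username,
otherwise the entry is skipped. An hgrc entry matching the passing cases
(values taken from the test):

  [auth]
  y.prefix = http://y@example.org/foo
  y.password = ypassword
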
def testauthinfo(fullurl, authurl):
print('URIs:', fullurl, authurl)
pm = urlreq.httppasswordmgrwithdefaultrealm()
--- a/tests/test-hgweb-auth.py.out Wed Jan 09 20:00:35 2019 -0800
+++ b/tests/test-hgweb-auth.py.out Fri Jan 18 13:28:22 2019 -0500
@@ -190,6 +190,27 @@
URI: http://y@example.org/foo/bar
('y', 'xpassword')
+*** Test user matching with name in prefix
+
+CFG: {b'x.password': b'xpassword', b'x.prefix': b'https://example.org/foo', b'x.username': None, b'y.password': b'ypassword', b'y.prefix': b'http://y@example.org/foo', b'y.username': b'y'}
+URI: http://y@example.org/foo
+ ('y', 'ypassword')
+CFG: {b'y.password': b'ypassword', b'y.prefix': b'http://z@example.org/foo', b'y.username': b'y'}
+URI: http://y@example.org/foo
+ abort
+CFG: {b'y.password': b'ypassword', b'y.prefix': b'http://z@example.org/foo'}
+URI: http://y@example.org/foo
+ abort
+CFG: {b'y.password': b'ypassword', b'y.prefix': b'http://y@example.org/foo', b'y.username': b'z'}
+URI: http://y@example.org/foo
+ abort
+CFG: {b'y.password': b'ypassword', b'y.prefix': b'http://y@example.org/foo'}
+URI: http://y@example.org/foo
+ ('y', 'ypassword')
+CFG: {b'y.password': b'ypassword', b'y.prefix': b'http://y@example.org/foo'}
+URI: http://example.org/foo
+ abort
+
*** Test urllib2 and util.url
URIs: http://user@example.com:8080/foo http://example.com:8080/foo
--- a/tests/test-hgweb-commands.t Wed Jan 09 20:00:35 2019 -0800
+++ b/tests/test-hgweb-commands.t Fri Jan 18 13:28:22 2019 -0500
@@ -2193,7 +2193,7 @@
lookup
pushkey
stream-preferred
- streamreqs=generaldelta,revlogv1
+ streamreqs=generaldelta,revlogv1,sparserevlog
unbundle=HG10GZ,HG10BZ,HG10UN
unbundlehash
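
The extra streamreqs entry reflects sparserevlog being part of the default
repository requirements in this series; a repository can state the format
option explicitly either way:

  [format]
  sparse-revlog = yes
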
--- a/tests/test-histedit-edit.t Wed Jan 09 20:00:35 2019 -0800
+++ b/tests/test-histedit-edit.t Fri Jan 18 13:28:22 2019 -0500
@@ -4,6 +4,7 @@
> [extensions]
> histedit=
> strip=
+ > mockmakedate = $TESTDIR/mockmakedate.py
> EOF
$ initrepo ()
@@ -481,3 +482,74 @@
# f, fold = use commit, but combine it with the one above
# r, roll = like fold, but discard this commit's description and date
#
+
+ $ cd ..
+
+============================================
+Test update-timestamp config option in mess|
+============================================
+
+ $ addwithdate ()
+ > {
+ > echo $1 > $1
+ > hg add $1
+ > hg ci -m $1 -d "$2 0"
+ > }
+
+ $ initrepo ()
+ > {
+ > hg init r2
+ > cd r2
+ > addwithdate a 1
+ > addwithdate b 2
+ > addwithdate c 3
+ > addwithdate d 4
+ > addwithdate e 5
+ > addwithdate f 6
+ > }
+
+ $ initrepo
+
+log before edit
+
+ $ hg log --limit 1
+ changeset: 5:178e35e0ce73
+ tag: tip
+ user: test
+ date: Thu Jan 01 00:00:06 1970 +0000
+ summary: f
+
+ $ hg histedit tip --commands - 2>&1 --config rewrite.update-timestamp=True << EOF | fixbundle
+ > mess 178e35e0ce73 f
+ > EOF
+
+log after edit
+
+ $ hg log --limit 1
+ changeset: 5:98bf456d476b
+ tag: tip
+ user: test
+ date: Thu Jan 01 00:00:00 1970 +0000
+ summary: f
+
+
+ $ cd ..
+
+warn the user on editing tagged commits
+
+ $ hg init issue4017
+ $ cd issue4017
+ $ echo > a
+ $ hg ci -Am 'add a'
+ adding a
+ $ hg tag a
+ $ hg tags
+ tip 1:bd7ee4f3939b
+ a 0:a8a82d372bb3
+ $ hg histedit
+ warning: tags associated with the given changeset will be lost after histedit.
+ do you want to continue (yN)? n
+ abort: histedit cancelled
+
+ [255]
+ $ cd ..
--- a/tests/test-histedit-fold.t Wed Jan 09 20:00:35 2019 -0800
+++ b/tests/test-histedit-fold.t Fri Jan 18 13:28:22 2019 -0500
@@ -15,6 +15,7 @@
> logt = log --template '{rev}:{node|short} {desc|firstline}\n'
> [extensions]
> histedit=
+ > mockmakedate = $TESTDIR/mockmakedate.py
> EOF
@@ -597,3 +598,110 @@
o 8f0162e483d0 aa
+ $ cd ..
+
+====================================
+Test update-timestamp config option|
+====================================
+
+ $ addwithdate ()
+ > {
+ > echo $1 > $1
+ > hg add $1
+ > hg ci -m $1 -d "$2 0"
+ > }
+
+ $ initrepo ()
+ > {
+ > hg init r
+ > cd r
+ > addwithdate a 1
+ > addwithdate b 2
+ > addwithdate c 3
+ > addwithdate d 4
+ > addwithdate e 5
+ > addwithdate f 6
+ > }
+
+ $ initrepo
+
+log before edit
+
+ $ hg log
+ changeset: 5:178e35e0ce73
+ tag: tip
+ user: test
+ date: Thu Jan 01 00:00:06 1970 +0000
+ summary: f
+
+ changeset: 4:1ddb6c90f2ee
+ user: test
+ date: Thu Jan 01 00:00:05 1970 +0000
+ summary: e
+
+ changeset: 3:532247a8969b
+ user: test
+ date: Thu Jan 01 00:00:04 1970 +0000
+ summary: d
+
+ changeset: 2:ff2c9fa2018b
+ user: test
+ date: Thu Jan 01 00:00:03 1970 +0000
+ summary: c
+
+ changeset: 1:97d72e5f12c7
+ user: test
+ date: Thu Jan 01 00:00:02 1970 +0000
+ summary: b
+
+ changeset: 0:8580ff50825a
+ user: test
+ date: Thu Jan 01 00:00:01 1970 +0000
+ summary: a
+
+
+ $ hg histedit 1ddb6c90f2ee --commands - 2>&1 --config rewrite.update-timestamp=True <<EOF | fixbundle
+ > pick 178e35e0ce73 f
+ > fold 1ddb6c90f2ee e
+ > EOF
+
+log after edit
+observe time from f is updated
+
+ $ hg log
+ changeset: 4:f7909b1863a2
+ tag: tip
+ user: test
+ date: Thu Jan 01 00:00:01 1970 +0000
+ summary: f
+
+ changeset: 3:532247a8969b
+ user: test
+ date: Thu Jan 01 00:00:04 1970 +0000
+ summary: d
+
+ changeset: 2:ff2c9fa2018b
+ user: test
+ date: Thu Jan 01 00:00:03 1970 +0000
+ summary: c
+
+ changeset: 1:97d72e5f12c7
+ user: test
+ date: Thu Jan 01 00:00:02 1970 +0000
+ summary: b
+
+ changeset: 0:8580ff50825a
+ user: test
+ date: Thu Jan 01 00:00:01 1970 +0000
+ summary: a
+
+post-fold manifest
+ $ hg manifest
+ a
+ b
+ c
+ d
+ e
+ f
+
+ $ cd ..
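
The timestamp behavior exercised above can be enabled persistently instead of
per-invocation; the equivalent hgrc stanza for the --config used in these
tests:

  [rewrite]
  update-timestamp = true
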
--- a/tests/test-histedit-no-backup.t Wed Jan 09 20:00:35 2019 -0800
+++ b/tests/test-histedit-no-backup.t Fri Jan 18 13:28:22 2019 -0500
@@ -6,9 +6,9 @@
> histedit=
> EOF
-==========================================
-Test history-editing-backup config option|
-==========================================
+=================================
+Test backup-bundle config option|
+=================================
Repo setup:
$ hg init foo
$ cd foo
@@ -33,7 +33,7 @@
o 0 36b4bdd91f5b 1970-01-01 00:00 +0000 test
one
-Test when `history-editing-backup` config option is enabled:
+Test when `backup-bundle` config option is enabled:
$ hg histedit -r '36b4bdd91f5b' --commands - << EOF
> pick 36b4bdd91f5b 0 one
> pick 6153eb23e623 1 two
@@ -49,11 +49,11 @@
saved backup bundle to $TESTTMP/foo/.hg/strip-backup/1d8f701c7b35-cf7be322-backup.hg
saved backup bundle to $TESTTMP/foo/.hg/strip-backup/5c0056670bce-b54b65d0-backup.hg
-Test when `history-editing-backup` config option is not enabled
+Test when `backup-bundle` config option is not enabled
Enable config option:
$ cat >>$HGRCPATH <<EOF
- > [ui]
- > history-editing-backup=False
+ > [rewrite]
+ > backup-bundle = False
> EOF
$ hg histedit -r '36b4bdd91f5b' --commands - << EOF
--- a/tests/test-histedit-non-commute.t Wed Jan 09 20:00:35 2019 -0800
+++ b/tests/test-histedit-non-commute.t Fri Jan 18 13:28:22 2019 -0500
@@ -161,7 +161,10 @@
warning: conflicts while merging e! (edit, then use 'hg resolve --mark')
Fix up the change (pick 7b4e2f4b7bcd)
(hg histedit --continue to resume)
- $ hg histedit --continue 2>&1 | fixbundle
+We forcibly enable curses here so we can verify that continuing works
+with curses enabled.
+ $ hg histedit --continue --config ui.interactive=true \
+ > --config ui.interface=curses 2>&1 | fixbundle
abort: unresolved merge conflicts (see 'hg help resolve')
This failure is caused by 7b4e2f4b7bcd "e" not rebasing the non commutative
--- a/tests/test-http-bad-server.t Wed Jan 09 20:00:35 2019 -0800
+++ b/tests/test-http-bad-server.t Fri Jan 18 13:28:22 2019 -0500
@@ -5,12 +5,14 @@
$ cat > fakeversion.py << EOF
> from mercurial import util
- > util.version = lambda: '4.2'
+ > util.version = lambda: b'4.2'
> EOF
$ cat >> $HGRCPATH << EOF
> [extensions]
> fakeversion = `pwd`/fakeversion.py
+ > [format]
+ > sparse-revlog = no
> [devel]
> legacy.exchange = phases
> EOF
@@ -36,7 +38,7 @@
$ cat hg.pid > $DAEMON_PIDS
$ hg clone http://localhost:$HGPORT/ clone
- abort: error: $ECONNRESET$
+ abort: error: (\$ECONNRESET\$|\$EADDRNOTAVAIL\$) (re)
[255]
(The server exits on its own, but there is a race between that and starting a new server.
--- a/tests/test-http-bundle1.t Wed Jan 09 20:00:35 2019 -0800
+++ b/tests/test-http-bundle1.t Fri Jan 18 13:28:22 2019 -0500
@@ -186,7 +186,7 @@
> if base64.b64decode(auth.split()[1]).split(b':', 1) != [b'user',
> b'pass']:
> raise common.ErrorResponse(common.HTTP_FORBIDDEN, b'no')
- > def extsetup():
+ > def extsetup(ui):
> common.permhooks.insert(0, perform_authentication)
> EOT
$ hg serve --config extensions.x=userpass.py -p $HGPORT2 -d --pid-file=pid \
--- a/tests/test-http-protocol.t Wed Jan 09 20:00:35 2019 -0800
+++ b/tests/test-http-protocol.t Fri Jan 18 13:28:22 2019 -0500
@@ -192,7 +192,7 @@
s> Content-Type: application/mercurial-0.1\r\n
s> Content-Length: *\r\n (glob)
s> \r\n
- s> batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset compression=$BUNDLE2_COMPRESSIONS$ getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx known lookup pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
+ s> batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset compression=$BUNDLE2_COMPRESSIONS$ getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx known lookup pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
sending listkeys command
s> GET /?cmd=listkeys HTTP/1.1\r\n
s> Accept-Encoding: identity\r\n
@@ -266,7 +266,7 @@
s> Content-Type: application/mercurial-0.1\r\n
s> Content-Length: *\r\n (glob)
s> \r\n
- s> batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset compression=$BUNDLE2_COMPRESSIONS$ getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx known lookup pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
+ s> batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset compression=$BUNDLE2_COMPRESSIONS$ getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx known lookup pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
sending heads command
s> GET /?cmd=heads HTTP/1.1\r\n
s> Accept-Encoding: identity\r\n
@@ -315,7 +315,7 @@
s> Content-Type: application/mercurial-cbor\r\n
s> Content-Length: *\r\n (glob)
s> \r\n
- s> \xa3GapibaseDapi/Dapis\xa1Pexp-http-v2-0003\xa4Hcommands\xacIbranchmap\xa2Dargs\xa0Kpermissions\x81DpullLcapabilities\xa2Dargs\xa0Kpermissions\x81DpullMchangesetdata\xa2Dargs\xa2Ffields\xa4Gdefault\xd9\x01\x02\x80Hrequired\xf4DtypeCsetKvalidvalues\xd9\x01\x02\x84IbookmarksGparentsEphaseHrevisionIrevisions\xa2Hrequired\xf5DtypeDlistKpermissions\x81DpullHfiledata\xa2Dargs\xa4Ffields\xa4Gdefault\xd9\x01\x02\x80Hrequired\xf4DtypeCsetKvalidvalues\xd9\x01\x02\x83HlinknodeGparentsHrevisionKhaveparents\xa3Gdefault\xf4Hrequired\xf4DtypeDboolEnodes\xa2Hrequired\xf5DtypeDlistDpath\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpullIfilesdata\xa3Dargs\xa4Ffields\xa4Gdefault\xd9\x01\x02\x80Hrequired\xf4DtypeCsetKvalidvalues\xd9\x01\x02\x84NfirstchangesetHlinknodeGparentsHrevisionKhaveparents\xa3Gdefault\xf4Hrequired\xf4DtypeDboolJpathfilter\xa3Gdefault\xf6Hrequired\xf4DtypeDdictIrevisions\xa2Hrequired\xf5DtypeDlistKpermissions\x81DpullTrecommendedbatchsize\x19\xc3PEheads\xa2Dargs\xa1Jpubliconly\xa3Gdefault\xf4Hrequired\xf4DtypeDboolKpermissions\x81DpullEknown\xa2Dargs\xa1Enodes\xa3Gdefault\x80Hrequired\xf4DtypeDlistKpermissions\x81DpullHlistkeys\xa2Dargs\xa1Inamespace\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpullFlookup\xa2Dargs\xa1Ckey\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpullLmanifestdata\xa3Dargs\xa4Ffields\xa4Gdefault\xd9\x01\x02\x80Hrequired\xf4DtypeCsetKvalidvalues\xd9\x01\x02\x82GparentsHrevisionKhaveparents\xa3Gdefault\xf4Hrequired\xf4DtypeDboolEnodes\xa2Hrequired\xf5DtypeDlistDtree\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpullTrecommendedbatchsize\x1a\x00\x01\x86\xa0Gpushkey\xa2Dargs\xa4Ckey\xa2Hrequired\xf5DtypeEbytesInamespace\xa2Hrequired\xf5DtypeEbytesCnew\xa2Hrequired\xf5DtypeEbytesCold\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpushPrawstorefiledata\xa2Dargs\xa2Efiles\xa2Hrequired\xf5DtypeDlistJpathfilter\xa3Gdefault\xf6Hrequired\xf4DtypeDlistKpermissions\x81DpullQframingmediatypes\x81X&application/mercurial-exp-framing-0006Rpathfilterprefixes\xd9\x01\x02\x82Epath:Lrootfilesin:Nrawrepoformats\x82LgeneraldeltaHrevlogv1Nv1capabilitiesY\x01\xd3batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset compression=$BUNDLE2_COMPRESSIONS$ getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx known lookup pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
+ s> \xa3GapibaseDapi/Dapis\xa1Pexp-http-v2-0003\xa4Hcommands\xacIbranchmap\xa2Dargs\xa0Kpermissions\x81DpullLcapabilities\xa2Dargs\xa0Kpermissions\x81DpullMchangesetdata\xa2Dargs\xa2Ffields\xa4Gdefault\xd9\x01\x02\x80Hrequired\xf4DtypeCsetKvalidvalues\xd9\x01\x02\x84IbookmarksGparentsEphaseHrevisionIrevisions\xa2Hrequired\xf5DtypeDlistKpermissions\x81DpullHfiledata\xa2Dargs\xa4Ffields\xa4Gdefault\xd9\x01\x02\x80Hrequired\xf4DtypeCsetKvalidvalues\xd9\x01\x02\x83HlinknodeGparentsHrevisionKhaveparents\xa3Gdefault\xf4Hrequired\xf4DtypeDboolEnodes\xa2Hrequired\xf5DtypeDlistDpath\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpullIfilesdata\xa3Dargs\xa4Ffields\xa4Gdefault\xd9\x01\x02\x80Hrequired\xf4DtypeCsetKvalidvalues\xd9\x01\x02\x84NfirstchangesetHlinknodeGparentsHrevisionKhaveparents\xa3Gdefault\xf4Hrequired\xf4DtypeDboolJpathfilter\xa3Gdefault\xf6Hrequired\xf4DtypeDdictIrevisions\xa2Hrequired\xf5DtypeDlistKpermissions\x81DpullTrecommendedbatchsize\x19\xc3PEheads\xa2Dargs\xa1Jpubliconly\xa3Gdefault\xf4Hrequired\xf4DtypeDboolKpermissions\x81DpullEknown\xa2Dargs\xa1Enodes\xa3Gdefault\x80Hrequired\xf4DtypeDlistKpermissions\x81DpullHlistkeys\xa2Dargs\xa1Inamespace\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpullFlookup\xa2Dargs\xa1Ckey\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpullLmanifestdata\xa3Dargs\xa4Ffields\xa4Gdefault\xd9\x01\x02\x80Hrequired\xf4DtypeCsetKvalidvalues\xd9\x01\x02\x82GparentsHrevisionKhaveparents\xa3Gdefault\xf4Hrequired\xf4DtypeDboolEnodes\xa2Hrequired\xf5DtypeDlistDtree\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpullTrecommendedbatchsize\x1a\x00\x01\x86\xa0Gpushkey\xa2Dargs\xa4Ckey\xa2Hrequired\xf5DtypeEbytesInamespace\xa2Hrequired\xf5DtypeEbytesCnew\xa2Hrequired\xf5DtypeEbytesCold\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpushPrawstorefiledata\xa2Dargs\xa2Efiles\xa2Hrequired\xf5DtypeDlistJpathfilter\xa3Gdefault\xf6Hrequired\xf4DtypeDlistKpermissions\x81DpullQframingmediatypes\x81X&application/mercurial-exp-framing-0006Rpathfilterprefixes\xd9\x01\x02\x82Epath:Lrootfilesin:Nrawrepoformats\x83LgeneraldeltaHrevlogv1LsparserevlogNv1capabilitiesY\x01\xe0batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset compression=$BUNDLE2_COMPRESSIONS$ getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx known lookup pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
sending heads command
s> POST /api/exp-http-v2-0003/ro/heads HTTP/1.1\r\n
s> Accept-Encoding: identity\r\n
@@ -432,9 +432,9 @@
s> Server: testing stub value\r\n
s> Date: $HTTP_DATE$\r\n
s> Content-Type: application/mercurial-0.1\r\n
- s> Content-Length: 467\r\n
+ s> Content-Length: 480\r\n
s> \r\n
- s> batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset compression=$BUNDLE2_COMPRESSIONS$ getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx known lookup pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
+ s> batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset compression=$BUNDLE2_COMPRESSIONS$ getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx known lookup pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
Test with the HTTP peer
@@ -467,10 +467,10 @@
s> Server: testing stub value\r\n
s> Date: $HTTP_DATE$\r\n
s> Content-Type: application/mercurial-0.1\r\n
- s> Content-Length: 467\r\n
+ s> Content-Length: 480\r\n
s> \r\n
real URL is http://$LOCALIP:$HGPORT/redirected (glob)
- s> batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset compression=$BUNDLE2_COMPRESSIONS$ getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx known lookup pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
+ s> batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset compression=$BUNDLE2_COMPRESSIONS$ getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx known lookup pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
sending heads command
s> GET /redirected?cmd=heads HTTP/1.1\r\n
s> Accept-Encoding: identity\r\n
@@ -732,10 +732,10 @@
s> Server: testing stub value\r\n
s> Date: $HTTP_DATE$\r\n
s> Content-Type: application/mercurial-0.1\r\n
- s> Content-Length: 467\r\n
+ s> Content-Length: 480\r\n
s> \r\n
real URL is http://$LOCALIP:$HGPORT/redirected (glob)
- s> batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset compression=$BUNDLE2_COMPRESSIONS$ getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx known lookup pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
+ s> batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset compression=$BUNDLE2_COMPRESSIONS$ getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx known lookup pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
sending heads command
s> GET /redirected?cmd=heads HTTP/1.1\r\n
s> Accept-Encoding: identity\r\n
--- a/tests/test-http-proxy.t Wed Jan 09 20:00:35 2019 -0800
+++ b/tests/test-http-proxy.t Fri Jan 18 13:28:22 2019 -0500
@@ -90,7 +90,7 @@
misconfigured hosts)
$ http_proxy=localhost:$HGPORT2 hg clone --config http_proxy.always=True http://localhost:$HGPORT/ f
- abort: error: (Connection refused|Protocol not supported|.* actively refused it|Cannot assign requested address) (re)
+ abort: error: (Connection refused|Protocol not supported|.* actively refused it|\$EADDRNOTAVAIL\$) (re)
[255]
do not use the proxy if it is in the no list
--- a/tests/test-http.t Wed Jan 09 20:00:35 2019 -0800
+++ b/tests/test-http.t Fri Jan 18 13:28:22 2019 -0500
@@ -140,7 +140,7 @@
$ cd copy-pull
$ cat >> .hg/hgrc <<EOF
> [hooks]
- > changegroup = sh -c "printenv.py changegroup"
+ > changegroup = sh -c "printenv.py --line changegroup"
> EOF
$ hg pull
pulling from http://localhost:$HGPORT1/
@@ -150,7 +150,14 @@
adding file changes
added 1 changesets with 1 changes to 1 files
new changesets 5fed3813f7f5
- changegroup hook: HG_HOOKNAME=changegroup HG_HOOKTYPE=changegroup HG_NODE=5fed3813f7f5e1824344fdc9cf8f63bb662c292d HG_NODE_LAST=5fed3813f7f5e1824344fdc9cf8f63bb662c292d HG_SOURCE=pull HG_TXNID=TXN:$ID$ HG_URL=http://localhost:$HGPORT1/
+ changegroup hook: HG_HOOKNAME=changegroup
+ HG_HOOKTYPE=changegroup
+ HG_NODE=5fed3813f7f5e1824344fdc9cf8f63bb662c292d
+ HG_NODE_LAST=5fed3813f7f5e1824344fdc9cf8f63bb662c292d
+ HG_SOURCE=pull
+ HG_TXNID=TXN:$ID$
+ HG_URL=http://localhost:$HGPORT1/
+
(run 'hg update' to get a working copy)
$ cd ..
@@ -174,7 +181,7 @@
> [(b'WWW-Authenticate', b'Basic Realm="mercurial"')])
> if base64.b64decode(auth.split()[1]).split(b':', 1) != [b'user', b'pass']:
> raise common.ErrorResponse(common.HTTP_FORBIDDEN, b'no')
- > def extsetup():
+ > def extsetup(ui):
> common.permhooks.insert(0, perform_authentication)
> EOT
$ hg serve --config extensions.x=userpass.py -p $HGPORT2 -d --pid-file=pid \
@@ -519,7 +526,7 @@
> if not cookie:
> raise common.ErrorResponse(common.HTTP_SERVER_ERROR, b'no-cookie')
> raise common.ErrorResponse(common.HTTP_SERVER_ERROR, b'Cookie: %s' % cookie)
- > def extsetup():
+ > def extsetup(ui):
> common.permhooks.insert(0, perform_authentication)
> EOF
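
Several hunks in this series change `def extsetup():` to `def extsetup(ui):`, matching the calling convention where hg passes the ui to an extension's setup hook. A sketch of a skeleton tolerant of both conventions (the optional-argument trick is an illustration, not something the hunks above require):

    # extsetup runs once after all extensions are loaded; accepting ui as
    # an optional parameter keeps the file importable under the old no-arg
    # convention as well as the newer extsetup(ui) one.
    def extsetup(ui=None):
        if ui is not None:
            ui.debug(b'extension loaded\n')
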
--- a/tests/test-import.t Wed Jan 09 20:00:35 2019 -0800
+++ b/tests/test-import.t Fri Jan 18 13:28:22 2019 -0500
@@ -66,7 +66,7 @@
new changesets 80971e65b431
updating to branch default
2 files updated, 0 files merged, 0 files removed, 0 files unresolved
- $ HGEDITOR=cat hg --config ui.patch="$PYTHON ../dummypatch.py" --cwd b import --edit ../exported-tip.patch
+ $ HGEDITOR=cat hg --config ui.patch="\"$PYTHON\" ../dummypatch.py" --cwd b import --edit ../exported-tip.patch
applying ../exported-tip.patch
second change
--- a/tests/test-inherit-mode.t Wed Jan 09 20:00:35 2019 -0800
+++ b/tests/test-inherit-mode.t Fri Jan 18 13:28:22 2019 -0500
@@ -42,7 +42,7 @@
$ hg init repo
$ cd repo
- $ chmod 0770 .hg/store
+ $ chmod 0770 .hg/store .hg/cache .hg/wcache
before commit
store can be written by the group, other files cannot
@@ -51,8 +51,10 @@
$ "$PYTHON" ../printmodes.py .
00700 ./.hg/
00600 ./.hg/00changelog.i
+ 00770 ./.hg/cache/
00600 ./.hg/requires
00770 ./.hg/store/
+ 00770 ./.hg/wcache/
$ mkdir dir
$ touch foo dir/bar
@@ -69,9 +71,6 @@
00600 ./.hg/00changelog.i
00770 ./.hg/cache/
00660 ./.hg/cache/branch2-served
- 00711 ./.hg/cache/checkisexec
- 007.. ./.hg/cache/checklink (re)
- 00600 ./.hg/cache/checklink-target
00660 ./.hg/cache/manifestfulltextcache (reporevlogstore !)
00660 ./.hg/cache/rbc-names-v1
00660 ./.hg/cache/rbc-revs-v1
@@ -102,6 +101,10 @@
00660 ./.hg/undo.branch
00660 ./.hg/undo.desc
00660 ./.hg/undo.dirstate
+ 00770 ./.hg/wcache/
+ 00711 ./.hg/wcache/checkisexec
+ 007.. ./.hg/wcache/checklink (re)
+ 00600 ./.hg/wcache/checklink-target
00700 ./dir/
00600 ./dir/bar
00600 ./foo
@@ -115,8 +118,10 @@
$ "$PYTHON" ../printmodes.py ../push
00770 ../push/.hg/
00660 ../push/.hg/00changelog.i
+ 00770 ../push/.hg/cache/
00660 ../push/.hg/requires
00770 ../push/.hg/store/
+ 00770 ../push/.hg/wcache/
$ umask 077
$ hg -q push ../push
@@ -152,6 +157,7 @@
00660 ../push/.hg/undo.branch
00660 ../push/.hg/undo.desc
00660 ../push/.hg/undo.dirstate
+ 00770 ../push/.hg/wcache/
Test that we don't lose the setgid bit when we call chmod.
--- a/tests/test-init.t Wed Jan 09 20:00:35 2019 -0800
+++ b/tests/test-init.t Fri Jan 18 13:28:22 2019 -0500
@@ -22,6 +22,7 @@
fncache
generaldelta
revlogv1
+ sparserevlog
store
testonly-simplestore (reposimplestore !)
$ echo this > local/foo
@@ -60,6 +61,7 @@
generaldelta
revlogv1
testonly-simplestore (reposimplestore !)
+ sparserevlog
creating repo with format.usefncache=false
@@ -69,6 +71,7 @@
00changelog.i created
generaldelta
revlogv1
+ sparserevlog
store
testonly-simplestore (reposimplestore !)
@@ -81,12 +84,13 @@
fncache
generaldelta
revlogv1
+ sparserevlog
store
testonly-simplestore (reposimplestore !)
creating repo with format.dotencode=false
- $ hg --config format.generaldelta=false --config format.usegeneraldelta=false init old4
+ $ hg --config format.generaldelta=false --config format.usegeneraldelta=false --config format.sparse-revlog=no init old4
$ checknewrepo old4
store created
00changelog.i created
@@ -210,6 +214,7 @@
fncache
generaldelta
revlogv1
+ sparserevlog
store
testonly-simplestore (reposimplestore !)
@@ -229,6 +234,7 @@
fncache
generaldelta
revlogv1
+ sparserevlog
store
testonly-simplestore (reposimplestore !)
@@ -244,6 +250,7 @@
fncache
generaldelta
revlogv1
+ sparserevlog
store
testonly-simplestore (reposimplestore !)
--- a/tests/test-install.t Wed Jan 09 20:00:35 2019 -0800
+++ b/tests/test-install.t Fri Jan 18 13:28:22 2019 -0500
@@ -2,8 +2,9 @@
$ hg debuginstall
checking encoding (ascii)...
checking Python executable (*) (glob)
- checking Python version (2.*) (glob)
- checking Python lib (*lib*)... (glob)
+ checking Python version (2.*) (glob) (no-py3 !)
+ checking Python version (3.*) (glob) (py3 !)
+ checking Python lib (.*[Ll]ib.*)... (re)
checking Python security support (*) (glob)
TLS 1.2 not supported by Python install; network connections lack modern security (?)
SNI not supported by Python install; may have connectivity issues with some servers (?)
@@ -57,8 +58,9 @@
$ HGUSER= hg debuginstall
checking encoding (ascii)...
checking Python executable (*) (glob)
- checking Python version (2.*) (glob)
- checking Python lib (*lib*)... (glob)
+ checking Python version (2.*) (glob) (no-py3 !)
+ checking Python version (3.*) (glob) (py3 !)
+ checking Python lib (.*[Ll]ib.*)... (re)
checking Python security support (*) (glob)
TLS 1.2 not supported by Python install; network connections lack modern security (?)
SNI not supported by Python install; may have connectivity issues with some servers (?)
@@ -101,8 +103,9 @@
$ HGEDITOR="~/tools/testeditor.exe" hg debuginstall
checking encoding (ascii)...
checking Python executable (*) (glob)
- checking Python version (*) (glob)
- checking Python lib (*lib*)... (glob)
+ checking Python version (2.*) (glob) (no-py3 !)
+ checking Python version (3.*) (glob) (py3 !)
+ checking Python lib (.*[Ll]ib.*)... (re)
checking Python security support (*) (glob)
TLS 1.2 not supported by Python install; network connections lack modern security (?)
SNI not supported by Python install; may have connectivity issues with some servers (?)
@@ -125,8 +128,9 @@
$ HGEDITOR="c:\foo\bar\baz.exe -y -z" hg debuginstall
checking encoding (ascii)...
checking Python executable (*) (glob)
- checking Python version (*) (glob)
- checking Python lib (*lib*)... (glob)
+ checking Python version (2.*) (glob) (no-py3 !)
+ checking Python version (3.*) (glob) (py3 !)
+ checking Python lib (.*[Ll]ib.*)... (re)
checking Python security support (*) (glob)
TLS 1.2 not supported by Python install; network connections lack modern security (?)
SNI not supported by Python install; may have connectivity issues with some servers (?)
@@ -240,7 +244,8 @@
$ ./installenv/*/hg debuginstall || cat pip.log
checking encoding (ascii)...
checking Python executable (*) (glob)
- checking Python version (2.*) (glob)
+ checking Python version (2.*) (glob) (no-py3 !)
+ checking Python version (3.*) (glob) (py3 !)
checking Python lib (*)... (glob)
checking Python security support (*) (glob)
TLS 1.2 not supported by Python install; network connections lack modern security (?)
--- a/tests/test-largefiles-update.t Wed Jan 09 20:00:35 2019 -0800
+++ b/tests/test-largefiles-update.t Fri Jan 18 13:28:22 2019 -0500
@@ -5,7 +5,7 @@
$ cat >> $HGRCPATH <<EOF
> [ui]
- > merge = internal:fail
+ > merge = internal:merge
> [extensions]
> largefiles =
> [extdiff]
--- a/tests/test-largefiles-wireproto.t Wed Jan 09 20:00:35 2019 -0800
+++ b/tests/test-largefiles-wireproto.t Fri Jan 18 13:28:22 2019 -0500
@@ -240,10 +240,10 @@
Archive contains largefiles
>>> import os
- >>> import urllib2
+ >>> from mercurial import urllibcompat
>>> u = 'http://localhost:%s/archive/default.zip' % os.environ['HGPORT2']
- >>> with open('archive.zip', 'w') as f:
- ... f.write(urllib2.urlopen(u).read()) and None
+ >>> with open('archive.zip', 'wb') as f:
+ ... f.write(urllibcompat.urlreq.urlopen(u).read()) and None
$ unzip -t archive.zip
Archive: archive.zip
testing: empty-default/.hg_archival.txt*OK (glob)
@@ -430,7 +430,7 @@
> [(b'WWW-Authenticate', b'Basic Realm="mercurial"')])
> if base64.b64decode(auth.split()[1]).split(b':', 1) != [b'user', b'pass']:
> raise common.ErrorResponse(common.HTTP_FORBIDDEN, b'no')
- > def extsetup():
+ > def extsetup(ui):
> common.permhooks.insert(0, perform_authentication)
> EOT
$ hg serve --config extensions.x=userpass.py -R credentialmain \
--- a/tests/test-lfconvert.t Wed Jan 09 20:00:35 2019 -0800
+++ b/tests/test-lfconvert.t Fri Jan 18 13:28:22 2019 -0500
@@ -100,6 +100,7 @@
generaldelta
largefiles
revlogv1
+ sparserevlog
store
testonly-simplestore (reposimplestore !)
@@ -125,10 +126,10 @@
$ hg rm large normal3
$ hg commit -q -m"remove large, normal3"
$ hg merge
- merging sub/maybelarge.dat and stuff/maybelarge.dat to stuff/maybelarge.dat
+ tool internal:merge (for pattern stuff/maybelarge.dat) can't handle binary
+ no tool found to merge stuff/maybelarge.dat
+ keep (l)ocal [working copy], take (o)ther [merge rev], or leave (u)nresolved for stuff/maybelarge.dat? u
merging sub/normal2 and stuff/normal2 to stuff/normal2
- warning: stuff/maybelarge.dat looks like a binary file.
- warning: conflicts while merging stuff/maybelarge.dat! (edit, then use 'hg resolve --mark')
0 files updated, 1 files merged, 0 files removed, 1 files unresolved
use 'hg resolve' to retry unresolved file merges or 'hg merge --abort' to abandon
[1]
@@ -356,20 +357,27 @@
Ensure the largefile can be cached in the source if necessary
$ hg clone -U largefiles-repo issue3519
$ rm -f "${USERCACHE}"/*
+ $ hg -R issue3519 branch -q mybranch
+ $ hg -R issue3519 ci -m 'change branch name only'
$ hg lfconvert --to-normal issue3519 normalized3519
initializing destination normalized3519
4 additional largefiles cached
scanning source...
sorting...
converting...
- 7 add large, normal1
- 6 add sub/*
- 5 rename sub/ to stuff/
- 4 add normal3, modify sub/*
- 3 remove large, normal3
- 2 merge
- 1 add anotherlarge (should be a largefile)
- 0 Added tag mytag for changeset abacddda7028
+ 8 add large, normal1
+ 7 add sub/*
+ 6 rename sub/ to stuff/
+ 5 add normal3, modify sub/*
+ 4 remove large, normal3
+ 3 merge
+ 2 add anotherlarge (should be a largefile)
+ 1 Added tag mytag for changeset abacddda7028
+ 0 change branch name only
+
+Ensure empty commits aren't lost in the conversion
+ $ hg -R normalized3519 log -r tip -T '{desc}\n'
+ change branch name only
Ensure the abort message is useful if a largefile is entirely unavailable
$ rm -rf normalized3519
--- a/tests/test-lfs-largefiles.t Wed Jan 09 20:00:35 2019 -0800
+++ b/tests/test-lfs-largefiles.t Fri Jan 18 13:28:22 2019 -0500
@@ -294,6 +294,7 @@
generaldelta
lfs
revlogv1
+ sparserevlog
store
$ hg log -r 'all()' -G -T '{rev} {join(lfs_files, ", ")} ({desc})\n'
--- a/tests/test-lfs-serve-access.t Wed Jan 09 20:00:35 2019 -0800
+++ b/tests/test-lfs-serve-access.t Fri Jan 18 13:28:22 2019 -0500
@@ -29,7 +29,8 @@
$ hg -R client push http://localhost:$HGPORT
pushing to http://localhost:$HGPORT/
searching for changes
- abort: LFS HTTP error: HTTP Error 400: no such method: .git (action=upload)!
+ abort: LFS HTTP error: HTTP Error 400: no such method: .git!
+ (check that lfs serving is enabled on http://localhost:$HGPORT/.git/info/lfs and "upload" is supported)
[255]
... so do a local push to make the data available. Remove the blob from the
@@ -50,7 +51,8 @@
added 1 changesets with 1 changes to 1 files
new changesets 525251863cad
updating to branch default
- abort: LFS HTTP error: HTTP Error 400: no such method: .git (action=download)!
+ abort: LFS HTTP error: HTTP Error 400: no such method: .git!
+ (check that lfs serving is enabled on http://localhost:$HGPORT/.git/info/lfs and "download" is supported)
[255]
$ "$PYTHON" $RUNTESTDIR/killdaemons.py $DAEMON_PIDS
@@ -66,14 +68,27 @@
$LOCALIP - - [$LOGDATE$] "GET /?cmd=getbundle HTTP/1.1" 200 - x-hgarg-1:bookmarks=1&bundlecaps=HG20%2Cbundle2%3DHG20%250Abookmarks%250Achangegroup%253D01%252C02%252C03%250Adigests%253Dmd5%252Csha1%252Csha512%250Aerror%253Dabort%252Cunsupportedcontent%252Cpushraced%252Cpushkey%250Ahgtagsfnodes%250Alistkeys%250Aphases%253Dheads%250Apushkey%250Aremote-changegroup%253Dhttp%252Chttps%250Arev-branch-cache%250Astream%253Dv2&cg=1&common=0000000000000000000000000000000000000000&heads=525251863cad618e55d483555f3d00a2ca99597e&listkeys=bookmarks&phases=1 x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull (glob)
$LOCALIP - - [$LOGDATE$] "POST /.git/info/lfs/objects/batch HTTP/1.1" 400 - (glob)
-Blob URIs are correct when --prefix is used
-
$ rm -f $TESTTMP/access.log $TESTTMP/errors.log
$ hg --config "lfs.usercache=$TESTTMP/servercache" -R server serve -d \
> -p $HGPORT --pid-file=hg.pid --prefix=subdir/mount/point \
> -A $TESTTMP/access.log -E $TESTTMP/errors.log
$ cat hg.pid >> $DAEMON_PIDS
+Reasonable hint for a misconfigured blob server
+
+ $ hg -R httpclone update default --config lfs.url=http://localhost:$HGPORT/missing
+ abort: LFS HTTP error: HTTP Error 404: Not Found!
+ (the "lfs.url" config may be used to override http://localhost:$HGPORT/missing)
+ [255]
+
+ $ hg -R httpclone update default --config lfs.url=http://localhost:$HGPORT2/missing
+ abort: LFS error: *onnection *refused*! (glob) (?)
+ abort: LFS error: $EADDRNOTAVAIL$! (glob) (?)
+ (the "lfs.url" config may be used to override http://localhost:$HGPORT2/missing)
+ [255]
+
+Blob URIs are correct when --prefix is used
+
$ hg clone --debug http://localhost:$HGPORT/subdir/mount/point cloned2
using http://localhost:$HGPORT/subdir/mount/point
sending capabilities command
@@ -146,6 +161,7 @@
$ "$PYTHON" $RUNTESTDIR/killdaemons.py $DAEMON_PIDS
$ cat $TESTTMP/access.log $TESTTMP/errors.log
+ $LOCALIP - - [$LOGDATE$] "POST /missing/objects/batch HTTP/1.1" 404 - (glob)
$LOCALIP - - [$LOGDATE$] "GET /subdir/mount/point?cmd=capabilities HTTP/1.1" 200 - (glob)
$LOCALIP - - [$LOGDATE$] "GET /subdir/mount/point?cmd=batch HTTP/1.1" 200 - x-hgarg-1:cmds=heads+%3Bknown+nodes%3D x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull (glob)
$LOCALIP - - [$LOGDATE$] "GET /subdir/mount/point?cmd=getbundle HTTP/1.1" 200 - x-hgarg-1:bookmarks=1&bundlecaps=HG20%2Cbundle2%3DHG20%250Abookmarks%250Achangegroup%253D01%252C02%252C03%250Adigests%253Dmd5%252Csha1%252Csha512%250Aerror%253Dabort%252Cunsupportedcontent%252Cpushraced%252Cpushkey%250Ahgtagsfnodes%250Alistkeys%250Aphases%253Dheads%250Apushkey%250Aremote-changegroup%253Dhttp%252Chttps%250Arev-branch-cache%250Astream%253Dv2&cg=1&common=0000000000000000000000000000000000000000&heads=525251863cad618e55d483555f3d00a2ca99597e&listkeys=bookmarks&phases=1 x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull (glob)
@@ -267,7 +283,7 @@
$ hg -R client push http://localhost:$HGPORT1
pushing to http://localhost:$HGPORT1/
searching for changes
- abort: HTTP error: HTTP Error 422: corrupt blob (oid=b5bb9d8014a0f9b1d61e21e796d78dccdf1352f23cd32812f4850b878ae4944c, action=upload)!
+ abort: LFS HTTP error: HTTP Error 422: corrupt blob (oid=b5bb9d8014a0f9b1d61e21e796d78dccdf1352f23cd32812f4850b878ae4944c, action=upload)!
[255]
$ echo 'test lfs file' > server/lfs3.bin
@@ -279,14 +295,14 @@
$ hg --config lfs.url=http://localhost:$HGPORT1/.git/info/lfs \
> -R client update -r tip
- abort: HTTP error: HTTP Error 500: Internal Server Error (oid=276f73cfd75f9fb519810df5f5d96d6594ca2521abd86cbcd92122f7d51a1f3d, action=download)!
+ abort: LFS HTTP error: HTTP Error 500: Internal Server Error (oid=276f73cfd75f9fb519810df5f5d96d6594ca2521abd86cbcd92122f7d51a1f3d, action=download)!
[255]
Test a checksum failure during the processing of the GET request
$ hg --config lfs.url=http://localhost:$HGPORT1/.git/info/lfs \
> -R client update -r tip
- abort: HTTP error: HTTP Error 422: corrupt blob (oid=276f73cfd75f9fb519810df5f5d96d6594ca2521abd86cbcd92122f7d51a1f3d, action=download)!
+ abort: LFS HTTP error: HTTP Error 422: corrupt blob (oid=276f73cfd75f9fb519810df5f5d96d6594ca2521abd86cbcd92122f7d51a1f3d, action=download)!
[255]
$ "$PYTHON" $RUNTESTDIR/killdaemons.py $DAEMON_PIDS
@@ -388,7 +404,7 @@
> if base64.b64decode(auth.split()[1]).split(b':', 1) != [b'user',
> b'pass']:
> raise common.ErrorResponse(common.HTTP_FORBIDDEN, b'no')
- > def extsetup():
+ > def extsetup(ui):
> common.permhooks.insert(0, perform_authentication)
> EOF
--- a/tests/test-lfs-serve.t Wed Jan 09 20:00:35 2019 -0800
+++ b/tests/test-lfs-serve.t Fri Jan 18 13:28:22 2019 -0500
@@ -131,7 +131,7 @@
upgrade will perform the following actions:
requirements
- preserved: dotencode, fncache, generaldelta, revlogv1, store
+ preserved: dotencode, fncache, generaldelta, revlogv1, sparserevlog, store
beginning upgrade...
repository locked and read-only
--- a/tests/test-log-exthook.t Wed Jan 09 20:00:35 2019 -0800
+++ b/tests/test-log-exthook.t Fri Jan 18 13:28:22 2019 -0500
@@ -9,10 +9,12 @@
> logcmdutil,
> repair,
> )
+ > def brot13(b):
+ > return codecs.encode(b.decode('utf8'), 'rot-13').encode('utf8')
> def rot13description(self, ctx):
- > summary = codecs.encode("summary", 'rot-13')
- > description = ctx.description().strip().splitlines()[0].encode('rot13')
- > self.ui.write("%s: %s\n" % (summary, description))
+ > description = ctx.description().strip().splitlines()[0]
+ > self.ui.write(b"%s: %s\n" % (brot13(b"summary"),
+ > brot13(description)))
> def reposetup(ui, repo):
> logcmdutil.changesetprinter._exthook = rot13description
> EOF
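
The rewritten hook routes bytes through a str round-trip because Python 3's 'rot-13' codec only maps str to str. The same helper runs standalone:

    import codecs

    def brot13(b):
        # Decode, apply the text-only rot-13 codec, re-encode to bytes.
        return codecs.encode(b.decode('utf8'), 'rot-13').encode('utf8')

    assert brot13(b'summary') == b'fhzznel'
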
--- a/tests/test-log.t Wed Jan 09 20:00:35 2019 -0800
+++ b/tests/test-log.t Fri Jan 18 13:28:22 2019 -0500
@@ -2637,3 +2637,123 @@
summary: A1B1C1
$ cd ..
+
+--- going to test the line-wrap fix when using both --stat and -G (issue5800)
+ $ hg init issue5800
+ $ cd issue5800
+ $ touch a
+ $ hg ci -Am 'add a'
+ adding a
+---- now we are going to add 300 lines to a
+ $ for i in `$TESTDIR/seq.py 1 300`; do echo $i >> a; done
+ $ hg ci -m 'modify a'
+ $ hg log
+ changeset: 1:a98683e6a834
+ tag: tip
+ user: test
+ date: Thu Jan 01 00:00:00 1970 +0000
+ summary: modify a
+
+ changeset: 0:ac82d8b1f7c4
+ user: test
+ date: Thu Jan 01 00:00:00 1970 +0000
+ summary: add a
+
+---- now visualise the changes we made without a template
+ $ hg log -l1 -r a98683e6a834 --stat -G
+ @ changeset: 1:a98683e6a834
+ | tag: tip
+ ~ user: test
+ date: Thu Jan 01 00:00:00 1970 +0000
+ summary: modify a
+
+ a | 300 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+ 1 files changed, 300 insertions(+), 0 deletions(-)
+
+---- with a template
+ $ hg log -l1 -r a98683e6a834 --stat -G -T bisect
+ @ changeset: 1:a98683e6a834
+ | bisect:
+ ~ tag: tip
+ user: test
+ date: Thu Jan 01 00:00:00 1970 +0000
+ summary: modify a
+
+ a | 300 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+ 1 files changed, 300 insertions(+), 0 deletions(-)
+
+ $ hg log -l1 -r a98683e6a834 --stat -G -T changelog
+ 1970-01-01 test <test>
+
+ @ * a:
+ | modify a
+ ~ [a98683e6a834] [tip]
+
+ a | 300 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+ 1 files changed, 300 insertions(+), 0 deletions(-)
+
+ $ hg log -l1 -r a98683e6a834 --stat -G -T compact
+ @ 1[tip] a98683e6a834 1970-01-01 00:00 +0000 test
+ | modify a
+ ~
+ a | 300 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+ 1 files changed, 300 insertions(+), 0 deletions(-)
+
+ $ hg log -l1 -r a98683e6a834 --stat -G -T default
+ @ changeset: 1:a98683e6a834
+ | tag: tip
+ ~ user: test
+ date: Thu Jan 01 00:00:00 1970 +0000
+ summary: modify a
+
+ a | 300 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+ 1 files changed, 300 insertions(+), 0 deletions(-)
+
+ $ hg log -l1 -r a98683e6a834 --stat -G -T phases
+ @ changeset: 1:a98683e6a834
+ | tag: tip
+ ~ phase: draft
+ user: test
+ date: Thu Jan 01 00:00:00 1970 +0000
+ summary: modify a
+
+ a | 300 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+ 1 files changed, 300 insertions(+), 0 deletions(-)
+
+ $ hg log -l1 -r a98683e6a834 --stat -G -T show
+ @ changeset: 1:a98683e6a834
+ | tag: tip
+ ~ user: test
+ date: Thu Jan 01 00:00:00 1970 +0000
+ summary: modify a
+
+ a | 300 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+ 1 files changed, 300 insertions(+), 0 deletions(-)
+
+ $ hg log -l1 -r a98683e6a834 --stat -G -T status
+ @ changeset: 1:a98683e6a834
+ | tag: tip
+ ~ user: test
+ date: Thu Jan 01 00:00:00 1970 +0000
+ summary: modify a
+ files:
+ M a
+
+ a | 300 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+ 1 files changed, 300 insertions(+), 0 deletions(-)
+
+ $ hg log -l1 -r a98683e6a834 --stat -G -T xml
+ <?xml version="1.0"?>
+ <log>
+ @ <logentry revision="1" node="a98683e6a8340830a7683909768b62871e84bc9d">
+ | <tag>tip</tag>
+ ~ <author email="test">test</author>
+ <date>1970-01-01T00:00:00+00:00</date>
+ <msg xml:space="preserve">modify a</msg>
+ </logentry>
+ a | 300 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+ 1 files changed, 300 insertions(+), 0 deletions(-)
+
+ </log>
+
+ $ cd ..
--- a/tests/test-logtoprocess.t Wed Jan 09 20:00:35 2019 -0800
+++ b/tests/test-logtoprocess.t Fri Jan 18 13:28:22 2019 -0500
@@ -14,12 +14,12 @@
> command = registrar.command(cmdtable)
> configtable = {}
> configitem = registrar.configitem(configtable)
- > configitem('logtoprocess', 'foo',
+ > configitem(b'logtoprocess', b'foo',
> default=None,
> )
> @command(b'foobar', [])
> def foo(ui, repo):
- > ui.log('foo', 'a message: %s\n', 'spam')
+ > ui.log(b'foo', b'a message: %s\n', b'spam')
> EOF
$ cp $HGRCPATH $HGRCPATH.bak
$ cat >> $HGRCPATH << EOF
@@ -29,18 +29,14 @@
> [logtoprocess]
> command=(echo 'logtoprocess command output:';
> echo "\$EVENT";
- > echo "\$MSG1";
- > echo "\$MSG2") > $TESTTMP/command.log
+ > echo "\$MSG1") > $TESTTMP/command.log
> commandfinish=(echo 'logtoprocess commandfinish output:';
> echo "\$EVENT";
> echo "\$MSG1";
- > echo "\$MSG2";
- > echo "\$MSG3";
> echo "canonical: \$OPT_CANONICAL_COMMAND") > $TESTTMP/commandfinish.log
> foo=(echo 'logtoprocess foo output:';
> echo "\$EVENT";
- > echo "\$MSG1";
- > echo "\$MSG2") > $TESTTMP/foo.log
+ > echo "\$MSG1") > $TESTTMP/foo.log
> EOF
Running a command triggers both a ui.log('command') and a
@@ -53,16 +49,13 @@
command
fooba
- fooba
logtoprocess command output:
#if no-chg
$ cat $TESTTMP/commandfinish.log | sort
- 0
canonical: foobar
commandfinish
- fooba
fooba exited 0 after * seconds (glob)
logtoprocess commandfinish output:
$ cat $TESTTMP/foo.log | sort
@@ -70,7 +63,6 @@
a message: spam
foo
logtoprocess foo output:
- spam
#endif
Confirm that logging blocked time catches stdio properly:
@@ -97,6 +89,16 @@
script will die after the timeout before we could touch the file and the
resulting file will not exist. If not, we will touch the file and see it.
+ $ cat >> fakepager.py <<EOF
+ > import sys
+ > printed = False
+ > for line in sys.stdin:
+ > sys.stdout.write(line)
+ > printed = True
+ > if not printed:
+ > sys.stdout.write('paged empty output!\n')
+ > EOF
+
$ cat > $TESTTMP/wait-output.sh << EOF
> #!/bin/sh
> for i in \`$TESTDIR/seq.py 50\`; do
@@ -115,6 +117,8 @@
> [extensions]
> logtoprocess=
> pager=
+ > [pager]
+ > pager = "$PYTHON" $TESTTMP/fakepager.py
> [logtoprocess]
> commandfinish=$TESTTMP/wait-output.sh
> EOF
--- a/tests/test-lrucachedict.py Wed Jan 09 20:00:35 2019 -0800
+++ b/tests/test-lrucachedict.py Fri Jan 18 13:28:22 2019 -0500
@@ -79,6 +79,36 @@
self.assertEqual(d.get('a'), 'va')
self.assertEqual(list(d), ['a', 'c', 'b'])
+ def testpeek(self):
+ d = util.lrucachedict(4)
+ d['a'] = 'va'
+ d['b'] = 'vb'
+ d['c'] = 'vc'
+
+ with self.assertRaises(KeyError):
+ d.peek('missing')
+ self.assertEqual(list(d), ['c', 'b', 'a'])
+ self.assertIsNone(d.peek('missing', None))
+ self.assertEqual(list(d), ['c', 'b', 'a'])
+
+ self.assertEqual(d.peek('a'), 'va')
+ self.assertEqual(list(d), ['c', 'b', 'a'])
+
+ def testpop(self):
+ d = util.lrucachedict(4)
+ d['a'] = 'va'
+ d['b'] = 'vb'
+ d['c'] = 'vc'
+
+ with self.assertRaises(KeyError):
+ d.pop('missing')
+ self.assertEqual(list(d), ['c', 'b', 'a'])
+ self.assertIsNone(d.pop('missing', None))
+ self.assertEqual(list(d), ['c', 'b', 'a'])
+
+ self.assertEqual(d.pop('b'), 'vb')
+ self.assertEqual(list(d), ['c', 'a'])
+
def testcopypartial(self):
d = util.lrucachedict(4)
d.insert('a', 'va', cost=4)
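
The new testpeek and testpop cases pin down the cache semantics: peek looks up a value without promoting it in the recency order, and pop removes an entry, honoring an optional default. A toy model of just those semantics on top of OrderedDict, iterating most- to least-recently used; this sketches the contract, not Mercurial's linked-list implementation:

    import collections

    class LRUSketch(object):
        def __init__(self, maxsize):
            self._d = collections.OrderedDict()
            self._maxsize = maxsize

        def __setitem__(self, key, value):
            self._d[key] = value
            self._d.move_to_end(key, last=False)  # newest entries first
            while len(self._d) > self._maxsize:
                self._d.popitem()                 # evict least recent

        def __iter__(self):
            return iter(self._d)

        def peek(self, key, *default):
            # Look up without promoting the entry in the recency order.
            if key in self._d:
                return self._d[key]
            if default:
                return default[0]
            raise KeyError(key)

        def pop(self, key, *default):
            # Remove and return; KeyError unless a default is supplied.
            if key in self._d or not default:
                return self._d.pop(key)
            return default[0]

    d = LRUSketch(4)
    d['a'], d['b'], d['c'] = 'va', 'vb', 'vc'
    assert d.peek('a') == 'va' and list(d) == ['c', 'b', 'a']
    assert d.pop('b') == 'vb' and list(d) == ['c', 'a']
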
--- a/tests/test-manifest.py Wed Jan 09 20:00:35 2019 -0800
+++ b/tests/test-manifest.py Fri Jan 18 13:28:22 2019 -0500
@@ -4,6 +4,7 @@
import itertools
import silenttestrunner
import unittest
+import zlib
from mercurial import (
manifest as manifestmod,
@@ -397,6 +398,29 @@
def parsemanifest(self, text):
return manifestmod.manifestdict(text)
+ def testObviouslyBogusManifest(self):
+ # This is a 163k manifest that came from oss-fuzz. It was a
+ # timeout there, but when run normally it doesn't seem to
+ # present any particular slowness.
+ data = zlib.decompress(
+ b'x\x9c\xed\xce;\n\x83\x00\x10\x04\xd0\x8deNa\x93~\xf1\x03\xc9q\xf4'
+ b'\x14\xeaU\xbdB\xda\xd4\xe6Cj\xc1FA\xde+\x86\xe9f\xa2\xfci\xbb\xfb'
+ b'\xa3\xef\xea\xba\xca\x7fk\x86q\x9a\xc6\xc8\xcc&\xb3\xcf\xf8\xb8|#'
+ b'\x8a9\x00\xd8\xe6v\xf4\x01N\xe1\n\x00\x00\x00\x00\x00\x00\x00\x00'
+ b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
+ b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
+ b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
+ b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
+ b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
+ b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
+ b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
+ b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
+ b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
+ b'\x00\x00\xc0\x8aey\x1d}\x01\xd8\xe0\xb9\xf3\xde\x1b\xcf\x17'
+ b'\xac\xbe')
+ with self.assertRaises(ValueError):
+ self.parsemanifest(data)
+
class testtreemanifest(unittest.TestCase, basemanifesttests):
def parsemanifest(self, text):
return manifestmod.treemanifest(b'', text)
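
Shipping the oss-fuzz artifact zlib-compressed keeps a 163k input down to a short literal in the test source. The round-trip pattern, as a standalone sketch with a stand-in blob:

    import zlib

    artifact = b'\x00' * 163000           # stand-in for the real fuzz input
    compressed = zlib.compress(artifact, 9)
    # The test pastes the compressed literal into the source and
    # decompresses it at runtime before feeding the manifest parser.
    assert zlib.decompress(compressed) == artifact
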
--- a/tests/test-merge-subrepos.t Wed Jan 09 20:00:35 2019 -0800
+++ b/tests/test-merge-subrepos.t Fri Jan 18 13:28:22 2019 -0500
@@ -57,7 +57,8 @@
A deleted subrepo file is flagged as dirty, like the top level repo
- $ hg id --config extensions.blackbox= --config blackbox.dirty=True
+ $ hg id --config extensions.blackbox= --config blackbox.dirty=True \
+ > --config blackbox.track='command commandfinish'
9bfe45a197d7+ tip
$ cat .hg/blackbox.log
* @9bfe45a197d7b0ab09bf287729dd57e9619c9da5+ (*)> serve --cmdserver chgunix * (glob) (chg !)
--- a/tests/test-merge-tools.t Wed Jan 09 20:00:35 2019 -0800
+++ b/tests/test-merge-tools.t Fri Jan 18 13:28:22 2019 -0500
@@ -1,6 +1,10 @@
test merge-tools configuration - mostly exercising filemerge.py
$ unset HGMERGE # make sure HGMERGE doesn't interfere with the test
+ $ cat >> $HGRCPATH << EOF
+ > [ui]
+ > merge=
+ > EOF
$ hg init repo
$ cd repo
@@ -1942,6 +1946,26 @@
0000: 00 01 02 03 |....|
$ hg merge --abort -q
+Check that the extra information is printed correctly
+
+ $ hg merge 9 \
+ > --config merge-tools.testecho.executable='echo' \
+ > --config merge-tools.testecho.args='merge runs here ...' \
+ > --config merge-tools.testecho.binary=True \
+ > --config ui.merge=testecho \
+ > --config ui.pre-merge-tool-output-template='\n{label("extmerge.running_merge_tool", "Running merge tool for {path} ({toolpath}):")}\n{separate("\n", extmerge_section(local), extmerge_section(base), extmerge_section(other))}\n' \
+ > --config 'templatealias.extmerge_section(sect)="- {pad("{sect.name} ({sect.label})", 20, left=True)}: {revset(sect.node)%"{rev}:{shortest(node,8)} {desc|firstline} {separate(" ", tags, bookmarks, branch)}"}"'
+ merging b
+
+ Running merge tool for b ("*/bin/echo.exe"): (glob) (windows !)
+ Running merge tool for b (*/bin/echo): (glob) (no-windows !)
+ - local (working copy): 10:2d1f533d add binary file (#2) tip default
+ - base (base): -1:00000000 default
+ - other (merge rev): 9:1e7ad7d7 add binary file (#1) default
+ merge runs here ...
+ 0 files updated, 1 files merged, 0 files removed, 0 files unresolved
+ (branch merge, don't forget to commit)
+
Check that debugpicktool examines which merge tool is chosen for
specified file as expected
--- a/tests/test-merge-types.t Wed Jan 09 20:00:35 2019 -0800
+++ b/tests/test-merge-types.t Fri Jan 18 13:28:22 2019 -0500
@@ -36,11 +36,11 @@
ancestor: c334dc3be0da, local: 521a1e40188f+, remote: 3574f3e69b1c
preserving a for resolve of a
a: versions differ -> m (premerge)
- picked tool ':merge' for a (binary False symlink True changedelete False)
- merging a
- my a@521a1e40188f+ other a@3574f3e69b1c ancestor a@c334dc3be0da
- warning: internal :merge cannot merge symlinks for a
- warning: conflicts while merging a! (edit, then use 'hg resolve --mark')
+ tool internal:merge (for pattern a) can't handle symlinks
+ couldn't find merge tool hgmerge
+ no tool found to merge a
+ picked tool ':prompt' for a (binary False symlink True changedelete False)
+ keep (l)ocal [working copy], take (o)ther [merge rev], or leave (u)nresolved for a? u
0 files updated, 0 files merged, 0 files removed, 1 files unresolved
use 'hg resolve' to retry unresolved file merges or 'hg merge --abort' to abandon
[1]
@@ -164,7 +164,7 @@
$ hg up -Cq 0
$ echo data > a
- $ HGMERGE= hg up -y --debug
+ $ HGMERGE= hg up -y --debug --config ui.merge=
searching for copies back to rev 2
resolving manifests
branchmerge: False, force: False, partial: False
@@ -207,9 +207,9 @@
$ ln -s base f
$ hg ci -qm2
$ hg merge
- merging f
- warning: internal :merge cannot merge symlinks for f
- warning: conflicts while merging f! (edit, then use 'hg resolve --mark')
+ tool internal:merge (for pattern f) can't handle symlinks
+ no tool found to merge f
+ keep (l)ocal [working copy], take (o)ther [merge rev], or leave (u)nresolved for f? u
0 files updated, 0 files merged, 0 files removed, 1 files unresolved
use 'hg resolve' to retry unresolved file merges or 'hg merge --abort' to abandon
[1]
@@ -219,9 +219,9 @@
$ hg up -Cqr1
$ hg merge
- merging f
- warning: internal :merge cannot merge symlinks for f
- warning: conflicts while merging f! (edit, then use 'hg resolve --mark')
+ tool internal:merge (for pattern f) can't handle symlinks
+ no tool found to merge f
+ keep (l)ocal [working copy], take (o)ther [merge rev], or leave (u)nresolved for f? u
0 files updated, 0 files merged, 0 files removed, 1 files unresolved
use 'hg resolve' to retry unresolved file merges or 'hg merge --abort' to abandon
[1]
@@ -246,9 +246,9 @@
$ ln -s dangling f
$ hg ci -qm2
$ hg merge
- merging f
- warning: internal :merge cannot merge symlinks for f
- warning: conflicts while merging f! (edit, then use 'hg resolve --mark')
+ tool internal:merge (for pattern f) can't handle symlinks
+ no tool found to merge f
+ keep (l)ocal [working copy], take (o)ther [merge rev], or leave (u)nresolved for f? u
0 files updated, 0 files merged, 0 files removed, 1 files unresolved
use 'hg resolve' to retry unresolved file merges or 'hg merge --abort' to abandon
[1]
@@ -258,9 +258,9 @@
$ hg up -Cqr1
$ hg merge
- merging f
- warning: internal :merge cannot merge symlinks for f
- warning: conflicts while merging f! (edit, then use 'hg resolve --mark')
+ tool internal:merge (for pattern f) can't handle symlinks
+ no tool found to merge f
+ keep (l)ocal [working copy], take (o)ther [merge rev], or leave (u)nresolved for f? u
0 files updated, 0 files merged, 0 files removed, 1 files unresolved
use 'hg resolve' to retry unresolved file merges or 'hg merge --abort' to abandon
[1]
@@ -341,15 +341,15 @@
merging b
merging bx
warning: cannot merge flags for c without common ancestor - keeping local flags
- merging d
- warning: internal :merge cannot merge symlinks for d
- warning: conflicts while merging d! (edit, then use 'hg resolve --mark')
- merging f
- warning: internal :merge cannot merge symlinks for f
- warning: conflicts while merging f! (edit, then use 'hg resolve --mark')
- merging h
- warning: internal :merge cannot merge symlinks for h
- warning: conflicts while merging h! (edit, then use 'hg resolve --mark')
+ tool internal:merge (for pattern d) can't handle symlinks
+ no tool found to merge d
+ keep (l)ocal [working copy], take (o)ther [merge rev], or leave (u)nresolved for d? u
+ tool internal:merge (for pattern f) can't handle symlinks
+ no tool found to merge f
+ keep (l)ocal [working copy], take (o)ther [merge rev], or leave (u)nresolved for f? u
+ tool internal:merge (for pattern h) can't handle symlinks
+ no tool found to merge h
+ keep (l)ocal [working copy], take (o)ther [merge rev], or leave (u)nresolved for h? u
warning: conflicts while merging a! (edit, then use 'hg resolve --mark')
warning: conflicts while merging b! (edit, then use 'hg resolve --mark')
warning: conflicts while merging bx! (edit, then use 'hg resolve --mark')
@@ -403,15 +403,15 @@
merging b
merging bx
warning: cannot merge flags for c without common ancestor - keeping local flags
- merging d
- warning: internal :merge cannot merge symlinks for d
- warning: conflicts while merging d! (edit, then use 'hg resolve --mark')
- merging f
- warning: internal :merge cannot merge symlinks for f
- warning: conflicts while merging f! (edit, then use 'hg resolve --mark')
- merging h
- warning: internal :merge cannot merge symlinks for h
- warning: conflicts while merging h! (edit, then use 'hg resolve --mark')
+ tool internal:merge (for pattern d) can't handle symlinks
+ no tool found to merge d
+ keep (l)ocal [working copy], take (o)ther [merge rev], or leave (u)nresolved for d? u
+ tool internal:merge (for pattern f) can't handle symlinks
+ no tool found to merge f
+ keep (l)ocal [working copy], take (o)ther [merge rev], or leave (u)nresolved for f? u
+ tool internal:merge (for pattern h) can't handle symlinks
+ no tool found to merge h
+ keep (l)ocal [working copy], take (o)ther [merge rev], or leave (u)nresolved for h? u
warning: conflicts while merging a! (edit, then use 'hg resolve --mark')
warning: conflicts while merging b! (edit, then use 'hg resolve --mark')
warning: conflicts while merging bx! (edit, then use 'hg resolve --mark')
--- a/tests/test-merge1.t Wed Jan 09 20:00:35 2019 -0800
+++ b/tests/test-merge1.t Fri Jan 18 13:28:22 2019 -0500
@@ -11,7 +11,7 @@
>
> print("merging for", os.path.basename(sys.argv[1]))
> EOF
- $ HGMERGE="$PYTHON ../merge"; export HGMERGE
+ $ HGMERGE="\"$PYTHON\" ../merge"; export HGMERGE
$ hg init t
$ cd t
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/tests/test-missing-capability.t Fri Jan 18 13:28:22 2019 -0500
@@ -0,0 +1,46 @@
+Checking how hg behaves when one side of a pull/push doesn't support
+some capability (because it's running an older hg version, usually).
+
+ $ hg init repo1
+ $ cd repo1
+ $ echo a > a; hg add -q a; hg commit -q -m a
+ $ hg bookmark a
+ $ hg clone -q . ../repo2
+ $ cd ../repo2
+
+ $ touch $TESTTMP/disable-lookup.py
+ $ disable_cap() {
+ > rm -f $TESTTMP/disable-lookup.pyc # pyc caching is buggy
+ > cat <<EOF > $TESTTMP/disable-lookup.py
+ > from mercurial import extensions, wireprotov1server
+ > def wcapabilities(orig, *args, **kwargs):
+ > cap = orig(*args, **kwargs)
+ > cap.remove('$1')
+ > return cap
+ > extensions.wrapfunction(wireprotov1server, '_capabilities', wcapabilities)
+ > EOF
+ > }
+ $ cat >> ../repo1/.hg/hgrc <<EOF
+ > [extensions]
+ > disable-lookup = $TESTTMP/disable-lookup.py
+ > EOF
+ $ cat >> .hg/hgrc <<EOF
+ > [ui]
+ > ssh = "$PYTHON" "$TESTDIR/dummyssh"
+ > EOF
+
+ $ hg pull ssh://user@dummy/repo1 -r tip -B a
+ pulling from ssh://user@dummy/repo1
+ no changes found
+
+ $ disable_cap lookup
+ $ hg pull ssh://user@dummy/repo1 -r tip -B a
+ pulling from ssh://user@dummy/repo1
+ abort: other repository doesn't support revision lookup, so a rev cannot be specified.
+ [255]
+
+ $ disable_cap pushkey
+ $ hg pull ssh://user@dummy/repo1 -r tip -B a
+ pulling from ssh://user@dummy/repo1
+ abort: remote bookmark a not found!
+ [255]
--- a/tests/test-narrow-clone-no-ellipsis.t Wed Jan 09 20:00:35 2019 -0800
+++ b/tests/test-narrow-clone-no-ellipsis.t Fri Jan 18 13:28:22 2019 -0500
@@ -27,6 +27,7 @@
fncache
narrowhg-experimental
revlogv1
+ sparserevlog
store
testonly-simplestore (reposimplestore !)
--- a/tests/test-narrow-clone-non-narrow-server.t Wed Jan 09 20:00:35 2019 -0800
+++ b/tests/test-narrow-clone-non-narrow-server.t Fri Jan 18 13:28:22 2019 -0500
@@ -32,7 +32,6 @@
> EOF
$ echo hello | hg -R . serve --stdio | \
> "$PYTHON" unquote.py | tr ' ' '\n' | grep narrow
- narrow=v0
exp-narrow-1
$ cd ..
@@ -58,7 +57,11 @@
comparing with http://localhost:$HGPORT1/
searching for changes
looking for local changes to affected paths
+
$ hg tracked --addinclude f1 http://localhost:$HGPORT1/
+ nothing to widen or narrow
+
+ $ hg tracked --addinclude f9 http://localhost:$HGPORT1/
comparing with http://localhost:$HGPORT1/
abort: server does not support narrow clones
[255]
--- a/tests/test-narrow-clone-stream.t Wed Jan 09 20:00:35 2019 -0800
+++ b/tests/test-narrow-clone-stream.t Fri Jan 18 13:28:22 2019 -0500
@@ -1,7 +1,23 @@
+#testcases tree flat-fncache flat-nofncache
+
Tests narrow stream clones
$ . "$TESTDIR/narrow-library.sh"
+#if tree
+ $ cat << EOF >> $HGRCPATH
+ > [experimental]
+ > treemanifest = 1
+ > EOF
+#endif
+
+#if flat-nofncache
+ $ cat << EOF >> $HGRCPATH
+ > [format]
+ > usefncache = 0
+ > EOF
+#endif
+
Server setup
$ hg init master
@@ -9,17 +25,17 @@
$ mkdir dir
$ mkdir dir/src
$ cd dir/src
- $ for x in `$TESTDIR/seq.py 20`; do echo $x > "f$x"; hg add "f$x"; hg commit -m "Commit src $x"; done
+ $ for x in `$TESTDIR/seq.py 20`; do echo $x > "F$x"; hg add "F$x"; hg commit -m "Commit src $x"; done
$ cd ..
$ mkdir tests
$ cd tests
- $ for x in `$TESTDIR/seq.py 20`; do echo $x > "f$x"; hg add "f$x"; hg commit -m "Commit src $x"; done
+ $ for x in `$TESTDIR/seq.py 20`; do echo $x > "F$x"; hg add "F$x"; hg commit -m "Commit src $x"; done
$ cd ../../..
Trying to stream clone when the server does not support it
- $ hg clone --narrow ssh://user@dummy/master narrow --noupdate --include "dir/src/f10" --stream
+ $ hg clone --narrow ssh://user@dummy/master narrow --noupdate --include "dir/src/F10" --stream
streaming all changes
remote: abort: server does not support narrow stream clones
abort: pull failed on remote
@@ -27,13 +43,53 @@
Enable stream clone on the server
- $ echo "[server]" >> master/.hg/hgrc
- $ echo "stream-narrow-clones=True" >> master/.hg/hgrc
+ $ echo "[experimental]" >> master/.hg/hgrc
+ $ echo "server.stream-narrow-clones=True" >> master/.hg/hgrc
Cloning a specific file when stream clone is supported
- $ hg clone --narrow ssh://user@dummy/master narrow --noupdate --include "dir/src/f10" --stream
+ $ hg clone --narrow ssh://user@dummy/master narrow --noupdate --include "dir/src/F10" --stream
streaming all changes
- remote: abort: server does not support narrow stream clones
- abort: pull failed on remote
- [255]
+ * files to transfer, * KB of data (glob)
+ transferred * KB in * seconds (* */sec) (glob)
+
+ $ cd narrow
+ $ ls
+ $ hg tracked
+ I path:dir/src/F10
+
+Making sure we have the correct set of requirements
+
+ $ cat .hg/requires
+ dotencode (tree flat-fncache !)
+ fncache (tree flat-fncache !)
+ generaldelta
+ narrowhg-experimental
+ revlogv1
+ sparserevlog
+ store
+ treemanifest (tree !)
+
+Making sure the store has the required files
+
+ $ ls .hg/store/
+ 00changelog.i
+ 00manifest.i
+ data (tree flat-fncache !)
+ fncache (tree flat-fncache !)
+ meta (tree !)
+ narrowspec
+ undo
+ undo.backupfiles
+ undo.narrowspec
+ undo.phaseroots
+
+Checking that the repository has all the required data and is not broken
+
+ $ hg verify
+ checking changesets
+ checking manifests
+ checking directory manifests (tree !)
+ crosschecking files in changesets and manifests
+ checking files
+ checked 40 changesets with 1 changes to 1 files
--- a/tests/test-narrow-clone.t Wed Jan 09 20:00:35 2019 -0800
+++ b/tests/test-narrow-clone.t Fri Jan 18 13:28:22 2019 -0500
@@ -43,6 +43,7 @@
fncache
narrowhg-experimental
revlogv1
+ sparserevlog
store
testonly-simplestore (reposimplestore !)
--- a/tests/test-narrow-commit.t Wed Jan 09 20:00:35 2019 -0800
+++ b/tests/test-narrow-commit.t Fri Jan 18 13:28:22 2019 -0500
@@ -51,7 +51,7 @@
$ touch outside/f1
$ hg debugwalk -v -I 'relglob:f1'
* matcher:
- <includematcher includes='(?:(?:|.*/)f1(?:/|$))'>
+ <includematcher includes='(?:|.*/)f1(?:/|$)'>
f inside/f1 inside/f1
$ hg add .
$ hg add outside/f1
--- a/tests/test-narrow-debugcommands.t Wed Jan 09 20:00:35 2019 -0800
+++ b/tests/test-narrow-debugcommands.t Fri Jan 18 13:28:22 2019 -0500
@@ -6,6 +6,7 @@
> path:foo
> [exclude]
> EOF
+ $ cp .hg/store/narrowspec .hg/narrowspec.dirstate
$ echo treemanifest >> .hg/requires
$ echo narrowhg-experimental >> .hg/requires
$ mkdir -p foo/bar
--- a/tests/test-narrow-exchange.t Wed Jan 09 20:00:35 2019 -0800
+++ b/tests/test-narrow-exchange.t Fri Jan 18 13:28:22 2019 -0500
@@ -1,3 +1,11 @@
+#testcases lfs-on lfs-off
+
+#if lfs-on
+ $ cat >> $HGRCPATH <<EOF
+ > [extensions]
+ > lfs =
+ > EOF
+#endif
$ . "$TESTDIR/narrow-library.sh"
@@ -201,10 +209,17 @@
(run 'hg update' to get a working copy)
TODO: this should tell the user that their narrow clone does not have the
necessary content to be able to push to the target
- $ hg push ssh://user@dummy/narrow2
+
+TODO: lfs shouldn't abort like this
+ $ hg push ssh://user@dummy/narrow2 || true
pushing to ssh://user@dummy/narrow2
searching for changes
remote: adding changesets
remote: adding manifests
remote: adding file changes
remote: added 1 changesets with 0 changes to 0 files
+ remote: error: pretxnchangegroup.lfs hook raised an exception: data/inside2/f.i@f59b4e021835: no match found (lfs-on !)
+ remote: transaction abort! (lfs-on !)
+ remote: rollback completed (lfs-on !)
+ remote: abort: data/inside2/f.i@f59b4e021835: no match found! (lfs-on !)
+ abort: stream ended unexpectedly (got 0 bytes, expected 4) (lfs-on !)
--- a/tests/test-narrow-expanddirstate.t Wed Jan 09 20:00:35 2019 -0800
+++ b/tests/test-narrow-expanddirstate.t Fri Jan 18 13:28:22 2019 -0500
@@ -56,9 +56,13 @@
> from mercurial import patch
> from mercurial import util as hgutil
>
+ > narrowspecexpanded = False
> def expandnarrowspec(ui, repo, newincludes=None):
> if not newincludes:
> return
+ > if getattr(repo, '_narrowspecexpanded', False):
+ > return
+ > repo._narrowspecexpanded = True
> import sys
> newincludes = set([newincludes])
> includes, excludes = repo.narrowpats
@@ -67,29 +71,20 @@
> if not repo.currenttransaction():
> ui.develwarn(b'expandnarrowspec called outside of transaction!')
> repo.setnarrowpats(includes, excludes)
+ > narrowspec.copytoworkingcopy(repo)
> newmatcher = narrowspec.match(repo.root, includes, excludes)
> added = matchmod.differencematcher(newmatcher, currentmatcher)
> for f in repo[b'.'].manifest().walk(added):
> repo.dirstate.normallookup(f)
>
- > def wrapds(ui, repo, ds):
- > class expandingdirstate(ds.__class__):
- > @hgutil.propertycache
- > def _map(self):
- > ret = super(expandingdirstate, self)._map
+ > def reposetup(ui, repo):
+ > class expandingrepo(repo.__class__):
+ > def narrowmatch(self, *args, **kwargs):
> with repo.wlock(), repo.lock(), repo.transaction(
> b'expandnarrowspec'):
> expandnarrowspec(ui, repo,
> encoding.environ.get(b'DIRSTATEINCLUDES'))
- > return ret
- > ds.__class__ = expandingdirstate
- > return ds
- >
- > def reposetup(ui, repo):
- > class expandingrepo(repo.__class__):
- > def _makedirstate(self):
- > dirstate = super(expandingrepo, self)._makedirstate()
- > return wrapds(ui, repo, dirstate)
+ > return super(expandingrepo, self).narrowmatch(*args, **kwargs)
> repo.__class__ = expandingrepo
>
> def extsetup(unused_ui):
--- a/tests/test-narrow-pull.t Wed Jan 09 20:00:35 2019 -0800
+++ b/tests/test-narrow-pull.t Fri Jan 18 13:28:22 2019 -0500
@@ -150,25 +150,3 @@
rollback completed
abort: pull failed on remote
[255]
-
-Can use 'hg share':
- $ cat >> $HGRCPATH <<EOF
- > [extensions]
- > share=
- > EOF
-
- $ cd ..
- $ hg share narrow2 narrow2-share
- updating working directory
- 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
- $ cd narrow2-share
- $ hg status
-
-We should also be able to unshare without breaking everything:
- $ hg unshare
- $ hg verify
- checking changesets
- checking manifests
- crosschecking files in changesets and manifests
- checking files
- checked 1 changesets with 1 changes to 1 files
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/tests/test-narrow-share.t Fri Jan 18 13:28:22 2019 -0500
@@ -0,0 +1,178 @@
+#testcases flat tree
+
+ $ . "$TESTDIR/narrow-library.sh"
+
+#if tree
+ $ cat << EOF >> $HGRCPATH
+ > [experimental]
+ > treemanifest = 1
+ > EOF
+#endif
+
+ $ cat << EOF >> $HGRCPATH
+ > [extensions]
+ > share =
+ > EOF
+
+ $ hg init remote
+ $ cd remote
+ $ for x in `$TESTDIR/seq.py 0 10`
+ > do
+ > mkdir d$x
+ > echo $x > d$x/f
+ > hg add d$x/f
+ > hg commit -m "add d$x/f"
+ > done
+ $ cd ..
+
+ $ hg clone --narrow ssh://user@dummy/remote main -q \
+ > --include d1 --include d3 --include d5 --include d7
+
+ $ hg share main share
+ updating working directory
+ 4 files updated, 0 files merged, 0 files removed, 0 files unresolved
+ $ hg -R share tracked
+ I path:d1
+ I path:d3
+ I path:d5
+ I path:d7
+ $ hg -R share files
+ share/d1/f
+ share/d3/f
+ share/d5/f
+ share/d7/f
+
+Narrow the share and check that the main repo's working copy gets updated
+
+# Make sure the files that are supposed to be known-clean get their timestamps set in the dirstate
+ $ sleep 2
+ $ hg -R main st
+ $ hg -R main debugdirstate --no-dates
+ n 644 2 set d1/f
+ n 644 2 set d3/f
+ n 644 2 set d5/f
+ n 644 2 set d7/f
+# Make d3/f dirty
+ $ echo x >> main/d3/f
+ $ echo y >> main/d3/g
+ $ hg add main/d3/g
+ $ hg -R main st
+ M d3/f
+ A d3/g
+# Make d5/f not match the dirstate timestamp even though it's clean
+ $ sleep 2
+ $ hg -R main st
+ M d3/f
+ A d3/g
+ $ hg -R main debugdirstate --no-dates
+ n 644 2 set d1/f
+ n 644 2 set d3/f
+ a 0 -1 unset d3/g
+ n 644 2 set d5/f
+ n 644 2 set d7/f
+ $ touch main/d5/f
+ $ hg -R share tracked --removeinclude d1 --removeinclude d3 --removeinclude d5
+ comparing with ssh://user@dummy/remote
+ searching for changes
+ looking for local changes to affected paths
+ deleting data/d1/f.i
+ deleting data/d3/f.i
+ deleting data/d5/f.i
+ deleting meta/d1/00manifest.i (tree !)
+ deleting meta/d3/00manifest.i (tree !)
+ deleting meta/d5/00manifest.i (tree !)
+ $ hg -R main tracked
+ I path:d7
+ $ hg -R main files
+ abort: working copy's narrowspec is stale
+ (run 'hg tracked --update-working-copy')
+ [255]
+ $ hg -R main tracked --update-working-copy
+ not deleting possibly dirty file d3/f
+ not deleting possibly dirty file d3/g
+ not deleting possibly dirty file d5/f
+# d1/f, d3/f, d3/g and d5/f should no longer be reported
+ $ hg -R main files
+ main/d7/f
+# d1/f should no longer be there; d3/f should remain since it was dirty,
+# d3/g since it was added, and d5/f since we couldn't be sure it was clean
+ $ find main/d* -type f | sort
+ main/d3/f
+ main/d3/g
+ main/d5/f
+ main/d7/f
+
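The "not deleting possibly dirty file" lines above come from a deliberately conservative check: a file is removed from disk only when the dirstate can prove it is clean. A minimal sketch of that decision, assuming dirstate entries carry the state/mode/size/mtime tuple shown by `hg debugdirstate` (the helper name is illustrative, not the hg API):

    def can_delete_on_narrow(entry, st):
        # entry mirrors a 'hg debugdirstate' line: (state, mode, size, mtime),
        # with mtime=None standing in for 'unset'; st is an os.stat_result
        state, mode, size, mtime = entry
        if state != 'n' or size < 0 or mtime is None:
            # added files ('a') and entries without a recorded size/time
            # are possibly dirty, so they are kept, as seen above
            return False
        # any size or mtime mismatch (append, touch, ...) also keeps the file
        return size == st.st_size and mtime == int(st.st_mtime)

This is why the touched-but-clean d5/f survives: its content matches, but its mtime no longer does, and the check errs on the side of keeping the file.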
+Widen the share and check that the main repo's working copy gets updated
+
+ $ hg -R share tracked --addinclude d1 --addinclude d3 -q
+ $ hg -R share tracked
+ I path:d1
+ I path:d3
+ I path:d7
+ $ hg -R share files
+ share/d1/f
+ share/d3/f
+ share/d7/f
+ $ hg -R main tracked
+ I path:d1
+ I path:d3
+ I path:d7
+ $ hg -R main files
+ abort: working copy's narrowspec is stale
+ (run 'hg tracked --update-working-copy')
+ [255]
+ $ hg -R main tracked --update-working-copy
+# d1/f, d3/f should be back
+ $ hg -R main files
+ main/d1/f
+ main/d3/f
+ main/d7/f
+# d3/f should be modified (not clobbered by the widening), and d3/g should be untracked
+ $ hg -R main st --all
+ M d3/f
+ ? d3/g
+ C d1/f
+ C d7/f
+
+We should also be able to unshare without breaking everything:
+
+ $ hg share main share-unshare
+ updating working directory
+ 3 files updated, 0 files merged, 0 files removed, 0 files unresolved
+ $ cd share-unshare
+ $ hg unshare
+ $ hg verify
+ checking changesets
+ checking manifests
+ checking directory manifests (tree !)
+ crosschecking files in changesets and manifests
+ checking files
+ checked 11 changesets with 3 changes to 3 files
+ $ cd ..
+
+Dirstate should be left alone when upgrading from a version of hg that didn't support narrow+share
+
+ $ hg share main share-upgrade
+ updating working directory
+ 3 files updated, 0 files merged, 0 files removed, 0 files unresolved
+ $ cd share-upgrade
+ $ echo x >> d1/f
+ $ echo y >> d3/g
+ $ hg add d3/g
+ $ hg rm d7/f
+ $ hg st
+ M d1/f
+ A d3/g
+ R d7/f
+Make it look like a repo from before narrow+share was supported
+ $ rm .hg/narrowspec.dirstate
+ $ hg st
+ abort: working copy's narrowspec is stale
+ (run 'hg tracked --update-working-copy')
+ [255]
+ $ hg tracked --update-working-copy
+ $ hg st
+ M d1/f
+ A d3/g
+ R d7/f
+ $ cd ..
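The staleness machinery exercised in this file reduces to comparing the store's narrowspec against the copy last synced into the working copy, which is exactly why deleting `.hg/narrowspec.dirstate` above makes every command abort until `hg tracked --update-working-copy` recreates it. A sketch in terms of the two files this test manipulates (an assumption about the on-disk layout, not the actual hg internals):

    import os

    def narrowspec_is_stale(hgdir):
        def read(name):
            path = os.path.join(hgdir, name)
            if not os.path.exists(path):
                return None
            with open(path, 'rb') as f:
                return f.read()
        store = read('narrowspec')           # what the store tracks
        wcopy = read('narrowspec.dirstate')  # what the dirstate was built for
        # a missing working-copy spec (pre-narrow+share repos, or the
        # 'rm' above) is treated the same as a mismatch
        return wcopy is None or wcopy != store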
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/tests/test-narrow-sparse.t Fri Jan 18 13:28:22 2019 -0500
@@ -0,0 +1,69 @@
+Testing interaction of sparse and narrow when both are enabled on the client
+side and we do a non-ellipsis clone
+
+#testcases tree flat
+ $ . "$TESTDIR/narrow-library.sh"
+ $ cat << EOF >> $HGRCPATH
+ > [extensions]
+ > sparse =
+ > EOF
+
+#if tree
+ $ cat << EOF >> $HGRCPATH
+ > [experimental]
+ > treemanifest = 1
+ > EOF
+#endif
+
+ $ hg init master
+ $ cd master
+
+ $ mkdir inside
+ $ echo 'inside' > inside/f
+ $ hg add inside/f
+ $ hg commit -m 'add inside'
+
+ $ mkdir widest
+ $ echo 'widest' > widest/f
+ $ hg add widest/f
+ $ hg commit -m 'add widest'
+
+ $ mkdir outside
+ $ echo 'outside' > outside/f
+ $ hg add outside/f
+ $ hg commit -m 'add outside'
+
+ $ cd ..
+
+narrow clone the inside file
+
+ $ hg clone --narrow ssh://user@dummy/master narrow --include inside/f
+ requesting all changes
+ adding changesets
+ adding manifests
+ adding file changes
+ added 3 changesets with 1 changes to 1 files
+ new changesets *:* (glob)
+ updating to branch default
+ 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
+ $ cd narrow
+ $ hg tracked
+ I path:inside/f
+ $ hg files
+ inside/f
+
+XXX: we should have a flag in `hg debugsparse` to list the sparse profile
+ $ test -f .hg/sparse
+ [1]
+
+ $ cat .hg/requires
+ dotencode
+ fncache
+ generaldelta
+ narrowhg-experimental
+ revlogv1
+ sparserevlog
+ store
+ treemanifest (tree !)
+
+ $ hg debugrebuilddirstate
--- a/tests/test-narrow-trackedcmd.t Wed Jan 09 20:00:35 2019 -0800
+++ b/tests/test-narrow-trackedcmd.t Fri Jan 18 13:28:22 2019 -0500
@@ -107,6 +107,8 @@
--clear whether to replace the existing narrowspec
--force-delete-local-changes forces deletion of local changes when
narrowing
+ --update-working-copy update working copy when the store has
+ changed
-e --ssh CMD specify ssh command to use
--remotecmd CMD specify hg command to run on the remote side
--insecure do not verify server certificate (ignoring
@@ -196,8 +198,8 @@
X path:widest
$ hg tracked --import-rules specs --clear
- The --clear option is not yet supported.
- [1]
+ abort: the --clear option is not yet supported
+ [255]
Testing with passing an out-of-wdir file
--- a/tests/test-narrow-widen-no-ellipsis.t Wed Jan 09 20:00:35 2019 -0800
+++ b/tests/test-narrow-widen-no-ellipsis.t Fri Jan 18 13:28:22 2019 -0500
@@ -128,7 +128,7 @@
added 0 changesets with 1 changes to 1 files
bundle2-input-part: total payload size * (glob)
bundle2-input-bundle: 0 parts total
- widest/f: add from widened narrow clone -> g
+ widest/f: narrowspec updated -> g
getting widest/f
$ hg tracked
I path:inside
@@ -140,6 +140,12 @@
$ hg id -n
2
+Test that extending already included files should not call narrow_widen
+wireprotocol command
+
+ $ hg tracked --addinclude widest/f
+ nothing to widen or narrow
+
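Conceptually the early exit above is just a set comparison performed before any narrow_widen wire call: if the requested includes and excludes leave the narrowspec unchanged, there is nothing to send. An illustrative sketch (not the extension's actual code):

    def widen_or_narrow_needed(oldinc, oldexc, newinc, newexc):
        # adding a path the includes already contain yields identical
        # sets, so the command prints 'nothing to widen or narrow'
        # and never contacts the server
        return set(newinc) != set(oldinc) or set(newexc) != set(oldexc)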
Pull down the newly added upstream revision.
$ hg pull
--- a/tests/test-narrow.t Wed Jan 09 20:00:35 2019 -0800
+++ b/tests/test-narrow.t Fri Jan 18 13:28:22 2019 -0500
@@ -214,6 +214,44 @@
000000000000
$ cd ..
+Narrowing doesn't resurrect old commits (unlike what regular `hg strip` does)
+ $ hg clone --narrow ssh://user@dummy/master narrow-obsmarkers --include d0 --include d3 -q
+ $ cd narrow-obsmarkers
+ $ echo a >> d0/f2
+ $ hg add d0/f2
+ $ hg ci -m 'modify d0/'
+ $ echo a >> d3/f2
+ $ hg add d3/f2
+ $ hg commit --amend -m 'modify d0/ and d3/'
+ $ hg log -T "{rev}: {desc}\n"
+ 5: modify d0/ and d3/
+ 3: add d10/f
+ 2: add d3/f
+ 1: add d2/f
+ 0: add d0/f
+ $ hg tracked --removeinclude d3 --force-delete-local-changes -q
+ $ hg log -T "{rev}: {desc}\n"
+ 3: add d10/f
+ 2: add d3/f
+ 1: add d2/f
+ 0: add d0/f
+ $ cd ..
+
+Widening doesn't lose bookmarks
+ $ hg clone --narrow ssh://user@dummy/master widen-bookmarks --include d0 -q
+ $ cd widen-bookmarks
+ $ hg bookmark my-bookmark
+ $ hg log -T "{rev}: {desc} {bookmarks}\n"
+ 1: add d10/f my-bookmark
+ 0: add d0/f
+ $ hg tracked --addinclude d3 -q
+ $ hg log -T "{rev}: {desc} {bookmarks}\n"
+ 3: add d10/f my-bookmark
+ 2: add d3/f
+ 1: add d2/f
+ 0: add d0/f
+ $ cd ..
+
Can remove last include, making repo empty
$ hg clone --narrow ssh://user@dummy/master narrow-empty --include d0 -r 5
adding changesets
--- a/tests/test-nointerrupt.t Wed Jan 09 20:00:35 2019 -0800
+++ b/tests/test-nointerrupt.t Fri Jan 18 13:28:22 2019 -0500
@@ -13,7 +13,7 @@
>
> @command(b'sleep', [], _(b'TIME'), norepo=True)
> def sleep(ui, sleeptime=b"1", **opts):
- > with ui.uninterruptable():
+ > with ui.uninterruptible():
> for _i in itertools.repeat(None, int(sleeptime)):
> time.sleep(1)
> ui.warn(b"end of unsafe operation\n")
--- a/tests/test-obsmarker-template.t Wed Jan 09 20:00:35 2019 -0800
+++ b/tests/test-obsmarker-template.t Fri Jan 18 13:28:22 2019 -0500
@@ -2501,14 +2501,14 @@
$ hg up 'desc("A0")' --hidden
0 files updated, 0 files merged, 0 files removed, 0 files unresolved
updated to hidden changeset 471597cad322
- (hidden revision '471597cad322' is pruned)
+ (hidden revision '471597cad322' was rewritten as: 617adc3a144c)
# todo: the obsfate output is not ideal
$ hg fatelog
o 617adc3a144c
|
| @ 471597cad322
- |/ Obsfate: pruned;
+ |/ Obsfate: rewritten as 2:617adc3a144c by test (at 1970-01-01 00:00 +0000);
o ea207398892e
$ hg up -r 'desc("A2")' --hidden
--- a/tests/test-obsolete-tag-cache.t Wed Jan 09 20:00:35 2019 -0800
+++ b/tests/test-obsolete-tag-cache.t Fri Jan 18 13:28:22 2019 -0500
@@ -4,6 +4,9 @@
> rebase=
> mock=$TESTDIR/mockblackbox.py
>
+ > [blackbox]
+ > track = command, commandfinish, tagscache
+ >
> [experimental]
> evolution.createmarkers=True
> EOF
--- a/tests/test-obsolete.t Wed Jan 09 20:00:35 2019 -0800
+++ b/tests/test-obsolete.t Fri Jan 18 13:28:22 2019 -0500
@@ -935,21 +935,21 @@
$ rm access.log errors.log
#endif
-Several troubles on the same changeset (create an unstable and bumped changeset)
+Several troubles on the same changeset (create an unstable, bumped and content-divergent changeset)
$ hg debugobsolete `getid obsolete_e`
obsoleted 1 changesets
2 new orphan changesets
$ hg debugobsolete `getid original_c` `getid babar`
1 new phase-divergent changesets
- $ hg log --config ui.logtemplate= -r 'phasedivergent() and orphan()'
+ 2 new content-divergent changesets
+ $ hg log --config ui.logtemplate= -r 'phasedivergent() and orphan() and contentdivergent()'
changeset: 7:50c51b361e60
user: test
date: Thu Jan 01 00:00:00 1970 +0000
- instability: orphan, phase-divergent
+ instability: orphan, phase-divergent, content-divergent
summary: add babar
-
test the "obsolete" templatekw
$ hg log -r 'obsolete()'
@@ -958,7 +958,7 @@
test the "troubles" templatekw
$ hg log -r 'phasedivergent() and orphan()'
- 7:50c51b361e60 (draft orphan phase-divergent) [ ] add babar
+ 7:50c51b361e60 (draft orphan phase-divergent content-divergent) [ ] add babar
test the default cmdline template
@@ -966,7 +966,7 @@
changeset: 7:50c51b361e60
user: test
date: Thu Jan 01 00:00:00 1970 +0000
- instability: orphan, phase-divergent
+ instability: orphan, phase-divergent, content-divergent
summary: add babar
$ hg log -T default -r 'obsolete()'
@@ -981,18 +981,18 @@
test the obsolete labels
$ hg log --config ui.logtemplate= --color=debug -r 'phasedivergent()'
- [log.changeset changeset.draft changeset.unstable instability.orphan instability.phase-divergent|changeset: 7:50c51b361e60]
+ [log.changeset changeset.draft changeset.unstable instability.orphan instability.phase-divergent instability.content-divergent|changeset: 7:50c51b361e60]
[log.user|user: test]
[log.date|date: Thu Jan 01 00:00:00 1970 +0000]
- [log.instability|instability: orphan, phase-divergent]
+ [log.instability|instability: orphan, phase-divergent, content-divergent]
[log.summary|summary: add babar]
$ hg log -T default -r 'phasedivergent()' --color=debug
- [log.changeset changeset.draft changeset.unstable instability.orphan instability.phase-divergent|changeset: 7:50c51b361e60]
+ [log.changeset changeset.draft changeset.unstable instability.orphan instability.phase-divergent instability.content-divergent|changeset: 7:50c51b361e60]
[log.user|user: test]
[log.date|date: Thu Jan 01 00:00:00 1970 +0000]
- [log.instability|instability: orphan, phase-divergent]
+ [log.instability|instability: orphan, phase-divergent, content-divergent]
[log.summary|summary: add babar]
@@ -1019,13 +1019,14 @@
$ hg up -r 'phasedivergent() and orphan()'
1 files updated, 0 files merged, 1 files removed, 0 files unresolved
$ hg summary
- parent: 7:50c51b361e60 (orphan, phase-divergent)
+ parent: 7:50c51b361e60 (orphan, phase-divergent, content-divergent)
add babar
branch: default
commit: (clean)
update: 2 new changesets (update)
phases: 4 draft
orphan: 2 changesets
+ content-divergent: 2 changesets
phase-divergent: 1 changesets
$ hg up -r 'obsolete()'
0 files updated, 0 files merged, 1 files removed, 0 files unresolved
@@ -1037,6 +1038,7 @@
update: 3 new changesets (update)
phases: 4 draft
orphan: 2 changesets
+ content-divergent: 2 changesets
phase-divergent: 1 changesets
test debugwhyunstable output
@@ -1044,15 +1046,18 @@
$ hg debugwhyunstable 50c51b361e60
orphan: obsolete parent 3de5eca88c00aa039da7399a220f4a5221faa585
phase-divergent: immutable predecessor 245bde4270cd1072a27757984f9cda8ba26f08ca
+ content-divergent: 6f96419950729f3671185b847352890f074f7557 (draft) predecessor 245bde4270cd1072a27757984f9cda8ba26f08ca
test whyunstable template keyword
$ hg log -r 50c51b361e60 -T '{whyunstable}\n'
orphan: obsolete parent 3de5eca88c00
phase-divergent: immutable predecessor 245bde4270cd
+ content-divergent: 3:6f9641995072 (draft) predecessor 245bde4270cd
$ hg log -r 50c51b361e60 -T '{whyunstable % "{instability}: {reason} {node|shortest}\n"}'
orphan: obsolete parent 3de5
phase-divergent: immutable predecessor 245b
+ content-divergent: predecessor 245b
#if serve
@@ -1076,36 +1081,43 @@
check changeset with instabilities
$ get-with-headers.py localhost:$HGPORT 'log?rev=first(phasedivergent())&style=paper' | grep '<span class="instability">'
- <span class="phase">draft</span> <span class="instability">orphan</span> <span class="instability">phase-divergent</span>
+ <span class="phase">draft</span> <span class="instability">orphan</span> <span class="instability">phase-divergent</span> <span class="instability">content-divergent</span>
$ get-with-headers.py localhost:$HGPORT 'log?rev=first(phasedivergent())&style=coal' | grep '<span class="instability">'
- <span class="phase">draft</span> <span class="instability">orphan</span> <span class="instability">phase-divergent</span>
+ <span class="phase">draft</span> <span class="instability">orphan</span> <span class="instability">phase-divergent</span> <span class="instability">content-divergent</span>
$ get-with-headers.py localhost:$HGPORT 'log?rev=first(phasedivergent())&style=gitweb' | grep '<span class="logtags">'
- <span class="logtags"><span class="phasetag" title="draft">draft</span> <span class="instabilitytag" title="orphan">orphan</span> <span class="instabilitytag" title="phase-divergent">phase-divergent</span> </span>
+ <span class="logtags"><span class="phasetag" title="draft">draft</span> <span class="instabilitytag" title="orphan">orphan</span> <span class="instabilitytag" title="phase-divergent">phase-divergent</span> <span class="instabilitytag" title="content-divergent">content-divergent</span> </span>
$ get-with-headers.py localhost:$HGPORT 'log?rev=first(phasedivergent())&style=monoblue' | grep '<span class="logtags">'
- <span class="logtags"><span class="phasetag" title="draft">draft</span> <span class="instabilitytag" title="orphan">orphan</span> <span class="instabilitytag" title="phase-divergent">phase-divergent</span> </span>
+ <span class="logtags"><span class="phasetag" title="draft">draft</span> <span class="instabilitytag" title="orphan">orphan</span> <span class="instabilitytag" title="phase-divergent">phase-divergent</span> <span class="instabilitytag" title="content-divergent">content-divergent</span> </span>
$ get-with-headers.py localhost:$HGPORT 'log?rev=first(phasedivergent())&style=spartan' | grep 'class="unstable"'
<th class="unstable">unstable:</th>
<td class="unstable">orphan: obsolete parent <a href="/rev/3de5eca88c00?style=spartan">3de5eca88c00</a></td>
<th class="unstable">unstable:</th>
<td class="unstable">phase-divergent: immutable predecessor <a href="/rev/245bde4270cd?style=spartan">245bde4270cd</a></td>
+ <th class="unstable">unstable:</th>
+ <td class="unstable">content-divergent: <a href="/rev/6f9641995072?style=spartan">6f9641995072</a> (draft) predecessor <a href="/rev/245bde4270cd?style=spartan">245bde4270cd</a></td>
-check explanation for an orphan and phase-divergent changeset
+check explanation for an orphan, phase-divergent and content-divergent changeset
- $ get-with-headers.py localhost:$HGPORT 'rev/50c51b361e60?style=paper' | egrep '(orphan|phase-divergent):'
+ $ get-with-headers.py localhost:$HGPORT 'rev/50c51b361e60?style=paper' | egrep '(orphan|phase-divergent|content-divergent):'
<td>orphan: obsolete parent <a href="/rev/3de5eca88c00?style=paper">3de5eca88c00</a><br>
- phase-divergent: immutable predecessor <a href="/rev/245bde4270cd?style=paper">245bde4270cd</a></td>
- $ get-with-headers.py localhost:$HGPORT 'rev/50c51b361e60?style=coal' | egrep '(orphan|phase-divergent):'
+ phase-divergent: immutable predecessor <a href="/rev/245bde4270cd?style=paper">245bde4270cd</a><br>
+ content-divergent: <a href="/rev/6f9641995072?style=paper">6f9641995072</a> (draft) predecessor <a href="/rev/245bde4270cd?style=paper">245bde4270cd</a></td>
+ $ get-with-headers.py localhost:$HGPORT 'rev/50c51b361e60?style=coal' | egrep '(orphan|phase-divergent|content-divergent):'
<td>orphan: obsolete parent <a href="/rev/3de5eca88c00?style=coal">3de5eca88c00</a><br>
- phase-divergent: immutable predecessor <a href="/rev/245bde4270cd?style=coal">245bde4270cd</a></td>
- $ get-with-headers.py localhost:$HGPORT 'rev/50c51b361e60?style=gitweb' | egrep '(orphan|phase-divergent):'
+ phase-divergent: immutable predecessor <a href="/rev/245bde4270cd?style=coal">245bde4270cd</a><br>
+ content-divergent: <a href="/rev/6f9641995072?style=coal">6f9641995072</a> (draft) predecessor <a href="/rev/245bde4270cd?style=coal">245bde4270cd</a></td>
+ $ get-with-headers.py localhost:$HGPORT 'rev/50c51b361e60?style=gitweb' | egrep '(orphan|phase-divergent|content-divergent):'
<td>orphan: obsolete parent <a class="list" href="/rev/3de5eca88c00?style=gitweb">3de5eca88c00</a></td>
<td>phase-divergent: immutable predecessor <a class="list" href="/rev/245bde4270cd?style=gitweb">245bde4270cd</a></td>
- $ get-with-headers.py localhost:$HGPORT 'rev/50c51b361e60?style=monoblue' | egrep '(orphan|phase-divergent):'
+ <td>content-divergent: <a class="list" href="/rev/6f9641995072?style=gitweb">6f9641995072</a> (draft) predecessor <a class="list" href="/rev/245bde4270cd?style=gitweb">245bde4270cd</a></td>
+ $ get-with-headers.py localhost:$HGPORT 'rev/50c51b361e60?style=monoblue' | egrep '(orphan|phase-divergent|content-divergent):'
<dd>orphan: obsolete parent <a href="/rev/3de5eca88c00?style=monoblue">3de5eca88c00</a></dd>
<dd>phase-divergent: immutable predecessor <a href="/rev/245bde4270cd?style=monoblue">245bde4270cd</a></dd>
- $ get-with-headers.py localhost:$HGPORT 'rev/50c51b361e60?style=spartan' | egrep '(orphan|phase-divergent):'
+ <dd>content-divergent: <a href="/rev/6f9641995072?style=monoblue">6f9641995072</a> (draft) predecessor <a href="/rev/245bde4270cd?style=monoblue">245bde4270cd</a></dd>
+ $ get-with-headers.py localhost:$HGPORT 'rev/50c51b361e60?style=spartan' | egrep '(orphan|phase-divergent|content-divergent):'
<td class="unstable">orphan: obsolete parent <a href="/rev/3de5eca88c00?style=spartan">3de5eca88c00</a></td>
<td class="unstable">phase-divergent: immutable predecessor <a href="/rev/245bde4270cd?style=spartan">245bde4270cd</a></td>
+ <td class="unstable">content-divergent: <a href="/rev/6f9641995072?style=spartan">6f9641995072</a> (draft) predecessor <a href="/rev/245bde4270cd?style=spartan">245bde4270cd</a></td>
$ killdaemons.py
--- a/tests/test-parseindex.t Wed Jan 09 20:00:35 2019 -0800
+++ b/tests/test-parseindex.t Fri Jan 18 13:28:22 2019 -0500
@@ -132,9 +132,9 @@
$ mkdir invalidparent
$ cd invalidparent
- $ hg clone --pull -q --config phases.publish=False ../a limit
- $ hg clone --pull -q --config phases.publish=False ../a neglimit
- $ hg clone --pull -q --config phases.publish=False ../a segv
+ $ hg clone --pull -q --config phases.publish=False ../a limit --config format.sparse-revlog=no
+ $ hg clone --pull -q --config phases.publish=False ../a neglimit --config format.sparse-revlog=no
+ $ hg clone --pull -q --config phases.publish=False ../a segv --config format.sparse-revlog=no
$ rm -R limit/.hg/cache neglimit/.hg/cache segv/.hg/cache
$ "$PYTHON" <<EOF
--- a/tests/test-patchbomb.t Wed Jan 09 20:00:35 2019 -0800
+++ b/tests/test-patchbomb.t Fri Jan 18 13:28:22 2019 -0500
@@ -22,7 +22,9 @@
> skipblank = False
> print(l, end='')
> EOF
- $ FILTERBOUNDARY="$PYTHON `pwd`/prune-blank-after-boundary.py"
+ $ filterboundary() {
+ > "$PYTHON" "$TESTTMP/prune-blank-after-boundary.py"
+ > }
$ echo "[extensions]" >> $HGRCPATH
$ echo "patchbomb=" >> $HGRCPATH
@@ -357,7 +359,7 @@
test bundle and description:
$ hg email --date '1970-1-1 0:3' -n -f quux -t foo \
- > -c bar -s test -r tip -b --desc description | $FILTERBOUNDARY
+ > -c bar -s test -r tip -b --desc description | filterboundary
searching for changes
1 changesets found
@@ -403,7 +405,7 @@
$ hg email --date '1970-1-1 0:3' -n -f quux -t foo \
> -c bar -s test -r tip -b --desc description \
- > --config patchbomb.bundletype=gzip-v1 | $FILTERBOUNDARY
+ > --config patchbomb.bundletype=gzip-v1 | filterboundary
searching for changes
1 changesets found
@@ -884,7 +886,7 @@
test inline for single patch:
- $ hg email --date '1970-1-1 0:1' -n -f quux -t foo -c bar -s test -i -r 2 | $FILTERBOUNDARY
+ $ hg email --date '1970-1-1 0:1' -n -f quux -t foo -c bar -s test -i -r 2 | filterboundary
this patch series consists of 1 patches.
@@ -927,7 +929,7 @@
test inline for single patch (quoted-printable):
- $ hg email --date '1970-1-1 0:1' -n -f quux -t foo -c bar -s test -i -r 4 | $FILTERBOUNDARY
+ $ hg email --date '1970-1-1 0:1' -n -f quux -t foo -c bar -s test -i -r 4 | filterboundary
this patch series consists of 1 patches.
@@ -986,7 +988,7 @@
test inline for multiple patches:
$ hg email --date '1970-1-1 0:1' -n -f quux -t foo -c bar -s test -i \
- > -r 0:1 -r 4 | $FILTERBOUNDARY
+ > -r 0:1 -r 4 | filterboundary
this patch series consists of 3 patches.
@@ -1138,7 +1140,7 @@
--===*=-- (glob)
test attach for single patch:
- $ hg email --date '1970-1-1 0:1' -n -f quux -t foo -c bar -s test -a -r 2 | $FILTERBOUNDARY
+ $ hg email --date '1970-1-1 0:1' -n -f quux -t foo -c bar -s test -a -r 2 | filterboundary
this patch series consists of 1 patches.
@@ -1189,7 +1191,7 @@
--===*=-- (glob)
test attach for single patch (quoted-printable):
- $ hg email --date '1970-1-1 0:1' -n -f quux -t foo -c bar -s test -a -r 4 | $FILTERBOUNDARY
+ $ hg email --date '1970-1-1 0:1' -n -f quux -t foo -c bar -s test -a -r 4 | filterboundary
this patch series consists of 1 patches.
@@ -1256,7 +1258,7 @@
--===*=-- (glob)
test attach and body for single patch:
- $ hg email --date '1970-1-1 0:1' -n -f quux -t foo -c bar -s test -a --body -r 2 | $FILTERBOUNDARY
+ $ hg email --date '1970-1-1 0:1' -n -f quux -t foo -c bar -s test -a --body -r 2 | filterboundary
this patch series consists of 1 patches.
@@ -1318,7 +1320,7 @@
test attach for multiple patches:
$ hg email --date '1970-1-1 0:1' -n -f quux -t foo -c bar -s test -a \
- > -r 0:1 -r 4 | $FILTERBOUNDARY
+ > -r 0:1 -r 4 | filterboundary
this patch series consists of 3 patches.
@@ -1775,7 +1777,7 @@
test inline for single named patch:
$ hg email --date '1970-1-1 0:1' -n -f quux -t foo -c bar -s test -i \
- > -r 2 | $FILTERBOUNDARY
+ > -r 2 | filterboundary
this patch series consists of 1 patches.
@@ -1818,7 +1820,7 @@
test inline for multiple named/unnamed patches:
$ hg email --date '1970-1-1 0:1' -n -f quux -t foo -c bar -s test -i \
- > -r 0:1 | $FILTERBOUNDARY
+ > -r 0:1 | filterboundary
this patch series consists of 2 patches.
@@ -2124,7 +2126,7 @@
$ hg up -qr1
$ echo dirt > a
$ hg email --date '1970-1-1 0:1' -n --flag fooFlag -f quux -t foo -c bar -s test \
- > -r 2 | $FILTERBOUNDARY
+ > -r 2 | filterboundary
this patch series consists of 1 patches.
--- a/tests/test-phabricator.t Wed Jan 09 20:00:35 2019 -0800
+++ b/tests/test-phabricator.t Fri Jan 18 13:28:22 2019 -0500
@@ -65,6 +65,23 @@
D4597 - created - 1a5640df7bbf: create beta for phabricator test
saved backup bundle to $TESTTMP/repo/.hg/strip-backup/1a5640df7bbf-6daf3e6e-phabsend.hg
+The amend won't explode after posting a public commit. The local tag is left
+behind to identify it.
+
+ $ echo 'public change' > beta
+ $ hg ci -m 'create public change for phabricator testing'
+ $ hg phase --public .
+ $ echo 'draft change' > alpha
+ $ hg ci -m 'create draft change for phabricator testing'
+ $ hg phabsend --amend -r '.^::' --test-vcr "$VCR/phabsend-create-public.json"
+ D5544 - created - 540a21d3fbeb: create public change for phabricator testing
+ D5545 - created - 6bca752686cd: create draft change for phabricator testing
+ warning: not updating public commit 2:540a21d3fbeb
+ saved backup bundle to $TESTTMP/repo/.hg/strip-backup/6bca752686cd-41faefb4-phabsend.hg
+ $ hg tags -v
+ tip 3:620a50fd6ed9
+ D5544 2:540a21d3fbeb local
+
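The behavior being locked in: phabsend amends draft commits to embed the Differential URL, but a public commit is immutable, so the mapping is pinned with a local tag (D5544 above) plus a warning instead. A hedged sketch of that branch; the callables passed in are hypothetical stand-ins, not the extension's API:

    def record_review(phase, node, drevid, amend_commit, add_local_tag, warn):
        # phase/node describe the posted changeset, drevid is the numeric
        # Differential id; the three callables are hypothetical helpers
        if phase == 'public':
            warn('warning: not updating public commit %s' % node)
            add_local_tag('D%d' % drevid, node)  # leaves e.g. tag 'D5544'
        else:
            amend_commit(node, drevid)  # rewrite the message with the D-URL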
$ hg debugcallconduit user.search --test-vcr "$VCR/phab-conduit.json" <<EOF
> {
> "constraints": {
@@ -88,10 +105,14 @@
Template keywords
$ hg log -T'{rev} {phabreview|json}\n'
+ 3 {"id": "D5545", "url": "https://phab.mercurial-scm.org/D5545"}
+ 2 {"id": "D5544", "url": "https://phab.mercurial-scm.org/D5544"}
1 {"id": "D4597", "url": "https://phab.mercurial-scm.org/D4597"}
0 {"id": "D4596", "url": "https://phab.mercurial-scm.org/D4596"}
- $ hg log -T'{rev} {phabreview.url} {phabreview.id}\n'
+ $ hg log -T'{rev} {if(phabreview, "{phabreview.url} {phabreview.id}")}\n'
+ 3 https://phab.mercurial-scm.org/D5545 D5545
+ 2 https://phab.mercurial-scm.org/D5544 D5544
1 https://phab.mercurial-scm.org/D4597 D4597
0 https://phab.mercurial-scm.org/D4596 D4596
--- a/tests/test-phases-exchange.t Wed Jan 09 20:00:35 2019 -0800
+++ b/tests/test-phases-exchange.t Fri Jan 18 13:28:22 2019 -0500
@@ -1559,3 +1559,261 @@
|
o 426ba public
+ $ killdaemons.py
+
+
+auto-publish config
+-------------------
+
+ $ hg init auto-publish-orig
+ $ hg clone -q auto-publish-orig auto-publish-clone
+ $ cd auto-publish-clone
+ $ mkcommit a-p-A
+ test-debug-phase: new rev 0: x -> 1
+ $ mkcommit a-p-B
+ test-debug-phase: new rev 1: x -> 1
+
+abort behavior
+
+ $ hg push --config experimental.auto-publish=abort
+ pushing to $TESTTMP/auto-publish-orig
+ abort: push would publish 2 changesets
+ (use --publish or adjust 'experimental.auto-publish' config)
+ [255]
+ $ hg push -r '.^' --config experimental.auto-publish=abort
+ pushing to $TESTTMP/auto-publish-orig
+ abort: push would publish 1 changesets
+ (use --publish or adjust 'experimental.auto-publish' config)
+ [255]
+
+--publish flag makes push succeed
+
+ $ hg push -r '.^' --publish --config experimental.auto-publish=abort
+ pushing to $TESTTMP/auto-publish-orig
+ searching for changes
+ adding changesets
+ adding manifests
+ adding file changes
+ added 1 changesets with 1 changes to 1 files
+ test-debug-phase: new rev 0: x -> 0
+ test-debug-phase: move rev 0: 1 -> 0
+
+warn behavior
+
+ $ hg push --config experimental.auto-publish=warn
+ pushing to $TESTTMP/auto-publish-orig
+ 1 changesets about to be published
+ searching for changes
+ adding changesets
+ adding manifests
+ adding file changes
+ added 1 changesets with 1 changes to 1 files
+ test-debug-phase: new rev 1: x -> 0
+ test-debug-phase: move rev 1: 1 -> 0
+
+confirm behavior
+
+ $ mkcommit a-p-C
+ test-debug-phase: new rev 2: x -> 1
+ $ hg push --config experimental.auto-publish=confirm
+ pushing to $TESTTMP/auto-publish-orig
+ push and publish 1 changesets (yn)? y
+ searching for changes
+ adding changesets
+ adding manifests
+ adding file changes
+ added 1 changesets with 1 changes to 1 files
+ test-debug-phase: new rev 2: x -> 0
+ test-debug-phase: move rev 2: 1 -> 0
+
+ $ cd ..
+
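The three policies above differ only in what happens once the client knows how many changesets a push would publish; the push itself is unchanged. A minimal sketch under that assumption (the config name comes from the test, the function and its caller are made up):

    from mercurial import error

    def check_auto_publish(ui, npublishing):
        mode = ui.config(b'experimental', b'auto-publish')
        if not npublishing or mode not in (b'abort', b'warn', b'confirm'):
            return
        if mode == b'abort':
            raise error.Abort(
                b'push would publish %d changesets' % npublishing,
                hint=b"use --publish or adjust "
                     b"'experimental.auto-publish' config")
        if mode == b'warn':
            ui.warn(b'%d changesets about to be published\n' % npublishing)
        elif mode == b'confirm':
            prompt = (b'push and publish %d changesets (yn)?'
                      b'$$ &Yes $$ &No' % npublishing)
            if ui.promptchoice(prompt):
                raise error.Abort(b'user quit')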
+
+--publish flag
+--------------
+
+ $ hg init doesnt-publish
+ $ cd doesnt-publish
+ $ cat > .hg/hgrc << EOF
+ > [phases]
+ > publish=0
+ > EOF
+ $ mkcommit orig-root
+ test-debug-phase: new rev 0: x -> 1
+ $ hg phase --public -r 'all()'
+ test-debug-phase: move rev 0: 1 -> 0
+ $ cd ..
+
+ $ hg clone -q doesnt-publish client
+ $ cd client
+
+pushing nothing
+
+ $ mkcommit new-A
+ test-debug-phase: new rev 1: x -> 1
+ $ mkcommit new-B
+ test-debug-phase: new rev 2: x -> 1
+ $ hg push --publish -r null
+ pushing to $TESTTMP/doesnt-publish
+ searching for changes
+ no changes found
+ [1]
+ $ hgph
+ @ 2 draft new-B - 89512e87d697
+ |
+ o 1 draft new-A - 4826e44e690e
+ |
+ o 0 public orig-root - c48edaf99a10
+
+
+pushing a new changeset (selective)
+
+ $ hg push --publish -r 'desc("new-A")'
+ pushing to $TESTTMP/doesnt-publish
+ searching for changes
+ adding changesets
+ adding manifests
+ adding file changes
+ added 1 changesets with 1 changes to 1 files
+ test-debug-phase: new rev 1: x -> 0
+ test-debug-phase: move rev 1: 1 -> 0
+ $ hgph
+ @ 2 draft new-B - 89512e87d697
+ |
+ o 1 public new-A - 4826e44e690e
+ |
+ o 0 public orig-root - c48edaf99a10
+
+
+pushing a new changeset (linear)
+
+ $ hg push --publish
+ pushing to $TESTTMP/doesnt-publish
+ searching for changes
+ adding changesets
+ adding manifests
+ adding file changes
+ added 1 changesets with 1 changes to 1 files
+ test-debug-phase: new rev 2: x -> 0
+ test-debug-phase: move rev 2: 1 -> 0
+ $ hgph
+ @ 2 public new-B - 89512e87d697
+ |
+ o 1 public new-A - 4826e44e690e
+ |
+ o 0 public orig-root - c48edaf99a10
+
+
+pushing new changesets (different branches)
+
+ $ mkcommit new-C
+ test-debug-phase: new rev 3: x -> 1
+ $ hg update -q '.^'
+ $ hg branch -q another
+ $ mkcommit new-D
+ test-debug-phase: new rev 4: x -> 1
+ $ hg push --new-branch --publish
+ pushing to $TESTTMP/doesnt-publish
+ searching for changes
+ adding changesets
+ adding manifests
+ adding file changes
+ added 2 changesets with 2 changes to 2 files (+1 heads)
+ test-debug-phase: new rev 3: x -> 0
+ test-debug-phase: new rev 4: x -> 0
+ test-debug-phase: move rev 3: 1 -> 0
+ test-debug-phase: move rev 4: 1 -> 0
+ $ hgph
+ @ 4 public new-D - 5e53dcafd13c
+ |
+ | o 3 public new-C - 1665482cc06d
+ |/
+ o 2 public new-B - 89512e87d697
+ |
+ o 1 public new-A - 4826e44e690e
+ |
+ o 0 public orig-root - c48edaf99a10
+
+
+pushing a shared changeset
+
+ $ mkcommit new-E
+ test-debug-phase: new rev 5: x -> 1
+ $ hg push
+ pushing to $TESTTMP/doesnt-publish
+ searching for changes
+ adding changesets
+ adding manifests
+ adding file changes
+ added 1 changesets with 1 changes to 1 files
+ test-debug-phase: new rev 5: x -> 1
+ $ hg push --publish
+ pushing to $TESTTMP/doesnt-publish
+ searching for changes
+ no changes found
+ test-debug-phase: move rev 5: 1 -> 0
+ test-debug-phase: move rev 5: 1 -> 0
+ [1]
+ $ hgph
+ @ 5 public new-E - 48931ee3529c
+ |
+ o 4 public new-D - 5e53dcafd13c
+ |
+ | o 3 public new-C - 1665482cc06d
+ |/
+ o 2 public new-B - 89512e87d697
+ |
+ o 1 public new-A - 4826e44e690e
+ |
+ o 0 public orig-root - c48edaf99a10
+
+ $ cd ..
+
+--publish with subrepos (doesn't propagate to subrepos currently)
+
+ $ hg init with-subrepo
+ $ cd with-subrepo
+ $ cat > .hg/hgrc << EOF
+ > [phases]
+ > publish=0
+ > EOF
+ $ hg init subrepo
+ $ cd subrepo
+ $ cat > .hg/hgrc << EOF
+ > [phases]
+ > publish=0
+ > EOF
+ $ echo foo > foo
+ $ hg ci -qAm0
+ test-debug-phase: new rev 0: x -> 1
+ $ cd ..
+ $ echo 'subrepo = subrepo' > .hgsub
+ $ hg add .hgsub
+ $ hg ci -m 'Adding subrepo'
+ test-debug-phase: new rev 0: x -> 1
+ $ hgph
+ @ 0 draft Adding subrepo - 74d5b62379c0
+
+ $ hgph -R subrepo
+ @ 0 draft 0 - 4b3f578e3344
+
+ $ cd ..
+ $ hg clone with-subrepo client-with-subrepo
+ updating to branch default
+ cloning subrepo subrepo from $TESTTMP/with-subrepo/subrepo
+ 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
+ $ cd client-with-subrepo
+ $ hg push --publish
+ pushing to $TESTTMP/with-subrepo
+ no changes made to subrepo subrepo since last push to $TESTTMP/with-subrepo/subrepo
+ searching for changes
+ no changes found
+ test-debug-phase: move rev 0: 1 -> 0
+ test-debug-phase: move rev 0: 1 -> 0
+ [1]
+ $ hgph
+ @ 0 public Adding subrepo - 74d5b62379c0
+
+ $ hgph -R subrepo
+ @ 0 draft 0 - 4b3f578e3344
+
--- a/tests/test-phases.t Wed Jan 09 20:00:35 2019 -0800
+++ b/tests/test-phases.t Fri Jan 18 13:28:22 2019 -0500
@@ -841,6 +841,7 @@
fncache
generaldelta
revlogv1
+ sparserevlog
store
$ echo X > X
$ hg add X
@@ -867,6 +868,7 @@
generaldelta
internal-phase
revlogv1
+ sparserevlog
store
$ mkcommit A
test-debug-phase: new rev 0: x -> 1
--- a/tests/test-profile.t Wed Jan 09 20:00:35 2019 -0800
+++ b/tests/test-profile.t Fri Jan 18 13:28:22 2019 -0500
@@ -86,22 +86,22 @@
Various statprof formatters work
- $ hg --profile --config profiling.statformat=byline sleep 2>../out
+ $ hg --profile --config profiling.statformat=byline sleep 2>../out || cat ../out
$ head -n 3 ../out
% cumulative self
time seconds seconds name
* sleepext.py:*:sleep (glob)
$ cat ../out | statprofran
- $ hg --profile --config profiling.statformat=bymethod sleep 2>../out
+ $ hg --profile --config profiling.statformat=bymethod sleep 2>../out || cat ../out
$ head -n 1 ../out
% cumulative self
$ cat ../out | statprofran
- $ hg --profile --config profiling.statformat=hotpath sleep 2>../out
+ $ hg --profile --config profiling.statformat=hotpath sleep 2>../out || cat ../out
$ cat ../out | statprofran
- $ hg --profile --config profiling.statformat=json sleep 2>../out
+ $ hg --profile --config profiling.statformat=json sleep 2>../out || cat ../out
$ cat ../out
\[\[-?\d+.* (re)
--- a/tests/test-push-race.t Wed Jan 09 20:00:35 2019 -0800
+++ b/tests/test-push-race.t Fri Jan 18 13:28:22 2019 -0500
@@ -28,31 +28,31 @@
> configtable = {}
> configitem = registrar.configitem(configtable)
>
- > configitem('delaypush', 'ready-path',
+ > configitem(b'delaypush', b'ready-path',
> default=None,
> )
- > configitem('delaypush', 'release-path',
+ > configitem(b'delaypush', b'release-path',
> default=None,
> )
>
> def delaypush(orig, pushop):
> # notify we are done preparing
> ui = pushop.repo.ui
- > readypath = ui.config('delaypush', 'ready-path')
+ > readypath = ui.config(b'delaypush', b'ready-path')
> if readypath is not None:
> with open(readypath, 'w') as r:
> r.write('foo')
- > ui.status('wrote ready: %s\n' % readypath)
+ > ui.status(b'wrote ready: %s\n' % readypath)
> # now wait for the other process to be done
- > watchpath = ui.config('delaypush', 'release-path')
+ > watchpath = ui.config(b'delaypush', b'release-path')
> if watchpath is not None:
- > ui.status('waiting on: %s\n' % watchpath)
+ > ui.status(b'waiting on: %s\n' % watchpath)
> limit = 100
> while 0 < limit and not os.path.exists(watchpath):
> limit -= 1
> time.sleep(0.1)
> if limit <= 0:
- > ui.warn('exiting without watchfile: %s' % watchpath)
+ > ui.warn(b'exiting without watchfile: %s' % watchpath)
> else:
> # delete the file at the end of the push
> def delete():
@@ -65,7 +65,7 @@
> return orig(pushop)
>
> def uisetup(ui):
- > extensions.wrapfunction(exchange, '_pushbundle2', delaypush)
+ > extensions.wrapfunction(exchange, b'_pushbundle2', delaypush)
> EOF
$ waiton () {
--- a/tests/test-rebase-abort.t Wed Jan 09 20:00:35 2019 -0800
+++ b/tests/test-rebase-abort.t Fri Jan 18 13:28:22 2019 -0500
@@ -206,7 +206,7 @@
$ hg rebase -b 4 -d 2
rebasing 3:a6484957d6b9 "B bis"
- note: rebase of 3:a6484957d6b9 created no changes to commit
+ note: not rebasing 3:a6484957d6b9 "B bis", its destination already has all its changes
rebasing 4:145842775fec "C1" (tip)
merging c
warning: conflicts while merging c! (edit, then use 'hg resolve --mark')
@@ -493,7 +493,7 @@
$ hg commit -m 'add content draft' -q
$ hg rebase -d 'public()' --tool :merge -q
- note: rebase of 3:0682fd3dabf5 created no changes to commit
+ note: not rebasing 3:0682fd3dabf5 "disappear draft", its destination already has all its changes
warning: conflicts while merging root! (edit, then use 'hg resolve --mark')
unresolved conflicts (see hg resolve, then hg rebase --continue)
[1]
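This wording change recurs throughout the rebase tests below; the condition behind it is a single check: after merging a revision onto the destination, an unchanged working copy means the destination already has everything that revision contributed. A sketch with hypothetical helpers (not the rebase extension's code):

    def rebase_one(rev, merge_onto_dest, has_changes, commit, note):
        # all four helpers are stand-ins for rebase internals
        merge_onto_dest(rev)
        if not has_changes():
            note("note: not rebasing %s, its destination already has "
                 "all its changes" % rev)
            return None          # skipped: nothing to commit
        return commit(rev)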
--- a/tests/test-rebase-backup.t Wed Jan 09 20:00:35 2019 -0800
+++ b/tests/test-rebase-backup.t Fri Jan 18 13:28:22 2019 -0500
@@ -3,9 +3,9 @@
> rebase=
> EOF
-==========================================
-Test history-editing-backup config option |
-==========================================
+==================================
+Test backup-bundle config option |
+==================================
Test with Pre-obsmarker rebase:
1) When config option is not set:
$ hg init repo1
@@ -50,8 +50,8 @@
2) When config option is set:
$ cat << EOF >> $HGRCPATH
- > [ui]
- > history-editing-backup = False
+ > [rewrite]
+ > backup-bundle = False
> EOF
$ echo f>f
@@ -116,10 +116,10 @@
|
o 0: a
-When history-editing-backup = True:
+When backup-bundle = True:
$ cat << EOF >> $HGRCPATH
- > [ui]
- > history-editing-backup = True
+ > [rewrite]
+ > backup-bundle = True
> EOF
$ hg rebase -s 5 -d .
rebasing 5:1f8148a544ee "b"
@@ -132,10 +132,10 @@
saved backup bundle to $TESTTMP/repo1/.hg/strip-backup/818c1a43c916-2b644d96-backup.hg
rebase aborted
-When history-editing-backup = False:
+When backup-bundle = False:
$ cat << EOF >> $HGRCPATH
- > [ui]
- > history-editing-backup = False
+ > [rewrite]
+ > backup-bundle = False
> EOF
$ hg rebase -s 5 -d .
rebasing 5:1f8148a544ee "b"
--- a/tests/test-rebase-cache.t Wed Jan 09 20:00:35 2019 -0800
+++ b/tests/test-rebase-cache.t Fri Jan 18 13:28:22 2019 -0500
@@ -231,7 +231,7 @@
$ hg rebase -s 7 -d 6
rebasing 7:653b9feb4616 "branch3"
- note: rebase of 7:653b9feb4616 created no changes to commit
+ note: not rebasing 7:653b9feb4616 "branch3", its destination already has all its changes
rebasing 8:4666b71e8e32 "F" (tip)
saved backup bundle to $TESTTMP/a3/.hg/strip-backup/653b9feb4616-3c88de16-rebase.hg
--- a/tests/test-rebase-collapse.t Wed Jan 09 20:00:35 2019 -0800
+++ b/tests/test-rebase-collapse.t Fri Jan 18 13:28:22 2019 -0500
@@ -34,7 +34,7 @@
> echo "===="
> echo "edited manually" >> \$1
> EOF
- $ HGEDITOR="sh $TESTTMP/editor.sh" hg rebase --collapse --keepbranches -e --dest F
+ $ HGEDITOR="sh $TESTTMP/editor.sh" hg rebase --collapse --keepbranches -e --source B --dest F
rebasing 1:112478962961 "B" (B)
rebasing 3:26805aba1e60 "C" (C)
rebasing 5:f585351a92f8 "D" (D tip)
@@ -500,7 +500,7 @@
$ hg rebase -s 5 -d 4
rebasing 5:fbfb97b1089a "E" (tip)
- note: rebase of 5:fbfb97b1089a created no changes to commit
+ note: not rebasing 5:fbfb97b1089a "E" (tip), its destination already has all its changes
saved backup bundle to $TESTTMP/e/.hg/strip-backup/fbfb97b1089a-553e1d85-rebase.hg
$ hg tglog
@ 4: f338eb3c2c7c 'E'
--- a/tests/test-rebase-detach.t Wed Jan 09 20:00:35 2019 -0800
+++ b/tests/test-rebase-detach.t Fri Jan 18 13:28:22 2019 -0500
@@ -293,7 +293,7 @@
$ hg ci -A -m 'B2'
adding B
$ hg phase --force --secret .
- $ hg rebase -s . -d B --config ui.merge=internal:fail
+ $ hg rebase -s . -d B --config ui.merge=internal:merge
rebasing 3:17b4880d2402 "B2" (tip)
merging B
warning: conflicts while merging B! (edit, then use 'hg resolve --mark')
@@ -304,7 +304,7 @@
continue: hg rebase --continue
$ hg rebase -c
rebasing 3:17b4880d2402 "B2" (tip)
- note: rebase of 3:17b4880d2402 created no changes to commit
+ note: not rebasing 3:17b4880d2402 "B2" (tip), its destination already has all its changes
saved backup bundle to $TESTTMP/a7/.hg/strip-backup/17b4880d2402-1ae1f6cc-rebase.hg
$ hg log -G --template "{rev}:{phase} '{desc}' {branches}\n"
o 2:draft 'C'
--- a/tests/test-rebase-emptycommit.t Wed Jan 09 20:00:35 2019 -0800
+++ b/tests/test-rebase-emptycommit.t Fri Jan 18 13:28:22 2019 -0500
@@ -51,9 +51,9 @@
$ hg rebase -r 3+4 -d E --keep
rebasing 3:e7b3f00ed42e "D" (BOOK-D)
- note: rebase of 3:e7b3f00ed42e created no changes to commit
+ note: not rebasing 3:e7b3f00ed42e "D" (BOOK-D), its destination already has all its changes
rebasing 4:69a34c08022a "E" (BOOK-E)
- note: rebase of 4:69a34c08022a created no changes to commit
+ note: not rebasing 4:69a34c08022a "E" (BOOK-E), its destination already has all its changes
$ hg log -G -T '{rev} {desc} {bookmarks}'
o 7 E BOOK-D BOOK-E
|
@@ -84,9 +84,9 @@
$ hg rebase -s 2 -d E
rebasing 2:dc0947a82db8 "C" (BOOK-C C)
rebasing 3:e7b3f00ed42e "D" (BOOK-D)
- note: rebase of 3:e7b3f00ed42e created no changes to commit
+ note: not rebasing 3:e7b3f00ed42e "D" (BOOK-D), its destination already has all its changes
rebasing 4:69a34c08022a "E" (BOOK-E)
- note: rebase of 4:69a34c08022a created no changes to commit
+ note: not rebasing 4:69a34c08022a "E" (BOOK-E), its destination already has all its changes
rebasing 5:6b2aeab91270 "F" (BOOK-F F)
saved backup bundle to $TESTTMP/non-merge/.hg/strip-backup/dc0947a82db8-52bb4973-rebase.hg
$ hg log -G -T '{rev} {desc} {bookmarks}'
@@ -131,11 +131,11 @@
$ hg rebase -r '(A::)-(B::)-A' -d H
rebasing 2:dc0947a82db8 "C" (BOOK-C)
- note: rebase of 2:dc0947a82db8 created no changes to commit
+ note: not rebasing 2:dc0947a82db8 "C" (BOOK-C), its destination already has all its changes
rebasing 3:b18e25de2cf5 "D" (BOOK-D)
- note: rebase of 3:b18e25de2cf5 created no changes to commit
+ note: not rebasing 3:b18e25de2cf5 "D" (BOOK-D), its destination already has all its changes
rebasing 4:86a1f6686812 "E" (BOOK-E E)
- note: rebase of 4:86a1f6686812 created no changes to commit
+ note: not rebasing 4:86a1f6686812 "E" (BOOK-E E), its destination already has all its changes
saved backup bundle to $TESTTMP/merge1/.hg/strip-backup/b18e25de2cf5-1fd0a4ba-rebase.hg
$ hg log -G -T '{rev} {desc} {bookmarks}'
@@ -180,11 +180,11 @@
$ hg rebase -r '(A::)-(B::)-A' -d H
rebasing 2:dc0947a82db8 "C" (BOOK-C)
- note: rebase of 2:dc0947a82db8 created no changes to commit
+ note: not rebasing 2:dc0947a82db8 "C" (BOOK-C), its destination already has all its changes
rebasing 3:b18e25de2cf5 "D" (BOOK-D D)
rebasing 4:03ca77807e91 "E" (BOOK-E E)
rebasing 5:ad6717a6a58e "F" (BOOK-F)
- note: rebase of 5:ad6717a6a58e created no changes to commit
+ note: not rebasing 5:ad6717a6a58e "F" (BOOK-F), its destination already has all its changes
rebasing 6:c58e8bdac1f4 "G" (BOOK-G G)
saved backup bundle to $TESTTMP/merge2/.hg/strip-backup/b18e25de2cf5-2d487005-rebase.hg
--- a/tests/test-rebase-inmemory.t Wed Jan 09 20:00:35 2019 -0800
+++ b/tests/test-rebase-inmemory.t Fri Jan 18 13:28:22 2019 -0500
@@ -694,8 +694,6 @@
o 0:cb9a9f314b8b test
a
-#if execbit
-
Test a metadata-only in-memory merge
$ cd $TESTTMP
$ hg init no_exception
@@ -720,5 +718,3 @@
diff --git a/foo.txt b/foo.txt
old mode 100644
new mode 100755
-
-#endif
--- a/tests/test-rebase-interruptions.t Wed Jan 09 20:00:35 2019 -0800
+++ b/tests/test-rebase-interruptions.t Fri Jan 18 13:28:22 2019 -0500
@@ -454,7 +454,7 @@
continue: hg rebase --continue
$ hg rebase --continue
rebasing 1:fdaca8533b86 "b"
- note: rebase of 1:fdaca8533b86 created no changes to commit
+ note: not rebasing 1:fdaca8533b86 "b", its destination already has all its changes
saved backup bundle to $TESTTMP/repo/.hg/strip-backup/fdaca8533b86-7fd70513-rebase.hg
$ hg resolve --list
$ test -d .hg/merge
--- a/tests/test-rebase-mq-skip.t Wed Jan 09 20:00:35 2019 -0800
+++ b/tests/test-rebase-mq-skip.t Fri Jan 18 13:28:22 2019 -0500
@@ -62,7 +62,7 @@
committing changelog
rebasing 3:148775c71080 "P1" (p1.patch qtip)
resolving manifests
- note: rebase of 3:148775c71080 created no changes to commit
+ note: not rebasing 3:148775c71080 "P1" (p1.patch qtip), its destination already has all its changes
rebase merging completed
updating mq patch p0.patch to 5:9ecc820b1737
$TESTTMP/a/.hg/patches/p0.patch
@@ -143,10 +143,10 @@
$ HGMERGE=internal:fail hg rebase
rebasing 1:b4bffa6e4776 "r1" (qbase r1)
- note: rebase of 1:b4bffa6e4776 created no changes to commit
+ note: not rebasing 1:b4bffa6e4776 "r1" (qbase r1), its destination already has all its changes
rebasing 2:c0fd129beb01 "r2" (r2)
rebasing 3:6ff5b8feed8e "r3" (r3)
- note: rebase of 3:6ff5b8feed8e created no changes to commit
+ note: not rebasing 3:6ff5b8feed8e "r3" (r3), its destination already has all its changes
rebasing 4:094320fec554 "r4" (r4)
unresolved conflicts (see hg resolve, then hg rebase --continue)
[1]
@@ -160,10 +160,10 @@
already rebased 2:c0fd129beb01 "r2" (r2) as 1660ab13ce9a
already rebased 3:6ff5b8feed8e "r3" (r3) as 1660ab13ce9a
rebasing 4:094320fec554 "r4" (r4)
- note: rebase of 4:094320fec554 created no changes to commit
+ note: not rebasing 4:094320fec554 "r4" (r4), its destination already has all its changes
rebasing 5:681a378595ba "r5" (r5)
rebasing 6:512a1f24768b "r6" (qtip r6)
- note: rebase of 6:512a1f24768b created no changes to commit
+ note: not rebasing 6:512a1f24768b "r6" (qtip r6), its destination already has all its changes
saved backup bundle to $TESTTMP/b/.hg/strip-backup/b4bffa6e4776-b9bfb84d-rebase.hg
$ hg tglog
--- a/tests/test-rebase-mq.t Wed Jan 09 20:00:35 2019 -0800
+++ b/tests/test-rebase-mq.t Fri Jan 18 13:28:22 2019 -0500
@@ -337,7 +337,7 @@
foo
$ [ -f .hg/patches/empty-important ]
$ hg -q rebase -d 2
- note: rebase of 1:0aaf4c3af7eb created no changes to commit
+ note: not rebasing 1:0aaf4c3af7eb "important commit message" (empty-important qbase), its destination already has all its changes
$ hg qseries
guarded
bar
--- a/tests/test-rebase-named-branches.t Wed Jan 09 20:00:35 2019 -0800
+++ b/tests/test-rebase-named-branches.t Fri Jan 18 13:28:22 2019 -0500
@@ -157,7 +157,7 @@
$ hg rebase -b 'max(branch("dev-two"))' -d dev-one --keepbranches
rebasing 5:bc8139ee757c "dev-one named branch"
- note: rebase of 5:bc8139ee757c created no changes to commit
+ note: not rebasing 5:bc8139ee757c "dev-one named branch", its destination already has all its changes
rebasing 6:42aa3cf0fa7a "F"
rebasing 7:1a1e6f72ec38 "G"
rebasing 8:904590360559 "H"
@@ -329,7 +329,7 @@
$ hg up -qr 2
$ hg rebase
rebasing 2:792845bb77ee "b2"
- note: rebase of 2:792845bb77ee created no changes to commit
+ note: not rebasing 2:792845bb77ee "b2", its destination already has all its changes
saved backup bundle to $TESTTMP/case1/.hg/strip-backup/792845bb77ee-627120ee-rebase.hg
$ hg tglog
o 2: c062e3ecd6c6 'c1' c
@@ -395,7 +395,7 @@
$ hg rebase -r 3:: -d .
rebasing 3:76abc1c6f8c7 "b1"
rebasing 4:8427af5d86f2 "c2 closed" (tip)
- note: rebase of 4:8427af5d86f2 created no changes to commit
+ note: not rebasing 4:8427af5d86f2 "c2 closed" (tip), its destination already has all its changes
saved backup bundle to $TESTTMP/case2/.hg/strip-backup/76abc1c6f8c7-cd698d13-rebase.hg
$ hg tglog
o 3: 117b0ed08075 'b1' x
--- a/tests/test-rebase-newancestor.t Wed Jan 09 20:00:35 2019 -0800
+++ b/tests/test-rebase-newancestor.t Fri Jan 18 13:28:22 2019 -0500
@@ -130,7 +130,7 @@
> c
> EOF
rebasing 1:1d1a643d390e "dev: create branch"
- note: rebase of 1:1d1a643d390e created no changes to commit
+ note: not rebasing 1:1d1a643d390e "dev: create branch", its destination already has all its changes
rebasing 2:ec2c14fb2984 "dev: f-dev stuff"
rebasing 4:4b019212aaf6 "dev: merge default"
file 'f-default' was deleted in local [dest] but was modified in other [source].
--- a/tests/test-rebase-obsolete.t Wed Jan 09 20:00:35 2019 -0800
+++ b/tests/test-rebase-obsolete.t Fri Jan 18 13:28:22 2019 -0500
@@ -129,10 +129,10 @@
grafting 3:32af7686d403 "D"
$ hg rebase -s 42ccdea3bb16 -d .
rebasing 1:42ccdea3bb16 "B"
- note: rebase of 1:42ccdea3bb16 created no changes to commit
+ note: not rebasing 1:42ccdea3bb16 "B", its destination already has all its changes
rebasing 2:5fddd98957c8 "C"
rebasing 3:32af7686d403 "D"
- note: rebase of 3:32af7686d403 created no changes to commit
+ note: not rebasing 3:32af7686d403 "D", its destination already has all its changes
$ hg log -G
o 10:5ae4c968c6ac C
|
@@ -1456,7 +1456,7 @@
rebasing 2:b18e25de2cf5 "D" (D)
note: not rebasing 3:7fb047a69f22 "E" (E), already in destination as 1:112478962961 "B" (B)
rebasing 5:66f1a38021c9 "F" (F tip)
- note: rebase of 5:66f1a38021c9 created no changes to commit
+ note: not rebasing 5:66f1a38021c9 "F" (F tip), its destination already has all its changes
$ hg log -G
o 6:8f47515dda15 D
|
@@ -1492,7 +1492,7 @@
note: not rebasing 2:b18e25de2cf5 "D" (D), already in destination as 1:112478962961 "B" (B)
rebasing 3:7fb047a69f22 "E" (E)
rebasing 5:66f1a38021c9 "F" (F tip)
- note: rebase of 5:66f1a38021c9 created no changes to commit
+ note: not rebasing 5:66f1a38021c9 "F" (F tip), its destination already has all its changes
$ hg log -G
o 6:533690786a86 E
--- a/tests/test-rebase-parameters.t Wed Jan 09 20:00:35 2019 -0800
+++ b/tests/test-rebase-parameters.t Fri Jan 18 13:28:22 2019 -0500
@@ -440,7 +440,7 @@
$ hg rebase -s 2 -d 1 --tool internal:local
rebasing 2:e4e3f3546619 "c2b" (tip)
- note: rebase of 2:e4e3f3546619 created no changes to commit
+ note: not rebasing 2:e4e3f3546619 "c2b" (tip), its destination already has all its changes
saved backup bundle to $TESTTMP/b1/.hg/strip-backup/e4e3f3546619-b0841178-rebase.hg
$ hg cat c2
@@ -493,7 +493,7 @@
[255]
$ hg rebase -c --tool internal:fail
rebasing 2:e4e3f3546619 "c2b" (tip)
- note: rebase of 2:e4e3f3546619 created no changes to commit
+ note: not rebasing 2:e4e3f3546619 "c2b" (tip), its destination already has all its changes
saved backup bundle to $TESTTMP/b3/.hg/strip-backup/e4e3f3546619-b0841178-rebase.hg
$ hg rebase -i
--- a/tests/test-rebase-scenario-global.t Wed Jan 09 20:00:35 2019 -0800
+++ b/tests/test-rebase-scenario-global.t Fri Jan 18 13:28:22 2019 -0500
@@ -138,7 +138,7 @@
$ hg rebase -s 4 -d 7
rebasing 4:9520eea781bc "E"
rebasing 6:eea13746799a "G"
- note: rebase of 6:eea13746799a created no changes to commit
+ note: not rebasing 6:eea13746799a "G", its destination already has all its changes
saved backup bundle to $TESTTMP/a3/.hg/strip-backup/9520eea781bc-fcd8edd4-rebase.hg
$ f E.orig
E.orig: file not found
@@ -169,7 +169,7 @@
$ hg rebase -s 5 -d 4
rebasing 5:24b6387c8c8c "F"
rebasing 6:eea13746799a "G"
- note: rebase of 6:eea13746799a created no changes to commit
+ note: not rebasing 6:eea13746799a "G", its destination already has all its changes
rebasing 7:02de42196ebe "H" (tip)
saved backup bundle to $TESTTMP/a4/.hg/strip-backup/24b6387c8c8c-c3fe765d-rebase.hg
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/tests/test-remotefilelog-bad-configs.t Fri Jan 18 13:28:22 2019 -0500
@@ -0,0 +1,40 @@
+#require no-windows
+
+ $ . "$TESTDIR/remotefilelog-library.sh"
+
+ $ hg init master
+ $ cd master
+ $ cat >> .hg/hgrc <<EOF
+ > [remotefilelog]
+ > server=True
+ > EOF
+ $ echo x > x
+ $ echo y > y
+ $ echo z > z
+ $ hg commit -qAm xy
+
+ $ cd ..
+
+ $ hgcloneshallow ssh://user@dummy/master shallow -q
+ 3 files fetched over 1 fetches - (3 misses, 0.00% hit ratio) over *s (glob)
+ $ cd shallow
+
+Verify error message when no cachepath is specified
+ $ hg up -q null
+ $ cp $HGRCPATH $HGRCPATH.bak
+ $ grep -v cachepath < $HGRCPATH.bak > tmp
+ $ mv tmp $HGRCPATH
+ $ hg up tip
+ abort: could not find config option remotefilelog.cachepath
+ [255]
+ $ mv $HGRCPATH.bak $HGRCPATH
+
+Verify error message when no fallback specified
+
+ $ hg up -q null
+ $ rm .hg/hgrc
+ $ clearcache
+ $ hg up tip
+ 3 files fetched over 1 fetches - (3 misses, 0.00% hit ratio) over *s (glob)
+ abort: no remotefilelog server configured - is your .hg/hgrc trusted?
+ [255]
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/tests/test-remotefilelog-bgprefetch.t Fri Jan 18 13:28:22 2019 -0500
@@ -0,0 +1,369 @@
+#require no-windows
+
+ $ . "$TESTDIR/remotefilelog-library.sh"
+
+ $ hg init master
+ $ cd master
+ $ cat >> .hg/hgrc <<EOF
+ > [remotefilelog]
+ > server=True
+ > EOF
+ $ echo x > x
+ $ echo z > z
+ $ hg commit -qAm x
+ $ echo x2 > x
+ $ echo y > y
+ $ hg commit -qAm y
+ $ echo w > w
+ $ rm z
+ $ hg commit -qAm w
+ $ hg bookmark foo
+
+ $ cd ..
+
+# clone the repo
+
+ $ hgcloneshallow ssh://user@dummy/master shallow --noupdate
+ streaming all changes
+ 2 files to transfer, 776 bytes of data
+ transferred 776 bytes in * seconds (*/sec) (glob)
+ searching for changes
+ no changes found
+
+# Set the prefetchdays config to zero so that all commits are prefetched
+# no matter what their creation date is. Also set prefetchdelay config
+# to zero so that there is no delay between prefetches.
+ $ cd shallow
+ $ cat >> .hg/hgrc <<EOF
+ > [remotefilelog]
+ > prefetchdays=0
+ > prefetchdelay=0
+ > EOF
+ $ cd ..
+
+# prefetch a revision
+ $ cd shallow
+
+ $ hg prefetch -r 0
+ 2 files fetched over 1 fetches - (2 misses, 0.00% hit ratio) over *s (glob)
+
+ $ hg cat -r 0 x
+ x
+
+# background prefetch on pull when configured
+
+ $ cat >> .hg/hgrc <<EOF
+ > [remotefilelog]
+ > pullprefetch=bookmark()
+ > backgroundprefetch=True
+ > EOF
+ $ hg strip tip
+ saved backup bundle to $TESTTMP/shallow/.hg/strip-backup/6b4b6f66ef8c-b4b8bdaf-backup.hg (glob)
+
+ $ clearcache
+ $ hg pull
+ pulling from ssh://user@dummy/master
+ searching for changes
+ adding changesets
+ adding manifests
+ adding file changes
+ added 1 changesets with 0 changes to 0 files
+ updating bookmark foo
+ new changesets 6b4b6f66ef8c
+ (run 'hg update' to get a working copy)
+ prefetching file contents
+ $ sleep 0.5
+ $ hg debugwaitonprefetch >/dev/null 2>&1
+ $ find $CACHEDIR -type f | sort
+ $TESTTMP/hgcache/master/11/f6ad8ec52a2984abaafd7c3b516503785c2072/ef95c5376f34698742fe34f315fd82136f8f68c0
+ $TESTTMP/hgcache/master/95/cb0bfd2977c761298d9624e4b4d4c72a39974a/076f5e2225b3ff0400b98c92aa6cdf403ee24cca
+ $TESTTMP/hgcache/master/af/f024fe4ab0fece4091de044c58c9ae4233383a/bb6ccd5dceaa5e9dc220e0dad65e051b94f69a2c
+ $TESTTMP/hgcache/repos
+
+# background prefetch with repack on pull when configured
+
+ $ cat >> .hg/hgrc <<EOF
+ > [remotefilelog]
+ > backgroundrepack=True
+ > EOF
+ $ hg strip tip
+ saved backup bundle to $TESTTMP/shallow/.hg/strip-backup/6b4b6f66ef8c-b4b8bdaf-backup.hg (glob)
+
+ $ clearcache
+ $ hg pull
+ pulling from ssh://user@dummy/master
+ searching for changes
+ adding changesets
+ adding manifests
+ adding file changes
+ added 1 changesets with 0 changes to 0 files
+ updating bookmark foo
+ new changesets 6b4b6f66ef8c
+ (run 'hg update' to get a working copy)
+ prefetching file contents
+ $ sleep 0.5
+ $ hg debugwaitonprefetch >/dev/null 2>&1
+ $ sleep 0.5
+ $ hg debugwaitonrepack >/dev/null 2>&1
+ $ find $CACHEDIR -type f | sort
+ $TESTTMP/hgcache/master/packs/6e8633deba6e544e5f8edbd7b996d6e31a2c42ae.histidx
+ $TESTTMP/hgcache/master/packs/6e8633deba6e544e5f8edbd7b996d6e31a2c42ae.histpack
+ $TESTTMP/hgcache/master/packs/8ce5ab3745465ab83bba30a7b9c295e0c8404652.dataidx
+ $TESTTMP/hgcache/master/packs/8ce5ab3745465ab83bba30a7b9c295e0c8404652.datapack
+ $TESTTMP/hgcache/master/packs/repacklock
+ $TESTTMP/hgcache/repos
+
+# background prefetch with repack on update when wcprevset configured
+
+ $ clearcache
+ $ hg up -r 0
+ 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
+ 2 files fetched over 1 fetches - (2 misses, 0.00% hit ratio) over *s (glob)
+ $ find $CACHEDIR -type f | sort
+ $TESTTMP/hgcache/master/11/f6ad8ec52a2984abaafd7c3b516503785c2072/1406e74118627694268417491f018a4a883152f0
+ $TESTTMP/hgcache/master/39/5df8f7c51f007019cb30201c49e884b46b92fa/69a1b67522704ec122181c0890bd16e9d3e7516a
+ $TESTTMP/hgcache/repos
+
+ $ hg up -r 1
+ 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
+ 2 files fetched over 2 fetches - (2 misses, 0.00% hit ratio) over *s (glob)
+
+ $ cat >> .hg/hgrc <<EOF
+ > [remotefilelog]
+ > bgprefetchrevs=.::
+ > EOF
+
+ $ clearcache
+ $ hg up -r 0
+ 1 files updated, 0 files merged, 1 files removed, 0 files unresolved
+ * files fetched over * fetches - (* misses, 0.00% hit ratio) over *s (glob)
+ $ sleep 1
+ $ hg debugwaitonprefetch >/dev/null 2>&1
+ $ sleep 1
+ $ hg debugwaitonrepack >/dev/null 2>&1
+ $ find $CACHEDIR -type f | sort
+ $TESTTMP/hgcache/master/packs/8f1443d44e57fec96f72fb2412e01d2818767ef2.histidx
+ $TESTTMP/hgcache/master/packs/8f1443d44e57fec96f72fb2412e01d2818767ef2.histpack
+ $TESTTMP/hgcache/master/packs/f4d50848e0b465e9bfd2875f213044c06cfd7407.dataidx
+ $TESTTMP/hgcache/master/packs/f4d50848e0b465e9bfd2875f213044c06cfd7407.datapack
+ $TESTTMP/hgcache/master/packs/repacklock
+ $TESTTMP/hgcache/repos
+
+# Ensure that file 'w' was prefetched - it was not part of the update operation and therefore
+# could only be downloaded by the background prefetch
+
+ $ hg debugdatapack `ls -ct $TESTTMP/hgcache/master/packs/*.datapack | head -n 1`
+ $TESTTMP/hgcache/master/packs/f4d50848e0b465e9bfd2875f213044c06cfd7407:
+ w:
+ Node Delta Base Delta Length Blob Size
+ bb6ccd5dceaa 000000000000 2 2
+
+ Total: 2 2 (0.0% bigger)
+ x:
+ Node Delta Base Delta Length Blob Size
+ ef95c5376f34 000000000000 3 3
+ 1406e7411862 ef95c5376f34 14 2
+
+ Total: 17 5 (240.0% bigger)
+ y:
+ Node Delta Base Delta Length Blob Size
+ 076f5e2225b3 000000000000 2 2
+
+ Total: 2 2 (0.0% bigger)
+ z:
+ Node Delta Base Delta Length Blob Size
+ 69a1b6752270 000000000000 2 2
+
+ Total: 2 2 (0.0% bigger)
+
+# background prefetch with repack on commit when bgprefetchrevs is configured
+
+ $ cat >> .hg/hgrc <<EOF
+ > [remotefilelog]
+ > bgprefetchrevs=0::
+ > EOF
+
+ $ clearcache
+ $ find $CACHEDIR -type f | sort
+ $ echo b > b
+ $ hg commit -qAm b
+ * files fetched over 1 fetches - (* misses, 0.00% hit ratio) over *s (glob)
+ $ hg bookmark temporary
+ $ sleep 1
+ $ hg debugwaitonprefetch >/dev/null 2>&1
+ $ sleep 1
+ $ hg debugwaitonrepack >/dev/null 2>&1
+ $ find $CACHEDIR -type f | sort
+ $TESTTMP/hgcache/master/packs/8f1443d44e57fec96f72fb2412e01d2818767ef2.histidx
+ $TESTTMP/hgcache/master/packs/8f1443d44e57fec96f72fb2412e01d2818767ef2.histpack
+ $TESTTMP/hgcache/master/packs/f4d50848e0b465e9bfd2875f213044c06cfd7407.dataidx
+ $TESTTMP/hgcache/master/packs/f4d50848e0b465e9bfd2875f213044c06cfd7407.datapack
+ $TESTTMP/hgcache/master/packs/repacklock
+ $TESTTMP/hgcache/repos
+
+# Ensure that file 'w' was prefetched - it was not part of the commit operation and therefore
+# could only be downloaded by the background prefetch
+
+ $ hg debugdatapack `ls -ct $TESTTMP/hgcache/master/packs/*.datapack | head -n 1`
+ $TESTTMP/hgcache/master/packs/f4d50848e0b465e9bfd2875f213044c06cfd7407:
+ w:
+ Node Delta Base Delta Length Blob Size
+ bb6ccd5dceaa 000000000000 2 2
+
+ Total: 2 2 (0.0% bigger)
+ x:
+ Node Delta Base Delta Length Blob Size
+ ef95c5376f34 000000000000 3 3
+ 1406e7411862 ef95c5376f34 14 2
+
+ Total: 17 5 (240.0% bigger)
+ y:
+ Node Delta Base Delta Length Blob Size
+ 076f5e2225b3 000000000000 2 2
+
+ Total: 2 2 (0.0% bigger)
+ z:
+ Node Delta Base Delta Length Blob Size
+ 69a1b6752270 000000000000 2 2
+
+ Total: 2 2 (0.0% bigger)
+
+# background prefetch with repack on rebase when bgprefetchrevs is configured
+
+ $ hg up -r 2
+ 3 files updated, 0 files merged, 3 files removed, 0 files unresolved
+ (leaving bookmark temporary)
+ $ clearcache
+ $ find $CACHEDIR -type f | sort
+ $ hg rebase -s temporary -d foo
+ rebasing 3:58147a5b5242 "b" (temporary tip)
+ saved backup bundle to $TESTTMP/shallow/.hg/strip-backup/58147a5b5242-c3678817-rebase.hg (glob)
+ 3 files fetched over 1 fetches - (3 misses, 0.00% hit ratio) over *s (glob)
+ $ sleep 1
+ $ hg debugwaitonprefetch >/dev/null 2>&1
+ $ sleep 1
+ $ hg debugwaitonrepack >/dev/null 2>&1
+
+# Ensure that file 'y' was prefetched - it was not part of the rebase operation and therefore
+# could only be downloaded by the background prefetch
+
+ $ hg debugdatapack `ls -ct $TESTTMP/hgcache/master/packs/*.datapack | head -n 1`
+ $TESTTMP/hgcache/master/packs/f4d50848e0b465e9bfd2875f213044c06cfd7407:
+ w:
+ Node Delta Base Delta Length Blob Size
+ bb6ccd5dceaa 000000000000 2 2
+
+ Total: 2 2 (0.0% bigger)
+ x:
+ Node Delta Base Delta Length Blob Size
+ ef95c5376f34 000000000000 3 3
+ 1406e7411862 ef95c5376f34 14 2
+
+ Total: 17 5 (240.0% bigger)
+ y:
+ Node Delta Base Delta Length Blob Size
+ 076f5e2225b3 000000000000 2 2
+
+ Total: 2 2 (0.0% bigger)
+ z:
+ Node Delta Base Delta Length Blob Size
+ 69a1b6752270 000000000000 2 2
+
+ Total: 2 2 (0.0% bigger)
+
+# Check that foreground prefetch with no arguments blocks until background prefetches finish
+
+ $ hg up -r 3
+ 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
+ $ clearcache
+ $ hg prefetch --repack
+ waiting for lock on prefetching in $TESTTMP/shallow held by process * on host * (glob) (?)
+ got lock after * seconds (glob) (?)
+ (running background incremental repack)
+ * files fetched over 1 fetches - (* misses, 0.00% hit ratio) over *s (glob) (?)
+
+ $ sleep 0.5
+ $ hg debugwaitonrepack >/dev/null 2>&1
+
+ $ find $CACHEDIR -type f | sort
+ $TESTTMP/hgcache/master/packs/8f1443d44e57fec96f72fb2412e01d2818767ef2.histidx
+ $TESTTMP/hgcache/master/packs/8f1443d44e57fec96f72fb2412e01d2818767ef2.histpack
+ $TESTTMP/hgcache/master/packs/f4d50848e0b465e9bfd2875f213044c06cfd7407.dataidx
+ $TESTTMP/hgcache/master/packs/f4d50848e0b465e9bfd2875f213044c06cfd7407.datapack
+ $TESTTMP/hgcache/master/packs/repacklock
+ $TESTTMP/hgcache/repos
+
+# Ensure that files were prefetched
+ $ hg debugdatapack `ls -ct $TESTTMP/hgcache/master/packs/*.datapack | head -n 1`
+ $TESTTMP/hgcache/master/packs/f4d50848e0b465e9bfd2875f213044c06cfd7407:
+ w:
+ Node Delta Base Delta Length Blob Size
+ bb6ccd5dceaa 000000000000 2 2
+
+ Total: 2 2 (0.0% bigger)
+ x:
+ Node Delta Base Delta Length Blob Size
+ ef95c5376f34 000000000000 3 3
+ 1406e7411862 ef95c5376f34 14 2
+
+ Total: 17 5 (240.0% bigger)
+ y:
+ Node Delta Base Delta Length Blob Size
+ 076f5e2225b3 000000000000 2 2
+
+ Total: 2 2 (0.0% bigger)
+ z:
+ Node Delta Base Delta Length Blob Size
+ 69a1b6752270 000000000000 2 2
+
+ Total: 2 2 (0.0% bigger)
+
+# Check that foreground prefetch fetches revs specified by '. + draft() + bgprefetchrevs + pullprefetch' (see the sketch after this block)
+
+ $ clearcache
+ $ hg prefetch --repack
+ waiting for lock on prefetching in $TESTTMP/shallow held by process * on host * (glob) (?)
+ got lock after * seconds (glob) (?)
+ (running background incremental repack)
+ * files fetched over 1 fetches - (* misses, 0.00% hit ratio) over *s (glob) (?)
+ $ sleep 0.5
+ $ hg debugwaitonrepack >/dev/null 2>&1
+
+ $ find $CACHEDIR -type f | sort
+ $TESTTMP/hgcache/master/packs/8f1443d44e57fec96f72fb2412e01d2818767ef2.histidx
+ $TESTTMP/hgcache/master/packs/8f1443d44e57fec96f72fb2412e01d2818767ef2.histpack
+ $TESTTMP/hgcache/master/packs/f4d50848e0b465e9bfd2875f213044c06cfd7407.dataidx
+ $TESTTMP/hgcache/master/packs/f4d50848e0b465e9bfd2875f213044c06cfd7407.datapack
+ $TESTTMP/hgcache/master/packs/repacklock
+ $TESTTMP/hgcache/repos
+
+# Ensure that files were prefetched
+ $ hg debugdatapack `ls -ct $TESTTMP/hgcache/master/packs/*.datapack | head -n 1`
+ $TESTTMP/hgcache/master/packs/f4d50848e0b465e9bfd2875f213044c06cfd7407:
+ w:
+ Node Delta Base Delta Length Blob Size
+ bb6ccd5dceaa 000000000000 2 2
+
+ Total: 2 2 (0.0% bigger)
+ x:
+ Node Delta Base Delta Length Blob Size
+ ef95c5376f34 000000000000 3 3
+ 1406e7411862 ef95c5376f34 14 2
+
+ Total: 17 5 (240.0% bigger)
+ y:
+ Node Delta Base Delta Length Blob Size
+ 076f5e2225b3 000000000000 2 2
+
+ Total: 2 2 (0.0% bigger)
+ z:
+ Node Delta Base Delta Length Blob Size
+ 69a1b6752270 000000000000 2 2
+
+ Total: 2 2 (0.0% bigger)
+
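A minimal sketch (hypothetical helper name, real `ui.config` API) of how the
foreground prefetch revset checked above might be assembled before being
resolved to revisions; the actual extension may compose it differently:

    def prefetchrevset(repo):
        # Union of the working parent, local drafts, and the two
        # configured prefetch revsets, when present.
        parts = ['.', 'draft()']
        for opt in ('bgprefetchrevs', 'pullprefetch'):
            value = repo.ui.config('remotefilelog', opt)
            if value:
                parts.append(value)
        return ' + '.join('(%s)' % p for p in parts)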
+# Test that if data was prefetched and repacked we don't need to prefetch it again
+# It ensures that Mercurial looks not only in loose files but in packs as well
+
+ $ hg prefetch --repack
+ (running background incremental repack)
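A minimal sketch, against the `getmissing` store API exercised elsewhere in
these tests, of the property the last check relies on: a key served from a
repacked datapack satisfies a containment query just as a loose cache file
would, so no refetch is triggered:

    def contains(stores, name, node):
        # The union store consults every member; loose-file stores and
        # pack stores both answer getmissing(), so repacked data counts.
        key = [(name, node)]
        return any(not store.getmissing(key) for store in stores)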
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/tests/test-remotefilelog-blame.t Fri Jan 18 13:28:22 2019 -0500
@@ -0,0 +1,32 @@
+#require no-windows
+
+ $ . "$TESTDIR/remotefilelog-library.sh"
+
+ $ hg init master
+ $ cd master
+ $ cat >> .hg/hgrc <<EOF
+ > [remotefilelog]
+ > server=True
+ > EOF
+ $ echo x > x
+ $ hg commit -qAm x
+ $ echo y >> x
+ $ hg commit -qAm y
+ $ echo z >> x
+ $ hg commit -qAm z
+ $ echo a > a
+ $ hg commit -qAm a
+
+ $ cd ..
+
+ $ hgcloneshallow ssh://user@dummy/master shallow -q
+ 2 files fetched over 1 fetches - (2 misses, 0.00% hit ratio) over *s (glob)
+ $ cd shallow
+
+Test blame
+
+ $ hg blame x
+ 0: x
+ 1: y
+ 2: z
+ 2 files fetched over 1 fetches - (2 misses, 0.00% hit ratio) over *s (glob)
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/tests/test-remotefilelog-bundle2-legacy.t Fri Jan 18 13:28:22 2019 -0500
@@ -0,0 +1,92 @@
+#require no-windows
+
+ $ . "$TESTDIR/remotefilelog-library.sh"
+
+generaldelta to generaldelta interactions with bundle2 but legacy clients
+without changegroup2 support
+ $ cat > testcg2.py << EOF
+ > import sys
+ > from mercurial import changegroup, registrar, util
+ > cmdtable = {}
+ > command = registrar.command(cmdtable)
+ > @command('testcg2', norepo=True)
+ > def testcg2(ui):
+ > if not util.safehasattr(changegroup, 'cg2packer'):
+ > sys.exit(80)
+ > EOF
+ $ cat >> $HGRCPATH << EOF
+ > [extensions]
+ > testcg2 = $TESTTMP/testcg2.py
+ > EOF
+ $ hg testcg2 || exit 80
+
+ $ cat > disablecg2.py << EOF
+ > from mercurial import changegroup, error, util
+ > deleted = False
+ > def reposetup(ui, repo):
+ > global deleted
+ > if deleted:
+ > return
+ > packermap = changegroup._packermap
+ > # protect against future changes
+ > if len(packermap) != 3:
+ > raise error.Abort('packermap has %d versions, expected 3!' % len(packermap))
+ > for k in ['01', '02', '03']:
+ > if not packermap.get(k):
+ > raise error.Abort("packermap doesn't have key '%s'!" % k)
+ >
+ > del packermap['02']
+ > deleted = True
+ > EOF
+
+ $ hg init master
+ $ grep generaldelta master/.hg/requires
+ generaldelta
+ $ cd master
+preferuncompressed = False so that we can make both generaldelta and non-generaldelta clones
+ $ cat >> .hg/hgrc <<EOF
+ > [remotefilelog]
+ > server=True
+ > [experimental]
+ > bundle2-exp = True
+ > [server]
+ > preferuncompressed = False
+ > EOF
+ $ echo x > x
+ $ hg commit -qAm x
+
+ $ cd ..
+
+ $ hgcloneshallow ssh://user@dummy/master shallow -q --pull --config experimental.bundle2-exp=True
+ 1 files fetched over 1 fetches - (1 misses, 0.00% hit ratio) over *s (glob)
+ $ cd shallow
+ $ cat >> .hg/hgrc << EOF
+ > [extensions]
+ > disablecg2 = $TESTTMP/disablecg2.py
+ > EOF
+
+ $ cd ../master
+ $ echo y > y
+ $ hg commit -qAm y
+
+ $ cd ../shallow
+ $ hg pull -u
+ pulling from ssh://user@dummy/master
+ searching for changes
+ adding changesets
+ adding manifests
+ adding file changes
+ added 1 changesets with 0 changes to 0 files
+ new changesets d34c38483be9
+ 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
+ 1 files fetched over 1 fetches - (1 misses, 0.00% hit ratio) over *s (glob)
+
+ $ echo a > a
+ $ hg commit -qAm a
+ $ hg push
+ pushing to ssh://user@dummy/master
+ searching for changes
+ remote: adding changesets
+ remote: adding manifests
+ remote: adding file changes
+ remote: added 1 changesets with 1 changes to 1 files
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/tests/test-remotefilelog-bundle2.t Fri Jan 18 13:28:22 2019 -0500
@@ -0,0 +1,78 @@
+#require no-windows
+
+ $ . "$TESTDIR/remotefilelog-library.sh"
+
+ $ hg init master
+ $ grep generaldelta master/.hg/requires
+ generaldelta
+ $ cd master
+preferuncompressed = False so that we can make both generaldelta and non-generaldelta clones
+ $ cat >> .hg/hgrc <<EOF
+ > [remotefilelog]
+ > server=True
+ > [experimental]
+ > bundle2-exp = True
+ > [server]
+ > preferuncompressed = False
+ > EOF
+ $ echo x > x
+ $ hg commit -qAm x
+
+ $ cd ..
+
+ $ hgcloneshallow ssh://user@dummy/master shallow-generaldelta -q --pull --config experimental.bundle2-exp=True
+ 1 files fetched over 1 fetches - (1 misses, 0.00% hit ratio) over *s (glob)
+ $ grep generaldelta shallow-generaldelta/.hg/requires
+ generaldelta
+ $ hgcloneshallow ssh://user@dummy/master shallow-plain -q --pull --config format.usegeneraldelta=False --config format.generaldelta=False --config experimental.bundle2-exp=True
+ $ grep generaldelta shallow-plain/.hg/requires
+ [1]
+
+ $ cd master
+ $ echo a > a
+ $ hg commit -qAm a
+
+pull from generaldelta to generaldelta
+ $ cd ../shallow-generaldelta
+ $ hg pull -u
+ pulling from ssh://user@dummy/master
+ searching for changes
+ adding changesets
+ adding manifests
+ adding file changes
+ added 1 changesets with 0 changes to 0 files
+ new changesets 2fbb8bb2b903
+ 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
+ 1 files fetched over 1 fetches - (1 misses, 0.00% hit ratio) over *s (glob)
+push from generaldelta to generaldelta
+ $ echo b > b
+ $ hg commit -qAm b
+ $ hg push
+ pushing to ssh://user@dummy/master
+ searching for changes
+ remote: adding changesets
+ remote: adding manifests
+ remote: adding file changes
+ remote: added 1 changesets with 1 changes to 1 files
+pull from generaldelta to non-generaldelta
+ $ cd ../shallow-plain
+ $ hg pull -u
+ pulling from ssh://user@dummy/master
+ searching for changes
+ adding changesets
+ adding manifests
+ adding file changes
+ added 2 changesets with 0 changes to 0 files
+ new changesets 2fbb8bb2b903:d6788bd632ca
+ 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
+ 1 files fetched over 1 fetches - (1 misses, 0.00% hit ratio) over *s (glob)
+push from non-generaldelta to generaldelta
+ $ echo c > c
+ $ hg commit -qAm c
+ $ hg push
+ pushing to ssh://user@dummy/master
+ searching for changes
+ remote: adding changesets
+ remote: adding manifests
+ remote: adding file changes
+ remote: added 1 changesets with 1 changes to 1 files
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/tests/test-remotefilelog-bundles.t Fri Jan 18 13:28:22 2019 -0500
@@ -0,0 +1,75 @@
+#require no-windows
+
+ $ . "$TESTDIR/remotefilelog-library.sh"
+
+ $ hg init master
+ $ cd master
+ $ cat >> .hg/hgrc <<EOF
+ > [remotefilelog]
+ > server=True
+ > EOF
+ $ echo x > x
+ $ hg commit -qAm x
+ $ echo y >> x
+ $ hg commit -qAm y
+ $ echo z >> x
+ $ hg commit -qAm z
+
+ $ cd ..
+
+ $ hgcloneshallow ssh://user@dummy/master shallow -q
+ 1 files fetched over 1 fetches - (1 misses, 0.00% hit ratio) over *s (glob)
+ $ cd shallow
+
+Unbundling a shallow bundle
+
+ $ hg strip -r 66ee28d0328c
+ 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
+ saved backup bundle to $TESTTMP/shallow/.hg/strip-backup/66ee28d0328c-3d7aafd1-backup.hg (glob)
+ 1 files fetched over 1 fetches - (1 misses, 0.00% hit ratio) over *s (glob)
+ $ hg unbundle .hg/strip-backup/66ee28d0328c-3d7aafd1-backup.hg
+ adding changesets
+ adding manifests
+ adding file changes
+ added 2 changesets with 0 changes to 0 files
+ new changesets 66ee28d0328c:16db62c5946f
+ (run 'hg update' to get a working copy)
+
+Unbundling a full bundle
+
+ $ hg -R ../master bundle -r 66ee28d0328c:: --base "66ee28d0328c^" ../fullbundle.hg
+ 2 changesets found
+ $ hg strip -r 66ee28d0328c
+ saved backup bundle to $TESTTMP/shallow/.hg/strip-backup/66ee28d0328c-3d7aafd1-backup.hg (glob)
+ $ hg unbundle ../fullbundle.hg
+ adding changesets
+ adding manifests
+ adding file changes
+ added 2 changesets with 2 changes to 1 files
+ new changesets 66ee28d0328c:16db62c5946f (2 drafts)
+ (run 'hg update' to get a working copy)
+
+Pulling from a shallow bundle
+
+ $ hg strip -r 66ee28d0328c
+ saved backup bundle to $TESTTMP/shallow/.hg/strip-backup/66ee28d0328c-3d7aafd1-backup.hg (glob)
+ $ hg pull -r 66ee28d0328c .hg/strip-backup/66ee28d0328c-3d7aafd1-backup.hg
+ pulling from .hg/strip-backup/66ee28d0328c-3d7aafd1-backup.hg
+ searching for changes
+ adding changesets
+ adding manifests
+ adding file changes
+ added 1 changesets with 0 changes to 0 files
+ new changesets 66ee28d0328c (1 drafts)
+ (run 'hg update' to get a working copy)
+
+Pulling from a full bundle
+
+ $ hg strip -r 66ee28d0328c
+ saved backup bundle to $TESTTMP/shallow/.hg/strip-backup/66ee28d0328c-b6ee89e7-backup.hg (glob)
+ $ hg pull -r 66ee28d0328c ../fullbundle.hg
+ pulling from ../fullbundle.hg
+ searching for changes
+ abort: cannot pull from full bundles
+ (use `hg unbundle` instead)
+ [255]
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/tests/test-remotefilelog-cacheprocess.t Fri Jan 18 13:28:22 2019 -0500
@@ -0,0 +1,125 @@
+#require no-windows
+
+ $ . "$TESTDIR/remotefilelog-library.sh"
+
+ $ hg init repo
+ $ cd repo
+ $ cat >> .hg/hgrc <<EOF
+ > [remotefilelog]
+ > server=True
+ > EOF
+ $ echo x > x
+ $ echo y > y
+ $ echo z > z
+ $ hg commit -qAm xy
+ $ cd ..
+
+ $ cat > cacheprocess-logger.py <<EOF
+ > import os
+ > import shutil
+ > import sys
+ > if sys.version_info[0] > 2:
+ > xrange = range
+ > f = open('$TESTTMP/cachelog.log', 'w')
+ > srccache = os.path.join('$TESTTMP', 'oldhgcache')
+ > def log(message):
+ > f.write(message)
+ > f.flush()
+ > destcache = sys.argv[-1]
+ > try:
+ > while True:
+ > cmd = sys.stdin.readline().strip()
+ > log('got command %r\n' % cmd)
+ > if cmd == 'exit':
+ > sys.exit(0)
+ > elif cmd == 'get':
+ > count = int(sys.stdin.readline())
+ > log('client wants %r blobs\n' % count)
+ > wants = []
+ > for _ in xrange(count):
+ > key = sys.stdin.readline()[:-1]
+ > wants.append(key)
+ > if '\0' in key:
+ > _, key = key.split('\0')
+ > srcpath = os.path.join(srccache, key)
+ > if os.path.exists(srcpath):
+ > dest = os.path.join(destcache, key)
+ > destdir = os.path.dirname(dest)
+ > if not os.path.exists(destdir):
+ > os.makedirs(destdir)
+ > shutil.copyfile(srcpath, dest)
+ > else:
+ > # report a cache miss
+ > sys.stdout.write(key + '\n')
+ > sys.stdout.write('0\n')
+ > for key in sorted(wants):
+ > log('requested %r\n' % key)
+ > sys.stdout.flush()
+ > elif cmd == 'set':
+ > assert False, 'todo writing'
+ > else:
+ > assert False, 'unknown command! %r' % cmd
+ > except Exception as e:
+ > log('Exception! %r\n' % e)
+ > raise
+ > EOF
+
+ $ cat >> $HGRCPATH <<EOF
+ > [remotefilelog]
+ > cacheprocess = python $TESTTMP/cacheprocess-logger.py
+ > EOF
+
+Test cache keys and cache misses.
+ $ hgcloneshallow ssh://user@dummy/repo clone -q
+ 3 files fetched over 1 fetches - (3 misses, 0.00% hit ratio) over *s (glob)
+ $ cat cachelog.log
+ got command 'get'
+ client wants 3 blobs
+ requested 'master/11/f6ad8ec52a2984abaafd7c3b516503785c2072/1406e74118627694268417491f018a4a883152f0'
+ requested 'master/39/5df8f7c51f007019cb30201c49e884b46b92fa/69a1b67522704ec122181c0890bd16e9d3e7516a'
+ requested 'master/95/cb0bfd2977c761298d9624e4b4d4c72a39974a/076f5e2225b3ff0400b98c92aa6cdf403ee24cca'
+ got command 'set'
+ Exception! AssertionError('todo writing',)
+
+Test cache hits.
+ $ mv hgcache oldhgcache
+ $ rm cachelog.log
+ $ hgcloneshallow ssh://user@dummy/repo clone-cachehit -q
+ 3 files fetched over 1 fetches - (0 misses, 100.00% hit ratio) over *s (glob)
+ $ cat cachelog.log | grep -v exit
+ got command 'get'
+ client wants 3 blobs
+ requested 'master/11/f6ad8ec52a2984abaafd7c3b516503785c2072/1406e74118627694268417491f018a4a883152f0'
+ requested 'master/39/5df8f7c51f007019cb30201c49e884b46b92fa/69a1b67522704ec122181c0890bd16e9d3e7516a'
+ requested 'master/95/cb0bfd2977c761298d9624e4b4d4c72a39974a/076f5e2225b3ff0400b98c92aa6cdf403ee24cca'
+
+ $ cat >> $HGRCPATH <<EOF
+ > [remotefilelog]
+ > cacheprocess.includepath = yes
+ > EOF
+
+Test cache keys and cache misses with includepath.
+ $ rm -r hgcache oldhgcache
+ $ rm cachelog.log
+ $ hgcloneshallow ssh://user@dummy/repo clone-withpath -q
+ 3 files fetched over 1 fetches - (3 misses, 0.00% hit ratio) over *s (glob)
+ $ cat cachelog.log
+ got command 'get'
+ client wants 3 blobs
+ requested 'x\x00master/11/f6ad8ec52a2984abaafd7c3b516503785c2072/1406e74118627694268417491f018a4a883152f0'
+ requested 'y\x00master/95/cb0bfd2977c761298d9624e4b4d4c72a39974a/076f5e2225b3ff0400b98c92aa6cdf403ee24cca'
+ requested 'z\x00master/39/5df8f7c51f007019cb30201c49e884b46b92fa/69a1b67522704ec122181c0890bd16e9d3e7516a'
+ got command 'set'
+ Exception! AssertionError('todo writing',)
+
+Test cache hits with includepath.
+ $ mv hgcache oldhgcache
+ $ rm cachelog.log
+ $ hgcloneshallow ssh://user@dummy/repo clone-withpath-cachehit -q
+ 3 files fetched over 1 fetches - (0 misses, 100.00% hit ratio) over *s (glob)
+ $ cat cachelog.log | grep -v exit
+ got command 'get'
+ client wants 3 blobs
+ requested 'x\x00master/11/f6ad8ec52a2984abaafd7c3b516503785c2072/1406e74118627694268417491f018a4a883152f0'
+ requested 'y\x00master/95/cb0bfd2977c761298d9624e4b4d4c72a39974a/076f5e2225b3ff0400b98c92aa6cdf403ee24cca'
+ requested 'z\x00master/39/5df8f7c51f007019cb30201c49e884b46b92fa/69a1b67522704ec122181c0890bd16e9d3e7516a'
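A minimal sketch of the key framing the cacheprocess logger above decodes:
with `cacheprocess.includepath` enabled, each requested key carries the file
path, NUL-separated, in front of the hashed cache key (hence the
'x\x00master/...' entries in the log):

    def parsekey(key):
        # '<file path>\0<hashed cache key>' with includepath,
        # plain '<hashed cache key>' without it.
        if '\0' in key:
            path, key = key.split('\0', 1)
        else:
            path = None
        return path, key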
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/tests/test-remotefilelog-clone-tree.t Fri Jan 18 13:28:22 2019 -0500
@@ -0,0 +1,119 @@
+#require no-windows
+
+ $ . "$TESTDIR/remotefilelog-library.sh"
+
+ $ hg init master
+ $ cd master
+ $ echo treemanifest >> .hg/requires
+ $ cat >> .hg/hgrc <<EOF
+ > [remotefilelog]
+ > server=True
+ > EOF
+# uppercase directory name to test encoding (see the encoding sketch after this test)
+ $ mkdir -p A/B
+ $ echo x > A/B/x
+ $ hg commit -qAm x
+
+ $ cd ..
+
+# shallow clone from full
+
+ $ hgcloneshallow ssh://user@dummy/master shallow --noupdate
+ streaming all changes
+ 4 files to transfer, 449 bytes of data
+ transferred 449 bytes in * seconds (*/sec) (glob)
+ searching for changes
+ no changes found
+ $ cd shallow
+ $ cat .hg/requires
+ dotencode
+ exp-remotefilelog-repo-req-1
+ fncache
+ generaldelta
+ revlogv1
+ sparserevlog
+ store
+ treemanifest
+ $ find .hg/store/meta | sort
+ .hg/store/meta
+ .hg/store/meta/_a
+ .hg/store/meta/_a/00manifest.i
+ .hg/store/meta/_a/_b
+ .hg/store/meta/_a/_b/00manifest.i
+
+ $ hg update
+ 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
+ 1 files fetched over 1 fetches - (1 misses, 0.00% hit ratio) over *s (glob)
+
+ $ cat A/B/x
+ x
+
+ $ ls .hg/store/data
+ $ echo foo > A/B/F
+ $ hg add A/B/F
+ $ hg ci -m 'local content'
+ $ ls .hg/store/data
+ ca31988f085bfb945cb8115b78fabdee40f741aa
+
+ $ cd ..
+
+# shallow clone from shallow
+
+ $ hgcloneshallow ssh://user@dummy/shallow shallow2 --noupdate
+ streaming all changes
+ 5 files to transfer, 1008 bytes of data
+ transferred 1008 bytes in * seconds (*/sec) (glob)
+ searching for changes
+ no changes found
+ $ cd shallow2
+ $ cat .hg/requires
+ dotencode
+ exp-remotefilelog-repo-req-1
+ fncache
+ generaldelta
+ revlogv1
+ sparserevlog
+ store
+ treemanifest
+ $ ls .hg/store/data
+ ca31988f085bfb945cb8115b78fabdee40f741aa
+
+ $ hg update
+ 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
+
+ $ cat A/B/x
+ x
+
+ $ cd ..
+
+# full clone from shallow
+# - send stderr to /dev/null because the order of stdout/err causes
+# flakiness here
+ $ hg clone --noupdate ssh://user@dummy/shallow full 2>/dev/null
+ streaming all changes
+ remote: abort: Cannot clone from a shallow repo to a full repo.
+ [255]
+
+# getbundle full clone
+
+ $ printf '[server]\npreferuncompressed=False\n' >> master/.hg/hgrc
+ $ hgcloneshallow ssh://user@dummy/master shallow3
+ requesting all changes
+ adding changesets
+ adding manifests
+ adding file changes
+ added 1 changesets with 0 changes to 0 files
+ new changesets 18d955ee7ba0
+ updating to branch default
+ 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
+
+ $ ls shallow3/.hg/store/data
+ $ cat shallow3/.hg/requires
+ dotencode
+ exp-remotefilelog-repo-req-1
+ fncache
+ generaldelta
+ revlogv1
+ sparserevlog
+ store
+ treemanifest
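A minimal sketch of the store-path encoding visible in the `find` output
above, where the uppercase tree path A/B is stored under
.hg/store/meta/_a/_b: uppercase letters are escaped as '_' plus the
lowercase letter, and a literal '_' doubles to '__':

    def encodedir(path):
        out = []
        for c in path:
            if c == '_':
                out.append('__')
            elif c.isupper():
                out.append('_' + c.lower())
            else:
                out.append(c)
        return ''.join(out)

    # encodedir('A/B') -> '_a/_b'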
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/tests/test-remotefilelog-clone.t Fri Jan 18 13:28:22 2019 -0500
@@ -0,0 +1,115 @@
+#require no-windows
+
+ $ . "$TESTDIR/remotefilelog-library.sh"
+
+ $ hg init master
+ $ cd master
+ $ cat >> .hg/hgrc <<EOF
+ > [remotefilelog]
+ > server=True
+ > EOF
+ $ echo x > x
+ $ hg commit -qAm x
+
+ $ cd ..
+
+# shallow clone from full
+
+ $ hgcloneshallow ssh://user@dummy/master shallow --noupdate
+ streaming all changes
+ 2 files to transfer, 227 bytes of data
+ transferred 227 bytes in * seconds (*/sec) (glob)
+ searching for changes
+ no changes found
+ $ cd shallow
+ $ cat .hg/requires
+ dotencode
+ exp-remotefilelog-repo-req-1
+ fncache
+ generaldelta
+ revlogv1
+ sparserevlog
+ store
+
+ $ hg update
+ 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
+ 1 files fetched over 1 fetches - (1 misses, 0.00% hit ratio) over *s (glob)
+
+ $ cat x
+ x
+
+ $ ls .hg/store/data
+ $ echo foo > f
+ $ hg add f
+ $ hg ci -m 'local content'
+ $ ls .hg/store/data
+ 4a0a19218e082a343a1b17e5333409af9d98f0f5
+
+ $ cd ..
+
+# shallow clone from shallow
+
+ $ hgcloneshallow ssh://user@dummy/shallow shallow2 --noupdate
+ streaming all changes
+ 3 files to transfer, 564 bytes of data
+ transferred 564 bytes in * seconds (*/sec) (glob)
+ searching for changes
+ no changes found
+ $ cd shallow2
+ $ cat .hg/requires
+ dotencode
+ exp-remotefilelog-repo-req-1
+ fncache
+ generaldelta
+ revlogv1
+ sparserevlog
+ store
+ $ ls .hg/store/data
+ 4a0a19218e082a343a1b17e5333409af9d98f0f5
+
+ $ hg update
+ 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
+
+ $ cat x
+ x
+
+ $ cd ..
+
+# full clone from shallow
+
+Note: the output to STDERR comes from a different process from the output on
+STDOUT and their relative ordering is not deterministic. As a result, the test
+was failing sporadically. To avoid this, we capture STDERR to a file and
+check its contents separately.
+
+ $ TEMP_STDERR=full-clone-from-shallow.stderr.tmp
+ $ hg clone --noupdate ssh://user@dummy/shallow full 2>$TEMP_STDERR
+ streaming all changes
+ remote: abort: Cannot clone from a shallow repo to a full repo.
+ [255]
+ $ cat $TEMP_STDERR
+ abort: pull failed on remote
+ $ rm $TEMP_STDERR
+
+# getbundle full clone
+
+ $ printf '[server]\npreferuncompressed=False\n' >> master/.hg/hgrc
+ $ hgcloneshallow ssh://user@dummy/master shallow3
+ requesting all changes
+ adding changesets
+ adding manifests
+ adding file changes
+ added 1 changesets with 0 changes to 0 files
+ new changesets b292c1e3311f
+ updating to branch default
+ 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
+
+ $ ls shallow3/.hg/store/data
+ $ cat shallow3/.hg/requires
+ dotencode
+ exp-remotefilelog-repo-req-1
+ fncache
+ generaldelta
+ revlogv1
+ sparserevlog
+ store
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/tests/test-remotefilelog-corrupt-cache.t Fri Jan 18 13:28:22 2019 -0500
@@ -0,0 +1,72 @@
+#require no-windows
+
+ $ . "$TESTDIR/remotefilelog-library.sh"
+
+ $ hg init master
+ $ cd master
+ $ cat >> .hg/hgrc <<EOF
+ > [remotefilelog]
+ > server=True
+ > EOF
+ $ echo x > x
+ $ echo y > y
+ $ echo z > z
+ $ hg commit -qAm xy
+
+ $ cd ..
+
+ $ hgcloneshallow ssh://user@dummy/master shallow -q
+ 3 files fetched over 1 fetches - (3 misses, 0.00% hit ratio) over *s (glob)
+ $ cd shallow
+
+Verify corrupt cache handling repairs by default
+
+ $ hg up -q null
+ $ chmod u+w $CACHEDIR/master/11/f6ad8ec52a2984abaafd7c3b516503785c2072/1406e74118627694268417491f018a4a883152f0
+ $ echo x > $CACHEDIR/master/11/f6ad8ec52a2984abaafd7c3b516503785c2072/1406e74118627694268417491f018a4a883152f0
+ $ hg up tip
+ 3 files updated, 0 files merged, 0 files removed, 0 files unresolved
+ 1 files fetched over 1 fetches - (1 misses, 0.00% hit ratio) over *s (glob)
+
+Verify corrupt cache error message
+
+ $ hg up -q null
+ $ cat >> .hg/hgrc <<EOF
+ > [remotefilelog]
+ > validatecache=off
+ > EOF
+ $ chmod u+w $CACHEDIR/master/11/f6ad8ec52a2984abaafd7c3b516503785c2072/1406e74118627694268417491f018a4a883152f0
+ $ echo x > $CACHEDIR/master/11/f6ad8ec52a2984abaafd7c3b516503785c2072/1406e74118627694268417491f018a4a883152f0
+ $ hg up tip 2>&1 | egrep "^RuntimeError"
+ RuntimeError: unexpected remotefilelog header: illegal format
+
+Verify detection and remediation when remotefilelog.validatecachelog is set
+
+ $ cat >> .hg/hgrc <<EOF
+ > [remotefilelog]
+ > validatecachelog=$PWD/.hg/remotefilelog_cache.log
+ > validatecache=strict
+ > EOF
+ $ chmod u+w $CACHEDIR/master/11/f6ad8ec52a2984abaafd7c3b516503785c2072/1406e74118627694268417491f018a4a883152f0
+ $ echo x > $CACHEDIR/master/11/f6ad8ec52a2984abaafd7c3b516503785c2072/1406e74118627694268417491f018a4a883152f0
+ $ hg up tip
+ 3 files updated, 0 files merged, 0 files removed, 0 files unresolved
+ 1 files fetched over 1 fetches - (1 misses, 0.00% hit ratio) over *s (glob)
+ $ cat .hg/remotefilelog_cache.log
+ corrupt $TESTTMP/hgcache/master/11/f6ad8ec52a2984abaafd7c3b516503785c2072/1406e74118627694268417491f018a4a883152f0 during contains
+
+Verify handling of corrupt server cache
+
+ $ rm -f ../master/.hg/remotefilelogcache/y/076f5e2225b3ff0400b98c92aa6cdf403ee24cca
+ $ touch ../master/.hg/remotefilelogcache/y/076f5e2225b3ff0400b98c92aa6cdf403ee24cca
+ $ clearcache
+ $ hg prefetch -r .
+ 3 files fetched over 1 fetches - (3 misses, 0.00% hit ratio) over *s (glob)
+ $ test -s ../master/.hg/remotefilelogcache/y/076f5e2225b3ff0400b98c92aa6cdf403ee24cca
+ $ hg debugremotefilelog $CACHEDIR/master/95/cb0bfd2977c761298d9624e4b4d4c72a39974a/076f5e2225b3ff0400b98c92aa6cdf403ee24cca
+ size: 2 bytes
+ path: $TESTTMP/hgcache/master/95/cb0bfd2977c761298d9624e4b4d4c72a39974a/076f5e2225b3ff0400b98c92aa6cdf403ee24cca
+ key: 076f5e2225b3
+
+ node => p1 p2 linknode copyfrom
+ 076f5e2225b3 => 000000000000 000000000000 f3d0bb0d1e48
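A minimal sketch, with a hypothetical parseheader() helper, of the
validatecache policies the scenarios above exercise: 'off' lets corruption
surface as a RuntimeError, the default repairs corrupt entries by
refetching, and 'strict' additionally validates during containment checks
and records hits in validatecachelog:

    def validate(path, mode, log=None):
        try:
            parseheader(path)      # hypothetical format check
            return True
        except RuntimeError:
            if mode == 'off':
                raise              # surfaces to the caller, as tested above
            if log is not None:
                log.write('corrupt %s during contains\n' % path)
            return False           # caller discards the entry and refetches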
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/tests/test-remotefilelog-datapack.py Fri Jan 18 13:28:22 2019 -0500
@@ -0,0 +1,378 @@
+#!/usr/bin/env python
+from __future__ import absolute_import, print_function
+
+import hashlib
+import os
+import random
+import shutil
+import stat
+import struct
+import sys
+import tempfile
+import time
+import unittest
+
+import silenttestrunner
+
+# Load the local remotefilelog, not the system one
+sys.path[0:0] = [os.path.join(os.path.dirname(__file__), '..')]
+from mercurial.node import nullid
+from mercurial import (
+ pycompat,
+ ui as uimod,
+)
+from hgext.remotefilelog import (
+ basepack,
+ constants,
+ datapack,
+)
+
+class datapacktestsbase(object):
+ def __init__(self, datapackreader, paramsavailable):
+ self.datapackreader = datapackreader
+ self.paramsavailable = paramsavailable
+
+ def setUp(self):
+ self.tempdirs = []
+
+ def tearDown(self):
+ for d in self.tempdirs:
+ shutil.rmtree(d)
+
+ def makeTempDir(self):
+ tempdir = tempfile.mkdtemp()
+ self.tempdirs.append(tempdir)
+ return tempdir
+
+ def getHash(self, content):
+ return hashlib.sha1(content).digest()
+
+ def getFakeHash(self):
+ return ''.join(chr(random.randint(0, 255)) for _ in range(20))
+
+ def createPack(self, revisions=None, packdir=None):
+ if revisions is None:
+ revisions = [("filename", self.getFakeHash(), nullid, "content")]
+
+ if packdir is None:
+ packdir = self.makeTempDir()
+
+ packer = datapack.mutabledatapack(uimod.ui(), packdir, version=2)
+
+ for args in revisions:
+ filename, node, base, content = args[0:4]
+ # meta is optional
+ meta = None
+ if len(args) > 4:
+ meta = args[4]
+ packer.add(filename, node, base, content, metadata=meta)
+
+ path = packer.close()
+ return self.datapackreader(path)
+
+ def _testAddSingle(self, content):
+ """Test putting a simple blob into a pack and reading it out.
+ """
+ filename = "foo"
+ node = self.getHash(content)
+
+ revisions = [(filename, node, nullid, content)]
+ pack = self.createPack(revisions)
+ if self.paramsavailable:
+ self.assertEquals(pack.params.fanoutprefix,
+ basepack.SMALLFANOUTPREFIX)
+
+ chain = pack.getdeltachain(filename, node)
+ self.assertEquals(content, chain[0][4])
+
+ def testAddSingle(self):
+        self._testAddSingle('abcdef')
+
+ def testAddSingleEmpty(self):
+        self._testAddSingle('')
+
+ def testAddMultiple(self):
+ """Test putting multiple unrelated blobs into a pack and reading them
+ out.
+ """
+ revisions = []
+ for i in range(10):
+ filename = "foo%s" % i
+ content = "abcdef%s" % i
+ node = self.getHash(content)
+ revisions.append((filename, node, self.getFakeHash(), content))
+
+ pack = self.createPack(revisions)
+
+ for filename, node, base, content in revisions:
+ entry = pack.getdelta(filename, node)
+ self.assertEquals((content, filename, base, {}), entry)
+
+ chain = pack.getdeltachain(filename, node)
+ self.assertEquals(content, chain[0][4])
+
+ def testAddDeltas(self):
+ """Test putting multiple delta blobs into a pack and read the chain.
+ """
+ revisions = []
+ filename = "foo"
+ lastnode = nullid
+ for i in range(10):
+ content = "abcdef%s" % i
+ node = self.getHash(content)
+ revisions.append((filename, node, lastnode, content))
+ lastnode = node
+
+ pack = self.createPack(revisions)
+
+ entry = pack.getdelta(filename, revisions[0][1])
+ realvalue = (revisions[0][3], filename, revisions[0][2], {})
+ self.assertEquals(entry, realvalue)
+
+ # Test that the chain for the final entry has all the others
+ chain = pack.getdeltachain(filename, node)
+ for i in range(10):
+ content = "abcdef%s" % i
+ self.assertEquals(content, chain[-i - 1][4])
+
+ def testPackMany(self):
+ """Pack many related and unrelated objects.
+ """
+ # Build a random pack file
+ revisions = []
+ blobs = {}
+ random.seed(0)
+ for i in range(100):
+ filename = "filename-%s" % i
+ filerevs = []
+ for j in range(random.randint(1, 100)):
+ content = "content-%s" % j
+ node = self.getHash(content)
+ lastnode = nullid
+ if len(filerevs) > 0:
+ lastnode = filerevs[random.randint(0, len(filerevs) - 1)]
+ filerevs.append(node)
+ blobs[(filename, node, lastnode)] = content
+ revisions.append((filename, node, lastnode, content))
+
+ pack = self.createPack(revisions)
+
+ # Verify the pack contents
+        for (filename, node, lastnode), content in sorted(blobs.items()):
+ chain = pack.getdeltachain(filename, node)
+ for entry in chain:
+ expectedcontent = blobs[(entry[0], entry[1], entry[3])]
+ self.assertEquals(entry[4], expectedcontent)
+
+ def testPackMetadata(self):
+ revisions = []
+ for i in range(100):
+ filename = '%s.txt' % i
+ content = 'put-something-here \n' * i
+ node = self.getHash(content)
+ meta = {constants.METAKEYFLAG: i ** 4,
+ constants.METAKEYSIZE: len(content),
+ 'Z': 'random_string',
+ '_': '\0' * i}
+ revisions.append((filename, node, nullid, content, meta))
+ pack = self.createPack(revisions)
+ for name, node, x, content, origmeta in revisions:
+ parsedmeta = pack.getmeta(name, node)
+ # flag == 0 should be optimized out
+ if origmeta[constants.METAKEYFLAG] == 0:
+ del origmeta[constants.METAKEYFLAG]
+ self.assertEquals(parsedmeta, origmeta)
+
+ def testGetMissing(self):
+ """Test the getmissing() api.
+ """
+ revisions = []
+ filename = "foo"
+ lastnode = nullid
+ for i in range(10):
+ content = "abcdef%s" % i
+ node = self.getHash(content)
+ revisions.append((filename, node, lastnode, content))
+ lastnode = node
+
+ pack = self.createPack(revisions)
+
+ missing = pack.getmissing([("foo", revisions[0][1])])
+ self.assertFalse(missing)
+
+ missing = pack.getmissing([("foo", revisions[0][1]),
+ ("foo", revisions[1][1])])
+ self.assertFalse(missing)
+
+ fakenode = self.getFakeHash()
+ missing = pack.getmissing([("foo", revisions[0][1]), ("foo", fakenode)])
+ self.assertEquals(missing, [("foo", fakenode)])
+
+ def testAddThrows(self):
+ pack = self.createPack()
+
+ try:
+ pack.add('filename', nullid, 'contents')
+ self.assertTrue(False, "datapack.add should throw")
+ except RuntimeError:
+ pass
+
+ def testBadVersionThrows(self):
+ pack = self.createPack()
+ path = pack.path + '.datapack'
+ with open(path) as f:
+ raw = f.read()
+ raw = struct.pack('!B', 255) + raw[1:]
+ os.chmod(path, os.stat(path).st_mode | stat.S_IWRITE)
+ with open(path, 'w+') as f:
+ f.write(raw)
+
+ try:
+ pack = self.datapackreader(pack.path)
+ self.assertTrue(False, "bad version number should have thrown")
+ except RuntimeError:
+ pass
+
+ def testMissingDeltabase(self):
+ fakenode = self.getFakeHash()
+ revisions = [("filename", fakenode, self.getFakeHash(), "content")]
+ pack = self.createPack(revisions)
+ chain = pack.getdeltachain("filename", fakenode)
+ self.assertEquals(len(chain), 1)
+
+ def testLargePack(self):
+ """Test creating and reading from a large pack with over X entries.
+ This causes it to use a 2^16 fanout table instead."""
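+        # Note (assumption): the small fanout table presumably indexes on a
+        # one-byte node prefix (2^8 buckets) while the large one uses a
+        # two-byte prefix (2^16 buckets); the params assertion below only
+        # checks which prefix mode the pack was written with.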
+ revisions = []
+ blobs = {}
+ total = basepack.SMALLFANOUTCUTOFF + 1
+ for i in pycompat.xrange(total):
+ filename = "filename-%s" % i
+ content = filename
+ node = self.getHash(content)
+ blobs[(filename, node)] = content
+ revisions.append((filename, node, nullid, content))
+
+ pack = self.createPack(revisions)
+ if self.paramsavailable:
+ self.assertEquals(pack.params.fanoutprefix,
+ basepack.LARGEFANOUTPREFIX)
+
+        for (filename, node), content in blobs.items():
+ actualcontent = pack.getdeltachain(filename, node)[0][4]
+ self.assertEquals(actualcontent, content)
+
+ def testPacksCache(self):
+ """Test that we remember the most recent packs while fetching the delta
+ chain."""
+
+ packdir = self.makeTempDir()
+ deltachains = []
+
+ numpacks = 10
+ revisionsperpack = 100
+
+ for i in range(numpacks):
+ chain = []
+ revision = (str(i), self.getFakeHash(), nullid, "content")
+
+ for _ in range(revisionsperpack):
+ chain.append(revision)
+ revision = (
+ str(i),
+ self.getFakeHash(),
+ revision[1],
+ self.getFakeHash()
+ )
+
+ self.createPack(chain, packdir)
+ deltachains.append(chain)
+
+ class testdatapackstore(datapack.datapackstore):
+ # Ensures that we are not keeping everything in the cache.
+            DEFAULTCACHESIZE = numpacks // 2
+
+ store = testdatapackstore(uimod.ui(), packdir)
+
+ random.shuffle(deltachains)
+ for randomchain in deltachains:
+ revision = random.choice(randomchain)
+ chain = store.getdeltachain(revision[0], revision[1])
+
+ mostrecentpack = next(iter(store.packs), None)
+ self.assertEquals(
+ mostrecentpack.getdeltachain(revision[0], revision[1]),
+ chain
+ )
+
+ self.assertEquals(randomchain.index(revision) + 1, len(chain))
+
+ # perf test off by default since it's slow
+ def _testIndexPerf(self):
+ random.seed(0)
+ print("Multi-get perf test")
+ packsizes = [
+ 100,
+ 10000,
+ 100000,
+ 500000,
+ 1000000,
+ 3000000,
+ ]
+ lookupsizes = [
+ 10,
+ 100,
+ 1000,
+ 10000,
+ 100000,
+ 1000000,
+ ]
+ for packsize in packsizes:
+ revisions = []
+ for i in pycompat.xrange(packsize):
+ filename = "filename-%s" % i
+ content = "content-%s" % i
+ node = self.getHash(content)
+ revisions.append((filename, node, nullid, content))
+
+ path = self.createPack(revisions).path
+
+ # Perf of large multi-get
+ import gc
+ gc.disable()
+ pack = self.datapackreader(path)
+ for lookupsize in lookupsizes:
+ if lookupsize > packsize:
+ continue
+ random.shuffle(revisions)
+ findnodes = [(rev[0], rev[1]) for rev in revisions]
+
+ start = time.time()
+ pack.getmissing(findnodes[:lookupsize])
+ elapsed = time.time() - start
+ print ("%s pack %s lookups = %0.04f" %
+ (('%s' % packsize).rjust(7),
+ ('%s' % lookupsize).rjust(7),
+ elapsed))
+
+ print("")
+ gc.enable()
+
+ # The perf test is meant to produce output, so we always fail the test
+ # so the user sees the output.
+ raise RuntimeError("perf test always fails")
+
+class datapacktests(datapacktestsbase, unittest.TestCase):
+ def __init__(self, *args, **kwargs):
+ datapacktestsbase.__init__(self, datapack.datapack, True)
+ unittest.TestCase.__init__(self, *args, **kwargs)
+
+# TODO:
+# datapack store:
+# - getmissing
+# - GC two packs into one
+
+if __name__ == '__main__':
+ if pycompat.iswindows:
+ sys.exit(80) # Skip on Windows
+ silenttestrunner.main(__name__)
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/tests/test-remotefilelog-gc.t Fri Jan 18 13:28:22 2019 -0500
@@ -0,0 +1,112 @@
+#require no-windows
+
+ $ . "$TESTDIR/remotefilelog-library.sh"
+
+ $ hg init master
+ $ cd master
+ $ cat >> .hg/hgrc <<EOF
+ > [remotefilelog]
+ > server=True
+ > serverexpiration=-1
+ > EOF
+ $ echo x > x
+ $ hg commit -qAm x
+ $ cd ..
+
+ $ hgcloneshallow ssh://user@dummy/master shallow -q
+ 1 files fetched over 1 fetches - (1 misses, 0.00% hit ratio) over *s (glob)
+
+# Set the prefetchdays config to zero so that all commits are prefetched
+# no matter what their creation date is.
+ $ cd shallow
+ $ cat >> .hg/hgrc <<EOF
+ > [remotefilelog]
+ > prefetchdays=0
+ > EOF
+ $ cd ..
+
+# commit a new version of x so we can gc the old one
+
+ $ cd master
+ $ echo y > x
+ $ hg commit -qAm y
+ $ cd ..
+
+ $ cd shallow
+ $ hg pull -q
+ $ hg update -q
+ 1 files fetched over 1 fetches - (1 misses, 0.00% hit ratio) over *s (glob)
+ $ cd ..
+
+# gc client cache
+
+ $ lastweek=`$PYTHON -c 'import datetime,time; print(datetime.datetime.fromtimestamp(time.time() - (86400 * 7)).strftime("%y%m%d%H%M"))'`
+ $ find $CACHEDIR -type f -exec touch -t $lastweek {} \;
+
+ $ find $CACHEDIR -type f | sort
+ $TESTTMP/hgcache/master/11/f6ad8ec52a2984abaafd7c3b516503785c2072/1406e74118627694268417491f018a4a883152f0 (glob)
+ $TESTTMP/hgcache/master/11/f6ad8ec52a2984abaafd7c3b516503785c2072/48023ec064c1d522f0d792a5a912bb1bf7859a4a (glob)
+ $TESTTMP/hgcache/repos (glob)
+ $ hg gc
+ finished: removed 1 of 2 files (0.00 GB to 0.00 GB)
+ $ find $CACHEDIR -type f | sort
+ $TESTTMP/hgcache/master/11/f6ad8ec52a2984abaafd7c3b516503785c2072/48023ec064c1d522f0d792a5a912bb1bf7859a4a (glob)
+ $TESTTMP/hgcache/repos
+
+# gc server cache
+
+ $ find master/.hg/remotefilelogcache -type f | sort
+ master/.hg/remotefilelogcache/x/1406e74118627694268417491f018a4a883152f0 (glob)
+ master/.hg/remotefilelogcache/x/48023ec064c1d522f0d792a5a912bb1bf7859a4a (glob)
+ $ hg gc master
+ finished: removed 0 of 1 files (0.00 GB to 0.00 GB)
+ $ find master/.hg/remotefilelogcache -type f | sort
+ master/.hg/remotefilelogcache/x/48023ec064c1d522f0d792a5a912bb1bf7859a4a (glob)
+
+# Test that GC keepset includes pullprefetch revset if it is configured
+
+ $ cd shallow
+ $ cat >> .hg/hgrc <<EOF
+ > [remotefilelog]
+ > pullprefetch=all()
+ > EOF
+ $ hg prefetch
+ 1 files fetched over 1 fetches - (1 misses, 0.00% hit ratio) over *s (glob)
+
+ $ cd ..
+ $ hg gc
+ finished: removed 0 of 2 files (0.00 GB to 0.00 GB)
+
+# Ensure that there are 2 versions of the file in cache
+ $ find $CACHEDIR -type f | sort
+ $TESTTMP/hgcache/master/11/f6ad8ec52a2984abaafd7c3b516503785c2072/1406e74118627694268417491f018a4a883152f0 (glob)
+ $TESTTMP/hgcache/master/11/f6ad8ec52a2984abaafd7c3b516503785c2072/48023ec064c1d522f0d792a5a912bb1bf7859a4a (glob)
+ $TESTTMP/hgcache/repos (glob)
+
+# Test that when the gcrepack and repackonhggc flags are set, 'hg gc' runs an incremental repack with garbage collection
+
+ $ hg gc --config remotefilelog.gcrepack=True --config remotefilelog.repackonhggc=True
+
+# Ensure that loose files are repacked
+ $ find $CACHEDIR -type f | sort
+ $TESTTMP/hgcache/master/packs/320dab99b7e3f60512b97f347689625263d22cf5.dataidx
+ $TESTTMP/hgcache/master/packs/320dab99b7e3f60512b97f347689625263d22cf5.datapack
+ $TESTTMP/hgcache/master/packs/837b83c1ef6485a336eb4421ac5973c0ec130fbb.histidx
+ $TESTTMP/hgcache/master/packs/837b83c1ef6485a336eb4421ac5973c0ec130fbb.histpack
+ $TESTTMP/hgcache/master/packs/repacklock
+ $TESTTMP/hgcache/repos
+
+# Test that warning is displayed when there are no valid repos in repofile
+
+ $ cp $CACHEDIR/repos $CACHEDIR/repos.bak
+ $ echo " " > $CACHEDIR/repos
+ $ hg gc
+ warning: no valid repos in repofile
+ $ mv $CACHEDIR/repos.bak $CACHEDIR/repos
+
+# Test that warning is displayed when the repo path is malformed
+
+ $ printf "asdas\0das" >> $CACHEDIR/repos
+ $ hg gc 2>&1 | head -n2
+ warning: malformed path: * (glob)
+ Traceback (most recent call last):
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/tests/test-remotefilelog-gcrepack.t Fri Jan 18 13:28:22 2019 -0500
@@ -0,0 +1,159 @@
+#require no-windows
+
+ $ . "$TESTDIR/remotefilelog-library.sh"
+
+ $ hg init master
+ $ cd master
+ $ cat >> .hg/hgrc <<EOF
+ > [remotefilelog]
+ > server=True
+ > EOF
+ $ echo x > x
+ $ hg commit -qAm x
+ $ echo y > y
+ $ rm x
+ $ hg commit -qAm DxAy
+ $ echo yy > y
+ $ hg commit -qAm y
+ $ cd ..
+
+ $ hgcloneshallow ssh://user@dummy/master shallow -q
+ 1 files fetched over 1 fetches - (1 misses, 0.00% hit ratio) over *s (glob)
+
+# Set the prefetchdays config to zero so that all commits are prefetched
+# no matter what their creation date is.
+ $ cd shallow
+ $ cat >> .hg/hgrc <<EOF
+ > [remotefilelog]
+ > prefetchdays=0
+ > EOF
+ $ cd ..
+
+# Prefetch all data and repack
+
+ $ cd shallow
+ $ cat >> .hg/hgrc <<EOF
+ > [remotefilelog]
+ > bgprefetchrevs=all()
+ > EOF
+
+ $ hg prefetch
+ 2 files fetched over 1 fetches - (2 misses, 0.00% hit ratio) over *s (glob)
+ $ hg repack
+ $ sleep 0.5
+ $ hg debugwaitonrepack >/dev/null 2>&1
+
+ $ find $CACHEDIR | sort | grep ".datapack\|.histpack"
+ $TESTTMP/hgcache/master/packs/7bcd2d90b99395ca43172a0dd24e18860b2902f9.histpack
+ $TESTTMP/hgcache/master/packs/dc8f8fdc76690ce27791ce9f53a18da379e50d37.datapack
+
+# Ensure that all file versions were prefetched
+
+ $ hg debugdatapack `ls -ct $TESTTMP/hgcache/master/packs/*.datapack | head -n 1`
+ $TESTTMP/hgcache/master/packs/dc8f8fdc76690ce27791ce9f53a18da379e50d37:
+ x:
+ Node Delta Base Delta Length Blob Size
+ 1406e7411862 000000000000 2 2
+
+ Total: 2 2 (0.0% bigger)
+ y:
+ Node Delta Base Delta Length Blob Size
+ 50dbc4572b8e 000000000000 3 3
+ 076f5e2225b3 50dbc4572b8e 14 2
+
+ Total: 17 5 (240.0% bigger)
+
+# Test garbage collection during repack
+
+ $ cat >> .hg/hgrc <<EOF
+ > [remotefilelog]
+ > bgprefetchrevs=tip
+ > gcrepack=True
+ > nodettl=86400
+ > EOF
+
+ $ hg repack
+ $ sleep 0.5
+ $ hg debugwaitonrepack >/dev/null 2>&1
+
+ $ find $CACHEDIR | sort | grep ".datapack\|.histpack"
+ $TESTTMP/hgcache/master/packs/7bcd2d90b99395ca43172a0dd24e18860b2902f9.histpack
+ $TESTTMP/hgcache/master/packs/a4e1d094ec2aee8a08a4d6d95a13c634cc7d7394.datapack
+
+# Ensure that file 'x' was garbage collected. It should be GCed because it is not in the keepset
+# and is old (commit date is 0.0 in tests). Ensure that file 'y' is present as it is in the keepset.
+
+ $ hg debugdatapack `ls -ct $TESTTMP/hgcache/master/packs/*.datapack | head -n 1`
+ $TESTTMP/hgcache/master/packs/a4e1d094ec2aee8a08a4d6d95a13c634cc7d7394:
+ y:
+ Node Delta Base Delta Length Blob Size
+ 50dbc4572b8e 000000000000 3 3
+
+ Total: 3 3 (0.0% bigger)
+
+# Prefetch all data again and repack for later garbage collection
+
+ $ cat >> .hg/hgrc <<EOF
+ > [remotefilelog]
+ > bgprefetchrevs=all()
+ > EOF
+
+ $ hg prefetch
+ 2 files fetched over 1 fetches - (2 misses, 0.00% hit ratio) over *s (glob)
+ $ hg repack
+ $ sleep 0.5
+ $ hg debugwaitonrepack >/dev/null 2>&1
+
+ $ find $CACHEDIR | sort | grep ".datapack\|.histpack"
+ $TESTTMP/hgcache/master/packs/7bcd2d90b99395ca43172a0dd24e18860b2902f9.histpack
+ $TESTTMP/hgcache/master/packs/dc8f8fdc76690ce27791ce9f53a18da379e50d37.datapack
+
+# Ensure that all file versions were prefetched
+
+ $ hg debugdatapack `ls -ct $TESTTMP/hgcache/master/packs/*.datapack | head -n 1`
+ $TESTTMP/hgcache/master/packs/dc8f8fdc76690ce27791ce9f53a18da379e50d37:
+ x:
+ Node Delta Base Delta Length Blob Size
+ 1406e7411862 000000000000 2 2
+
+ Total: 2 2 (0.0% bigger)
+ y:
+ Node Delta Base Delta Length Blob Size
+ 50dbc4572b8e 000000000000 3 3
+ 076f5e2225b3 50dbc4572b8e 14 2
+
+ Total: 17 5 (240.0% bigger)
+
+# Test garbage collection during repack. Ensure that new files are not removed even though they are not in the keepset.
+# For the purposes of the test, the TTL of a file is set to the current time plus 100 seconds. All commits in tests have
+# a date of 1970, so to prevent garbage collection we have to set nodettl farther from 1970 than we are now.
+
+ $ cat >> .hg/hgrc <<EOF
+ > [remotefilelog]
+ > bgprefetchrevs=
+ > nodettl=$(($(date +%s) + 100))
+ > EOF
+
+ $ hg repack
+ $ sleep 0.5
+ $ hg debugwaitonrepack >/dev/null 2>&1
+
+ $ find $CACHEDIR | sort | grep ".datapack\|.histpack"
+ $TESTTMP/hgcache/master/packs/7bcd2d90b99395ca43172a0dd24e18860b2902f9.histpack
+ $TESTTMP/hgcache/master/packs/dc8f8fdc76690ce27791ce9f53a18da379e50d37.datapack
+
+# Ensure that all file versions were prefetched
+
+ $ hg debugdatapack `ls -ct $TESTTMP/hgcache/master/packs/*.datapack | head -n 1`
+ $TESTTMP/hgcache/master/packs/dc8f8fdc76690ce27791ce9f53a18da379e50d37:
+ x:
+ Node Delta Base Delta Length Blob Size
+ 1406e7411862 000000000000 2 2
+
+ Total: 2 2 (0.0% bigger)
+ y:
+ Node Delta Base Delta Length Blob Size
+ 50dbc4572b8e 000000000000 3 3
+ 076f5e2225b3 50dbc4572b8e 14 2
+
+ Total: 17 5 (240.0% bigger)
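A minimal sketch, under assumed names, of the repack-time GC rule the runs
above demonstrate: an entry survives the repack when its key is in the
keepset (derived from bgprefetchrevs/pullprefetch) or when it is younger
than remotefilelog.nodettl seconds:

    import time

    def shouldkeep(key, mtime, keepkeys, nodettl):
        # Keep anything referenced by the keepset, plus anything too
        # recent to have expired (mtime is the entry's timestamp).
        return key in keepkeys or mtime + nodettl > time.time()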
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/tests/test-remotefilelog-histpack.py Fri Jan 18 13:28:22 2019 -0500
@@ -0,0 +1,278 @@
+#!/usr/bin/env python
+from __future__ import absolute_import
+
+import hashlib
+import os
+import random
+import shutil
+import stat
+import struct
+import sys
+import tempfile
+import unittest
+
+import silenttestrunner
+
+from mercurial.node import nullid
+from mercurial import (
+ pycompat,
+ ui as uimod,
+)
+# Load the local remotefilelog, not the system one
+sys.path[0:0] = [os.path.join(os.path.dirname(__file__), '..')]
+from hgext.remotefilelog import (
+ basepack,
+ historypack,
+)
+
+class histpacktests(unittest.TestCase):
+ def setUp(self):
+ self.tempdirs = []
+
+ def tearDown(self):
+ for d in self.tempdirs:
+ shutil.rmtree(d)
+
+ def makeTempDir(self):
+ tempdir = tempfile.mkdtemp()
+ self.tempdirs.append(tempdir)
+ return pycompat.fsencode(tempdir)
+
+ def getHash(self, content):
+ return hashlib.sha1(content).digest()
+
+ def getFakeHash(self):
+ return b''.join(pycompat.bytechr(random.randint(0, 255))
+ for _ in range(20))
+
+ def createPack(self, revisions=None):
+ """Creates and returns a historypack containing the specified revisions.
+
+        `revisions` is a list of tuples, where each tuple contains a filename,
+        node, p1node, p2node, linknode, and copyfrom.
+ """
+ if revisions is None:
+ revisions = [("filename", self.getFakeHash(), nullid, nullid,
+ self.getFakeHash(), None)]
+
+ packdir = pycompat.fsencode(self.makeTempDir())
+ packer = historypack.mutablehistorypack(uimod.ui(), packdir,
+ version=2)
+
+ for filename, node, p1, p2, linknode, copyfrom in revisions:
+ packer.add(filename, node, p1, p2, linknode, copyfrom)
+
+ path = packer.close()
+ return historypack.historypack(path)
+
+ def testAddSingle(self):
+ """Test putting a single entry into a pack and reading it out.
+ """
+ filename = "foo"
+ node = self.getFakeHash()
+ p1 = self.getFakeHash()
+ p2 = self.getFakeHash()
+ linknode = self.getFakeHash()
+
+ revisions = [(filename, node, p1, p2, linknode, None)]
+ pack = self.createPack(revisions)
+
+ actual = pack.getancestors(filename, node)[node]
+ self.assertEquals(p1, actual[0])
+ self.assertEquals(p2, actual[1])
+ self.assertEquals(linknode, actual[2])
+
+ def testAddMultiple(self):
+ """Test putting multiple unrelated revisions into a pack and reading
+ them out.
+ """
+ revisions = []
+ for i in range(10):
+ filename = "foo-%s" % i
+ node = self.getFakeHash()
+ p1 = self.getFakeHash()
+ p2 = self.getFakeHash()
+ linknode = self.getFakeHash()
+ revisions.append((filename, node, p1, p2, linknode, None))
+
+ pack = self.createPack(revisions)
+
+ for filename, node, p1, p2, linknode, copyfrom in revisions:
+ actual = pack.getancestors(filename, node)[node]
+ self.assertEquals(p1, actual[0])
+ self.assertEquals(p2, actual[1])
+ self.assertEquals(linknode, actual[2])
+ self.assertEquals(copyfrom, actual[3])
+
+ def testAddAncestorChain(self):
+ """Test putting multiple revisions in into a pack and read the ancestor
+ chain.
+ """
+ revisions = []
+ filename = b"foo"
+ lastnode = nullid
+ for i in range(10):
+ node = self.getFakeHash()
+ revisions.append((filename, node, lastnode, nullid, nullid, None))
+ lastnode = node
+
+ # revisions must be added in topological order, newest first
+ revisions = list(reversed(revisions))
+ pack = self.createPack(revisions)
+
+ # Test that the chain has all the entries
+ ancestors = pack.getancestors(revisions[0][0], revisions[0][1])
+ for filename, node, p1, p2, linknode, copyfrom in revisions:
+ ap1, ap2, alinknode, acopyfrom = ancestors[node]
+ self.assertEquals(ap1, p1)
+ self.assertEquals(ap2, p2)
+ self.assertEquals(alinknode, linknode)
+ self.assertEquals(acopyfrom, copyfrom)
+
+ def testPackMany(self):
+ """Pack many related and unrelated ancestors.
+ """
+ # Build a random pack file
+ allentries = {}
+ ancestorcounts = {}
+ revisions = []
+ random.seed(0)
+ for i in range(100):
+ filename = b"filename-%d" % i
+ entries = []
+ p2 = nullid
+ linknode = nullid
+ for j in range(random.randint(1, 100)):
+ node = self.getFakeHash()
+ p1 = nullid
+ if len(entries) > 0:
+ p1 = entries[random.randint(0, len(entries) - 1)]
+ entries.append(node)
+ revisions.append((filename, node, p1, p2, linknode, None))
+ allentries[(filename, node)] = (p1, p2, linknode)
+ if p1 == nullid:
+ ancestorcounts[(filename, node)] = 1
+ else:
+ newcount = ancestorcounts[(filename, p1)] + 1
+ ancestorcounts[(filename, node)] = newcount
+
+ # Must add file entries in reverse topological order
+ revisions = list(reversed(revisions))
+ pack = self.createPack(revisions)
+
+ # Verify the pack contents
+ for (filename, node), (p1, p2, lastnode) in allentries.items():
+ ancestors = pack.getancestors(filename, node)
+ self.assertEquals(ancestorcounts[(filename, node)],
+ len(ancestors))
+ for anode, (ap1, ap2, alinknode, copyfrom) in ancestors.items():
+ ep1, ep2, elinknode = allentries[(filename, anode)]
+ self.assertEquals(ap1, ep1)
+ self.assertEquals(ap2, ep2)
+ self.assertEquals(alinknode, elinknode)
+ self.assertEquals(copyfrom, None)
+
+ def testGetNodeInfo(self):
+ revisions = []
+ filename = b"foo"
+ lastnode = nullid
+ for i in range(10):
+ node = self.getFakeHash()
+ revisions.append((filename, node, lastnode, nullid, nullid, None))
+ lastnode = node
+
+ pack = self.createPack(revisions)
+
+ # Test that getnodeinfo returns the expected results
+ for filename, node, p1, p2, linknode, copyfrom in revisions:
+ ap1, ap2, alinknode, acopyfrom = pack.getnodeinfo(filename, node)
+ self.assertEquals(ap1, p1)
+ self.assertEquals(ap2, p2)
+ self.assertEquals(alinknode, linknode)
+ self.assertEquals(acopyfrom, copyfrom)
+
+ def testGetMissing(self):
+ """Test the getmissing() api.
+ """
+ revisions = []
+ filename = b"foo"
+ for i in range(10):
+ node = self.getFakeHash()
+ p1 = self.getFakeHash()
+ p2 = self.getFakeHash()
+ linknode = self.getFakeHash()
+ revisions.append((filename, node, p1, p2, linknode, None))
+
+ pack = self.createPack(revisions)
+
+ missing = pack.getmissing([(filename, revisions[0][1])])
+ self.assertFalse(missing)
+
+ missing = pack.getmissing([(filename, revisions[0][1]),
+ (filename, revisions[1][1])])
+ self.assertFalse(missing)
+
+ fakenode = self.getFakeHash()
+ missing = pack.getmissing([(filename, revisions[0][1]),
+ (filename, fakenode)])
+ self.assertEquals(missing, [(filename, fakenode)])
+
+        # Test getmissing on a non-existent filename
+ missing = pack.getmissing([("bar", fakenode)])
+ self.assertEquals(missing, [("bar", fakenode)])
+
+ def testAddThrows(self):
+ pack = self.createPack()
+
+ try:
+ pack.add(b'filename', nullid, nullid, nullid, nullid, None)
+ self.assertTrue(False, "historypack.add should throw")
+ except RuntimeError:
+ pass
+
+ def testBadVersionThrows(self):
+ pack = self.createPack()
+ path = pack.path + '.histpack'
+ with open(path) as f:
+ raw = f.read()
+ raw = struct.pack('!B', 255) + raw[1:]
+ os.chmod(path, os.stat(path).st_mode | stat.S_IWRITE)
+ with open(path, 'w+') as f:
+ f.write(raw)
+
+ try:
+ pack = historypack.historypack(pack.path)
+ self.assertTrue(False, "bad version number should have thrown")
+ except RuntimeError:
+ pass
+
+ def testLargePack(self):
+ """Test creating and reading from a large pack with over X entries.
+ This causes it to use a 2^16 fanout table instead."""
+ total = basepack.SMALLFANOUTCUTOFF + 1
+ revisions = []
+ for i in pycompat.xrange(total):
+ filename = b"foo-%d" % i
+ node = self.getFakeHash()
+ p1 = self.getFakeHash()
+ p2 = self.getFakeHash()
+ linknode = self.getFakeHash()
+ revisions.append((filename, node, p1, p2, linknode, None))
+
+ pack = self.createPack(revisions)
+ self.assertEquals(pack.params.fanoutprefix, basepack.LARGEFANOUTPREFIX)
+
+ for filename, node, p1, p2, linknode, copyfrom in revisions:
+ actual = pack.getancestors(filename, node)[node]
+ self.assertEquals(p1, actual[0])
+ self.assertEquals(p2, actual[1])
+ self.assertEquals(linknode, actual[2])
+ self.assertEquals(copyfrom, actual[3])
+# TODO:
+# histpack store:
+# - repack two packs into one
+
+if __name__ == '__main__':
+ if pycompat.iswindows:
+ sys.exit(80) # Skip on Windows
+ silenttestrunner.main(__name__)
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/tests/test-remotefilelog-http.t Fri Jan 18 13:28:22 2019 -0500
@@ -0,0 +1,95 @@
+#require no-windows
+
+ $ . "$TESTDIR/remotefilelog-library.sh"
+
+ $ hg init master
+ $ cd master
+ $ cat >> .hg/hgrc <<EOF
+ > [remotefilelog]
+ > server=True
+ > EOF
+ $ echo x > x
+ $ echo y > y
+ $ hg commit -qAm x
+ $ hg serve -p $HGPORT -d --pid-file=../hg1.pid -E ../error.log -A ../access.log
+
+Build a query string for later use:
+ $ GET=`hg debugdata -m 0 | $PYTHON -c \
+ > 'import sys ; print([("?cmd=x_rfl_getfile&file=%s&node=%s" % tuple(s.split("\0"))) for s in sys.stdin.read().splitlines()][0])'`
+
+ $ cd ..
+ $ cat hg1.pid >> $DAEMON_PIDS
+
+ $ hgcloneshallow http://localhost:$HGPORT/ shallow -q
+ 2 files fetched over 1 fetches - (2 misses, 0.00% hit ratio) over *s (glob)
+
+ $ grep getfile access.log
+ * "GET /?cmd=batch HTTP/1.1" 200 - x-hgarg-1:cmds=x_rfl_getfile+*node%3D1406e74118627694268417491f018a4a883152f0* (glob)
+
+Clear filenode cache so we can test fetching with a modified batch size
+ $ rm -r $TESTTMP/hgcache
+Now do a fetch with a large batch size so we're sure it works
+ $ hgcloneshallow http://localhost:$HGPORT/ shallow-large-batch \
+ > --config remotefilelog.batchsize=1000 -q
+ 2 files fetched over 1 fetches - (2 misses, 0.00% hit ratio) over *s (glob)
+
+The 'remotefilelog' capability should *not* be exported over http(s),
+as the getfile method it offers doesn't work with http.
+ $ get-with-headers.py localhost:$HGPORT '?cmd=capabilities' | grep lookup | identifyrflcaps
+ x_rfl_getfile
+ x_rfl_getflogheads
+
+ $ get-with-headers.py localhost:$HGPORT '?cmd=hello' | grep lookup | identifyrflcaps
+ x_rfl_getfile
+ x_rfl_getflogheads
+
+ $ get-with-headers.py localhost:$HGPORT '?cmd=this-command-does-not-exist' | head -n 1
+ 400 no such method: this-command-does-not-exist
+ $ get-with-headers.py localhost:$HGPORT '?cmd=x_rfl_getfiles' | head -n 1
+ 400 no such method: x_rfl_getfiles
+
+Verify that serving from a shallow clone doesn't allow remotefilelog
+fetches. This also serves to test the error handling for our batchable
+getfile RPC.
+
+ $ cd shallow
+ $ hg serve -p $HGPORT1 -d --pid-file=../hg2.pid -E ../error2.log
+ $ cd ..
+ $ cat hg2.pid >> $DAEMON_PIDS
+
+This GET should work, because this server is serving master, which is
+a full clone.
+
+ $ get-with-headers.py localhost:$HGPORT "$GET"
+ 200 Script output follows
+
+ 0\x00x\x9c3b\xa8\xe0\x12a{\xee(\x91T6E\xadE\xdcS\x9e\xb1\xcb\xab\xc30\xe8\x03\x03\x91 \xe4\xc6\xfb\x99J,\x17\x0c\x9f-\xcb\xfcR7c\xf3c\x97r\xbb\x10\x06\x00\x96m\x121 (no-eol) (esc)
+
+This GET should fail using the in-band signalling mechanism, because
+it's not a full clone. Note that it's also plausible for servers to
+refuse to serve file contents for other reasons, like the file
+contents not being visible to the current user.
+
+ $ get-with-headers.py localhost:$HGPORT1 "$GET"
+ 200 Script output follows
+
+ 1\x00cannot fetch remote files from shallow repo (no-eol) (esc)
+
+Clones should work with httppostargs turned on
+
+ $ cd master
+ $ hg --config experimental.httppostargs=1 serve -p $HGPORT2 -d --pid-file=../hg3.pid -E ../error3.log
+
+ $ cd ..
+ $ cat hg3.pid >> $DAEMON_PIDS
+
+Clear the filenode cache so the clone below must fetch its files again
+ $ rm -r $TESTTMP/hgcache
+
+ $ hgcloneshallow http://localhost:$HGPORT2/ shallow-postargs -q
+ 2 files fetched over 1 fetches - (2 misses, 0.00% hit ratio) over *s (glob)
+
+All error logs should be empty:
+ $ cat error.log
+ $ cat error2.log
+ $ cat error3.log
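The in-band signalling exercised above is visible in the raw bodies: an
x_rfl_getfile reply is a status, a NUL byte, then the payload - '0' plus
zlib-compressed file data on success (the 'x\x9c' header in the 200 response),
or '1' plus an error message such as "cannot fetch remote files from shallow
repo". A hypothetical client-side parse of such a reply, assuming exactly that
framing (not the extension's actual code):

    import zlib

    def parsegetfileresponse(raw):
        # raw is b"<status>\0<payload>"; status b"0" means success.
        status, payload = raw.split(b'\0', 1)
        if status != b'0':
            raise RuntimeError(payload.decode('utf-8', 'replace'))
        return zlib.decompress(payload)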
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/tests/test-remotefilelog-keepset.t Fri Jan 18 13:28:22 2019 -0500
@@ -0,0 +1,39 @@
+#require no-windows
+
+ $ . "$TESTDIR/remotefilelog-library.sh"
+
+ $ hg init master
+ $ cd master
+ $ cat >> .hg/hgrc <<EOF
+ > [remotefilelog]
+ > server=True
+ > serverexpiration=-1
+ > EOF
+ $ echo x > x
+ $ hg commit -qAm x
+ $ echo y > y
+ $ hg commit -qAm y
+ $ echo z > z
+ $ hg commit -qAm z
+ $ cd ..
+
+ $ hgcloneshallow ssh://user@dummy/master shallow -q
+ 3 files fetched over 1 fetches - (3 misses, 0.00% hit ratio) over *s (glob)
+
+# Compute the keepset for the 0th and 2nd commits. Since we do not process the
+# 1st commit, we diff the 2nd manifest against the 0th manifest and populate
+# the keepkeys from that diff.
+ $ cd shallow
+ $ cat >> .hg/hgrc <<EOF
+ > [remotefilelog]
+ > pullprefetch=0+2
+ > EOF
+ $ hg debugkeepset
+
+# Compute the keepset for all commits. Here we only process the manifest
+# deltas of commits 1 and 2, and therefore populate the keepkeys from those
+# deltas.
+ $ cat >> .hg/hgrc <<EOF
+ > [remotefilelog]
+ > pullprefetch=all()
+ > EOF
+ $ hg debugkeepset
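Both cases above reduce to the same primitive: keepkeys are (filename,
filenode) pairs. For a sparse prefetch revset like '0+2' the endpoints'
manifests are diffed; for a contiguous one like 'all()' the per-commit
manifest deltas are walked instead. A rough sketch of the diff branch,
assuming the standard manifest.diff() shape of
path -> ((oldnode, oldflags), (newnode, newflags)):

    def keepkeysfromdiff(repo, oldrev, newrev):
        m1 = repo[oldrev].manifest()
        m2 = repo[newrev].manifest()
        keepkeys = set()
        for path, ((oldnode, _of), (newnode, _nf)) in m1.diff(m2).items():
            if newnode is not None:     # ignore deletions
                keepkeys.add((path, newnode))
        return keepkeys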
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/tests/test-remotefilelog-linknodes.t Fri Jan 18 13:28:22 2019 -0500
@@ -0,0 +1,194 @@
+#require no-windows
+
+# Tests for the complicated linknode logic in remotefilelog.py::ancestormap()
+
+ $ . "$TESTDIR/remotefilelog-library.sh"
+
+ $ hg init master
+ $ cd master
+ $ cat >> .hg/hgrc <<EOF
+ > [remotefilelog]
+ > server=True
+ > serverexpiration=-1
+ > EOF
+ $ echo x > x
+ $ hg commit -qAm x
+ $ cd ..
+
+ $ hgcloneshallow ssh://user@dummy/master shallow -q
+ 1 files fetched over 1 fetches - (1 misses, 0.00% hit ratio) over *s (glob)
+
+# Rebase produces correct log -f linknodes
+
+ $ cd shallow
+ $ echo y > y
+ $ hg commit -qAm y
+ $ hg up 0
+ 0 files updated, 0 files merged, 1 files removed, 0 files unresolved
+ $ echo x >> x
+ $ hg commit -qAm xx
+ $ hg log -f x --template "{node|short}\n"
+ 0632994590a8
+ b292c1e3311f
+
+ $ hg rebase -d 1
+ rebasing 2:0632994590a8 "xx" (tip)
+ saved backup bundle to $TESTTMP/shallow/.hg/strip-backup/0632994590a8-0bc786d8-rebase.hg (glob)
+ $ hg log -f x --template "{node|short}\n"
+ 81deab2073bc
+ b292c1e3311f
+
+# Rebase back, log -f still works
+
+ $ hg rebase -d 0 -r 2
+ rebasing 2:81deab2073bc "xx" (tip)
+ saved backup bundle to $TESTTMP/shallow/.hg/strip-backup/81deab2073bc-80cb4fda-rebase.hg (glob)
+ $ hg log -f x --template "{node|short}\n"
+ b3fca10fb42d
+ b292c1e3311f
+
+ $ hg rebase -d 1 -r 2
+ rebasing 2:b3fca10fb42d "xx" (tip)
+ saved backup bundle to $TESTTMP/shallow/.hg/strip-backup/b3fca10fb42d-da73a0c7-rebase.hg (glob)
+
+ $ cd ..
+
+# Reset repos
+ $ clearcache
+
+ $ rm -rf master
+ $ rm -rf shallow
+ $ hg init master
+ $ cd master
+ $ cat >> .hg/hgrc <<EOF
+ > [remotefilelog]
+ > server=True
+ > serverexpiration=-1
+ > EOF
+ $ echo x > x
+ $ hg commit -qAm x
+ $ cd ..
+
+ $ hgcloneshallow ssh://user@dummy/master shallow -q
+ 1 files fetched over 1 fetches - (1 misses, 0.00% hit ratio) over *s (glob)
+
+# Rebase stack onto landed commit
+
+ $ cd master
+ $ echo x >> x
+ $ hg commit -Aqm xx
+
+ $ cd ../shallow
+ $ echo x >> x
+ $ hg commit -Aqm xx2
+ $ echo y >> x
+ $ hg commit -Aqm xxy
+
+ $ hg pull -q
+ $ hg rebase -d tip
+ rebasing 1:4549721d828f "xx2"
+ note: not rebasing 1:4549721d828f "xx2", its destination already has all its changes
+ rebasing 2:5ef6d97e851c "xxy"
+ saved backup bundle to $TESTTMP/shallow/.hg/strip-backup/4549721d828f-b084e33c-rebase.hg (glob)
+ $ hg log -f x --template '{node|short}\n'
+ 4ae8e31c85ef
+ 0632994590a8
+ b292c1e3311f
+
+ $ cd ..
+
+# system cache has an invalid linknode, but .hg/store/data has a valid one
+
+ $ cd shallow
+ $ hg strip -r 1 -q
+ $ rm -rf .hg/store/data/*
+ $ echo x >> x
+ $ hg commit -Aqm xx_local
+ $ hg log -f x --template '{rev}:{node|short}\n'
+ 1:21847713771d
+ 0:b292c1e3311f
+
+ $ cd ..
+ $ rm -rf shallow
+
+# Local linknode is invalid; remote linknode is valid (formerly slow case)
+
+ $ hgcloneshallow ssh://user@dummy/master shallow -q
+ 1 files fetched over 1 fetches - (1 misses, 0.00% hit ratio) over * (glob)
+ $ cd shallow
+ $ echo x >> x
+ $ hg commit -Aqm xx2
+ $ cd ../master
+ $ echo y >> y
+ $ hg commit -Aqm yy2
+ $ echo x >> x
+ $ hg commit -Aqm xx2-fake-rebased
+ $ echo y >> y
+ $ hg commit -Aqm yy3
+ $ cd ../shallow
+ $ hg pull --config remotefilelog.debug=True
+ pulling from ssh://user@dummy/master
+ searching for changes
+ adding changesets
+ adding manifests
+ adding file changes
+ added 3 changesets with 0 changes to 0 files (+1 heads)
+ new changesets 01979f9404f8:7200df4e0aca
+ (run 'hg heads' to see heads, 'hg merge' to merge)
+ $ hg update tip -q
+ 1 files fetched over 1 fetches - (1 misses, 0.00% hit ratio) over *s (glob)
+ $ echo x > x
+ $ hg commit -qAm xx3
+
+# At this point, the linknode points to c1254e70bad1 instead of 32e6611f6149
+ $ hg log -G -T '{node|short} {desc} {phase} {files}\n'
+ @ a5957b6bf0bd xx3 draft x
+ |
+ o 7200df4e0aca yy3 public y
+ |
+ o 32e6611f6149 xx2-fake-rebased public x
+ |
+ o 01979f9404f8 yy2 public y
+ |
+ | o c1254e70bad1 xx2 draft x
+ |/
+ o 0632994590a8 xx public x
+ |
+ o b292c1e3311f x public x
+
+# Check the contents of the local blob for incorrect linknode
+ $ hg debugremotefilelog .hg/store/data/11f6ad8ec52a2984abaafd7c3b516503785c2072/d4a3ed9310e5bd9887e3bf779da5077efab28216
+ size: 6 bytes
+ path: .hg/store/data/11f6ad8ec52a2984abaafd7c3b516503785c2072/d4a3ed9310e5bd9887e3bf779da5077efab28216
+ key: d4a3ed9310e5
+
+ node => p1 p2 linknode copyfrom
+ d4a3ed9310e5 => aee31534993a 000000000000 c1254e70bad1
+ aee31534993a => 1406e7411862 000000000000 0632994590a8
+ 1406e7411862 => 000000000000 000000000000 b292c1e3311f
+
+# Verify that we do a fetch on the first log (remote blob fetch for linkrev fix)
+ $ hg log -f x -T '{node|short} {desc} {phase} {files}\n'
+ a5957b6bf0bd xx3 draft x
+ 32e6611f6149 xx2-fake-rebased public x
+ 0632994590a8 xx public x
+ b292c1e3311f x public x
+ 1 files fetched over 1 fetches - (1 misses, 0.00% hit ratio) over *s (glob)
+
+# But not after that
+ $ hg log -f x -T '{node|short} {desc} {phase} {files}\n'
+ a5957b6bf0bd xx3 draft x
+ 32e6611f6149 xx2-fake-rebased public x
+ 0632994590a8 xx public x
+ b292c1e3311f x public x
+
+# Check the contents of the remote blob for correct linknode
+ $ hg debugremotefilelog $CACHEDIR/master/11/f6ad8ec52a2984abaafd7c3b516503785c2072/d4a3ed9310e5bd9887e3bf779da5077efab28216
+ size: 6 bytes
+ path: $TESTTMP/hgcache/master/11/f6ad8ec52a2984abaafd7c3b516503785c2072/d4a3ed9310e5bd9887e3bf779da5077efab28216
+ key: d4a3ed9310e5
+
+ node => p1 p2 linknode copyfrom
+ d4a3ed9310e5 => aee31534993a 000000000000 32e6611f6149
+ aee31534993a => 1406e7411862 000000000000 0632994590a8
+ 1406e7411862 => 000000000000 000000000000 b292c1e3311f
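The one-extra-fetch behaviour above is the linknode repair path:
ancestormap() trusts a cached linknode only if it still resolves to a visible
changeset; after a strip or a server-side rebase it refetches the remote blob,
whose ancestry carries the server's valid linknode, and subsequent reads hit
the corrected cache. A conceptual sketch, not the extension's actual control
flow:

    def resolvelinknode(repo, filename, node, localancestors, fetchremote):
        # localancestors maps node -> (p1, p2, linknode, copyfrom),
        # the same shape 'hg debugremotefilelog' prints above.
        linknode = localancestors[node][2]
        if linknode in repo:
            return linknode
        # Stale local linknode (stripped/rewritten changeset): refetch
        # the blob from the server and use its linknode instead.
        remoteancestors = fetchremote(filename, node)
        return remoteancestors[node][2]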
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/tests/test-remotefilelog-local.t Fri Jan 18 13:28:22 2019 -0500
@@ -0,0 +1,207 @@
+#require no-windows
+
+ $ . "$TESTDIR/remotefilelog-library.sh"
+
+ $ hg init master
+ $ cd master
+ $ cat >> .hg/hgrc <<EOF
+ > [remotefilelog]
+ > server=True
+ > EOF
+ $ echo x > x
+ $ echo y > y
+ $ echo z > z
+ $ hg commit -qAm xy
+
+ $ cd ..
+
+ $ hgcloneshallow ssh://user@dummy/master shallow -q
+ 3 files fetched over 1 fetches - (3 misses, 0.00% hit ratio) over *s (glob)
+ $ cd shallow
+
+# status
+
+ $ clearcache
+ $ echo xx > x
+ $ echo yy > y
+ $ touch a
+ $ hg status
+ M x
+ M y
+ ? a
+ 1 files fetched over 1 fetches - (1 misses, 0.00% hit ratio) over *s (glob)
+ $ hg add a
+ $ hg status
+ M x
+ M y
+ A a
+
+# diff
+
+ $ hg debugrebuilddirstate # fixes dirstate non-determinism
+ $ hg add a
+ $ clearcache
+ $ hg diff
+ diff -r f3d0bb0d1e48 x
+ --- a/x* (glob)
+ +++ b/x* (glob)
+ @@ -1,1 +1,1 @@
+ -x
+ +xx
+ diff -r f3d0bb0d1e48 y
+ --- a/y* (glob)
+ +++ b/y* (glob)
+ @@ -1,1 +1,1 @@
+ -y
+ +yy
+ 3 files fetched over 1 fetches - (3 misses, 0.00% hit ratio) over *s (glob)
+
+# local commit
+
+ $ clearcache
+ $ echo a > a
+ $ echo xxx > x
+ $ echo yyy > y
+ $ hg commit -m a
+ ? files fetched over 1 fetches - (? misses, 0.00% hit ratio) over *s (glob)
+
+# local commit where the dirstate is clean -- ensure that we do just one fetch
+# (update to a commit on the server first)
+
+ $ hg --config debug.dirstate.delaywrite=1 up 0
+ 2 files updated, 0 files merged, 1 files removed, 0 files unresolved
+ $ clearcache
+ $ hg debugdirstate
+ n 644 2 * x (glob)
+ n 644 2 * y (glob)
+ n 644 2 * z (glob)
+ $ echo xxxx > x
+ $ echo yyyy > y
+ $ hg commit -m x
+ created new head
+ 2 files fetched over 1 fetches - (2 misses, 0.00% hit ratio) over *s (glob)
+
+# restore state for future tests
+
+ $ hg -q strip .
+ $ hg -q up tip
+
+# rebase
+
+ $ clearcache
+ $ cd ../master
+ $ echo w > w
+ $ hg commit -qAm w
+
+ $ cd ../shallow
+ $ hg pull
+ pulling from ssh://user@dummy/master
+ searching for changes
+ adding changesets
+ adding manifests
+ adding file changes
+ added 1 changesets with 0 changes to 0 files (+1 heads)
+ new changesets fed61014d323
+ (run 'hg heads' to see heads, 'hg merge' to merge)
+
+ $ hg rebase -d tip
+ rebasing 1:9abfe7bca547 "a"
+ saved backup bundle to $TESTTMP/shallow/.hg/strip-backup/9abfe7bca547-8b11e5ff-rebase.hg (glob)
+ 3 files fetched over 2 fetches - (3 misses, 0.00% hit ratio) over *s (glob)
+
+# strip
+
+ $ clearcache
+ $ hg debugrebuilddirstate # fixes dirstate non-determinism
+ $ hg strip -r .
+ 2 files updated, 0 files merged, 1 files removed, 0 files unresolved
+ saved backup bundle to $TESTTMP/shallow/.hg/strip-backup/19edf50f4de7-df3d0f74-backup.hg (glob)
+ 4 files fetched over 2 fetches - (4 misses, 0.00% hit ratio) over *s (glob)
+
+# unbundle
+
+ $ clearcache
+ $ ls
+ w
+ x
+ y
+ z
+
+ $ hg debugrebuilddirstate # fixes dirstate non-determinism
+ $ hg unbundle .hg/strip-backup/19edf50f4de7-df3d0f74-backup.hg
+ adding changesets
+ adding manifests
+ adding file changes
+ added 1 changesets with 0 changes to 0 files
+ new changesets 19edf50f4de7 (1 drafts)
+ (run 'hg update' to get a working copy)
+
+ $ hg up
+ 3 files updated, 0 files merged, 0 files removed, 0 files unresolved
+ 4 files fetched over 1 fetches - (4 misses, 0.00% hit ratio) over *s (glob)
+ $ cat a
+ a
+
+# revert
+
+ $ clearcache
+ $ hg revert -r .~2 y z
+ no changes needed to z
+ 2 files fetched over 2 fetches - (2 misses, 0.00% hit ratio) over *s (glob)
+ $ hg checkout -C -r . -q
+
+# explicit bundle should produce full bundle file
+
+ $ hg bundle -r 2 --base 1 ../local.bundle
+ 1 changesets found
+ 1 files fetched over 1 fetches - (1 misses, 0.00% hit ratio) over *s (glob)
+ $ cd ..
+
+ $ hgcloneshallow ssh://user@dummy/master shallow2 -q
+ 1 files fetched over 1 fetches - (1 misses, 0.00% hit ratio) over *s (glob)
+ $ cd shallow2
+ $ hg unbundle ../local.bundle
+ adding changesets
+ adding manifests
+ adding file changes
+ added 1 changesets with 3 changes to 3 files
+ new changesets 19edf50f4de7 (1 drafts)
+ (run 'hg update' to get a working copy)
+
+ $ hg log -r 2 --stat
+ changeset: 2:19edf50f4de7
+ tag: tip
+ user: test
+ date: Thu Jan 01 00:00:00 1970 +0000
+ summary: a
+
+ a | 1 +
+ x | 2 +-
+ y | 2 +-
+ 3 files changed, 3 insertions(+), 2 deletions(-)
+
+# Merge
+
+ $ echo merge >> w
+ $ hg commit -m w
+ created new head
+ $ hg merge 2
+ 3 files updated, 0 files merged, 0 files removed, 0 files unresolved
+ (branch merge, don't forget to commit)
+ $ hg commit -m merge
+ $ hg strip -q -r ".^"
+
+# commit without producing a new node
+
+ $ cd $TESTTMP
+ $ hgcloneshallow ssh://user@dummy/master shallow3 -q
+ $ cd shallow3
+ $ echo 1 > A
+ $ hg commit -m foo -A A
+ $ hg log -r . -T '{node}\n'
+ 383ce605500277f879b7460a16ba620eb6930b7f
+ $ hg update -r '.^' -q
+ $ echo 1 > A
+ $ hg commit -m foo -A A
+ $ hg log -r . -T '{node}\n'
+ 383ce605500277f879b7460a16ba620eb6930b7f
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/tests/test-remotefilelog-log.t Fri Jan 18 13:28:22 2019 -0500
@@ -0,0 +1,118 @@
+#require no-windows
+
+ $ . "$TESTDIR/remotefilelog-library.sh"
+
+ $ hg init master
+ $ cd master
+ $ cat >> .hg/hgrc <<EOF
+ > [remotefilelog]
+ > server=True
+ > EOF
+ $ echo x > x
+ $ hg commit -qAm x
+ $ mkdir dir
+ $ echo y > dir/y
+ $ hg commit -qAm y
+
+ $ cd ..
+
+Shallow clone from full
+
+ $ hgcloneshallow ssh://user@dummy/master shallow --noupdate
+ streaming all changes
+ 2 files to transfer, 473 bytes of data
+ transferred 473 bytes in * seconds (*/sec) (glob)
+ searching for changes
+ no changes found
+ $ cd shallow
+ $ cat .hg/requires
+ dotencode
+ exp-remotefilelog-repo-req-1
+ fncache
+ generaldelta
+ revlogv1
+ sparserevlog
+ store
+
+ $ hg update
+ 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
+ 2 files fetched over 1 fetches - (2 misses, 0.00% hit ratio) over *s (glob)
+
+Log on a file without -f
+
+ $ hg log dir/y
+ warning: file log can be slow on large repos - use -f to speed it up
+ changeset: 1:2e73264fab97
+ tag: tip
+ user: test
+ date: Thu Jan 01 00:00:00 1970 +0000
+ summary: y
+
+Log on a file with -f
+
+ $ hg log -f dir/y
+ changeset: 1:2e73264fab97
+ tag: tip
+ user: test
+ date: Thu Jan 01 00:00:00 1970 +0000
+ summary: y
+
+Log on a file with an explicit 'path:' matcher kind
+ $ hg log -r "filelog('path:dir/y')"
+ changeset: 1:2e73264fab97
+ tag: tip
+ user: test
+ date: Thu Jan 01 00:00:00 1970 +0000
+ summary: y
+
+Log on multiple files with -f
+
+ $ hg log -f dir/y x
+ changeset: 1:2e73264fab97
+ tag: tip
+ user: test
+ date: Thu Jan 01 00:00:00 1970 +0000
+ summary: y
+
+ changeset: 0:b292c1e3311f
+ user: test
+ date: Thu Jan 01 00:00:00 1970 +0000
+ summary: x
+
+Log on a directory
+
+ $ hg log dir
+ changeset: 1:2e73264fab97
+ tag: tip
+ user: test
+ date: Thu Jan 01 00:00:00 1970 +0000
+ summary: y
+
+Log on a file from inside a directory
+
+ $ cd dir
+ $ hg log y
+ warning: file log can be slow on large repos - use -f to speed it up
+ changeset: 1:2e73264fab97
+ tag: tip
+ user: test
+ date: Thu Jan 01 00:00:00 1970 +0000
+ summary: y
+
+Log on a file via -fr
+ $ cd ..
+ $ hg log -fr tip dir/ --template '{rev}\n'
+ 1
+
+Trace renames
+ $ hg mv x z
+ $ hg commit -m move
+ $ hg log -f z -T '{desc} {file_copies}\n' -G
+ @ move z (x)
+ :
+ o x
+
+
+Verify that remotefilelog strips rename metadata when comparing file sizes
+ $ hg debugrebuilddirstate
+ $ hg status
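The last check above is about filelog copy metadata: a revision that records
a rename stores its text with a '\1\n'-delimited metadata prefix ('copy:' and
'copyrev:' keys), so size comparisons against the working copy are only
meaningful after stripping that prefix. A minimal sketch of the stripping,
following the standard filelog framing:

    def stripcopymetadata(text):
        # Metadata-carrying texts start with "\1\n<key: value lines>\1\n".
        if text.startswith(b'\x01\n'):
            end = text.index(b'\x01\n', 2)
            return text[end + 2:]
        return text

    assert stripcopymetadata(b'\x01\ncopy: x\ncopyrev: 0123\x01\nz\n') == b'z\n'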
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/tests/test-remotefilelog-partial-shallow.t Fri Jan 18 13:28:22 2019 -0500
@@ -0,0 +1,75 @@
+#require no-windows
+
+ $ . "$TESTDIR/remotefilelog-library.sh"
+
+ $ hg init master
+ $ cd master
+ $ cat >> .hg/hgrc <<EOF
+ > [remotefilelog]
+ > server=True
+ > EOF
+ $ echo x > foo
+ $ echo y > bar
+ $ hg commit -qAm one
+
+ $ cd ..
+
+# partial shallow clone
+
+ $ hg clone --shallow ssh://user@dummy/master shallow --noupdate --config remotefilelog.includepattern=foo
+ streaming all changes
+ 3 files to transfer, 336 bytes of data
+ transferred 336 bytes in * seconds (*/sec) (glob)
+ searching for changes
+ no changes found
+ $ cat >> shallow/.hg/hgrc <<EOF
+ > [remotefilelog]
+ > cachepath=$PWD/hgcache
+ > debug=True
+ > includepattern=foo
+ > reponame = master
+ > [extensions]
+ > remotefilelog=
+ > EOF
+ $ ls shallow/.hg/store/data
+ bar.i
+
+# update partial clone
+
+ $ cd shallow
+ $ hg update
+ 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
+ 1 files fetched over 1 fetches - (1 misses, 0.00% hit ratio) over *s (glob)
+ $ cat foo
+ x
+ $ cat bar
+ y
+ $ cd ..
+
+# pull partial clone
+
+ $ cd master
+ $ echo a >> foo
+ $ echo b >> bar
+ $ hg commit -qm two
+ $ cd ../shallow
+ $ hg pull
+ pulling from ssh://user@dummy/master
+ searching for changes
+ adding changesets
+ adding manifests
+ adding file changes
+ added 1 changesets with 0 changes to 0 files
+ new changesets a9688f18cb91
+ (run 'hg update' to get a working copy)
+ $ hg update
+ 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
+ 1 files fetched over 1 fetches - (1 misses, 0.00% hit ratio) over *s (glob)
+ $ cat foo
+ x
+ a
+ $ cat bar
+ y
+ b
+
+ $ cd ..
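Only 'foo' is shallow in the clone above; 'bar' keeps a normal revlog, hence
the bar.i under .hg/store/data. Client and server decide this per path from
remotefilelog.includepattern/excludepattern, conceptually a matcher. A rough
sketch assuming Mercurial's match API:

    from mercurial import match as matchmod

    def shallowmatcher(root, includepattern, excludepattern=None):
        # Paths the matcher accepts are served via remotefilelog;
        # everything else stays in local revlogs.
        return matchmod.match(root, b'', None,
                              include=includepattern,
                              exclude=excludepattern or [])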
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/tests/test-remotefilelog-permissions.t Fri Jan 18 13:28:22 2019 -0500
@@ -0,0 +1,46 @@
+#require no-windows
+
+ $ . "$TESTDIR/remotefilelog-library.sh"
+
+ $ hg init master
+ $ cd master
+ $ cat >> .hg/hgrc <<EOF
+ > [remotefilelog]
+ > server=True
+ > EOF
+ $ echo x > x
+ $ hg commit -qAm x
+
+ $ cd ..
+
+ $ hgcloneshallow ssh://user@dummy/master shallow -q
+ 1 files fetched over 1 fetches - (1 misses, 0.00% hit ratio) over *s (glob)
+
+ $ cd master
+ $ echo xx > x
+ $ hg commit -qAm x2
+ $ cd ..
+
+# Test cache misses with read-only permissions on the server
+
+ $ chmod -R a-w master/.hg/remotefilelogcache
+ $ cd shallow
+ $ hg pull -q
+ $ hg update
+ 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
+ 1 files fetched over 1 fetches - (1 misses, 0.00% hit ratio) over *s (glob)
+ $ cd ..
+
+ $ chmod -R u+w master/.hg/remotefilelogcache
+
+# Test setting up a shared cache with the right permissions
+# (this is hard to test in a cross-platform way, so we just make sure nothing
+# crashes)
+
+ $ rm -rf $CACHEDIR
+ $ umask 002
+ $ mkdir $CACHEDIR
+ $ hg -q clone --shallow ssh://user@dummy/master shallow2 --config remotefilelog.cachegroup="`id -g -n`"
+ 1 files fetched over 1 fetches - (1 misses, 0.00% hit ratio) over * (glob)
+ $ ls -ld $CACHEDIR/11
+ drwxrws* $TESTTMP/hgcache/11 (glob)
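The 'drwxrws*' assertion above is the point of remotefilelog.cachegroup: the
cache directories get the shared group plus the setgid bit, so blobs written
by any user stay accessible to the whole group. A minimal sketch of
establishing such a directory, assuming POSIX setgid semantics:

    import os
    import stat

    def makesharedcachedir(path):
        if not os.path.isdir(path):
            os.makedirs(path)
        # setgid: files created inside inherit the directory's group.
        os.chmod(path, stat.S_IRWXU | stat.S_IRWXG | stat.S_ISGID)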
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/tests/test-remotefilelog-prefetch.t Fri Jan 18 13:28:22 2019 -0500
@@ -0,0 +1,235 @@
+#require no-windows
+
+ $ . "$TESTDIR/remotefilelog-library.sh"
+
+ $ hg init master
+ $ cd master
+ $ cat >> .hg/hgrc <<EOF
+ > [remotefilelog]
+ > server=True
+ > EOF
+ $ echo x > x
+ $ echo z > z
+ $ hg commit -qAm x
+ $ echo x2 > x
+ $ echo y > y
+ $ hg commit -qAm y
+ $ hg bookmark foo
+
+ $ cd ..
+
+# prefetch a revision
+
+ $ hgcloneshallow ssh://user@dummy/master shallow --noupdate
+ streaming all changes
+ 2 files to transfer, 528 bytes of data
+ transferred 528 bytes in * seconds (*/sec) (glob)
+ searching for changes
+ no changes found
+ $ cd shallow
+
+ $ hg prefetch -r 0
+ 2 files fetched over 1 fetches - (2 misses, 0.00% hit ratio) over *s (glob)
+
+ $ hg cat -r 0 x
+ x
+
+# prefetch with base
+
+ $ clearcache
+ $ hg prefetch -r 0::1 -b 0
+ 2 files fetched over 1 fetches - (2 misses, 0.00% hit ratio) over *s (glob)
+
+ $ hg cat -r 1 x
+ x2
+ $ hg cat -r 1 y
+ y
+
+ $ hg cat -r 0 x
+ x
+ 1 files fetched over 1 fetches - (1 misses, 0.00% hit ratio) over *s (glob)
+
+ $ hg cat -r 0 z
+ z
+ 1 files fetched over 1 fetches - (1 misses, 0.00% hit ratio) over *s (glob)
+
+ $ hg prefetch -r 0::1 --base 0
+ $ hg prefetch -r 0::1 -b 1
+ $ hg prefetch -r 0::1
+
+# prefetch a range of revisions
+
+ $ clearcache
+ $ hg prefetch -r 0::1
+ 4 files fetched over 1 fetches - (4 misses, 0.00% hit ratio) over *s (glob)
+
+ $ hg cat -r 0 x
+ x
+ $ hg cat -r 1 x
+ x2
+
+# prefetch certain files
+
+ $ clearcache
+ $ hg prefetch -r 1 x
+ 1 files fetched over 1 fetches - (1 misses, 0.00% hit ratio) over *s (glob)
+
+ $ hg cat -r 1 x
+ x2
+
+ $ hg cat -r 1 y
+ y
+ 1 files fetched over 1 fetches - (1 misses, 0.00% hit ratio) over *s (glob)
+
+# prefetch on pull when configured
+
+ $ printf "[remotefilelog]\npullprefetch=bookmark()\n" >> .hg/hgrc
+ $ hg strip tip
+ saved backup bundle to $TESTTMP/shallow/.hg/strip-backup/109c3a557a73-3f43405e-backup.hg (glob)
+
+ $ clearcache
+ $ hg pull
+ pulling from ssh://user@dummy/master
+ searching for changes
+ adding changesets
+ adding manifests
+ adding file changes
+ added 1 changesets with 0 changes to 0 files
+ updating bookmark foo
+ new changesets 109c3a557a73
+ (run 'hg update' to get a working copy)
+ prefetching file contents
+ 3 files fetched over 1 fetches - (3 misses, 0.00% hit ratio) over *s (glob)
+
+ $ hg up tip
+ 3 files updated, 0 files merged, 0 files removed, 0 files unresolved
+
+# prefetch only fetches changes not in working copy
+
+ $ hg strip tip
+ 1 files updated, 0 files merged, 1 files removed, 0 files unresolved
+ saved backup bundle to $TESTTMP/shallow/.hg/strip-backup/109c3a557a73-3f43405e-backup.hg (glob)
+ 1 files fetched over 1 fetches - (1 misses, 0.00% hit ratio) over *s (glob)
+ $ clearcache
+
+ $ hg pull
+ pulling from ssh://user@dummy/master
+ searching for changes
+ adding changesets
+ adding manifests
+ adding file changes
+ added 1 changesets with 0 changes to 0 files
+ updating bookmark foo
+ new changesets 109c3a557a73
+ (run 'hg update' to get a working copy)
+ prefetching file contents
+ 2 files fetched over 1 fetches - (2 misses, 0.00% hit ratio) over *s (glob)
+
+# Make some local commits that produce the same file versions as are on the
+# server, to simulate a situation where we have local commits that were
+# somehow pushed, and that we will soon pull.
+
+ $ hg prefetch -r 'all()'
+ 2 files fetched over 1 fetches - (2 misses, 0.00% hit ratio) over *s (glob)
+ $ hg strip -q -r 0
+ $ echo x > x
+ $ echo z > z
+ $ hg commit -qAm x
+ $ echo x2 > x
+ $ echo y > y
+ $ hg commit -qAm y
+
+# prefetch server versions, even if local versions are available
+
+ $ clearcache
+ $ hg strip -q tip
+ $ hg pull
+ pulling from ssh://user@dummy/master
+ searching for changes
+ adding changesets
+ adding manifests
+ adding file changes
+ added 1 changesets with 0 changes to 0 files
+ updating bookmark foo
+ new changesets 109c3a557a73
+ 1 local changesets published (?)
+ (run 'hg update' to get a working copy)
+ prefetching file contents
+ 2 files fetched over 1 fetches - (2 misses, 0.00% hit ratio) over *s (glob)
+
+ $ cd ..
+
+# Prefetch unknown files during checkout
+
+ $ hgcloneshallow ssh://user@dummy/master shallow2
+ streaming all changes
+ 2 files to transfer, 528 bytes of data
+ transferred 528 bytes in * seconds * (glob)
+ searching for changes
+ no changes found
+ updating to branch default
+ 3 files updated, 0 files merged, 0 files removed, 0 files unresolved
+ 1 files fetched over 1 fetches - (1 misses, 0.00% hit ratio) over * (glob)
+ $ cd shallow2
+ $ hg up -q null
+ $ echo x > x
+ $ echo y > y
+ $ echo z > z
+ $ clearcache
+ $ hg up tip
+ x: untracked file differs
+ 3 files fetched over 1 fetches - (3 misses, 0.00% hit ratio) over * (glob)
+ abort: untracked files in working directory differ from files in requested revision
+ [255]
+ $ hg revert --all
+
+# Test batch fetching of lookup files during hg status
+ $ hg up --clean tip
+ 3 files updated, 0 files merged, 0 files removed, 0 files unresolved
+ $ hg debugrebuilddirstate
+ $ clearcache
+ $ hg status
+ 3 files fetched over 1 fetches - (3 misses, 0.00% hit ratio) over * (glob)
+
+# Prefetch during addremove rename detection
+ $ hg up -q --clean tip
+ $ hg revert --all
+ $ mv x x2
+ $ mv y y2
+ $ mv z z2
+ $ clearcache
+ $ hg addremove -s 50 > /dev/null
+ 3 files fetched over 1 fetches - (3 misses, 0.00% hit ratio) over * (glob)
+ $ hg revert --all
+ forgetting x2
+ forgetting y2
+ forgetting z2
+ undeleting x
+ undeleting y
+ undeleting z
+
+
+# Revert across double renames. Note: the scary "abort" error is caused by
+# https://bz.mercurial-scm.org/5419.
+
+ $ cd ../master
+ $ hg mv z z2
+ $ hg commit -m 'move z -> z2'
+ $ cd ../shallow2
+ $ hg pull -q
+ $ clearcache
+ $ hg mv y y2
+ y2: not overwriting - file exists
+ ('hg rename --after' to record the rename)
+ [1]
+ $ hg mv x x2
+ x2: not overwriting - file exists
+ ('hg rename --after' to record the rename)
+ [1]
+ $ hg mv z2 z3
+ z2: not copying - file is not managed
+ abort: no files to copy
+ [255]
+ $ hg revert -a -r 1 || true
+ 3 files fetched over 1 fetches - (3 misses, 0.00% hit ratio) over * (glob)
+ abort: z2@109c3a557a73: not found in manifest! (?)
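Several of the cases above ("3 files fetched over 1 fetches") come down to
batching: operations that would otherwise fault files in one at a time first
collect every missing (filename, filenode) key and issue a single getfile
round trip. A conceptual sketch of that pattern, reusing the store's
getmissing() shape from the histpack test earlier (not the extension's actual
API):

    def batchedcontents(store, keys, fetchbatch):
        # keys: list of (filename, filenode) pairs the caller is about
        # to read. Fetch every cache miss in a single round trip.
        missing = store.getmissing(keys)
        if missing:
            fetchbatch(missing)        # one fetch covering all misses
        return [store.get(name, node) for name, node in keys]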
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/tests/test-remotefilelog-pull-noshallow.t Fri Jan 18 13:28:22 2019 -0500
@@ -0,0 +1,79 @@
+#require no-windows
+
+ $ . "$TESTDIR/remotefilelog-library.sh"
+
+Set up an extension to make sure remotefilelog clientsetup() runs
+unconditionally even if we have never used a local shallow repo.
+This mimics the behavior when using remotefilelog with chg: clientsetup() can
+be triggered by a shallow repo, and the code can later interact with
+non-shallow repositories.
+
+ $ cat > setupremotefilelog.py << EOF
+ > from mercurial import extensions
+ > def extsetup(ui):
+ > remotefilelog = extensions.find(b'remotefilelog')
+ > remotefilelog.onetimeclientsetup(ui)
+ > EOF
+
+Set up the master repository to pull from.
+
+ $ hg init master
+ $ cd master
+ $ cat >> .hg/hgrc <<EOF
+ > [remotefilelog]
+ > server=True
+ > EOF
+ $ echo x > x
+ $ hg commit -qAm x
+
+ $ cd ..
+
+ $ hg clone ssh://user@dummy/master child -q
+
+We should see the remotefilelog capability here, which advertises that
+the server supports our custom getfiles method.
+
+ $ cd master
+ $ echo 'hello' | hg -R . serve --stdio | grep capa | identifyrflcaps
+ exp-remotefilelog-ssh-getfiles-1
+ x_rfl_getfile
+ x_rfl_getflogheads
+ $ echo 'capabilities' | hg -R . serve --stdio | identifyrflcaps ; echo
+ exp-remotefilelog-ssh-getfiles-1
+ x_rfl_getfile
+ x_rfl_getflogheads
+
+
+Pull to the child repository. Use our custom setupremotefilelog extension
+to ensure that remotefilelog.onetimeclientsetup() gets triggered. (Without
+using chg it normally would not be run in this case since the local repository
+is not shallow.)
+
+ $ echo y > y
+ $ hg commit -qAm y
+
+ $ cd ../child
+ $ hg pull --config extensions.setuprfl=$TESTTMP/setupremotefilelog.py
+ pulling from ssh://user@dummy/master
+ searching for changes
+ adding changesets
+ adding manifests
+ adding file changes
+ added 1 changesets with 1 changes to 1 files
+ new changesets d34c38483be9
+ (run 'hg update' to get a working copy)
+
+ $ hg up
+ 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
+
+ $ cat y
+ y
+
+Test that bundle works in a non-remotefilelog repo with remotefilelog loaded
+
+ $ echo y >> y
+ $ hg commit -qAm "modify y"
+ $ hg bundle --base ".^" --rev . mybundle.hg --config extensions.setuprfl=$TESTTMP/setupremotefilelog.py
+ 1 changesets found
+
+ $ cd ..
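The inline extension above works because onetimeclientsetup() is written to
be idempotent: a module-level guard ensures the wrapping happens at most once
per process, which matters under chg where one long-lived process may touch
both shallow and non-shallow repos. A minimal sketch of that guard pattern
(names are illustrative):

    _clientonetime = False

    def onetimeclientsetup(ui):
        global _clientonetime
        if _clientonetime:
            return
        _clientonetime = True
        # ... wrap core functions / register capabilities here ...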
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/tests/test-remotefilelog-push-pull.t Fri Jan 18 13:28:22 2019 -0500
@@ -0,0 +1,229 @@
+#require no-windows
+
+ $ . "$TESTDIR/remotefilelog-library.sh"
+
+ $ hg init master
+ $ cd master
+ $ cat >> .hg/hgrc <<EOF
+ > [remotefilelog]
+ > server=True
+ > EOF
+ $ echo x > x
+ $ hg commit -qAm x
+
+ $ cd ..
+
+ $ hgcloneshallow ssh://user@dummy/master shallow -q
+ 1 files fetched over 1 fetches - (1 misses, 0.00% hit ratio) over *s (glob)
+ $ hgcloneshallow ssh://user@dummy/master shallow2 -q
+
+We should see the remotefilelog capability here, which advertises that
+the server supports our custom getfiles method.
+
+ $ cd master
+ $ echo 'hello' | hg -R . serve --stdio | grep capa | identifyrflcaps
+ exp-remotefilelog-ssh-getfiles-1
+ x_rfl_getfile
+ x_rfl_getflogheads
+ $ echo 'capabilities' | hg -R . serve --stdio | identifyrflcaps ; echo
+ exp-remotefilelog-ssh-getfiles-1
+ x_rfl_getfile
+ x_rfl_getflogheads
+
+# pull to shallow from full
+
+ $ echo y > y
+ $ hg commit -qAm y
+
+ $ cd ../shallow
+ $ hg pull
+ pulling from ssh://user@dummy/master
+ searching for changes
+ adding changesets
+ adding manifests
+ adding file changes
+ added 1 changesets with 0 changes to 0 files
+ new changesets d34c38483be9
+ (run 'hg update' to get a working copy)
+
+ $ hg up
+ 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
+ 1 files fetched over 1 fetches - (1 misses, 0.00% hit ratio) over *s (glob)
+
+ $ cat y
+ y
+
+ $ cd ..
+
+# pull from shallow to shallow (local)
+
+ $ cd shallow
+ $ echo z > z
+ $ hg commit -qAm z
+ $ echo x >> x
+ $ echo y >> y
+ $ hg commit -qAm xxyy
+ $ cd ../shallow2
+ $ clearcache
+ $ hg pull ../shallow
+ pulling from ../shallow
+ searching for changes
+ adding changesets
+ adding manifests
+ adding file changes
+ added 3 changesets with 4 changes to 3 files
+ new changesets d34c38483be9:d7373980d475 (2 drafts)
+ (run 'hg update' to get a working copy)
+ 2 files fetched over 2 fetches - (2 misses, 0.00% hit ratio) over *s (glob)
+
+# pull from shallow to shallow (ssh)
+
+ $ hg strip -r 1
+ saved backup bundle to $TESTTMP/shallow2/.hg/strip-backup/d34c38483be9-89d325c9-backup.hg (glob)
+ $ hg pull ssh://user@dummy/$TESTTMP/shallow --config remotefilelog.cachepath=${CACHEDIR}2
+ pulling from ssh://user@dummy/$TESTTMP/shallow
+ searching for changes
+ adding changesets
+ adding manifests
+ adding file changes
+ added 3 changesets with 4 changes to 3 files
+ new changesets d34c38483be9:d7373980d475 (2 drafts)
+ (run 'hg update' to get a working copy)
+ 1 files fetched over 1 fetches - (1 misses, 0.00% hit ratio) over *s (glob)
+
+ $ hg up
+ 3 files updated, 0 files merged, 0 files removed, 0 files unresolved
+ $ cat z
+ z
+
+ $ hg -R ../shallow strip -qr 3
+ $ hg strip -qr 3
+ $ cd ..
+
+# push from shallow to shallow
+
+ $ cd shallow
+ $ echo a > a
+ $ hg commit -qAm a
+ $ hg push ssh://user@dummy/$TESTTMP/shallow2
+ pushing to ssh://user@dummy/$TESTTMP/shallow2
+ searching for changes
+ remote: adding changesets
+ remote: adding manifests
+ remote: adding file changes
+ remote: added 1 changesets with 1 changes to 1 files
+
+ $ cd ../shallow2
+ $ hg up
+ 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
+ $ cat a
+ a
+
+# verify files are read-only
+
+ $ ls -l .hg/store/data
+ total * (glob)
+ drwxrwxr-x* 11f6ad8ec52a2984abaafd7c3b516503785c2072 (glob)
+ drwxrwxr-x* 395df8f7c51f007019cb30201c49e884b46b92fa (glob)
+ drwxrwxr-x* 86f7e437faa5a7fce15d1ddcb9eaeaea377667b8 (glob)
+ drwxrwxr-x* 95cb0bfd2977c761298d9624e4b4d4c72a39974a (glob)
+ $ ls -l .hg/store/data/395df8f7c51f007019cb30201c49e884b46b92fa
+ total * (glob)
+ -r--r--r--* 69a1b67522704ec122181c0890bd16e9d3e7516a (glob)
+ -r--r--r--* 69a1b67522704ec122181c0890bd16e9d3e7516a_old (glob)
+ $ cd ..
+
+# push from shallow to full
+
+ $ cd shallow
+ $ hg push
+ pushing to ssh://user@dummy/master
+ searching for changes
+ remote: adding changesets
+ remote: adding manifests
+ remote: adding file changes
+ remote: added 2 changesets with 2 changes to 2 files
+
+ $ cd ../master
+ $ hg log -l 1 --style compact
+ 3[tip] 1489bbbc46f0 1970-01-01 00:00 +0000 test
+ a
+
+ $ hg up
+ 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
+ $ cat a
+ a
+
+# push public commits
+
+ $ cd ../shallow
+ $ echo p > p
+ $ hg commit -qAm p
+ $ hg phase -f -p -r .
+ $ echo d > d
+ $ hg commit -qAm d
+
+ $ cd ../shallow2
+ $ hg pull ../shallow
+ pulling from ../shallow
+ searching for changes
+ adding changesets
+ adding manifests
+ adding file changes
+ added 2 changesets with 2 changes to 2 files
+ new changesets 3a2e32c04641:cedeb4167c1f (1 drafts)
+ 2 local changesets published (?)
+ (run 'hg update' to get a working copy)
+
+ $ cd ..
+
+# Test pushing from shallow to shallow with multiple manifests introducing the
+# same filenode. Test this by constructing two separate histories of file 'c'
+# that share a filenode, and verifying that the history works after pushing.
+
+ $ hg init multimf-master
+ $ hgcloneshallow ssh://user@dummy/multimf-master multimf-shallow -q
+ $ hgcloneshallow ssh://user@dummy/multimf-master multimf-shallow2 -q
+ $ cd multimf-shallow
+ $ echo a > a
+ $ hg commit -qAm a
+ $ echo b > b
+ $ hg commit -qAm b
+ $ echo c > c
+ $ hg commit -qAm c1
+ $ hg up -q 0
+ $ echo c > c
+ $ hg commit -qAm c2
+ $ echo cc > c
+ $ hg commit -qAm c22
+ $ hg log -G -T '{rev} {desc}\n'
+ @ 4 c22
+ |
+ o 3 c2
+ |
+ | o 2 c1
+ | |
+ | o 1 b
+ |/
+ o 0 a
+
+
+ $ cd ../multimf-shallow2
+# initial commit to prevent hg pull from being a clone
+ $ echo z > z && hg commit -qAm z
+ $ hg pull -f ssh://user@dummy/$TESTTMP/multimf-shallow
+ pulling from ssh://user@dummy/$TESTTMP/multimf-shallow
+ searching for changes
+ warning: repository is unrelated
+ requesting all changes
+ adding changesets
+ adding manifests
+ adding file changes
+ added 5 changesets with 4 changes to 3 files (+2 heads)
+ new changesets cb9a9f314b8b:d8f06a4c6d38 (5 drafts)
+ (run 'hg heads' to see heads, 'hg merge' to merge)
+
+ $ hg up -q 5
+ $ hg log -f -T '{rev}\n' c
+ 5
+ 4
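The shared-filenode case works because a filenode is a pure function of the
file text and its filelog parents - sha1 over the sorted parent nodes
followed by the text - so 'c2' on top of 'a' reproduces exactly the node 'c1'
already introduced. A self-contained sketch of that hash:

    import hashlib

    NULLID = b'\0' * 20

    def filenode(text, p1=NULLID, p2=NULLID):
        # Mercurial's revlog hash: sha1(min(p1, p2) + max(p1, p2) + text).
        s = hashlib.sha1(min(p1, p2))
        s.update(max(p1, p2))
        s.update(text)
        return s.digest()

    # Both histories add 'c' with the same content and null parents,
    # hence the same filenode in two different manifests.
    assert filenode(b'c\n') == filenode(b'c\n')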
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/tests/test-remotefilelog-repack-fast.t Fri Jan 18 13:28:22 2019 -0500
@@ -0,0 +1,384 @@
+#require no-windows
+
+ $ . "$TESTDIR/remotefilelog-library.sh"
+
+ $ cat >> $HGRCPATH <<EOF
+ > [remotefilelog]
+ > fastdatapack=True
+ > EOF
+
+ $ hg init master
+ $ cd master
+ $ cat >> .hg/hgrc <<EOF
+ > [remotefilelog]
+ > server=True
+ > serverexpiration=-1
+ > EOF
+ $ echo x > x
+ $ hg commit -qAm x
+ $ echo x >> x
+ $ hg commit -qAm x2
+ $ cd ..
+
+ $ hgcloneshallow ssh://user@dummy/master shallow -q
+ 1 files fetched over 1 fetches - (1 misses, 0.00% hit ratio) over *s (glob)
+
+# Set the prefetchdays config to zero so that all commits are prefetched
+# no matter what their creation date is.
+ $ cd shallow
+ $ cat >> .hg/hgrc <<EOF
+ > [remotefilelog]
+ > prefetchdays=0
+ > EOF
+ $ cd ..
+
+# Test that repack cleans up the old files and creates new packs
+
+ $ cd shallow
+ $ find $CACHEDIR | sort
+ $TESTTMP/hgcache
+ $TESTTMP/hgcache/master
+ $TESTTMP/hgcache/master/11
+ $TESTTMP/hgcache/master/11/f6ad8ec52a2984abaafd7c3b516503785c2072
+ $TESTTMP/hgcache/master/11/f6ad8ec52a2984abaafd7c3b516503785c2072/aee31534993a501858fb6dd96a065671922e7d51
+ $TESTTMP/hgcache/repos
+
+ $ hg repack
+
+ $ find $CACHEDIR | sort
+ $TESTTMP/hgcache
+ $TESTTMP/hgcache/master
+ $TESTTMP/hgcache/master/packs
+ $TESTTMP/hgcache/master/packs/1e91b207daf5d7b48f1be9c587d6b5ae654ce78c.histidx
+ $TESTTMP/hgcache/master/packs/1e91b207daf5d7b48f1be9c587d6b5ae654ce78c.histpack
+ $TESTTMP/hgcache/master/packs/b1e0cfc7f345e408a7825e3081501959488d59ce.dataidx
+ $TESTTMP/hgcache/master/packs/b1e0cfc7f345e408a7825e3081501959488d59ce.datapack
+ $TESTTMP/hgcache/master/packs/repacklock
+ $TESTTMP/hgcache/repos
+
+# Test that the packs are read-only
+ $ ls_l $CACHEDIR/master/packs
+ -r--r--r-- 1145 1e91b207daf5d7b48f1be9c587d6b5ae654ce78c.histidx
+ -r--r--r-- 172 1e91b207daf5d7b48f1be9c587d6b5ae654ce78c.histpack
+ -r--r--r-- 1074 b1e0cfc7f345e408a7825e3081501959488d59ce.dataidx
+ -r--r--r-- 72 b1e0cfc7f345e408a7825e3081501959488d59ce.datapack
+ -rw-r--r-- 0 repacklock
+
+# Test that the data in the new packs is accessible
+ $ hg cat -r . x
+ x
+ x
+
+# Test that adding new data and repacking it results in the loose data and the
+# old packs being combined.
+
+ $ cd ../master
+ $ echo x >> x
+ $ hg commit -m x3
+ $ cd ../shallow
+ $ hg pull -q
+ $ hg up -q tip
+ 1 files fetched over 1 fetches - (1 misses, 0.00% hit ratio) over * (glob)
+
+ $ find $CACHEDIR -type f | sort
+ $TESTTMP/hgcache/master/11/f6ad8ec52a2984abaafd7c3b516503785c2072/d4a3ed9310e5bd9887e3bf779da5077efab28216
+ $TESTTMP/hgcache/master/packs/1e91b207daf5d7b48f1be9c587d6b5ae654ce78c.histidx
+ $TESTTMP/hgcache/master/packs/1e91b207daf5d7b48f1be9c587d6b5ae654ce78c.histpack
+ $TESTTMP/hgcache/master/packs/b1e0cfc7f345e408a7825e3081501959488d59ce.dataidx
+ $TESTTMP/hgcache/master/packs/b1e0cfc7f345e408a7825e3081501959488d59ce.datapack
+ $TESTTMP/hgcache/master/packs/repacklock
+ $TESTTMP/hgcache/repos
+
+ $ hg repack --traceback
+
+ $ find $CACHEDIR -type f | sort
+ $TESTTMP/hgcache/master/packs/78840d69389c7404327f7477e3931c89945c37d1.dataidx
+ $TESTTMP/hgcache/master/packs/78840d69389c7404327f7477e3931c89945c37d1.datapack
+ $TESTTMP/hgcache/master/packs/8abe7889aae389337d12ebe6085d4ee13854c7c9.histidx
+ $TESTTMP/hgcache/master/packs/8abe7889aae389337d12ebe6085d4ee13854c7c9.histpack
+ $TESTTMP/hgcache/master/packs/repacklock
+ $TESTTMP/hgcache/repos
+
+# Verify all the file data is still available
+ $ hg cat -r . x
+ x
+ x
+ x
+ $ hg cat -r '.^' x
+ x
+ x
+
+# Test that repacking again without new data neither deletes the pack files
+# nor changes the pack names
+ $ hg repack
+ $ find $CACHEDIR -type f | sort
+ $TESTTMP/hgcache/master/packs/78840d69389c7404327f7477e3931c89945c37d1.dataidx
+ $TESTTMP/hgcache/master/packs/78840d69389c7404327f7477e3931c89945c37d1.datapack
+ $TESTTMP/hgcache/master/packs/8abe7889aae389337d12ebe6085d4ee13854c7c9.histidx
+ $TESTTMP/hgcache/master/packs/8abe7889aae389337d12ebe6085d4ee13854c7c9.histpack
+ $TESTTMP/hgcache/master/packs/repacklock
+ $TESTTMP/hgcache/repos
+
+# Run two repacks at once
+ $ hg repack --config "hooks.prerepack=sleep 3" &
+ $ sleep 1
+ $ hg repack
+ skipping repack - another repack is already running
+ $ hg debugwaitonrepack >/dev/null 2>&1
+
+# Run repack in the background
+ $ cd ../master
+ $ echo x >> x
+ $ hg commit -m x4
+ $ cd ../shallow
+ $ hg pull -q
+ $ hg up -q tip
+ 1 files fetched over 1 fetches - (1 misses, 0.00% hit ratio) over * (glob)
+ $ find $CACHEDIR -type f | sort
+ $TESTTMP/hgcache/master/11/f6ad8ec52a2984abaafd7c3b516503785c2072/1bb2e6237e035c8f8ef508e281f1ce075bc6db72
+ $TESTTMP/hgcache/master/packs/78840d69389c7404327f7477e3931c89945c37d1.dataidx
+ $TESTTMP/hgcache/master/packs/78840d69389c7404327f7477e3931c89945c37d1.datapack
+ $TESTTMP/hgcache/master/packs/8abe7889aae389337d12ebe6085d4ee13854c7c9.histidx
+ $TESTTMP/hgcache/master/packs/8abe7889aae389337d12ebe6085d4ee13854c7c9.histpack
+ $TESTTMP/hgcache/master/packs/repacklock
+ $TESTTMP/hgcache/repos
+
+ $ hg repack --background
+ (running background repack)
+ $ sleep 0.5
+ $ hg debugwaitonrepack >/dev/null 2>&1
+ $ find $CACHEDIR -type f | sort
+ $TESTTMP/hgcache/master/packs/39443fa1064182e93d968b5cba292eb5283260d0.dataidx
+ $TESTTMP/hgcache/master/packs/39443fa1064182e93d968b5cba292eb5283260d0.datapack
+ $TESTTMP/hgcache/master/packs/604552d403a1381749faf656feca0ca265a6d52c.histidx
+ $TESTTMP/hgcache/master/packs/604552d403a1381749faf656feca0ca265a6d52c.histpack
+ $TESTTMP/hgcache/master/packs/repacklock
+ $TESTTMP/hgcache/repos
+
+# Test debug commands
+
+ $ hg debugdatapack $TESTTMP/hgcache/master/packs/*.datapack
+ $TESTTMP/hgcache/master/packs/39443fa1064182e93d968b5cba292eb5283260d0:
+ x:
+ Node Delta Base Delta Length Blob Size
+ 1bb2e6237e03 000000000000 8 8
+ d4a3ed9310e5 1bb2e6237e03 12 6
+ aee31534993a d4a3ed9310e5 12 4
+
+ Total: 32 18 (77.8% bigger)
+ $ hg debugdatapack --long $TESTTMP/hgcache/master/packs/*.datapack
+ $TESTTMP/hgcache/master/packs/39443fa1064182e93d968b5cba292eb5283260d0:
+ x:
+ Node Delta Base Delta Length Blob Size
+ 1bb2e6237e035c8f8ef508e281f1ce075bc6db72 0000000000000000000000000000000000000000 8 8
+ d4a3ed9310e5bd9887e3bf779da5077efab28216 1bb2e6237e035c8f8ef508e281f1ce075bc6db72 12 6
+ aee31534993a501858fb6dd96a065671922e7d51 d4a3ed9310e5bd9887e3bf779da5077efab28216 12 4
+
+ Total: 32 18 (77.8% bigger)
+ $ hg debugdatapack $TESTTMP/hgcache/master/packs/*.datapack --node d4a3ed9310e5bd9887e3bf779da5077efab28216
+ $TESTTMP/hgcache/master/packs/39443fa1064182e93d968b5cba292eb5283260d0:
+
+ x
+ Node Delta Base Delta SHA1 Delta Length
+ d4a3ed9310e5bd9887e3bf779da5077efab28216 1bb2e6237e035c8f8ef508e281f1ce075bc6db72 77029ab56e83ea2115dd53ff87483682abe5d7ca 12
+ Node Delta Base Delta SHA1 Delta Length
+ 1bb2e6237e035c8f8ef508e281f1ce075bc6db72 0000000000000000000000000000000000000000 7ca8c71a64f7b56380e77573da2f7a5fdd2ecdb5 8
+ $ hg debughistorypack $TESTTMP/hgcache/master/packs/*.histidx
+
+ x
+ Node P1 Node P2 Node Link Node Copy From
+ 1bb2e6237e03 d4a3ed9310e5 000000000000 0b03bbc9e1e7
+ d4a3ed9310e5 aee31534993a 000000000000 421535db10b6
+ aee31534993a 1406e7411862 000000000000 a89d614e2364
+ 1406e7411862 000000000000 000000000000 b292c1e3311f
+
+# Test copy tracing from a pack
+ $ cd ../master
+ $ hg mv x y
+ $ hg commit -m 'move x to y'
+ $ cd ../shallow
+ $ hg pull -q
+ $ hg up -q tip
+ 1 files fetched over 1 fetches - (1 misses, 0.00% hit ratio) over * (glob)
+ $ hg repack
+ $ hg log -f y -T '{desc}\n'
+ move x to y
+ x4
+ x3
+ x2
+ x
+
+# Test copy trace across rename and back
+ $ cp -R $TESTTMP/hgcache/master/packs $TESTTMP/backuppacks
+ $ cd ../master
+ $ hg mv y x
+ $ hg commit -m 'move y back to x'
+ $ hg revert -r 0 x
+ $ mv x y
+ $ hg add y
+ $ echo >> y
+ $ hg revert x
+ $ hg commit -m 'add y back without metadata'
+ $ cd ../shallow
+ $ hg pull -q
+ $ hg up -q tip
+ 2 files fetched over 2 fetches - (2 misses, 0.00% hit ratio) over * (glob)
+ $ hg repack
+ $ ls $TESTTMP/hgcache/master/packs
+ bfd60adb76018bb952e27cd23fc151bf94865d7d.histidx
+ bfd60adb76018bb952e27cd23fc151bf94865d7d.histpack
+ fb3aa57b22789ebcc45706c352e2d6af099c5816.dataidx
+ fb3aa57b22789ebcc45706c352e2d6af099c5816.datapack
+ repacklock
+ $ hg debughistorypack $TESTTMP/hgcache/master/packs/*.histidx
+
+ x
+ Node P1 Node P2 Node Link Node Copy From
+ cd410a44d584 577959738234 000000000000 609547eda446 y
+ 1bb2e6237e03 d4a3ed9310e5 000000000000 0b03bbc9e1e7
+ d4a3ed9310e5 aee31534993a 000000000000 421535db10b6
+ aee31534993a 1406e7411862 000000000000 a89d614e2364
+ 1406e7411862 000000000000 000000000000 b292c1e3311f
+
+ y
+ Node P1 Node P2 Node Link Node Copy From
+ 577959738234 1bb2e6237e03 000000000000 c7faf2fc439a x
+ 21f46f2721e7 000000000000 000000000000 d6868642b790
+ $ hg strip -r '.^'
+ 1 files updated, 0 files merged, 1 files removed, 0 files unresolved
+ saved backup bundle to $TESTTMP/shallow/.hg/strip-backup/609547eda446-b26b56a8-backup.hg (glob)
+ $ hg -R ../master strip -r '.^'
+ 1 files updated, 0 files merged, 1 files removed, 0 files unresolved
+ saved backup bundle to $TESTTMP/master/.hg/strip-backup/609547eda446-b26b56a8-backup.hg (glob)
+
+ $ rm -rf $TESTTMP/hgcache/master/packs
+ $ cp -R $TESTTMP/backuppacks $TESTTMP/hgcache/master/packs
+
+# Test repacking datapack without history
+ $ rm -rf $CACHEDIR/master/packs/*hist*
+ $ hg repack
+ $ hg debugdatapack $TESTTMP/hgcache/master/packs/*.datapack
+ $TESTTMP/hgcache/master/packs/922aca43dbbeda4d250565372e8892ec7b08da6a:
+ x:
+ Node Delta Base Delta Length Blob Size
+ 1bb2e6237e03 000000000000 8 8
+ d4a3ed9310e5 1bb2e6237e03 12 6
+ aee31534993a d4a3ed9310e5 12 4
+
+ Total: 32 18 (77.8% bigger)
+ y:
+ Node Delta Base Delta Length Blob Size
+ 577959738234 000000000000 70 8
+
+ Total: 70 8 (775.0% bigger)
+
+ $ hg cat -r ".^" x
+ x
+ x
+ x
+ x
+
+Incremental repack
+ $ rm -rf $CACHEDIR/master/packs/*
+ $ cat >> .hg/hgrc <<EOF
+ > [remotefilelog]
+ > data.generations=60
+ > 150
+ > EOF
+
+Single pack - repack does nothing
+ $ hg prefetch -r 0
+ 1 files fetched over 1 fetches - (1 misses, 0.00% hit ratio) over * (glob)
+ $ ls_l $TESTTMP/hgcache/master/packs/ | grep datapack
+ [1]
+ $ ls_l $TESTTMP/hgcache/master/packs/ | grep histpack
+ [1]
+ $ hg repack --incremental
+ $ ls_l $TESTTMP/hgcache/master/packs/ | grep datapack
+ -r--r--r-- 70 052643fdcdebbd42d7c180a651a30d46098e6fe1.datapack
+ $ ls_l $TESTTMP/hgcache/master/packs/ | grep histpack
+ -r--r--r-- 90 955a622173324b2d8b53e1147f209f1cf125302e.histpack
+
+3 gen1 packs, 1 gen0 pack - packs 3 gen1 into 1
+ $ hg prefetch -r 1
+ 1 files fetched over 1 fetches - (1 misses, 0.00% hit ratio) over * (glob)
+ $ hg prefetch -r 2
+ 1 files fetched over 1 fetches - (1 misses, 0.00% hit ratio) over * (glob)
+ $ hg prefetch -r 3
+ 1 files fetched over 1 fetches - (1 misses, 0.00% hit ratio) over * (glob)
+ $ ls_l $TESTTMP/hgcache/master/packs/ | grep datapack
+ -r--r--r-- 70 052643fdcdebbd42d7c180a651a30d46098e6fe1.datapack
+ $ ls_l $TESTTMP/hgcache/master/packs/ | grep histpack
+ -r--r--r-- 90 955a622173324b2d8b53e1147f209f1cf125302e.histpack
+ $ hg repack --incremental
+ $ ls_l $TESTTMP/hgcache/master/packs/ | grep datapack
+ -r--r--r-- 70 052643fdcdebbd42d7c180a651a30d46098e6fe1.datapack
+ -r--r--r-- 226 39443fa1064182e93d968b5cba292eb5283260d0.datapack
+ $ ls_l $TESTTMP/hgcache/master/packs/ | grep histpack
+ -r--r--r-- 336 604552d403a1381749faf656feca0ca265a6d52c.histpack
+ -r--r--r-- 90 955a622173324b2d8b53e1147f209f1cf125302e.histpack
+
+1 gen3 pack, 1 gen0 pack - does nothing
+ $ hg repack --incremental
+ $ ls_l $TESTTMP/hgcache/master/packs/ | grep datapack
+ -r--r--r-- 70 052643fdcdebbd42d7c180a651a30d46098e6fe1.datapack
+ -r--r--r-- 226 39443fa1064182e93d968b5cba292eb5283260d0.datapack
+ $ ls_l $TESTTMP/hgcache/master/packs/ | grep histpack
+ -r--r--r-- 336 604552d403a1381749faf656feca0ca265a6d52c.histpack
+ -r--r--r-- 90 955a622173324b2d8b53e1147f209f1cf125302e.histpack
+
+Pull should run background repack
+ $ cat >> .hg/hgrc <<EOF
+ > [remotefilelog]
+ > backgroundrepack=True
+ > EOF
+ $ clearcache
+ $ hg prefetch -r 0
+ 1 files fetched over 1 fetches - (1 misses, 0.00% hit ratio) over * (glob)
+ $ hg prefetch -r 1
+ 1 files fetched over 1 fetches - (1 misses, 0.00% hit ratio) over * (glob)
+ $ hg prefetch -r 2
+ 1 files fetched over 1 fetches - (1 misses, 0.00% hit ratio) over * (glob)
+ $ hg prefetch -r 3
+ 1 files fetched over 1 fetches - (1 misses, 0.00% hit ratio) over * (glob)
+
+ $ hg pull
+ pulling from ssh://user@dummy/master
+ searching for changes
+ no changes found
+ (running background incremental repack)
+ $ sleep 0.5
+ $ hg debugwaitonrepack >/dev/null 2>&1
+ $ ls_l $TESTTMP/hgcache/master/packs/ | grep datapack
+ -r--r--r-- 303 156a6c1c83aeb69422d7936e0a46ba9bc06a71c0.datapack
+ $ ls_l $TESTTMP/hgcache/master/packs/ | grep histpack
+ -r--r--r-- 336 604552d403a1381749faf656feca0ca265a6d52c.histpack
+
+Test environment variable resolution
+ $ CACHEPATH=$TESTTMP/envcache hg prefetch --config 'remotefilelog.cachepath=$CACHEPATH'
+ 1 files fetched over 1 fetches - (1 misses, 0.00% hit ratio) over * (glob)
+ $ find $TESTTMP/envcache | sort
+ $TESTTMP/envcache
+ $TESTTMP/envcache/master
+ $TESTTMP/envcache/master/95
+ $TESTTMP/envcache/master/95/cb0bfd2977c761298d9624e4b4d4c72a39974a
+ $TESTTMP/envcache/master/95/cb0bfd2977c761298d9624e4b4d4c72a39974a/577959738234a1eb241ed3ed4b22a575833f56e0
+ $TESTTMP/envcache/repos
+
+Test local remotefilelog blob is correct when based on a pack
+ $ hg prefetch -r .
+ 1 files fetched over 1 fetches - (1 misses, 0.00% hit ratio) over * (glob)
+ $ echo >> y
+ $ hg commit -m y2
+ $ hg debugremotefilelog .hg/store/data/95cb0bfd2977c761298d9624e4b4d4c72a39974a/b70860edba4f8242a1d52f2a94679dd23cb76808
+ size: 9 bytes
+ path: .hg/store/data/95cb0bfd2977c761298d9624e4b4d4c72a39974a/b70860edba4f8242a1d52f2a94679dd23cb76808
+ key: b70860edba4f
+
+ node => p1 p2 linknode copyfrom
+ b70860edba4f => 577959738234 000000000000 08d3fbc98c48
+ 577959738234 => 1bb2e6237e03 000000000000 c7faf2fc439a x
+ 1bb2e6237e03 => d4a3ed9310e5 000000000000 0b03bbc9e1e7
+ d4a3ed9310e5 => aee31534993a 000000000000 421535db10b6
+ aee31534993a => 1406e7411862 000000000000 a89d614e2364
+ 1406e7411862 => 000000000000 000000000000 b292c1e3311f
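The data.generations setting above (60 and 150) splits existing packs into
size generations, and --incremental only coalesces a generation once it holds
enough small packs, leaving already-large packs untouched - hence "repack
does nothing" with a single pack. A rough sketch of the bucketing, with a
simplified trigger rule (the real heuristics live in the repack module):

    def bucketbysize(packsizes, generations=(60, 150)):
        # packsizes: list of (path, bytes). generations: ascending size
        # thresholds; the last bucket takes everything larger.
        buckets = [[] for _ in range(len(generations) + 1)]
        for path, size in packsizes:
            for i, limit in enumerate(generations):
                if size <= limit:
                    buckets[i].append(path)
                    break
            else:
                buckets[-1].append(path)
        return buckets

    def chooserepack(buckets, minpacks=3):
        # Repack the first (smallest) generation that has accumulated
        # at least minpacks packs; otherwise do nothing.
        for bucket in buckets:
            if len(bucket) >= minpacks:
                return bucket
        return []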
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/tests/test-remotefilelog-repack.t Fri Jan 18 13:28:22 2019 -0500
@@ -0,0 +1,462 @@
+#require no-windows
+
+ $ . "$TESTDIR/remotefilelog-library.sh"
+
+ $ hg init master
+ $ cd master
+ $ cat >> .hg/hgrc <<EOF
+ > [remotefilelog]
+ > server=True
+ > serverexpiration=-1
+ > EOF
+ $ echo x > x
+ $ hg commit -qAm x
+ $ echo x >> x
+ $ hg commit -qAm x2
+ $ cd ..
+
+ $ hgcloneshallow ssh://user@dummy/master shallow -q
+ 1 files fetched over 1 fetches - (1 misses, 0.00% hit ratio) over *s (glob)
+
+# Set the prefetchdays config to zero so that all commits are prefetched
+# no matter what their creation date is.
+ $ cd shallow
+ $ cat >> .hg/hgrc <<EOF
+ > [remotefilelog]
+ > prefetchdays=0
+ > EOF
+ $ cd ..
+
+# Test that repack cleans up the old files and creates new packs
+
+ $ cd shallow
+ $ find $CACHEDIR | sort
+ $TESTTMP/hgcache
+ $TESTTMP/hgcache/master
+ $TESTTMP/hgcache/master/11
+ $TESTTMP/hgcache/master/11/f6ad8ec52a2984abaafd7c3b516503785c2072
+ $TESTTMP/hgcache/master/11/f6ad8ec52a2984abaafd7c3b516503785c2072/aee31534993a501858fb6dd96a065671922e7d51
+ $TESTTMP/hgcache/repos
+
+ $ hg repack
+
+ $ find $CACHEDIR | sort
+ $TESTTMP/hgcache
+ $TESTTMP/hgcache/master
+ $TESTTMP/hgcache/master/packs
+ $TESTTMP/hgcache/master/packs/1e91b207daf5d7b48f1be9c587d6b5ae654ce78c.histidx
+ $TESTTMP/hgcache/master/packs/1e91b207daf5d7b48f1be9c587d6b5ae654ce78c.histpack
+ $TESTTMP/hgcache/master/packs/b1e0cfc7f345e408a7825e3081501959488d59ce.dataidx
+ $TESTTMP/hgcache/master/packs/b1e0cfc7f345e408a7825e3081501959488d59ce.datapack
+ $TESTTMP/hgcache/master/packs/repacklock
+ $TESTTMP/hgcache/repos
+
+# Test that the packs are read-only
+ $ ls_l $CACHEDIR/master/packs
+ -r--r--r-- 1145 1e91b207daf5d7b48f1be9c587d6b5ae654ce78c.histidx
+ -r--r--r-- 172 1e91b207daf5d7b48f1be9c587d6b5ae654ce78c.histpack
+ -r--r--r-- 1074 b1e0cfc7f345e408a7825e3081501959488d59ce.dataidx
+ -r--r--r-- 72 b1e0cfc7f345e408a7825e3081501959488d59ce.datapack
+ -rw-r--r-- 0 repacklock
+
+# Test that the data in the new packs is accessible
+ $ hg cat -r . x
+ x
+ x
+
+# Test that adding new data and repacking it results in the loose data and the
+# old packs being combined.
+
+ $ cd ../master
+ $ echo x >> x
+ $ hg commit -m x3
+ $ cd ../shallow
+ $ hg pull -q
+ $ hg up -q tip
+ 1 files fetched over 1 fetches - (1 misses, 0.00% hit ratio) over * (glob)
+
+ $ find $CACHEDIR -type f | sort
+ $TESTTMP/hgcache/master/11/f6ad8ec52a2984abaafd7c3b516503785c2072/d4a3ed9310e5bd9887e3bf779da5077efab28216
+ $TESTTMP/hgcache/master/packs/1e91b207daf5d7b48f1be9c587d6b5ae654ce78c.histidx
+ $TESTTMP/hgcache/master/packs/1e91b207daf5d7b48f1be9c587d6b5ae654ce78c.histpack
+ $TESTTMP/hgcache/master/packs/b1e0cfc7f345e408a7825e3081501959488d59ce.dataidx
+ $TESTTMP/hgcache/master/packs/b1e0cfc7f345e408a7825e3081501959488d59ce.datapack
+ $TESTTMP/hgcache/master/packs/repacklock
+ $TESTTMP/hgcache/repos
+
+# First assert that with --packsonly, the loose object will be ignored:
+
+ $ hg repack --packsonly
+
+ $ find $CACHEDIR -type f | sort
+ $TESTTMP/hgcache/master/11/f6ad8ec52a2984abaafd7c3b516503785c2072/d4a3ed9310e5bd9887e3bf779da5077efab28216
+ $TESTTMP/hgcache/master/packs/1e91b207daf5d7b48f1be9c587d6b5ae654ce78c.histidx
+ $TESTTMP/hgcache/master/packs/1e91b207daf5d7b48f1be9c587d6b5ae654ce78c.histpack
+ $TESTTMP/hgcache/master/packs/b1e0cfc7f345e408a7825e3081501959488d59ce.dataidx
+ $TESTTMP/hgcache/master/packs/b1e0cfc7f345e408a7825e3081501959488d59ce.datapack
+ $TESTTMP/hgcache/master/packs/repacklock
+ $TESTTMP/hgcache/repos
+
+ $ hg repack --traceback
+
+ $ find $CACHEDIR -type f | sort
+ $TESTTMP/hgcache/master/packs/78840d69389c7404327f7477e3931c89945c37d1.dataidx
+ $TESTTMP/hgcache/master/packs/78840d69389c7404327f7477e3931c89945c37d1.datapack
+ $TESTTMP/hgcache/master/packs/8abe7889aae389337d12ebe6085d4ee13854c7c9.histidx
+ $TESTTMP/hgcache/master/packs/8abe7889aae389337d12ebe6085d4ee13854c7c9.histpack
+ $TESTTMP/hgcache/master/packs/repacklock
+ $TESTTMP/hgcache/repos
+
+# Verify all the file data is still available
+ $ hg cat -r . x
+ x
+ x
+ x
+ $ hg cat -r '.^' x
+ x
+ x
+
+# Test that repacking again without new data neither deletes the pack files
+# nor changes the pack names
+ $ hg repack
+ $ find $CACHEDIR -type f | sort
+ $TESTTMP/hgcache/master/packs/78840d69389c7404327f7477e3931c89945c37d1.dataidx
+ $TESTTMP/hgcache/master/packs/78840d69389c7404327f7477e3931c89945c37d1.datapack
+ $TESTTMP/hgcache/master/packs/8abe7889aae389337d12ebe6085d4ee13854c7c9.histidx
+ $TESTTMP/hgcache/master/packs/8abe7889aae389337d12ebe6085d4ee13854c7c9.histpack
+ $TESTTMP/hgcache/master/packs/repacklock
+ $TESTTMP/hgcache/repos
+
+# Run two repacks at once
+ $ hg repack --config "hooks.prerepack=sleep 3" &
+ $ sleep 1
+ $ hg repack
+ skipping repack - another repack is already running
+ $ hg debugwaitonrepack >/dev/null 2>&1
+
+# Run repack in the background
+ $ cd ../master
+ $ echo x >> x
+ $ hg commit -m x4
+ $ cd ../shallow
+ $ hg pull -q
+ $ hg up -q tip
+ 1 files fetched over 1 fetches - (1 misses, 0.00% hit ratio) over * (glob)
+ $ find $CACHEDIR -type f | sort
+ $TESTTMP/hgcache/master/11/f6ad8ec52a2984abaafd7c3b516503785c2072/1bb2e6237e035c8f8ef508e281f1ce075bc6db72
+ $TESTTMP/hgcache/master/packs/78840d69389c7404327f7477e3931c89945c37d1.dataidx
+ $TESTTMP/hgcache/master/packs/78840d69389c7404327f7477e3931c89945c37d1.datapack
+ $TESTTMP/hgcache/master/packs/8abe7889aae389337d12ebe6085d4ee13854c7c9.histidx
+ $TESTTMP/hgcache/master/packs/8abe7889aae389337d12ebe6085d4ee13854c7c9.histpack
+ $TESTTMP/hgcache/master/packs/repacklock
+ $TESTTMP/hgcache/repos
+
+ $ hg repack --background
+ (running background repack)
+ $ sleep 0.5
+ $ hg debugwaitonrepack >/dev/null 2>&1
+ $ find $CACHEDIR -type f | sort
+ $TESTTMP/hgcache/master/packs/39443fa1064182e93d968b5cba292eb5283260d0.dataidx
+ $TESTTMP/hgcache/master/packs/39443fa1064182e93d968b5cba292eb5283260d0.datapack
+ $TESTTMP/hgcache/master/packs/604552d403a1381749faf656feca0ca265a6d52c.histidx
+ $TESTTMP/hgcache/master/packs/604552d403a1381749faf656feca0ca265a6d52c.histpack
+ $TESTTMP/hgcache/master/packs/repacklock
+ $TESTTMP/hgcache/repos
+
+# Test debug commands
+
+ $ hg debugdatapack $TESTTMP/hgcache/master/packs/*.datapack
+ $TESTTMP/hgcache/master/packs/39443fa1064182e93d968b5cba292eb5283260d0:
+ x:
+ Node Delta Base Delta Length Blob Size
+ 1bb2e6237e03 000000000000 8 8
+ d4a3ed9310e5 1bb2e6237e03 12 6
+ aee31534993a d4a3ed9310e5 12 4
+
+ Total: 32 18 (77.8% bigger)
+ $ hg debugdatapack --long $TESTTMP/hgcache/master/packs/*.datapack
+ $TESTTMP/hgcache/master/packs/39443fa1064182e93d968b5cba292eb5283260d0:
+ x:
+ Node Delta Base Delta Length Blob Size
+ 1bb2e6237e035c8f8ef508e281f1ce075bc6db72 0000000000000000000000000000000000000000 8 8
+ d4a3ed9310e5bd9887e3bf779da5077efab28216 1bb2e6237e035c8f8ef508e281f1ce075bc6db72 12 6
+ aee31534993a501858fb6dd96a065671922e7d51 d4a3ed9310e5bd9887e3bf779da5077efab28216 12 4
+
+ Total: 32 18 (77.8% bigger)
+ $ hg debugdatapack $TESTTMP/hgcache/master/packs/*.datapack --node d4a3ed9310e5bd9887e3bf779da5077efab28216
+ $TESTTMP/hgcache/master/packs/39443fa1064182e93d968b5cba292eb5283260d0:
+
+ x
+ Node Delta Base Delta SHA1 Delta Length
+ d4a3ed9310e5bd9887e3bf779da5077efab28216 1bb2e6237e035c8f8ef508e281f1ce075bc6db72 77029ab56e83ea2115dd53ff87483682abe5d7ca 12
+ Node Delta Base Delta SHA1 Delta Length
+ 1bb2e6237e035c8f8ef508e281f1ce075bc6db72 0000000000000000000000000000000000000000 7ca8c71a64f7b56380e77573da2f7a5fdd2ecdb5 8
+ $ hg debughistorypack $TESTTMP/hgcache/master/packs/*.histidx
+
+ x
+ Node P1 Node P2 Node Link Node Copy From
+ 1bb2e6237e03 d4a3ed9310e5 000000000000 0b03bbc9e1e7
+ d4a3ed9310e5 aee31534993a 000000000000 421535db10b6
+ aee31534993a 1406e7411862 000000000000 a89d614e2364
+ 1406e7411862 000000000000 000000000000 b292c1e3311f
+
+# Test copy tracing from a pack
+ $ cd ../master
+ $ hg mv x y
+ $ hg commit -m 'move x to y'
+ $ cd ../shallow
+ $ hg pull -q
+ $ hg up -q tip
+ 1 files fetched over 1 fetches - (1 misses, 0.00% hit ratio) over * (glob)
+ $ hg repack
+ $ hg log -f y -T '{desc}\n'
+ move x to y
+ x4
+ x3
+ x2
+ x
+
+# Test copy trace across rename and back
+ $ cp -R $TESTTMP/hgcache/master/packs $TESTTMP/backuppacks
+ $ cd ../master
+ $ hg mv y x
+ $ hg commit -m 'move y back to x'
+ $ hg revert -r 0 x
+ $ mv x y
+ $ hg add y
+ $ echo >> y
+ $ hg revert x
+ $ hg commit -m 'add y back without metadata'
+ $ cd ../shallow
+ $ hg pull -q
+ $ hg up -q tip
+ 2 files fetched over 2 fetches - (2 misses, 0.00% hit ratio) over * (glob)
+ $ hg repack
+ $ ls $TESTTMP/hgcache/master/packs
+ bfd60adb76018bb952e27cd23fc151bf94865d7d.histidx
+ bfd60adb76018bb952e27cd23fc151bf94865d7d.histpack
+ fb3aa57b22789ebcc45706c352e2d6af099c5816.dataidx
+ fb3aa57b22789ebcc45706c352e2d6af099c5816.datapack
+ repacklock
+ $ hg debughistorypack $TESTTMP/hgcache/master/packs/*.histidx
+
+ x
+ Node P1 Node P2 Node Link Node Copy From
+ cd410a44d584 577959738234 000000000000 609547eda446 y
+ 1bb2e6237e03 d4a3ed9310e5 000000000000 0b03bbc9e1e7
+ d4a3ed9310e5 aee31534993a 000000000000 421535db10b6
+ aee31534993a 1406e7411862 000000000000 a89d614e2364
+ 1406e7411862 000000000000 000000000000 b292c1e3311f
+
+ y
+ Node P1 Node P2 Node Link Node Copy From
+ 577959738234 1bb2e6237e03 000000000000 c7faf2fc439a x
+ 21f46f2721e7 000000000000 000000000000 d6868642b790
+ $ hg strip -r '.^'
+ 1 files updated, 0 files merged, 1 files removed, 0 files unresolved
+ saved backup bundle to $TESTTMP/shallow/.hg/strip-backup/609547eda446-b26b56a8-backup.hg (glob)
+ $ hg -R ../master strip -r '.^'
+ 1 files updated, 0 files merged, 1 files removed, 0 files unresolved
+ saved backup bundle to $TESTTMP/master/.hg/strip-backup/609547eda446-b26b56a8-backup.hg (glob)
+
+ $ rm -rf $TESTTMP/hgcache/master/packs
+ $ cp -R $TESTTMP/backuppacks $TESTTMP/hgcache/master/packs
+
+# Test repacking datapack without history
+ $ rm -rf $CACHEDIR/master/packs/*hist*
+ $ hg repack
+ $ hg debugdatapack $TESTTMP/hgcache/master/packs/*.datapack
+ $TESTTMP/hgcache/master/packs/922aca43dbbeda4d250565372e8892ec7b08da6a:
+ x:
+ Node Delta Base Delta Length Blob Size
+ 1bb2e6237e03 000000000000 8 8
+ d4a3ed9310e5 1bb2e6237e03 12 6
+ aee31534993a d4a3ed9310e5 12 4
+
+ Total: 32 18 (77.8% bigger)
+ y:
+ Node Delta Base Delta Length Blob Size
+ 577959738234 000000000000 70 8
+
+ Total: 70 8 (775.0% bigger)
+
+ $ hg cat -r ".^" x
+ x
+ x
+ x
+ x
+
+Incremental repack
+ $ rm -rf $CACHEDIR/master/packs/*
+ $ cat >> .hg/hgrc <<EOF
+ > [remotefilelog]
+ > data.generations=60
+ > 150
+ > EOF
+
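The data.generations list above buckets pack files by size so that incremental
repack folds together only the small, frequently rewritten packs and leaves the
large ones alone. A minimal sketch of that selection, under stated assumptions
(the three-pack threshold and the oversize skip mirror the behaviour exercised
below; the extension's real logic is more involved):

    import collections

    def generation(size, boundaries):
        # A pack belongs to the oldest generation whose size floor it
        # reaches; boundaries come from remotefilelog.data.generations.
        for gen, floor in enumerate(sorted(boundaries, reverse=True)):
            if size >= floor:
                return gen
        return len(boundaries)

    def choose_packs(packs, boundaries, minpacks=3, maxpacksize=None):
        # Fold a generation only once it holds at least minpacks packs,
        # and never touch packs over maxpacksize.
        buckets = collections.defaultdict(list)
        for name, size in packs:
            if maxpacksize is not None and size > maxpacksize:
                continue
            buckets[generation(size, boundaries)].append(name)
        return [name for gen in sorted(buckets)
                for name in buckets[gen] if len(buckets[gen]) >= minpacks]

    # Three ~70-byte packs share a generation and are folded together;
    # with maxpacksize=64 all of them are skipped and nothing happens.
    print(choose_packs([('a', 70), ('b', 70), ('c', 70)], [60, 150]))
    print(choose_packs([('a', 70), ('b', 70), ('c', 70)], [60, 150],
                       maxpacksize=64))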
+Single pack - repack does nothing
+ $ hg prefetch -r 0
+ 1 files fetched over 1 fetches - (1 misses, 0.00% hit ratio) over * (glob)
+ $ ls_l $TESTTMP/hgcache/master/packs/ | grep datapack
+ [1]
+ $ ls_l $TESTTMP/hgcache/master/packs/ | grep histpack
+ [1]
+ $ hg repack --incremental
+ $ ls_l $TESTTMP/hgcache/master/packs/ | grep datapack
+ -r--r--r-- 70 052643fdcdebbd42d7c180a651a30d46098e6fe1.datapack
+ $ ls_l $TESTTMP/hgcache/master/packs/ | grep histpack
+ -r--r--r-- 90 955a622173324b2d8b53e1147f209f1cf125302e.histpack
+
+3 gen1 packs, 1 gen0 pack - packs 3 gen1 into 1
+ $ hg prefetch -r 1
+ 1 files fetched over 1 fetches - (1 misses, 0.00% hit ratio) over * (glob)
+ $ hg prefetch -r 2
+ 1 files fetched over 1 fetches - (1 misses, 0.00% hit ratio) over * (glob)
+ $ hg prefetch -r 38
+ abort: unknown revision '38'!
+ [255]
+ $ ls_l $TESTTMP/hgcache/master/packs/ | grep datapack
+ -r--r--r-- 70 052643fdcdebbd42d7c180a651a30d46098e6fe1.datapack
+ $ ls_l $TESTTMP/hgcache/master/packs/ | grep histpack
+ -r--r--r-- 90 955a622173324b2d8b53e1147f209f1cf125302e.histpack
+
+For the data packs, set repackmaxpacksize to 64 so that a data pack of size 65
+exceeds the limit. This effectively ensures that no generation has 3 packs,
+so no packs are chosen for incremental repacking. For the history packs, set
+repackmaxpacksize to 0, which should always result in no repacking.
+ $ hg repack --incremental --config remotefilelog.data.repackmaxpacksize=64 \
+ > --config remotefilelog.history.repackmaxpacksize=0
+ $ ls_l $TESTTMP/hgcache/master/packs/ | grep datapack
+ -r--r--r-- 70 052643fdcdebbd42d7c180a651a30d46098e6fe1.datapack
+ -r--r--r-- 149 78840d69389c7404327f7477e3931c89945c37d1.datapack
+ $ ls_l $TESTTMP/hgcache/master/packs/ | grep histpack
+ -r--r--r-- 254 8abe7889aae389337d12ebe6085d4ee13854c7c9.histpack
+ -r--r--r-- 90 955a622173324b2d8b53e1147f209f1cf125302e.histpack
+
+Set repackmaxpacksize to the size of the biggest pack file, which ensures that
+it is effectively ignored in the incremental repacking.
+ $ hg repack --incremental --config remotefilelog.data.repackmaxpacksize=65 \
+ > --config remotefilelog.history.repackmaxpacksize=336
+ $ ls_l $TESTTMP/hgcache/master/packs/ | grep datapack
+ -r--r--r-- 70 052643fdcdebbd42d7c180a651a30d46098e6fe1.datapack
+ -r--r--r-- 149 78840d69389c7404327f7477e3931c89945c37d1.datapack
+ $ ls_l $TESTTMP/hgcache/master/packs/ | grep histpack
+ -r--r--r-- 254 8abe7889aae389337d12ebe6085d4ee13854c7c9.histpack
+ -r--r--r-- 90 955a622173324b2d8b53e1147f209f1cf125302e.histpack
+
+1 gen3 pack, 1 gen0 pack - does nothing
+ $ hg repack --incremental
+ $ ls_l $TESTTMP/hgcache/master/packs/ | grep datapack
+ -r--r--r-- 70 052643fdcdebbd42d7c180a651a30d46098e6fe1.datapack
+ -r--r--r-- 149 78840d69389c7404327f7477e3931c89945c37d1.datapack
+ $ ls_l $TESTTMP/hgcache/master/packs/ | grep histpack
+ -r--r--r-- 254 8abe7889aae389337d12ebe6085d4ee13854c7c9.histpack
+ -r--r--r-- 90 955a622173324b2d8b53e1147f209f1cf125302e.histpack
+
+Pull should run background repack
+ $ cat >> .hg/hgrc <<EOF
+ > [remotefilelog]
+ > backgroundrepack=True
+ > EOF
+ $ clearcache
+ $ hg prefetch -r 0
+ 1 files fetched over 1 fetches - (1 misses, 0.00% hit ratio) over * (glob)
+ $ hg prefetch -r 1
+ 1 files fetched over 1 fetches - (1 misses, 0.00% hit ratio) over * (glob)
+ $ hg prefetch -r 2
+ 1 files fetched over 1 fetches - (1 misses, 0.00% hit ratio) over * (glob)
+ $ hg prefetch -r 3
+ 1 files fetched over 1 fetches - (1 misses, 0.00% hit ratio) over * (glob)
+
+ $ hg pull
+ pulling from ssh://user@dummy/master
+ searching for changes
+ no changes found
+ (running background incremental repack)
+ $ sleep 0.5
+ $ hg debugwaitonrepack >/dev/null 2>&1
+ $ ls_l $TESTTMP/hgcache/master/packs/ | grep datapack
+ -r--r--r-- 303 156a6c1c83aeb69422d7936e0a46ba9bc06a71c0.datapack
+ $ ls_l $TESTTMP/hgcache/master/packs/ | grep histpack
+ -r--r--r-- 336 604552d403a1381749faf656feca0ca265a6d52c.histpack
+
+Test environment variable resolution
+ $ CACHEPATH=$TESTTMP/envcache hg prefetch --config 'remotefilelog.cachepath=$CACHEPATH'
+ 1 files fetched over 1 fetches - (1 misses, 0.00% hit ratio) over * (glob)
+ $ find $TESTTMP/envcache | sort
+ $TESTTMP/envcache
+ $TESTTMP/envcache/master
+ $TESTTMP/envcache/master/95
+ $TESTTMP/envcache/master/95/cb0bfd2977c761298d9624e4b4d4c72a39974a
+ $TESTTMP/envcache/master/95/cb0bfd2977c761298d9624e4b4d4c72a39974a/577959738234a1eb241ed3ed4b22a575833f56e0
+ $TESTTMP/envcache/repos
+
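The prefetch above sets remotefilelog.cachepath to the literal string
'$CACHEPATH' and relies on the extension expanding the variable from the
environment. The standard library provides the same substitution; a sketch of
the effect (not the extension's actual code path):

    import os

    os.environ['CACHEPATH'] = '/tmp/envcache'  # stand-in for the test value
    # Any $VAR in the configured path is expanded against the environment.
    print(os.path.expandvars('$CACHEPATH/master'))  # /tmp/envcache/master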
+Test local remotefilelog blob is correct when based on a pack
+ $ hg prefetch -r .
+ 1 files fetched over 1 fetches - (1 misses, 0.00% hit ratio) over * (glob)
+ $ echo >> y
+ $ hg commit -m y2
+ $ hg debugremotefilelog .hg/store/data/95cb0bfd2977c761298d9624e4b4d4c72a39974a/b70860edba4f8242a1d52f2a94679dd23cb76808
+ size: 9 bytes
+ path: .hg/store/data/95cb0bfd2977c761298d9624e4b4d4c72a39974a/b70860edba4f8242a1d52f2a94679dd23cb76808
+ key: b70860edba4f
+
+ node => p1 p2 linknode copyfrom
+ b70860edba4f => 577959738234 000000000000 08d3fbc98c48
+ 577959738234 => 1bb2e6237e03 000000000000 c7faf2fc439a x
+ 1bb2e6237e03 => d4a3ed9310e5 000000000000 0b03bbc9e1e7
+ d4a3ed9310e5 => aee31534993a 000000000000 421535db10b6
+ aee31534993a => 1406e7411862 000000000000 a89d614e2364
+ 1406e7411862 => 000000000000 000000000000 b292c1e3311f
+
+Test limiting the max delta chain length
+ $ hg repack --config packs.maxchainlen=1
+ $ hg debugdatapack $TESTTMP/hgcache/master/packs/*.dataidx
+ $TESTTMP/hgcache/master/packs/f258af4c033dd5cd32b4dbc42a1efcd8e4c7d909:
+ x:
+ Node Delta Base Delta Length Blob Size
+ 1bb2e6237e03 000000000000 8 8
+ d4a3ed9310e5 1bb2e6237e03 12 6
+ aee31534993a 000000000000 4 4
+ 1406e7411862 aee31534993a 12 2
+
+ Total: 36 20 (80.0% bigger)
+ y:
+ Node Delta Base Delta Length Blob Size
+ 577959738234 000000000000 70 8
+
+ Total: 70 8 (775.0% bigger)
+
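With packs.maxchainlen=1 the listing above shows the delta chain restarting:
after one delta, aee31534993a is stored as a full text (delta base
000000000000) and a fresh chain begins. A toy planner capturing just the
chain-length rule, as an illustration rather than the repack implementation:

    def plan_deltas(nodes, maxchainlen):
        # Delta against the previous node until the chain reaches
        # maxchainlen, then restart with a full text. The real repack
        # also weighs delta size against full-text size.
        plan, chainlen = [], 0
        for node in nodes:
            if not plan or chainlen >= maxchainlen:
                plan.append((node, None))         # full text, chain restarts
                chainlen = 0
            else:
                plan.append((node, plan[-1][0]))  # delta on previous node
                chainlen += 1
        return plan

    # Mirrors the debugdatapack listing above.
    for node, base in plan_deltas(['1bb2e6237e03', 'd4a3ed9310e5',
                                   'aee31534993a', '1406e7411862'], 1):
        print(node, '->', base or 'fulltext')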
+Test huge pack cleanup using different values of packs.maxpacksize:
+ $ hg repack --incremental --debug
+ $ hg repack --incremental --debug --config packs.maxpacksize=512
+ removing oversize packfile $TESTTMP/hgcache/master/packs/f258af4c033dd5cd32b4dbc42a1efcd8e4c7d909.datapack (425 bytes)
+ removing oversize packfile $TESTTMP/hgcache/master/packs/f258af4c033dd5cd32b4dbc42a1efcd8e4c7d909.dataidx (1.21 KB)
+
+Do a repack where the new pack reuses a delta from the old pack
+ $ clearcache
+ $ hg prefetch -r '2::3'
+ 2 files fetched over 1 fetches - (2 misses, 0.00% hit ratio) over * (glob)
+ $ hg repack
+ $ hg debugdatapack $CACHEDIR/master/packs/*.datapack
+ $TESTTMP/hgcache/master/packs/9ec6b30891bd851320acb7c66b69a2bdf41c8df3:
+ x:
+ Node Delta Base Delta Length Blob Size
+ 1bb2e6237e03 000000000000 8 8
+ d4a3ed9310e5 1bb2e6237e03 12 6
+
+ Total: 20 14 (42.9% bigger)
+ $ hg prefetch -r '0::1'
+ 2 files fetched over 1 fetches - (2 misses, 0.00% hit ratio) over * (glob)
+ $ hg repack
+ $ hg debugdatapack $CACHEDIR/master/packs/*.datapack
+ $TESTTMP/hgcache/master/packs/156a6c1c83aeb69422d7936e0a46ba9bc06a71c0:
+ x:
+ Node Delta Base Delta Length Blob Size
+ 1bb2e6237e03 000000000000 8 8
+ d4a3ed9310e5 1bb2e6237e03 12 6
+ aee31534993a d4a3ed9310e5 12 4
+ 1406e7411862 aee31534993a 12 2
+
+ Total: 44 20 (120.0% bigger)
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/tests/test-remotefilelog-share.t Fri Jan 18 13:28:22 2019 -0500
@@ -0,0 +1,27 @@
+#require no-windows
+
+ $ . "$TESTDIR/remotefilelog-library.sh"
+
+ $ cat >> $HGRCPATH <<EOF
+ > [extensions]
+ > remotefilelog=
+ > share=
+ > EOF
+
+ $ hg init master
+ $ cd master
+ $ cat >> .hg/hgrc <<EOF
+ > [remotefilelog]
+ > server=True
+ > EOF
+ $ echo x > x
+ $ hg commit -qAm x
+
+ $ cd ..
+
+
+ $ hgcloneshallow ssh://user@dummy/master source --noupdate -q
+ $ hg share source dest
+ updating working directory
+ 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
+ $ hg -R dest unshare
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/tests/test-remotefilelog-sparse.t Fri Jan 18 13:28:22 2019 -0500
@@ -0,0 +1,109 @@
+#require no-windows
+
+ $ . "$TESTDIR/remotefilelog-library.sh"
+
+ $ hg init master
+ $ cd master
+ $ cat >> .hg/hgrc <<EOF
+ > [remotefilelog]
+ > server=True
+ > EOF
+ $ echo x > x
+ $ echo z > z
+ $ hg commit -qAm x1
+ $ echo x2 > x
+ $ echo z2 > z
+ $ hg commit -qAm x2
+ $ hg bookmark foo
+
+ $ cd ..
+
+# prefetch a revision w/ a sparse checkout
+
+ $ hgcloneshallow ssh://user@dummy/master shallow --noupdate
+ streaming all changes
+ 2 files to transfer, 527 bytes of data
+ transferred 527 bytes in 0.* seconds (*/sec) (glob)
+ searching for changes
+ no changes found
+ $ cd shallow
+ $ printf "[extensions]\nsparse=\n" >> .hg/hgrc
+
+ $ hg debugsparse -I x
+ $ hg prefetch -r 0
+ 1 files fetched over 1 fetches - (1 misses, 0.00% hit ratio) over *s (glob)
+
+ $ hg cat -r 0 x
+ x
+
+ $ hg debugsparse -I z
+ $ hg prefetch -r 0
+ 1 files fetched over 1 fetches - (1 misses, 0.00% hit ratio) over *s (glob)
+
+ $ hg cat -r 0 z
+ z
+
+# prefetch sparse only on pull when configured
+
+ $ printf "[remotefilelog]\npullprefetch=bookmark()\n" >> .hg/hgrc
+ $ hg strip tip
+ saved backup bundle to $TESTTMP/shallow/.hg/strip-backup/876b1317060d-b2e91d8d-backup.hg (glob)
+
+ $ hg debugsparse --delete z
+
+ $ clearcache
+ $ hg pull
+ pulling from ssh://user@dummy/master
+ searching for changes
+ adding changesets
+ adding manifests
+ adding file changes
+ added 1 changesets with 0 changes to 0 files
+ updating bookmark foo
+ new changesets 876b1317060d
+ (run 'hg update' to get a working copy)
+ prefetching file contents
+ 1 files fetched over 1 fetches - (1 misses, 0.00% hit ratio) over *s (glob)
+
+# Don't consider filtered files when doing copy tracing
+
+## Push an unrelated commit
+ $ cd ../
+
+ $ hgcloneshallow ssh://user@dummy/master shallow2
+ streaming all changes
+ 2 files to transfer, 527 bytes of data
+ transferred 527 bytes in 0.* seconds (*) (glob)
+ searching for changes
+ no changes found
+ updating to branch default
+ 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
+ 1 files fetched over 1 fetches - (1 misses, 0.00% hit ratio) over *s (glob)
+ $ cd shallow2
+ $ printf "[extensions]\nsparse=\n" >> .hg/hgrc
+
+ $ hg up -q 0
+ 2 files fetched over 1 fetches - (2 misses, 0.00% hit ratio) over *s (glob)
+ $ touch a
+ $ hg ci -Aqm a
+ $ hg push -q -f
+
+## Pull the unrelated commit and rebase onto it - verify unrelated file was not
+pulled
+
+ $ cd ../shallow
+ $ hg up -q 1
+ $ hg pull -q
+ $ hg debugsparse -I z
+ $ clearcache
+ $ hg prefetch -r '. + .^' -I x -I z
+ 4 files fetched over 1 fetches - (4 misses, 0.00% hit ratio) over * (glob)
+Originally this was testing that the rebase doesn't fetch pointless
+blobs. Right now it fails because core's sparse can't load a spec from
+the working directory. Presumably there's a fix, but I'm not sure what it is.
+ $ hg rebase -d 2 --keep
+ rebasing 1:876b1317060d "x2" (foo)
+ transaction abort!
+ rollback completed
+ abort: cannot parse sparse patterns from working directory
+ [255]
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/tests/test-remotefilelog-tags.t Fri Jan 18 13:28:22 2019 -0500
@@ -0,0 +1,78 @@
+#require no-windows
+
+ $ . "$TESTDIR/remotefilelog-library.sh"
+
+ $ hg init master
+ $ cd master
+ $ cat >> .hg/hgrc <<EOF
+ > [remotefilelog]
+ > server=True
+ > EOF
+ $ echo x > foo
+ $ echo y > bar
+ $ hg commit -qAm one
+ $ hg tag tag1
+ $ cd ..
+
+# clone with tags
+
+ $ hg clone --shallow ssh://user@dummy/master shallow --noupdate --config remotefilelog.excludepattern=.hgtags
+ streaming all changes
+ 3 files to transfer, 662 bytes of data
+ transferred 662 bytes in * seconds (*/sec) (glob)
+ searching for changes
+ no changes found
+ $ cat >> shallow/.hg/hgrc <<EOF
+ > [remotefilelog]
+ > cachepath=$PWD/hgcache
+ > debug=True
+ > reponame = master
+ > excludepattern=.hgtags
+ > [extensions]
+ > remotefilelog=
+ > EOF
+
+ $ cd shallow
+ $ ls .hg/store/data
+ ~2ehgtags.i
+ $ hg tags
+ tip 1:6ce44dcfda68
+ tag1 0:e0360bc0d9e1
+ $ hg update
+ 3 files updated, 0 files merged, 0 files removed, 0 files unresolved
+ 2 files fetched over 1 fetches - (2 misses, 0.00% hit ratio) over *s (glob)
+
+# pull with tags
+
+ $ cd ../master
+ $ hg tag tag2
+ $ cd ../shallow
+ $ hg pull
+ pulling from ssh://user@dummy/master
+ searching for changes
+ adding changesets
+ adding manifests
+ adding file changes
+ added 1 changesets with 0 changes to 0 files
+ new changesets 6a22dfa4fd34
+ (run 'hg update' to get a working copy)
+ $ hg tags
+ tip 2:6a22dfa4fd34
+ tag2 1:6ce44dcfda68
+ tag1 0:e0360bc0d9e1
+ $ hg update
+ 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
+
+ $ ls .hg/store/data
+ ~2ehgtags.i
+
+ $ hg log -l 1 --stat
+ changeset: 2:6a22dfa4fd34
+ tag: tip
+ user: test
+ date: Thu Jan 01 00:00:00 1970 +0000
+ summary: Added tag tag2 for changeset 6ce44dcfda68
+
+ .hgtags | 1 +
+ 1 files changed, 1 insertions(+), 0 deletions(-)
+
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/tests/test-remotefilelog-wireproto.t Fri Jan 18 13:28:22 2019 -0500
@@ -0,0 +1,48 @@
+#require no-windows
+
+ $ . "$TESTDIR/remotefilelog-library.sh"
+
+ $ hg init master
+ $ cd master
+ $ cat >> .hg/hgrc <<EOF
+ > [remotefilelog]
+ > server=True
+ > EOF
+ $ echo x > x
+ $ hg commit -qAm x
+ $ echo y >> x
+ $ hg commit -qAm y
+ $ echo z >> x
+ $ hg commit -qAm z
+ $ hg update 1
+ 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
+ $ echo w >> x
+ $ hg commit -qAm w
+
+ $ cd ..
+
+Shallow clone and activate getflogheads testing extension
+
+ $ hgcloneshallow ssh://user@dummy/master shallow --noupdate
+ streaming all changes
+ 2 files to transfer, 908 bytes of data
+ transferred 908 bytes in * seconds (*/sec) (glob)
+ searching for changes
+ no changes found
+ $ cd shallow
+
+ $ cat >> .hg/hgrc <<EOF
+ > [extensions]
+ > getflogheads=$TESTDIR/remotefilelog-getflogheads.py
+ > EOF
+
+Get heads of a remotefilelog
+
+ $ hg getflogheads x
+ 2797809ca5e9c2f307d82b1345e832f655fb99a2
+ ca758b402ddc91e37e3113e1a97791b537e1b7bb
+
+Get heads of a non-existing remotefilelog
+
+ $ hg getflogheads y
+ EMPTY
--- a/tests/test-repair-strip.t Wed Jan 09 20:00:35 2019 -0800
+++ b/tests/test-repair-strip.t Fri Jan 18 13:28:22 2019 -0500
@@ -51,6 +51,7 @@
transaction abort!
failed to truncate data/b.i
rollback failed - please run hg recover
+ (failure reason: [Errno 13] Permission denied .hg/store/data/b.i')
strip failed, backup bundle
abort: Permission denied .hg/store/data/b.i
% after update 0, strip 2
@@ -104,6 +105,7 @@
transaction abort!
failed to truncate 00manifest.i
rollback failed - please run hg recover
+ (failure reason: [Errno 13] Permission denied .hg/store/00manifest.i')
strip failed, backup bundle
abort: Permission denied .hg/store/00manifest.i
% after update 0, strip 2
--- a/tests/test-repo-compengines.t Wed Jan 09 20:00:35 2019 -0800
+++ b/tests/test-repo-compengines.t Fri Jan 18 13:28:22 2019 -0500
@@ -7,6 +7,7 @@
fncache
generaldelta
revlogv1
+ sparserevlog
store
testonly-simplestore (reposimplestore !)
@@ -47,6 +48,7 @@
fncache
generaldelta
revlogv1
+ sparserevlog
store
testonly-simplestore (reposimplestore !)
@@ -71,6 +73,7 @@
fncache
generaldelta
revlogv1
+ sparserevlog
store
testonly-simplestore (reposimplestore !)
--- a/tests/test-resolve.t Wed Jan 09 20:00:35 2019 -0800
+++ b/tests/test-resolve.t Fri Jan 18 13:28:22 2019 -0500
@@ -435,7 +435,7 @@
$ hg resolve -l
R file1
R file2
-Test explicitly setting the otion to 'none'
+Test explicitly setting the option to 'none'
$ hg resolve --unmark
$ hg resolve -l
U file1
@@ -538,7 +538,7 @@
Test when config option is set:
==============================
- $ cat >> $HGRCPATH << EOF
+ $ cat >> .hg/hgrc << EOF
> [ui]
> interactive = True
> [commands]
@@ -596,7 +596,7 @@
R emp3
Test that commands.resolve.confirm respect --unmark option (only when no patterns args are given):
-===============================================================================================
+=================================================================================================
$ hg resolve -u emp1
@@ -626,4 +626,172 @@
$ hg rebase --abort
rebase aborted
+
+Done with commands.resolve.confirm tests:
$ cd ..
+
+Test that commands.resolve.mark-check works even if there are deleted files:
+ $ hg init resolve-deleted
+ $ cd resolve-deleted
+ $ echo r0 > file1
+ $ hg ci -qAm r0
+ $ echo r1 > file1
+ $ hg ci -qm r1
+ $ hg co -qr 0
+ $ hg rm file1
+ $ hg ci -qm "r2 (delete file1)"
+
+(At this point we have r0 creating file1, and sibling commits r1 and r2, which
+ modify and delete file1, respectively)
+
+ $ hg merge -r 1
+ file 'file1' was deleted in local [working copy] but was modified in other [merge rev].
+ What do you want to do?
+ use (c)hanged version, leave (d)eleted, or leave (u)nresolved? u
+ 0 files updated, 0 files merged, 0 files removed, 1 files unresolved
+ use 'hg resolve' to retry unresolved file merges or 'hg merge --abort' to abandon
+ [1]
+ $ hg resolve --list
+ U file1
+Because we left it as 'unresolved' the file should still exist.
+ $ [ -f file1 ] || echo "File does not exist?"
+BC behavior: `hg resolve --mark` accepts that the file is still there, and
+doesn't have a problem with this situation.
+ $ hg resolve --mark --config commands.resolve.mark-check=abort
+ (no more unresolved files)
+ $ hg resolve --list
+ R file1
+The file is still there:
+ $ [ -f file1 ] || echo "File does not exist?"
+Let's check mark-check=warn:
+ $ hg resolve --unmark file1
+ $ hg resolve --mark --config commands.resolve.mark-check=warn
+ (no more unresolved files)
+ $ hg resolve --list
+ R file1
+The file is still there:
+ $ [ -f file1 ] || echo "File does not exist?"
+Let's resolve the issue by deleting the file via `hg resolve`
+ $ hg resolve --unmark file1
+ $ echo 'd' | hg resolve file1 --config ui.interactive=1
+ file 'file1' was deleted in local [working copy] but was modified in other [merge rev].
+ What do you want to do?
+ use (c)hanged version, leave (d)eleted, or leave (u)nresolved? d
+ (no more unresolved files)
+ $ hg resolve --list
+ R file1
+The file is deleted:
+ $ [ -f file1 ] && echo "File still exists?" || true
+Doing `hg resolve --mark` doesn't break now that the file is missing:
+ $ hg resolve --mark --config commands.resolve.mark-check=abort
+ (no more unresolved files)
+ $ hg resolve --mark --config commands.resolve.mark-check=warn
+ (no more unresolved files)
+Resurrect the file, and delete it outside of hg:
+ $ hg resolve --unmark file1
+ $ hg resolve file1
+ file 'file1' was deleted in local [working copy] but was modified in other [merge rev].
+ What do you want to do?
+ use (c)hanged version, leave (d)eleted, or leave (u)nresolved? u
+ [1]
+ $ [ -f file1 ] || echo "File does not exist?"
+ $ hg resolve --list
+ U file1
+ $ rm file1
+ $ hg resolve --mark --config commands.resolve.mark-check=abort
+ (no more unresolved files)
+ $ hg resolve --list
+ R file1
+ $ hg resolve --unmark file1
+ $ hg resolve file1
+ file 'file1' was deleted in local [working copy] but was modified in other [merge rev].
+ What do you want to do?
+ use (c)hanged version, leave (d)eleted, or leave (u)nresolved? u
+ [1]
+ $ [ -f file1 ] || echo "File does not exist?"
+ $ hg resolve --list
+ U file1
+ $ rm file1
+ $ hg resolve --mark --config commands.resolve.mark-check=warn
+ (no more unresolved files)
+ $ hg resolve --list
+ R file1
+
+
+For completeness, let's try that in the opposite direction (merging r2 into r1,
+instead of r1 into r2):
+ $ hg update -qCr 1
+ $ hg merge -r 2
+ file 'file1' was deleted in other [merge rev] but was modified in local [working copy].
+ What do you want to do?
+ use (c)hanged version, (d)elete, or leave (u)nresolved? u
+ 0 files updated, 0 files merged, 0 files removed, 1 files unresolved
+ use 'hg resolve' to retry unresolved file merges or 'hg merge --abort' to abandon
+ [1]
+ $ hg resolve --list
+ U file1
+Because we left it as 'unresolved' the file should still exist.
+ $ [ -f file1 ] || echo "File does not exist?"
+BC behavior: `hg resolve --mark` accepts that the file is still there, and
+doesn't have a problem with this situation.
+ $ hg resolve --mark --config commands.resolve.mark-check=abort
+ (no more unresolved files)
+ $ hg resolve --list
+ R file1
+The file is still there:
+ $ [ -f file1 ] || echo "File does not exist?"
+Let's check mark-check=warn:
+ $ hg resolve --unmark file1
+ $ hg resolve --mark --config commands.resolve.mark-check=warn
+ (no more unresolved files)
+ $ hg resolve --list
+ R file1
+The file is still there:
+ $ [ -f file1 ] || echo "File does not exist?"
+Let's resolve the issue by deleting the file via `hg resolve`
+ $ hg resolve --unmark file1
+ $ echo 'd' | hg resolve file1 --config ui.interactive=1
+ file 'file1' was deleted in other [merge rev] but was modified in local [working copy].
+ What do you want to do?
+ use (c)hanged version, (d)elete, or leave (u)nresolved? d
+ (no more unresolved files)
+ $ hg resolve --list
+ R file1
+The file is deleted:
+ $ [ -f file1 ] && echo "File still exists?" || true
+Doing `hg resolve --mark` doesn't break now that the file is missing:
+ $ hg resolve --mark --config commands.resolve.mark-check=abort
+ (no more unresolved files)
+ $ hg resolve --mark --config commands.resolve.mark-check=warn
+ (no more unresolved files)
+Resurrect the file, and delete it outside of hg:
+ $ hg resolve --unmark file1
+ $ hg resolve file1
+ file 'file1' was deleted in other [merge rev] but was modified in local [working copy].
+ What do you want to do?
+ use (c)hanged version, (d)elete, or leave (u)nresolved? u
+ [1]
+ $ [ -f file1 ] || echo "File does not exist?"
+ $ hg resolve --list
+ U file1
+ $ rm file1
+ $ hg resolve --mark --config commands.resolve.mark-check=abort
+ (no more unresolved files)
+ $ hg resolve --list
+ R file1
+ $ hg resolve --unmark file1
+ $ hg resolve file1
+ file 'file1' was deleted in other [merge rev] but was modified in local [working copy].
+ What do you want to do?
+ use (c)hanged version, (d)elete, or leave (u)nresolved? u
+ [1]
+ $ [ -f file1 ] || echo "File does not exist?"
+ $ hg resolve --list
+ U file1
+ $ rm file1
+ $ hg resolve --mark --config commands.resolve.mark-check=warn
+ (no more unresolved files)
+ $ hg resolve --list
+ R file1
+
+ $ cd ..
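The sequences above pin down one corner of commands.resolve.mark-check: even
under 'abort' or 'warn', marking succeeds when the conflicted file has been
deleted, since a missing file cannot still carry conflict markers. A toy model
of that decision (an illustrative sketch with marker detection reduced to a
substring test, not resolve's implementation):

    import os

    def mark_check(path, mode):
        # mode is 'none', 'warn' or 'abort'. Returns (allow, message).
        if mode == 'none' or not os.path.exists(path):
            # A deleted file cannot hold conflict markers, so marking
            # it resolved is always allowed -- matching the runs above.
            return True, None
        with open(path) as f:
            looks_unresolved = '<<<<<<<' in f.read()
        if not looks_unresolved:
            return True, None
        if mode == 'warn':
            return True, 'warning: %s looks unresolved' % path
        return False, 'abort: %s looks unresolved' % path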
--- a/tests/test-revlog-raw.py Wed Jan 09 20:00:35 2019 -0800
+++ b/tests/test-revlog-raw.py Fri Jan 18 13:28:22 2019 -0500
@@ -2,6 +2,8 @@
from __future__ import absolute_import, print_function
+import collections
+import hashlib
import sys
from mercurial import (
@@ -12,11 +14,16 @@
vfs,
)
+from mercurial.revlogutils import (
+ deltas,
+)
+
# TESTTMP is optional. This makes it convenient to run without run-tests.py
tvfs = vfs.vfs(encoding.environ.get(b'TESTTMP', b'/tmp'))
# Enable generaldelta otherwise revlog won't use delta as expected by the test
-tvfs.options = {b'generaldelta': True, b'revlogv1': True}
+tvfs.options = {b'generaldelta': True, b'revlogv1': True,
+ b'sparse-revlog': True}
# The test wants to control whether to use delta explicitly, based on
# "storedeltachains".
@@ -291,6 +298,124 @@
abort('rev %d: corrupted %stext'
% (rev, raw and 'raw' or ''))
+slicingdata = [
+ ([0, 1, 2, 3, 55, 56, 58, 59, 60],
+ [[0, 1], [2], [58], [59, 60]],
+ 10),
+ ([0, 1, 2, 3, 55, 56, 58, 59, 60],
+ [[0, 1], [2], [58], [59, 60]],
+ 10),
+ ([-1, 0, 1, 2, 3, 55, 56, 58, 59, 60],
+ [[-1, 0, 1], [2], [58], [59, 60]],
+ 10),
+]
+
+def slicingtest(rlog):
+ oldmin = rlog._srmingapsize
+ try:
+ # the test revlog is small, we remove the floor under which we
+ # slicing is diregarded.
+ rlog._srmingapsize = 0
+ for item in slicingdata:
+ chain, expected, target = item
+ result = deltas.slicechunk(rlog, chain, targetsize=target)
+ result = list(result)
+ if result != expected:
+ print('slicing differ:')
+ print(' chain: %s' % chain)
+ print(' target: %s' % target)
+ print(' expected: %s' % expected)
+ print(' result: %s' % result)
+ finally:
+ rlog._srmingapsize = oldmin
+
+def md5sum(s):
+ return hashlib.md5(s).digest()
+
+def _maketext(*coord):
+ """create piece of text according to range of integers
+
+ The test returned use a md5sum of the integer to make it less
+ compressible"""
+ pieces = []
+ for start, size in coord:
+ num = range(start, start + size)
+ p = [md5sum(b'%d' % r) for r in num]
+ pieces.append(b'\n'.join(p))
+ return b'\n'.join(pieces) + b'\n'
+
+data = [
+ _maketext((0, 120), (456, 60)),
+ _maketext((0, 120), (345, 60)),
+ _maketext((0, 120), (734, 60)),
+ _maketext((0, 120), (734, 60), (923, 45)),
+ _maketext((0, 120), (734, 60), (234, 45)),
+ _maketext((0, 120), (734, 60), (564, 45)),
+ _maketext((0, 120), (734, 60), (361, 45)),
+ _maketext((0, 120), (734, 60), (489, 45)),
+ _maketext((0, 120), (123, 60)),
+ _maketext((0, 120), (145, 60)),
+ _maketext((0, 120), (104, 60)),
+ _maketext((0, 120), (430, 60)),
+ _maketext((0, 120), (430, 60), (923, 45)),
+ _maketext((0, 120), (430, 60), (234, 45)),
+ _maketext((0, 120), (430, 60), (564, 45)),
+ _maketext((0, 120), (430, 60), (361, 45)),
+ _maketext((0, 120), (430, 60), (489, 45)),
+ _maketext((0, 120), (249, 60)),
+ _maketext((0, 120), (832, 60)),
+ _maketext((0, 120), (891, 60)),
+ _maketext((0, 120), (543, 60)),
+ _maketext((0, 120), (120, 60)),
+ _maketext((0, 120), (60, 60), (768, 30)),
+ _maketext((0, 120), (60, 60), (260, 30)),
+ _maketext((0, 120), (60, 60), (450, 30)),
+ _maketext((0, 120), (60, 60), (361, 30)),
+ _maketext((0, 120), (60, 60), (886, 30)),
+ _maketext((0, 120), (60, 60), (116, 30)),
+ _maketext((0, 120), (60, 60), (567, 30), (629, 40)),
+ _maketext((0, 120), (60, 60), (569, 30), (745, 40)),
+ _maketext((0, 120), (60, 60), (777, 30), (700, 40)),
+ _maketext((0, 120), (60, 60), (618, 30), (398, 40), (158, 10)),
+]
+
+def makesnapshot(tr):
+ rl = newrevlog(name=b'_snaprevlog3.i', recreate=True)
+ for i in data:
+ appendrev(rl, i, tr)
+ return rl
+
+snapshots = [-1, 0, 6, 8, 11, 17, 19, 21, 25, 30]
+def issnapshottest(rlog):
+ result = []
+ if rlog.issnapshot(-1):
+ result.append(-1)
+ for rev in rlog:
+ if rlog.issnapshot(rev):
+ result.append(rev)
+ if snapshots != result:
+ print('snapshot differ:')
+ print(' expected: %s' % snapshots)
+ print(' got: %s' % result)
+
+snapshotmapall = {0: [6, 8, 11, 17, 19, 25], 8: [21], -1: [0, 30]}
+snapshotmap15 = {0: [17, 19, 25], 8: [21], -1: [30]}
+def findsnapshottest(rlog):
+ resultall = collections.defaultdict(list)
+ deltas._findsnapshots(rlog, resultall, 0)
+ resultall = dict(resultall.items())
+ if resultall != snapshotmapall:
+ print('snapshot map differ:')
+ print(' expected: %s' % snapshotmapall)
+ print(' got: %s' % resultall)
+ result15 = collections.defaultdict(list)
+ deltas._findsnapshots(rlog, result15, 15)
+ result15 = dict(result15.items())
+ if result15 != snapshotmap15:
+ print('snapshot map differ:')
+ print(' expected: %s' % snapshotmap15)
+ print(' got: %s' % result15)
+
def maintest():
expected = rl = None
with newtransaction() as tr:
@@ -313,6 +438,13 @@
rl4 = lowlevelcopy(rl, tr)
checkrevlog(rl4, expected)
print('lowlevelcopy test passed')
+ slicingtest(rl)
+ print('slicing test passed')
+ rl5 = makesnapshot(tr)
+ issnapshottest(rl5)
+ print('issnapshot test passed')
+ findsnapshottest(rl5)
+ print('findsnapshot test passed')
try:
maintest()
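slicingtest above drives revlogutils.deltas.slicechunk, which cuts a span of
revisions into chunks dense enough to be read in one go. A gap-only slicer
conveys the core idea; it is a simplified sketch, since slicechunk works on
byte offsets and additionally enforces targetsize (which is why the expected
slicingdata output also splits [0, 1] from [2]):

    def naive_slice(revs, maxgap):
        # Cut a sorted revision span wherever consecutive revisions sit
        # more than maxgap apart.
        chunk = [revs[0]]
        for rev in revs[1:]:
            if rev - chunk[-1] > maxgap:
                yield chunk
                chunk = [rev]
            else:
                chunk.append(rev)
        yield chunk

    print(list(naive_slice([0, 1, 2, 3, 55, 56, 58, 59, 60], 1)))
    # [[0, 1, 2, 3], [55, 56], [58, 59, 60]]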
--- a/tests/test-revlog-raw.py.out Wed Jan 09 20:00:35 2019 -0800
+++ b/tests/test-revlog-raw.py.out Fri Jan 18 13:28:22 2019 -0500
@@ -2,3 +2,6 @@
addgroupcopy test passed
clone test passed
lowlevelcopy test passed
+slicing test passed
+issnapshot test passed
+findsnapshot test passed
--- a/tests/test-revlog-v2.t Wed Jan 09 20:00:35 2019 -0800
+++ b/tests/test-revlog-v2.t Fri Jan 18 13:28:22 2019 -0500
@@ -22,8 +22,9 @@
$ cd empty-repo
$ cat .hg/requires
dotencode
- exp-revlogv2.0
+ exp-revlogv2.1
fncache
+ sparserevlog
store
$ hg log
@@ -53,7 +54,7 @@
date: Thu Jan 01 00:00:00 1970 +0000
summary: initial
-Header written as expected (changelog always disables generaldelta)
+Header written as expected
$ f --hexdump --bytes 4 .hg/store/00changelog.i
.hg/store/00changelog.i:
@@ -61,4 +62,4 @@
$ f --hexdump --bytes 4 .hg/store/data/foo.i
.hg/store/data/foo.i:
- 0000: 00 03 de ad |....|
+ 0000: 00 01 de ad |....|
--- a/tests/test-revset.t Wed Jan 09 20:00:35 2019 -0800
+++ b/tests/test-revset.t Fri Jan 18 13:28:22 2019 -0500
@@ -649,6 +649,17 @@
hg: parse error: relation subscript must be an integer
[255]
+suggested relations
+
+ $ hg debugrevspec '.#generafions[0]'
+ hg: parse error: unknown identifier: generafions
+ (did you mean generations?)
+ [255]
+
+ $ hg debugrevspec '.#f[0]'
+ hg: parse error: unknown identifier: f
+ [255]
+
parsed tree at stages:
$ hg debugrevspec -p all '()'
@@ -1416,12 +1427,8 @@
$ hg debugrevspec -s '9: & heads(all())'
* set:
<filteredset
- <filteredset
- <baseset [9]>,
- <spanset+ 0:10>>,
- <not
- <filteredset
- <baseset [9]>, set([0, 1, 2, 3, 4, 5, 6, 8])>>>
+ <baseset [9]>,
+ <baseset+ [7, 9]>>
9
but should follow the order of the subset
--- a/tests/test-revset2.t Wed Jan 09 20:00:35 2019 -0800
+++ b/tests/test-revset2.t Fri Jan 18 13:28:22 2019 -0500
@@ -669,8 +669,6 @@
abort: namespace 'unknown' does not exist!
[255]
$ log 'named("re:unknown")'
- abort: no namespace exists that match 'unknown'!
- [255]
$ log 'present(named("unknown"))'
$ log 'present(named("re:unknown"))'
--- a/tests/test-rollback.t Wed Jan 09 20:00:35 2019 -0800
+++ b/tests/test-rollback.t Fri Jan 18 13:28:22 2019 -0500
@@ -278,11 +278,12 @@
>
> def uisetup(ui):
> class badui(ui.__class__):
- > def write_err(self, *args, **kwargs):
+ > def _write(self, dest, *args, **kwargs):
> olderr = self.ferr
> try:
- > self.ferr = fdproxy(self, olderr)
- > return super(badui, self).write_err(*args, **kwargs)
+ > if dest is self.ferr:
+ > self.ferr = dest = fdproxy(self, olderr)
+ > return super(badui, self)._write(dest, *args, **kwargs)
> finally:
> self.ferr = olderr
>
--- a/tests/test-run-tests.t Wed Jan 09 20:00:35 2019 -0800
+++ b/tests/test-run-tests.t Fri Jan 18 13:28:22 2019 -0500
@@ -644,12 +644,14 @@
$ rt --debug 2>&1 | grep -v pwd
running 2 tests using 1 parallel processes
+ + alias hg=hg.exe (windows !)
+ echo *SALT* 0 0 (glob)
*SALT* 0 0 (glob)
+ echo babar
babar
+ echo *SALT* 10 0 (glob)
*SALT* 10 0 (glob)
+ .+ alias hg=hg.exe (windows !)
*+ echo *SALT* 0 0 (glob)
*SALT* 0 0 (glob)
+ echo babar
@@ -714,6 +716,12 @@
(delete the duplicated test file)
$ rm test-failure-copy.t
+multiple runs per test should be parallelized
+
+ $ rt --jobs 2 --runs-per-test 2 test-success.t
+ running 2 tests using 2 parallel processes
+ ..
+ # Ran 2 tests, 0 skipped, 0 failed.
Interactive run
===============
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/tests/test-rust-ancestor.py Fri Jan 18 13:28:22 2019 -0500
@@ -0,0 +1,159 @@
+from __future__ import absolute_import
+import sys
+import unittest
+
+try:
+ from mercurial import rustext
+ rustext.__name__ # trigger immediate actual import
+except ImportError:
+ rustext = None
+else:
+ # this would fail already without appropriate ancestor.__package__
+ from mercurial.rustext.ancestor import (
+ AncestorsIterator,
+ LazyAncestors,
+ MissingAncestors,
+ )
+
+try:
+ from mercurial.cext import parsers as cparsers
+except ImportError:
+ cparsers = None
+
+# picked from test-parse-index2, copied rather than imported
+# so that it stays stable even if test-parse-index2 changes or disappears.
+data_non_inlined = (
+ b'\x00\x00\x00\x01\x00\x00\x00\x00\x00\x01D\x19'
+ b'\x00\x07e\x12\x00\x00\x00\x00\x00\x00\x00\x00\xff\xff\xff\xff'
+ b'\xff\xff\xff\xff\xd1\xf4\xbb\xb0\xbe\xfc\x13\xbd\x8c\xd3\x9d'
+ b'\x0f\xcd\xd9;\x8c\x07\x8cJ/\x00\x00\x00\x00\x00\x00\x00\x00\x00'
+ b'\x00\x00\x00\x00\x00\x00\x01D\x19\x00\x00\x00\x00\x00\xdf\x00'
+ b'\x00\x01q\x00\x00\x00\x01\x00\x00\x00\x01\x00\x00\x00\x00\xff'
+ b'\xff\xff\xff\xc1\x12\xb9\x04\x96\xa4Z1t\x91\xdfsJ\x90\xf0\x9bh'
+ b'\x07l&\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
+ b'\x00\x01D\xf8\x00\x00\x00\x00\x01\x1b\x00\x00\x01\xb8\x00\x00'
+ b'\x00\x01\x00\x00\x00\x02\x00\x00\x00\x01\xff\xff\xff\xff\x02\n'
+ b'\x0e\xc6&\xa1\x92\xae6\x0b\x02i\xfe-\xe5\xbao\x05\xd1\xe7\x00'
+ b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01F'
+ b'\x13\x00\x00\x00\x00\x01\xec\x00\x00\x03\x06\x00\x00\x00\x01'
+ b'\x00\x00\x00\x03\x00\x00\x00\x02\xff\xff\xff\xff\x12\xcb\xeby1'
+ b'\xb6\r\x98B\xcb\x07\xbd`\x8f\x92\xd9\xc4\x84\xbdK\x00\x00\x00'
+ b'\x00\x00\x00\x00\x00\x00\x00\x00\x00'
+ )
+
+
+@unittest.skipIf(rustext is None or cparsers is None,
+ "rustext or the C Extension parsers module "
+ "ancestor relies on is not available")
+class rustancestorstest(unittest.TestCase):
+ """Test the correctness of binding to Rust code.
+
+ This test is merely for the binding to Rust itself: extraction of
+ Python variables, giving back the results, etc.
+
+ It is not meant to test the algorithmic correctness of the operations
+ on ancestors it provides. Hence the very simple embedded index data is
+ good enough.
+
+ Algorithmic correctness is asserted by the Rust unit tests.
+ """
+
+ def parseindex(self):
+ return cparsers.parse_index2(data_non_inlined, False)[0]
+
+ def testiteratorrevlist(self):
+ idx = self.parseindex()
+ # checking test assumption about the index binary data:
+ self.assertEqual({i: (r[5], r[6]) for i, r in enumerate(idx)},
+ {0: (-1, -1),
+ 1: (0, -1),
+ 2: (1, -1),
+ 3: (2, -1)})
+ ait = AncestorsIterator(idx, [3], 0, True)
+ self.assertEqual([r for r in ait], [3, 2, 1, 0])
+
+ ait = AncestorsIterator(idx, [3], 0, False)
+ self.assertEqual([r for r in ait], [2, 1, 0])
+
+ def testlazyancestors(self):
+ idx = self.parseindex()
+ start_count = sys.getrefcount(idx) # should be 2 (see Python doc)
+ self.assertEqual({i: (r[5], r[6]) for i, r in enumerate(idx)},
+ {0: (-1, -1),
+ 1: (0, -1),
+ 2: (1, -1),
+ 3: (2, -1)})
+ lazy = LazyAncestors(idx, [3], 0, True)
+ # we have two more references to the index:
+ # - in its inner iterator for __contains__ and __bool__
+ # - in the LazyAncestors instance itself (to spawn new iterators)
+ self.assertEqual(sys.getrefcount(idx), start_count + 2)
+
+ self.assertTrue(2 in lazy)
+ self.assertTrue(bool(lazy))
+ self.assertEqual(list(lazy), [3, 2, 1, 0])
+ # a second time to validate that we spawn new iterators
+ self.assertEqual(list(lazy), [3, 2, 1, 0])
+
+ # now let's watch the refcounts closer
+ ait = iter(lazy)
+ self.assertEqual(sys.getrefcount(idx), start_count + 3)
+ del ait
+ self.assertEqual(sys.getrefcount(idx), start_count + 2)
+ del lazy
+ self.assertEqual(sys.getrefcount(idx), start_count)
+
+ # let's check bool for an empty one
+ self.assertFalse(LazyAncestors(idx, [0], 0, False))
+
+ def testmissingancestors(self):
+ idx = self.parseindex()
+ missanc = MissingAncestors(idx, [1])
+ self.assertTrue(missanc.hasbases())
+ self.assertEqual(missanc.missingancestors([3]), [2, 3])
+ missanc.addbases({2})
+ self.assertEqual(missanc.bases(), {1, 2})
+ self.assertEqual(missanc.missingancestors([3]), [3])
+ self.assertEqual(missanc.basesheads(), {2})
+
+ def testmissingancestorsremove(self):
+ idx = self.parseindex()
+ missanc = MissingAncestors(idx, [1])
+ revs = {0, 1, 2, 3}
+ missanc.removeancestorsfrom(revs)
+ self.assertEqual(revs, {2, 3})
+
+ def testrefcount(self):
+ idx = self.parseindex()
+ start_count = sys.getrefcount(idx)
+
+ # refcount increases upon iterator init...
+ ait = AncestorsIterator(idx, [3], 0, True)
+ self.assertEqual(sys.getrefcount(idx), start_count + 1)
+ self.assertEqual(next(ait), 3)
+
+ # and decreases once the iterator is removed
+ del ait
+ self.assertEqual(sys.getrefcount(idx), start_count)
+
+ # and removing ref to the index after iterator init is no issue
+ ait = AncestorsIterator(idx, [3], 0, True)
+ del idx
+ self.assertEqual(list(ait), [3, 2, 1, 0])
+
+ def testgrapherror(self):
+ data = (data_non_inlined[:64 + 27] +
+ b'\xf2' +
+ data_non_inlined[64 + 28:])
+ idx = cparsers.parse_index2(data, False)[0]
+ with self.assertRaises(rustext.GraphError) as arc:
+ AncestorsIterator(idx, [1], -1, False)
+ exc = arc.exception
+ self.assertIsInstance(exc, ValueError)
+ # rust-cpython issues appropriate str instances for Python 2 and 3
+ self.assertEqual(exc.args, ('ParentOutOfRange', 1))
+
+
+if __name__ == '__main__':
+ import silenttestrunner
+ silenttestrunner.main(__name__)
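For orientation, data_non_inlined is a four-entry revlog index in the v1
on-disk format, and the (r[5], r[6]) pairs asserted on above are the
parent-revision fields of each 64-byte entry. A decoding sketch, assuming the
standard v1 layout (an assumption for illustration; the test itself goes
through cparsers):

    import struct

    # 64-byte v1 entry: offset/flags, compressed length, raw length,
    # delta base, link rev, p1, p2, 20-byte node hash, 12 bytes padding.
    INDEX_V1 = struct.Struct('>Qiiiiii20s12x')

    def parents(index_data, rev):
        # Decode the p1/p2 fields the tests read back as r[5] and r[6].
        entry = INDEX_V1.unpack_from(index_data, rev * INDEX_V1.size)
        return entry[5], entry[6]

    # e.g. parents(data_non_inlined, 3) -> (2, -1), matching the
    # parseindex assertions in the test class above.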
--- a/tests/test-setdiscovery.t Wed Jan 09 20:00:35 2019 -0800
+++ b/tests/test-setdiscovery.t Fri Jan 18 13:28:22 2019 -0500
@@ -543,14 +543,14 @@
> unrandomsample = $TESTTMP/unrandomsample.py
> EOF
- $ hg -R r1 outgoing r2 -T'{rev} ' --config extensions.blackbox=
+ $ hg -R r1 outgoing r2 -T'{rev} ' --config extensions.blackbox= \
+ > --config blackbox.track='command commandfinish discovery'
comparing with r2
searching for changes
101 102 103 104 105 106 107 108 109 110 (no-eol)
- $ hg -R r1 --config extensions.blackbox= blackbox
+ $ hg -R r1 --config extensions.blackbox= blackbox --config blackbox.track=
* @5d0b986a083e0d91f116de4691e2aaa54d5bbec0 (*)> serve --cmdserver chgunix * (glob) (chg !)
* @5d0b986a083e0d91f116de4691e2aaa54d5bbec0 (*)> -R r1 outgoing r2 *-T{rev} * --config *extensions.blackbox=* (glob)
* @5d0b986a083e0d91f116de4691e2aaa54d5bbec0 (*)> found 101 common and 1 unknown server heads, 2 roundtrips in *.????s (glob)
* @5d0b986a083e0d91f116de4691e2aaa54d5bbec0 (*)> -R r1 outgoing r2 *-T{rev} * --config *extensions.blackbox=* exited 0 after *.?? seconds (glob)
- * @5d0b986a083e0d91f116de4691e2aaa54d5bbec0 (*)> -R r1 --config *extensions.blackbox=* blackbox (glob)
$ cd ..
--- a/tests/test-share.t Wed Jan 09 20:00:35 2019 -0800
+++ b/tests/test-share.t Fri Jan 18 13:28:22 2019 -0500
@@ -28,16 +28,14 @@
default 0:d3873e73d99e
$ hg tags
tip 0:d3873e73d99e
- $ ls -1 .hg/cache || true
- ls: .hg/cache: $ENOENT$ (no-execbit no-symlink !)
+ $ test -d .hg/cache
+ [1]
+ $ ls -1 .hg/wcache || true
checkisexec (execbit !)
checklink (symlink !)
checklink-target (symlink !)
$ ls -1 ../repo1/.hg/cache
branch2-served
- checkisexec (execbit !)
- checklink (symlink !)
- checklink-target (symlink !)
manifestfulltextcache (reporevlogstore !)
rbc-names-v1
rbc-revs-v1
--- a/tests/test-shelve.t Wed Jan 09 20:00:35 2019 -0800
+++ b/tests/test-shelve.t Fri Jan 18 13:28:22 2019 -0500
@@ -668,269 +668,8 @@
$ hg bookmark
\* test (4|13):33f7f61e6c5e (re)
-shelve should leave dirstate clean (issue4055)
-
- $ cd ..
- $ hg init shelverebase
- $ cd shelverebase
- $ printf 'x\ny\n' > x
- $ echo z > z
- $ hg commit -Aqm xy
- $ echo z >> x
- $ hg commit -Aqm z
- $ hg up 5c4c67fb7dce
- 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
- $ printf 'a\nx\ny\nz\n' > x
- $ hg commit -Aqm xyz
- $ echo c >> z
- $ hg shelve
- shelved as default
- 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
-
- $ hg rebase -d 6c103be8f4e4 --config extensions.rebase=
- rebasing 2:323bfa07f744 "xyz"( \(tip\))? (re)
- merging x
- saved backup bundle to \$TESTTMP/shelverebase/.hg/strip-backup/323bfa07f744-(78114325|7ae538ef)-rebase.hg (re)
- $ hg unshelve
- unshelving change 'default'
- rebasing shelved changes
- $ hg status
- M z
-
- $ cd ..
-
-shelve should only unshelve pending changes (issue4068)
-
- $ hg init onlypendingchanges
- $ cd onlypendingchanges
- $ touch a
- $ hg ci -Aqm a
- $ touch b
- $ hg ci -Aqm b
- $ hg up -q 3903775176ed
- $ touch c
- $ hg ci -Aqm c
-
- $ touch d
- $ hg add d
- $ hg shelve
- shelved as default
- 0 files updated, 0 files merged, 1 files removed, 0 files unresolved
- $ hg up -q 0e067c57feba
- $ hg unshelve
- unshelving change 'default'
- rebasing shelved changes
- $ hg status
- A d
-
-unshelve should work on an ancestor of the original commit
-
- $ hg shelve
- shelved as default
- 0 files updated, 0 files merged, 1 files removed, 0 files unresolved
- $ hg up 3903775176ed
- 0 files updated, 0 files merged, 1 files removed, 0 files unresolved
- $ hg unshelve
- unshelving change 'default'
- rebasing shelved changes
- $ hg status
- A d
-
-test bug 4073 we need to enable obsolete markers for it
-
- $ cat >> $HGRCPATH << EOF
- > [experimental]
- > evolution.createmarkers=True
- > EOF
- $ hg shelve
- shelved as default
- 0 files updated, 0 files merged, 1 files removed, 0 files unresolved
- $ hg debugobsolete `hg log -r 0e067c57feba -T '{node}'`
- obsoleted 1 changesets
- $ hg unshelve
- unshelving change 'default'
-
-unshelve should leave unknown files alone (issue4113)
-
- $ echo e > e
- $ hg shelve
- shelved as default
- 0 files updated, 0 files merged, 1 files removed, 0 files unresolved
- $ hg status
- ? e
- $ hg unshelve
- unshelving change 'default'
- $ hg status
- A d
- ? e
- $ cat e
- e
-
-unshelve should keep a copy of unknown files
-
- $ hg add e
- $ hg shelve
- shelved as default
- 0 files updated, 0 files merged, 2 files removed, 0 files unresolved
- $ echo z > e
- $ hg unshelve
- unshelving change 'default'
- $ cat e
- e
- $ cat e.orig
- z
-
-
-unshelve and conflicts with tracked and untracked files
-
- preparing:
-
- $ rm *.orig
- $ hg ci -qm 'commit stuff'
- $ hg phase -p null:
-
- no other changes - no merge:
-
- $ echo f > f
- $ hg add f
- $ hg shelve
- shelved as default
- 0 files updated, 0 files merged, 1 files removed, 0 files unresolved
- $ echo g > f
- $ hg unshelve
- unshelving change 'default'
- $ hg st
- A f
- ? f.orig
- $ cat f
- f
- $ cat f.orig
- g
-
- other uncommitted changes - merge:
-
- $ hg st
- A f
- ? f.orig
- $ hg shelve
- shelved as default
- 0 files updated, 0 files merged, 1 files removed, 0 files unresolved
-#if repobundlerepo
- $ hg log -G --template '{rev} {desc|firstline} {author}' -R bundle://.hg/shelved/default.hg -r 'bundle()' --hidden
- o [48] changes to: commit stuff shelve@localhost (re)
- |
- ~
-#endif
- $ hg log -G --template '{rev} {desc|firstline} {author}'
- @ [37] commit stuff test (re)
- |
- | o 2 c test
- |/
- o 0 a test
-
- $ mv f.orig f
- $ echo 1 > a
- $ hg unshelve --date '1073741824 0'
- unshelving change 'default'
- temporarily committing pending changes (restore with 'hg unshelve --abort')
- rebasing shelved changes
- merging f
- warning: conflicts while merging f! (edit, then use 'hg resolve --mark')
- unresolved conflicts (see 'hg resolve', then 'hg unshelve --continue')
- [1]
-
-#if phasebased
- $ hg log -G --template '{rev} {desc|firstline} {author} {date|isodate}'
- @ 9 pending changes temporary commit shelve@localhost 2004-01-10 13:37 +0000
- |
- | @ 8 changes to: commit stuff shelve@localhost 1970-01-01 00:00 +0000
- |/
- o 7 commit stuff test 1970-01-01 00:00 +0000
- |
- | o 2 c test 1970-01-01 00:00 +0000
- |/
- o 0 a test 1970-01-01 00:00 +0000
-
-#endif
-
-#if stripbased
- $ hg log -G --template '{rev} {desc|firstline} {author} {date|isodate}'
- @ 5 changes to: commit stuff shelve@localhost 1970-01-01 00:00 +0000
- |
- | @ 4 pending changes temporary commit shelve@localhost 2004-01-10 13:37 +0000
- |/
- o 3 commit stuff test 1970-01-01 00:00 +0000
- |
- | o 2 c test 1970-01-01 00:00 +0000
- |/
- o 0 a test 1970-01-01 00:00 +0000
-
-#endif
-
- $ hg st
- M f
- ? f.orig
- $ cat f
- <<<<<<< shelve: d44eae5c3d33 - shelve: pending changes temporary commit
- g
- =======
- f
- >>>>>>> working-copy: aef214a5229c - shelve: changes to: commit stuff
- $ cat f.orig
- g
- $ hg unshelve --abort -t false
- tool option will be ignored
- unshelve of 'default' aborted
- $ hg st
- M a
- ? f.orig
- $ cat f.orig
- g
- $ hg unshelve
- unshelving change 'default'
- temporarily committing pending changes (restore with 'hg unshelve --abort')
- rebasing shelved changes
- $ hg st
- M a
- A f
- ? f.orig
-
- other committed changes - merge:
-
- $ hg shelve f
- shelved as default
- 0 files updated, 0 files merged, 1 files removed, 0 files unresolved
- $ hg ci a -m 'intermediate other change'
- $ mv f.orig f
- $ hg unshelve
- unshelving change 'default'
- rebasing shelved changes
- merging f
- warning: conflicts while merging f! (edit, then use 'hg resolve --mark')
- unresolved conflicts (see 'hg resolve', then 'hg unshelve --continue')
- [1]
- $ hg st
- M f
- ? f.orig
- $ cat f
- <<<<<<< shelve: 6b563750f973 - test: intermediate other change
- g
- =======
- f
- >>>>>>> working-copy: aef214a5229c - shelve: changes to: commit stuff
- $ cat f.orig
- g
- $ hg unshelve --abort
- unshelve of 'default' aborted
- $ hg st
- ? f.orig
- $ cat f.orig
- g
- $ hg shelve --delete default
-
Recreate some conflict again
- $ cd ../repo
$ hg up -C -r 2e69b451d1ea
1 files updated, 0 files merged, 0 files removed, 0 files unresolved
(leaving bookmark test)
@@ -1143,21 +882,6 @@
$ cd ..
-you shouldn't be able to ask for the patch/stats of the most recent shelve if
-there are no shelves
-
- $ hg init noshelves
- $ cd noshelves
-
- $ hg shelve --patch
- abort: there are no shelves to show
- [255]
- $ hg shelve --stat
- abort: there are no shelves to show
- [255]
-
- $ cd ..
-
Shelve from general delta repo uses bundle2 on disk
--------------------------------------------------
@@ -1327,43 +1051,6 @@
$ cd ..
-test .orig files go where the user wants them to
----------------------------------------------------------------
- $ hg init salvage
- $ cd salvage
- $ echo 'content' > root
- $ hg commit -A -m 'root' -q
- $ echo '' > root
- $ hg shelve -q
- $ echo 'contADDent' > root
- $ hg unshelve -q --config 'ui.origbackuppath=.hg/origbackups'
- warning: conflicts while merging root! (edit, then use 'hg resolve --mark')
- unresolved conflicts (see 'hg resolve', then 'hg unshelve --continue')
- [1]
- $ ls .hg/origbackups
- root
- $ rm -rf .hg/origbackups
-
-test Abort unshelve always gets user out of the unshelved state
----------------------------------------------------------------
-
-with a corrupted shelve state file
- $ sed 's/ae8c668541e8/123456789012/' .hg/shelvedstate > ../corrupt-shelvedstate
- $ mv ../corrupt-shelvedstate .hg/shelvestate
- $ hg unshelve --abort 2>&1 | grep 'aborted'
- unshelve of 'default' aborted
- $ hg summary
- parent: 0:ae8c668541e8 tip
- root
- branch: default
- commit: 1 modified
- update: (current)
- phases: 1 draft
- $ hg up -C .
- 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
-
- $ cd ..
-
Keep active bookmark while (un)shelving even on shared repo (issue4940)
-----------------------------------------------------------------------
@@ -1400,505 +1087,3 @@
test (4|13):33f7f61e6c5e (re)
$ cd ..
-
-Shelve and unshelve unknown files. For the purposes of unshelve, a shelved
-unknown file is the same as a shelved added file, except that it will be in
-unknown state after unshelve if and only if it was either absent or unknown
-before the unshelve operation.
-
- $ hg init unknowns
- $ cd unknowns
-
-The simplest case is if I simply have an unknown file that I shelve and unshelve
-
- $ echo unknown > unknown
- $ hg status
- ? unknown
- $ hg shelve --unknown
- shelved as default
- 0 files updated, 0 files merged, 1 files removed, 0 files unresolved
- $ hg status
- $ hg unshelve
- unshelving change 'default'
- $ hg status
- ? unknown
- $ rm unknown
-
-If I shelve, add the file, and unshelve, does it stay added?
-
- $ echo unknown > unknown
- $ hg shelve -u
- shelved as default
- 0 files updated, 0 files merged, 1 files removed, 0 files unresolved
- $ hg status
- $ touch unknown
- $ hg add unknown
- $ hg status
- A unknown
- $ hg unshelve
- unshelving change 'default'
- temporarily committing pending changes (restore with 'hg unshelve --abort')
- rebasing shelved changes
- merging unknown
- $ hg status
- A unknown
- $ hg forget unknown
- $ rm unknown
-
-And if I shelve, commit, then unshelve, does it become modified?
-
- $ echo unknown > unknown
- $ hg shelve -u
- shelved as default
- 0 files updated, 0 files merged, 1 files removed, 0 files unresolved
- $ hg status
- $ touch unknown
- $ hg add unknown
- $ hg commit -qm "Add unknown"
- $ hg status
- $ hg unshelve
- unshelving change 'default'
- rebasing shelved changes
- merging unknown
- $ hg status
- M unknown
- $ hg remove --force unknown
- $ hg commit -qm "Remove unknown"
-
- $ cd ..
-
-We expects that non-bare shelve keeps newly created branch in
-working directory.
-
- $ hg init shelve-preserve-new-branch
- $ cd shelve-preserve-new-branch
- $ echo "a" >> a
- $ hg add a
- $ echo "b" >> b
- $ hg add b
- $ hg commit -m "ab"
- $ echo "aa" >> a
- $ echo "bb" >> b
- $ hg branch new-branch
- marked working directory as branch new-branch
- (branches are permanent and global, did you want a bookmark?)
- $ hg status
- M a
- M b
- $ hg branch
- new-branch
- $ hg shelve a
- shelved as default
- 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
- $ hg branch
- new-branch
- $ hg status
- M b
- $ touch "c" >> c
- $ hg add c
- $ hg status
- M b
- A c
- $ hg shelve --exclude c
- shelved as default-01
- 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
- $ hg branch
- new-branch
- $ hg status
- A c
- $ hg shelve --include c
- shelved as default-02
- 0 files updated, 0 files merged, 1 files removed, 0 files unresolved
- $ hg branch
- new-branch
- $ hg status
- $ echo "d" >> d
- $ hg add d
- $ hg status
- A d
-
-We expect that bare-shelve will not keep branch in current working directory.
-
- $ hg shelve
- shelved as default-03
- 0 files updated, 0 files merged, 1 files removed, 0 files unresolved
- $ hg branch
- default
- $ cd ..
-
-When i shelve commit on newly created branch i expect
-that after unshelve newly created branch will be preserved.
-
- $ hg init shelve_on_new_branch_simple
- $ cd shelve_on_new_branch_simple
- $ echo "aaa" >> a
- $ hg commit -A -m "a"
- adding a
- $ hg branch
- default
- $ hg branch test
- marked working directory as branch test
- (branches are permanent and global, did you want a bookmark?)
- $ echo "bbb" >> a
- $ hg status
- M a
- $ hg shelve
- shelved as default
- 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
- $ hg branch
- default
- $ echo "bbb" >> b
- $ hg status
- ? b
- $ hg unshelve
- unshelving change 'default'
- marked working directory as branch test
- $ hg status
- M a
- ? b
- $ hg branch
- test
- $ cd ..
-
-When i shelve commit on newly created branch, make
-some changes, unshelve it and running into merge
-conflicts i expect that after fixing them and
-running unshelve --continue newly created branch
-will be preserved.
-
- $ hg init shelve_on_new_branch_conflict
- $ cd shelve_on_new_branch_conflict
- $ echo "aaa" >> a
- $ hg commit -A -m "a"
- adding a
- $ hg branch
- default
- $ hg branch test
- marked working directory as branch test
- (branches are permanent and global, did you want a bookmark?)
- $ echo "bbb" >> a
- $ hg status
- M a
- $ hg shelve
- shelved as default
- 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
- $ hg branch
- default
- $ echo "ccc" >> a
- $ hg status
- M a
- $ hg unshelve
- unshelving change 'default'
- temporarily committing pending changes (restore with 'hg unshelve --abort')
- rebasing shelved changes
- merging a
- warning: conflicts while merging a! (edit, then use 'hg resolve --mark')
- unresolved conflicts (see 'hg resolve', then 'hg unshelve --continue')
- [1]
- $ echo "aaabbbccc" > a
- $ rm a.orig
- $ hg resolve --mark a
- (no more unresolved files)
- continue: hg unshelve --continue
- $ hg unshelve --continue
- marked working directory as branch test
- unshelve of 'default' complete
- $ cat a
- aaabbbccc
- $ hg status
- M a
- $ hg branch
- test
- $ hg commit -m "test-commit"
-
-When i shelve on test branch, update to default branch
-and unshelve i expect that it will not preserve previous
-test branch.
-
- $ echo "xxx" > b
- $ hg add b
- $ hg shelve
- shelved as test
- 0 files updated, 0 files merged, 1 files removed, 0 files unresolved
- $ hg update -r 7049e48789d7
- 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
- $ hg unshelve
- unshelving change 'test'
- rebasing shelved changes
- $ hg status
- A b
- $ hg branch
- default
- $ cd ..
-
-When i unshelve resulting in merge conflicts and makes saved
-file shelvedstate looks like in previous versions in
-mercurial(without restore branch information in 7th line) i
-expect that after resolving conflicts and successfully
-running 'shelve --continue' the branch information won't be
-restored and branch will be unchanged.
-
-shelve on new branch, conflict with previous shelvedstate
-
- $ hg init conflict
- $ cd conflict
- $ echo "aaa" >> a
- $ hg commit -A -m "a"
- adding a
- $ hg branch
- default
- $ hg branch test
- marked working directory as branch test
- (branches are permanent and global, did you want a bookmark?)
- $ echo "bbb" >> a
- $ hg status
- M a
- $ hg shelve
- shelved as default
- 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
- $ hg branch
- default
- $ echo "ccc" >> a
- $ hg status
- M a
- $ hg unshelve
- unshelving change 'default'
- temporarily committing pending changes (restore with 'hg unshelve --abort')
- rebasing shelved changes
- merging a
- warning: conflicts while merging a! (edit, then use 'hg resolve --mark')
- unresolved conflicts (see 'hg resolve', then 'hg unshelve --continue')
- [1]
-
-Removing restore branch information from shelvedstate file(making it looks like
-in previous versions) and running unshelve --continue
-
- $ cp .hg/shelvedstate .hg/shelvedstate_old
- $ cat .hg/shelvedstate_old | grep -v 'branchtorestore' > .hg/shelvedstate
-
- $ echo "aaabbbccc" > a
- $ rm a.orig
- $ hg resolve --mark a
- (no more unresolved files)
- continue: hg unshelve --continue
- $ hg unshelve --continue
- unshelve of 'default' complete
- $ cat a
- aaabbbccc
- $ hg status
- M a
- $ hg branch
- default
- $ cd ..
-
-On non bare shelve the branch information shouldn't be restored
-
- $ hg init bare_shelve_on_new_branch
- $ cd bare_shelve_on_new_branch
- $ echo "aaa" >> a
- $ hg commit -A -m "a"
- adding a
- $ hg branch
- default
- $ hg branch test
- marked working directory as branch test
- (branches are permanent and global, did you want a bookmark?)
- $ echo "bbb" >> a
- $ hg status
- M a
- $ hg shelve a
- shelved as default
- 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
- $ hg branch
- test
- $ hg branch default
- marked working directory as branch default
- (branches are permanent and global, did you want a bookmark?)
- $ echo "bbb" >> b
- $ hg status
- ? b
- $ hg unshelve
- unshelving change 'default'
- $ hg status
- M a
- ? b
- $ hg branch
- default
- $ cd ..
-
-Prepare unshelve with a corrupted shelvedstate
- $ hg init r1 && cd r1
- $ echo text1 > file && hg add file
- $ hg shelve
- shelved as default
- 0 files updated, 0 files merged, 1 files removed, 0 files unresolved
- $ echo text2 > file && hg ci -Am text1
- adding file
- $ hg unshelve
- unshelving change 'default'
- rebasing shelved changes
- merging file
- warning: conflicts while merging file! (edit, then use 'hg resolve --mark')
- unresolved conflicts (see 'hg resolve', then 'hg unshelve --continue')
- [1]
- $ echo somethingsomething > .hg/shelvedstate
-
-Unshelve --continue fails with appropriate message if shelvedstate is corrupted
- $ hg unshelve --continue
- abort: corrupted shelved state file
- (please run hg unshelve --abort to abort unshelve operation)
- [255]
-
-Unshelve --abort works with a corrupted shelvedstate
- $ hg unshelve --abort
- could not read shelved state file, your working copy may be in an unexpected state
- please update to some commit
-
-Unshelve --abort fails with appropriate message if there's no unshelve in
-progress
- $ hg unshelve --abort
- abort: no unshelve in progress
- [255]
- $ cd ..
-
-Unshelve respects --keep even if user intervention is needed
- $ hg init unshelvekeep && cd unshelvekeep
- $ echo 1 > file && hg ci -Am 1
- adding file
- $ echo 2 >> file
- $ hg shelve
- shelved as default
- 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
- $ echo 3 >> file && hg ci -Am 13
- $ hg shelve --list
- default (*s ago) * changes to: 1 (glob)
- $ hg unshelve --keep
- unshelving change 'default'
- rebasing shelved changes
- merging file
- warning: conflicts while merging file! (edit, then use 'hg resolve --mark')
- unresolved conflicts (see 'hg resolve', then 'hg unshelve --continue')
- [1]
- $ hg resolve --mark file
- (no more unresolved files)
- continue: hg unshelve --continue
- $ hg unshelve --continue
- unshelve of 'default' complete
- $ hg shelve --list
- default (*s ago) * changes to: 1 (glob)
- $ cd ..
-
-Unshelving when there are deleted files does not crash (issue4176)
- $ hg init unshelve-deleted-file && cd unshelve-deleted-file
- $ echo a > a && echo b > b && hg ci -Am ab
- adding a
- adding b
- $ echo aa > a && hg shelve
- shelved as default
- 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
- $ rm b
- $ hg st
- ! b
- $ hg unshelve
- unshelving change 'default'
- $ hg shelve
- shelved as default
- 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
- $ rm a && echo b > b
- $ hg st
- ! a
- $ hg unshelve
- unshelving change 'default'
- abort: shelved change touches missing files
- (run hg status to see which files are missing)
- [255]
- $ hg st
- ! a
- $ cd ..
-
-New versions of Mercurial know how to read onld shelvedstate files
- $ hg init oldshelvedstate
- $ cd oldshelvedstate
- $ echo root > root && hg ci -Am root
- adding root
- $ echo 1 > a
- $ hg add a
- $ hg shelve --name ashelve
- shelved as ashelve
- 0 files updated, 0 files merged, 1 files removed, 0 files unresolved
- $ echo 2 > a
- $ hg ci -Am a
- adding a
- $ hg unshelve
- unshelving change 'ashelve'
- rebasing shelved changes
- merging a
- warning: conflicts while merging a! (edit, then use 'hg resolve --mark')
- unresolved conflicts (see 'hg resolve', then 'hg unshelve --continue')
- [1]
-putting v1 shelvedstate file in place of a created v2
- $ cat << EOF > .hg/shelvedstate
- > 1
- > ashelve
- > 8b058dae057a5a78f393f4535d9e363dd5efac9d
- > 8b058dae057a5a78f393f4535d9e363dd5efac9d
- > 8b058dae057a5a78f393f4535d9e363dd5efac9d f543b27db2cdb41737e2e0008dc524c471da1446
- > f543b27db2cdb41737e2e0008dc524c471da1446
- >
- > nokeep
- > :no-active-bookmark
- > EOF
- $ echo 1 > a
- $ hg resolve --mark a
- (no more unresolved files)
- continue: hg unshelve --continue
-mercurial does not crash
- $ hg unshelve --continue
- unshelve of 'ashelve' complete
-
-#if phasebased
-
-Unshelve with some metadata file missing
-----------------------------------------
-
- $ hg shelve
- shelved as default
- 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
- $ echo 3 > a
-
-Test with the `.shelve` missing, but the changeset still in the repo (non-natural case)
-
- $ rm .hg/shelved/default.shelve
- $ hg unshelve
- unshelving change 'default'
- temporarily committing pending changes (restore with 'hg unshelve --abort')
- rebasing shelved changes
- merging a
- warning: conflicts while merging a! (edit, then use 'hg resolve --mark')
- unresolved conflicts (see 'hg resolve', then 'hg unshelve --continue')
- [1]
- $ hg unshelve --abort
- unshelve of 'default' aborted
-
-Unshelve without .shelve metadata (can happen when upgrading a repository with old shelve)
-
- $ cat .hg/shelved/default.shelve
- node=82e0cb9893247d12667017593ce1e5655860f1ac
- $ hg strip --hidden --rev 82e0cb989324 --no-backup
- $ rm .hg/shelved/default.shelve
- $ hg unshelve
- unshelving change 'default'
- temporarily committing pending changes (restore with 'hg unshelve --abort')
- rebasing shelved changes
- merging a
- warning: conflicts while merging a! (edit, then use 'hg resolve --mark')
- unresolved conflicts (see 'hg resolve', then 'hg unshelve --continue')
- [1]
- $ cat .hg/shelved/default.shelve
- node=82e0cb9893247d12667017593ce1e5655860f1ac
- $ hg unshelve --abort
- unshelve of 'default' aborted
-
-#endif
-
- $ cd ..
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/tests/test-shelve2.t Fri Jan 18 13:28:22 2019 -0500
@@ -0,0 +1,835 @@
+#testcases stripbased phasebased
+
+ $ cat <<EOF >> $HGRCPATH
+ > [extensions]
+ > mq =
+ > shelve =
+ > [defaults]
+ > diff = --nodates --git
+ > qnew = --date '0 0'
+ > [shelve]
+ > maxbackups = 2
+ > EOF
+
+#if phasebased
+
+ $ cat <<EOF >> $HGRCPATH
+ > [format]
+ > internal-phase = yes
+ > EOF
+
+#endif
+
+shelve should leave dirstate clean (issue4055)
+
+ $ hg init shelverebase
+ $ cd shelverebase
+ $ printf 'x\ny\n' > x
+ $ echo z > z
+ $ hg commit -Aqm xy
+ $ echo z >> x
+ $ hg commit -Aqm z
+ $ hg up 5c4c67fb7dce
+ 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
+ $ printf 'a\nx\ny\nz\n' > x
+ $ hg commit -Aqm xyz
+ $ echo c >> z
+ $ hg shelve
+ shelved as default
+ 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
+
+ $ hg rebase -d 6c103be8f4e4 --config extensions.rebase=
+ rebasing 2:323bfa07f744 "xyz"( \(tip\))? (re)
+ merging x
+ saved backup bundle to \$TESTTMP/shelverebase/.hg/strip-backup/323bfa07f744-(78114325|7ae538ef)-rebase.hg (re)
+ $ hg unshelve
+ unshelving change 'default'
+ rebasing shelved changes
+ $ hg status
+ M z
+
+ $ cd ..
+
+unshelve should only restore the pending changes (issue4068)
+
+ $ hg init onlypendingchanges
+ $ cd onlypendingchanges
+ $ touch a
+ $ hg ci -Aqm a
+ $ touch b
+ $ hg ci -Aqm b
+ $ hg up -q 3903775176ed
+ $ touch c
+ $ hg ci -Aqm c
+
+ $ touch d
+ $ hg add d
+ $ hg shelve
+ shelved as default
+ 0 files updated, 0 files merged, 1 files removed, 0 files unresolved
+ $ hg up -q 0e067c57feba
+ $ hg unshelve
+ unshelving change 'default'
+ rebasing shelved changes
+ $ hg status
+ A d
+
+unshelve should work on an ancestor of the original commit
+
+ $ hg shelve
+ shelved as default
+ 0 files updated, 0 files merged, 1 files removed, 0 files unresolved
+ $ hg up 3903775176ed
+ 0 files updated, 0 files merged, 1 files removed, 0 files unresolved
+ $ hg unshelve
+ unshelving change 'default'
+ rebasing shelved changes
+ $ hg status
+ A d
+
+test bug 4073; we need to enable obsolete markers for it
+
+ $ cat >> $HGRCPATH << EOF
+ > [experimental]
+ > evolution.createmarkers=True
+ > EOF
+ $ hg shelve
+ shelved as default
+ 0 files updated, 0 files merged, 1 files removed, 0 files unresolved
+ $ hg debugobsolete `hg log -r 0e067c57feba -T '{node}'`
+ obsoleted 1 changesets
+ $ hg unshelve
+ unshelving change 'default'
+
+unshelve should leave unknown files alone (issue4113)
+
+ $ echo e > e
+ $ hg shelve
+ shelved as default
+ 0 files updated, 0 files merged, 1 files removed, 0 files unresolved
+ $ hg status
+ ? e
+ $ hg unshelve
+ unshelving change 'default'
+ $ hg status
+ A d
+ ? e
+ $ cat e
+ e
+
+unshelve should keep a copy of unknown files
+
+ $ hg add e
+ $ hg shelve
+ shelved as default
+ 0 files updated, 0 files merged, 2 files removed, 0 files unresolved
+ $ echo z > e
+ $ hg unshelve
+ unshelving change 'default'
+ $ cat e
+ e
+ $ cat e.orig
+ z
+
+
+unshelve and conflicts with tracked and untracked files
+
+ preparing:
+
+ $ rm *.orig
+ $ hg ci -qm 'commit stuff'
+ $ hg phase -p null:
+
+ no other changes - no merge:
+
+ $ echo f > f
+ $ hg add f
+ $ hg shelve
+ shelved as default
+ 0 files updated, 0 files merged, 1 files removed, 0 files unresolved
+ $ echo g > f
+ $ hg unshelve
+ unshelving change 'default'
+ $ hg st
+ A f
+ ? f.orig
+ $ cat f
+ f
+ $ cat f.orig
+ g
+
+ other uncommitted changes - merge:
+
+ $ hg st
+ A f
+ ? f.orig
+ $ hg shelve
+ shelved as default
+ 0 files updated, 0 files merged, 1 files removed, 0 files unresolved
+#if repobundlerepo
+ $ hg log -G --template '{rev} {desc|firstline} {author}' -R bundle://.hg/shelved/default.hg -r 'bundle()' --hidden
+ o [48] changes to: commit stuff shelve@localhost (re)
+ |
+ ~
+#endif
+ $ hg log -G --template '{rev} {desc|firstline} {author}'
+ @ [37] commit stuff test (re)
+ |
+ | o 2 c test
+ |/
+ o 0 a test
+
+ $ mv f.orig f
+ $ echo 1 > a
+ $ hg unshelve --date '1073741824 0'
+ unshelving change 'default'
+ temporarily committing pending changes (restore with 'hg unshelve --abort')
+ rebasing shelved changes
+ merging f
+ warning: conflicts while merging f! (edit, then use 'hg resolve --mark')
+ unresolved conflicts (see 'hg resolve', then 'hg unshelve --continue')
+ [1]
+
+#if phasebased
+ $ hg log -G --template '{rev} {desc|firstline} {author} {date|isodate}'
+ @ 9 pending changes temporary commit shelve@localhost 2004-01-10 13:37 +0000
+ |
+ | @ 8 changes to: commit stuff shelve@localhost 1970-01-01 00:00 +0000
+ |/
+ o 7 commit stuff test 1970-01-01 00:00 +0000
+ |
+ | o 2 c test 1970-01-01 00:00 +0000
+ |/
+ o 0 a test 1970-01-01 00:00 +0000
+
+#endif
+
+#if stripbased
+ $ hg log -G --template '{rev} {desc|firstline} {author} {date|isodate}'
+ @ 5 changes to: commit stuff shelve@localhost 1970-01-01 00:00 +0000
+ |
+ | @ 4 pending changes temporary commit shelve@localhost 2004-01-10 13:37 +0000
+ |/
+ o 3 commit stuff test 1970-01-01 00:00 +0000
+ |
+ | o 2 c test 1970-01-01 00:00 +0000
+ |/
+ o 0 a test 1970-01-01 00:00 +0000
+
+#endif
+
+ $ hg st
+ M f
+ ? f.orig
+ $ cat f
+ <<<<<<< shelve: d44eae5c3d33 - shelve: pending changes temporary commit
+ g
+ =======
+ f
+ >>>>>>> working-copy: aef214a5229c - shelve: changes to: commit stuff
+ $ cat f.orig
+ g
+ $ hg unshelve --abort -t false
+ tool option will be ignored
+ unshelve of 'default' aborted
+ $ hg st
+ M a
+ ? f.orig
+ $ cat f.orig
+ g
+ $ hg unshelve
+ unshelving change 'default'
+ temporarily committing pending changes (restore with 'hg unshelve --abort')
+ rebasing shelved changes
+ $ hg st
+ M a
+ A f
+ ? f.orig
+
+ other committed changes - merge:
+
+ $ hg shelve f
+ shelved as default
+ 0 files updated, 0 files merged, 1 files removed, 0 files unresolved
+ $ hg ci a -m 'intermediate other change'
+ $ mv f.orig f
+ $ hg unshelve
+ unshelving change 'default'
+ rebasing shelved changes
+ merging f
+ warning: conflicts while merging f! (edit, then use 'hg resolve --mark')
+ unresolved conflicts (see 'hg resolve', then 'hg unshelve --continue')
+ [1]
+ $ hg st
+ M f
+ ? f.orig
+ $ cat f
+ <<<<<<< shelve: 6b563750f973 - test: intermediate other change
+ g
+ =======
+ f
+ >>>>>>> working-copy: aef214a5229c - shelve: changes to: commit stuff
+ $ cat f.orig
+ g
+ $ hg unshelve --abort
+ unshelve of 'default' aborted
+ $ hg st
+ ? f.orig
+ $ cat f.orig
+ g
+ $ hg shelve --delete default
+ $ cd ..
+
+you shouldn't be able to ask for the patch/stats of the most recent shelve if
+there are no shelves
+
+ $ hg init noshelves
+ $ cd noshelves
+
+ $ hg shelve --patch
+ abort: there are no shelves to show
+ [255]
+ $ hg shelve --stat
+ abort: there are no shelves to show
+ [255]
+
+ $ cd ..
+
+test that .orig files go where the user wants them to
+---------------------------------------------------------------
+ $ hg init salvage
+ $ cd salvage
+ $ echo 'content' > root
+ $ hg commit -A -m 'root' -q
+ $ echo '' > root
+ $ hg shelve -q
+ $ echo 'contADDent' > root
+ $ hg unshelve -q --config 'ui.origbackuppath=.hg/origbackups'
+ warning: conflicts while merging root! (edit, then use 'hg resolve --mark')
+ unresolved conflicts (see 'hg resolve', then 'hg unshelve --continue')
+ [1]
+ $ ls .hg/origbackups
+ root
+ $ rm -rf .hg/origbackups
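
The backup location is given per-invocation with --config above; the same
knob can live in an hgrc. A minimal sketch, reusing the ui.origbackuppath
option exercised by this test:

  [ui]
  origbackuppath = .hg/origbackups

As the ls above shows, .orig files then land under .hg/origbackups instead
of next to the conflicted file.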
+
+test that aborting an unshelve always gets the user out of the unshelved state
+---------------------------------------------------------------
+
+with a corrupted shelve state file
+ $ sed 's/ae8c668541e8/123456789012/' .hg/shelvedstate > ../corrupt-shelvedstate
+ $ mv ../corrupt-shelvedstate .hg/shelvestate
+ $ hg unshelve --abort 2>&1 | grep 'aborted'
+ unshelve of 'default' aborted
+ $ hg summary
+ parent: 0:ae8c668541e8 tip
+ root
+ branch: default
+ commit: 1 modified
+ update: (current)
+ phases: 1 draft
+ $ hg up -C .
+ 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
+
+ $ cd ..
+
+Shelve and unshelve unknown files. For the purposes of unshelve, a shelved
+unknown file is the same as a shelved added file, except that it will be in
+unknown state after unshelve if and only if it was either absent or unknown
+before the unshelve operation.
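
As a reading aid for the three scenarios below, the rule can be restated
as a tiny state function. This is only a sketch; the state names and the
function are illustrative, not Mercurial APIs:

  # Illustrative restatement of the rule above; 'absent', 'unknown',
  # 'added' and 'committed' are informal labels, not dirstate codes.
  def state_after_unshelve(state_before):
      if state_before in ('absent', 'unknown'):
          return 'unknown'    # first scenario: stays untracked
      if state_before == 'added':
          return 'added'      # second scenario: stays added
      return 'modified'       # third scenario: committed, so modified

  assert state_after_unshelve('unknown') == 'unknown'
  assert state_after_unshelve('added') == 'added'
  assert state_after_unshelve('committed') == 'modified'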
+
+ $ hg init unknowns
+ $ cd unknowns
+
+The simplest case is if I simply have an unknown file that I shelve and unshelve
+
+ $ echo unknown > unknown
+ $ hg status
+ ? unknown
+ $ hg shelve --unknown
+ shelved as default
+ 0 files updated, 0 files merged, 1 files removed, 0 files unresolved
+ $ hg status
+ $ hg unshelve
+ unshelving change 'default'
+ $ hg status
+ ? unknown
+ $ rm unknown
+
+If I shelve, add the file, and unshelve, does it stay added?
+
+ $ echo unknown > unknown
+ $ hg shelve -u
+ shelved as default
+ 0 files updated, 0 files merged, 1 files removed, 0 files unresolved
+ $ hg status
+ $ touch unknown
+ $ hg add unknown
+ $ hg status
+ A unknown
+ $ hg unshelve
+ unshelving change 'default'
+ temporarily committing pending changes (restore with 'hg unshelve --abort')
+ rebasing shelved changes
+ merging unknown
+ $ hg status
+ A unknown
+ $ hg forget unknown
+ $ rm unknown
+
+And if I shelve, commit, then unshelve, does it become modified?
+
+ $ echo unknown > unknown
+ $ hg shelve -u
+ shelved as default
+ 0 files updated, 0 files merged, 1 files removed, 0 files unresolved
+ $ hg status
+ $ touch unknown
+ $ hg add unknown
+ $ hg commit -qm "Add unknown"
+ $ hg status
+ $ hg unshelve
+ unshelving change 'default'
+ rebasing shelved changes
+ merging unknown
+ $ hg status
+ M unknown
+ $ hg remove --force unknown
+ $ hg commit -qm "Remove unknown"
+
+ $ cd ..
+
+We expect that a non-bare shelve keeps the newly created branch
+in the working directory.
+
+ $ hg init shelve-preserve-new-branch
+ $ cd shelve-preserve-new-branch
+ $ echo "a" >> a
+ $ hg add a
+ $ echo "b" >> b
+ $ hg add b
+ $ hg commit -m "ab"
+ $ echo "aa" >> a
+ $ echo "bb" >> b
+ $ hg branch new-branch
+ marked working directory as branch new-branch
+ (branches are permanent and global, did you want a bookmark?)
+ $ hg status
+ M a
+ M b
+ $ hg branch
+ new-branch
+ $ hg shelve a
+ shelved as default
+ 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
+ $ hg branch
+ new-branch
+ $ hg status
+ M b
+ $ touch c
+ $ hg add c
+ $ hg status
+ M b
+ A c
+ $ hg shelve --exclude c
+ shelved as default-01
+ 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
+ $ hg branch
+ new-branch
+ $ hg status
+ A c
+ $ hg shelve --include c
+ shelved as default-02
+ 0 files updated, 0 files merged, 1 files removed, 0 files unresolved
+ $ hg branch
+ new-branch
+ $ hg status
+ $ echo "d" >> d
+ $ hg add d
+ $ hg status
+ A d
+
+We expect that a bare shelve will not keep the branch in the current working directory.
+
+ $ hg shelve
+ shelved as default-03
+ 0 files updated, 0 files merged, 1 files removed, 0 files unresolved
+ $ hg branch
+ default
+ $ cd ..
+
+When I shelve changes on a newly created branch, I expect
+that after unshelve the newly created branch will be preserved.
+
+ $ hg init shelve_on_new_branch_simple
+ $ cd shelve_on_new_branch_simple
+ $ echo "aaa" >> a
+ $ hg commit -A -m "a"
+ adding a
+ $ hg branch
+ default
+ $ hg branch test
+ marked working directory as branch test
+ (branches are permanent and global, did you want a bookmark?)
+ $ echo "bbb" >> a
+ $ hg status
+ M a
+ $ hg shelve
+ shelved as default
+ 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
+ $ hg branch
+ default
+ $ echo "bbb" >> b
+ $ hg status
+ ? b
+ $ hg unshelve
+ unshelving change 'default'
+ marked working directory as branch test
+ $ hg status
+ M a
+ ? b
+ $ hg branch
+ test
+ $ cd ..
+
+When I shelve changes on a newly created branch, make
+some further changes, unshelve, and run into merge
+conflicts, I expect that after fixing them and
+running 'unshelve --continue' the newly created
+branch will be preserved.
+
+ $ hg init shelve_on_new_branch_conflict
+ $ cd shelve_on_new_branch_conflict
+ $ echo "aaa" >> a
+ $ hg commit -A -m "a"
+ adding a
+ $ hg branch
+ default
+ $ hg branch test
+ marked working directory as branch test
+ (branches are permanent and global, did you want a bookmark?)
+ $ echo "bbb" >> a
+ $ hg status
+ M a
+ $ hg shelve
+ shelved as default
+ 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
+ $ hg branch
+ default
+ $ echo "ccc" >> a
+ $ hg status
+ M a
+ $ hg unshelve
+ unshelving change 'default'
+ temporarily committing pending changes (restore with 'hg unshelve --abort')
+ rebasing shelved changes
+ merging a
+ warning: conflicts while merging a! (edit, then use 'hg resolve --mark')
+ unresolved conflicts (see 'hg resolve', then 'hg unshelve --continue')
+ [1]
+ $ echo "aaabbbccc" > a
+ $ rm a.orig
+ $ hg resolve --mark a
+ (no more unresolved files)
+ continue: hg unshelve --continue
+ $ hg unshelve --continue
+ marked working directory as branch test
+ unshelve of 'default' complete
+ $ cat a
+ aaabbbccc
+ $ hg status
+ M a
+ $ hg branch
+ test
+ $ hg commit -m "test-commit"
+
+When I shelve on the test branch, update to the default
+branch, and unshelve, I expect that the previous test
+branch will not be preserved.
+
+ $ echo "xxx" > b
+ $ hg add b
+ $ hg shelve
+ shelved as test
+ 0 files updated, 0 files merged, 1 files removed, 0 files unresolved
+ $ hg update -r 7049e48789d7
+ 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
+ $ hg unshelve
+ unshelving change 'test'
+ rebasing shelved changes
+ $ hg status
+ A b
+ $ hg branch
+ default
+ $ cd ..
+
+When I unshelve into merge conflicts and make the saved
+shelvedstate file look like it did in previous versions of
+Mercurial (without the restore-branch information on the 7th
+line), I expect that after resolving the conflicts and
+successfully running 'unshelve --continue' the branch
+information won't be restored and the branch will be unchanged.
+
+shelve on new branch, conflict with previous shelvedstate
+
+ $ hg init conflict
+ $ cd conflict
+ $ echo "aaa" >> a
+ $ hg commit -A -m "a"
+ adding a
+ $ hg branch
+ default
+ $ hg branch test
+ marked working directory as branch test
+ (branches are permanent and global, did you want a bookmark?)
+ $ echo "bbb" >> a
+ $ hg status
+ M a
+ $ hg shelve
+ shelved as default
+ 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
+ $ hg branch
+ default
+ $ echo "ccc" >> a
+ $ hg status
+ M a
+ $ hg unshelve
+ unshelving change 'default'
+ temporarily committing pending changes (restore with 'hg unshelve --abort')
+ rebasing shelved changes
+ merging a
+ warning: conflicts while merging a! (edit, then use 'hg resolve --mark')
+ unresolved conflicts (see 'hg resolve', then 'hg unshelve --continue')
+ [1]
+
+Removing the restore-branch information from the shelvedstate file
+(making it look like older versions) and running unshelve --continue
+
+ $ cp .hg/shelvedstate .hg/shelvedstate_old
+ $ cat .hg/shelvedstate_old | grep -v 'branchtorestore' > .hg/shelvedstate
+
+ $ echo "aaabbbccc" > a
+ $ rm a.orig
+ $ hg resolve --mark a
+ (no more unresolved files)
+ continue: hg unshelve --continue
+ $ hg unshelve --continue
+ unshelve of 'default' complete
+ $ cat a
+ aaabbbccc
+ $ hg status
+ M a
+ $ hg branch
+ default
+ $ cd ..
+
+On a non-bare shelve the branch information shouldn't be restored
+
+ $ hg init bare_shelve_on_new_branch
+ $ cd bare_shelve_on_new_branch
+ $ echo "aaa" >> a
+ $ hg commit -A -m "a"
+ adding a
+ $ hg branch
+ default
+ $ hg branch test
+ marked working directory as branch test
+ (branches are permanent and global, did you want a bookmark?)
+ $ echo "bbb" >> a
+ $ hg status
+ M a
+ $ hg shelve a
+ shelved as default
+ 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
+ $ hg branch
+ test
+ $ hg branch default
+ marked working directory as branch default
+ (branches are permanent and global, did you want a bookmark?)
+ $ echo "bbb" >> b
+ $ hg status
+ ? b
+ $ hg unshelve
+ unshelving change 'default'
+ $ hg status
+ M a
+ ? b
+ $ hg branch
+ default
+ $ cd ..
+
+Prepare unshelve with a corrupted shelvedstate
+ $ hg init r1 && cd r1
+ $ echo text1 > file && hg add file
+ $ hg shelve
+ shelved as default
+ 0 files updated, 0 files merged, 1 files removed, 0 files unresolved
+ $ echo text2 > file && hg ci -Am text1
+ adding file
+ $ hg unshelve
+ unshelving change 'default'
+ rebasing shelved changes
+ merging file
+ warning: conflicts while merging file! (edit, then use 'hg resolve --mark')
+ unresolved conflicts (see 'hg resolve', then 'hg unshelve --continue')
+ [1]
+ $ echo somethingsomething > .hg/shelvedstate
+
+Unshelve --continue fails with appropriate message if shelvedstate is corrupted
+ $ hg unshelve --continue
+ abort: corrupted shelved state file
+ (please run hg unshelve --abort to abort unshelve operation)
+ [255]
+
+Unshelve --abort works with a corrupted shelvedstate
+ $ hg unshelve --abort
+ could not read shelved state file, your working copy may be in an unexpected state
+ please update to some commit
+
+Unshelve --abort fails with appropriate message if there's no unshelve in
+progress
+ $ hg unshelve --abort
+ abort: no unshelve in progress
+ [255]
+ $ cd ..
+
+Unshelve respects --keep even if user intervention is needed
+ $ hg init unshelvekeep && cd unshelvekeep
+ $ echo 1 > file && hg ci -Am 1
+ adding file
+ $ echo 2 >> file
+ $ hg shelve
+ shelved as default
+ 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
+ $ echo 3 >> file && hg ci -Am 13
+ $ hg shelve --list
+ default (*s ago) * changes to: 1 (glob)
+ $ hg unshelve --keep
+ unshelving change 'default'
+ rebasing shelved changes
+ merging file
+ warning: conflicts while merging file! (edit, then use 'hg resolve --mark')
+ unresolved conflicts (see 'hg resolve', then 'hg unshelve --continue')
+ [1]
+ $ hg resolve --mark file
+ (no more unresolved files)
+ continue: hg unshelve --continue
+ $ hg unshelve --continue
+ unshelve of 'default' complete
+ $ hg shelve --list
+ default (*s ago) * changes to: 1 (glob)
+ $ cd ..
+
+Unshelving when there are deleted files does not crash (issue4176)
+ $ hg init unshelve-deleted-file && cd unshelve-deleted-file
+ $ echo a > a && echo b > b && hg ci -Am ab
+ adding a
+ adding b
+ $ echo aa > a && hg shelve
+ shelved as default
+ 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
+ $ rm b
+ $ hg st
+ ! b
+ $ hg unshelve
+ unshelving change 'default'
+ $ hg shelve
+ shelved as default
+ 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
+ $ rm a && echo b > b
+ $ hg st
+ ! a
+ $ hg unshelve
+ unshelving change 'default'
+ abort: shelved change touches missing files
+ (run hg status to see which files are missing)
+ [255]
+ $ hg st
+ ! a
+ $ cd ..
+
+New versions of Mercurial know how to read old shelvedstate files
+ $ hg init oldshelvedstate
+ $ cd oldshelvedstate
+ $ echo root > root && hg ci -Am root
+ adding root
+ $ echo 1 > a
+ $ hg add a
+ $ hg shelve --name ashelve
+ shelved as ashelve
+ 0 files updated, 0 files merged, 1 files removed, 0 files unresolved
+ $ echo 2 > a
+ $ hg ci -Am a
+ adding a
+ $ hg unshelve
+ unshelving change 'ashelve'
+ rebasing shelved changes
+ merging a
+ warning: conflicts while merging a! (edit, then use 'hg resolve --mark')
+ unresolved conflicts (see 'hg resolve', then 'hg unshelve --continue')
+ [1]
+putting a v1 shelvedstate file in place of the created v2
+ $ cat << EOF > .hg/shelvedstate
+ > 1
+ > ashelve
+ > 8b058dae057a5a78f393f4535d9e363dd5efac9d
+ > 8b058dae057a5a78f393f4535d9e363dd5efac9d
+ > 8b058dae057a5a78f393f4535d9e363dd5efac9d f543b27db2cdb41737e2e0008dc524c471da1446
+ > f543b27db2cdb41737e2e0008dc524c471da1446
+ >
+ > nokeep
+ > :no-active-bookmark
+ > EOF
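
The heredoc writes a purely line-oriented file. For orientation, a rough
reader might look like the sketch below; the field names are inferred from
this block alone and are not the identifiers Mercurial's shelve extension
actually uses:

  # Hypothetical parser for the v1 shelvedstate block written above;
  # field names are guesses from the test, not Mercurial's own names.
  def read_shelvedstate_v1(path):
      with open(path) as f:
          lines = f.read().splitlines()
      return {
          'version': int(lines[0]),        # "1"
          'name': lines[1],                # "ashelve"
          'originalwctx': lines[2],        # working-copy node
          'pendingctx': lines[3],
          'parents': lines[4].split(),     # two space-separated nodes
          'nodestoremove': lines[5].split(),
          'branchtorestore': lines[6],     # empty: nothing to restore
          'keep': lines[7] != 'nokeep',    # "nokeep" here
          'activebookmark': lines[8],      # ":no-active-bookmark"
      }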
+ $ echo 1 > a
+ $ hg resolve --mark a
+ (no more unresolved files)
+ continue: hg unshelve --continue
+Mercurial does not crash
+ $ hg unshelve --continue
+ unshelve of 'ashelve' complete
+
+#if phasebased
+
+Unshelve with some metadata file missing
+----------------------------------------
+
+ $ hg shelve
+ shelved as default
+ 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
+ $ echo 3 > a
+
+Test with the `.shelve` file missing, but the changeset still in the repo (non-natural case)
+
+ $ rm .hg/shelved/default.shelve
+ $ hg unshelve
+ unshelving change 'default'
+ temporarily committing pending changes (restore with 'hg unshelve --abort')
+ rebasing shelved changes
+ merging a
+ warning: conflicts while merging a! (edit, then use 'hg resolve --mark')
+ unresolved conflicts (see 'hg resolve', then 'hg unshelve --continue')
+ [1]
+ $ hg unshelve --abort
+ unshelve of 'default' aborted
+
+Unshelve without .shelve metadata (this can happen when upgrading a repository with an old shelve)
+
+ $ cat .hg/shelved/default.shelve
+ node=82e0cb9893247d12667017593ce1e5655860f1ac
+ $ hg strip --hidden --rev 82e0cb989324 --no-backup
+ $ rm .hg/shelved/default.shelve
+ $ hg unshelve
+ unshelving change 'default'
+ temporarily committing pending changes (restore with 'hg unshelve --abort')
+ rebasing shelved changes
+ merging a
+ warning: conflicts while merging a! (edit, then use 'hg resolve --mark')
+ unresolved conflicts (see 'hg resolve', then 'hg unshelve --continue')
+ [1]
+ $ cat .hg/shelved/default.shelve
+ node=82e0cb9893247d12667017593ce1e5655860f1ac
+ $ hg unshelve --abort
+ unshelve of 'default' aborted
+
+#endif
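
The `.shelve` sidecar shown above is a simple key=value file; only the
node key appears in this test. A minimal reader, assuming exactly that
layout (an assumption, since the test exercises a single key):

  # Minimal sketch of reading the .shelve sidecar, assuming plain
  # key=value lines as shown in the transcript above.
  def read_shelve_info(path):
      info = {}
      with open(path) as f:
          for line in f:
              key, _, value = line.strip().partition('=')
              info[key] = value
      return info    # e.g. {'node': '82e0cb989324...'}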
+
+ $ cd ..
--- a/tests/test-sparse-requirement.t Wed Jan 09 20:00:35 2019 -0800
+++ b/tests/test-sparse-requirement.t Fri Jan 18 13:28:22 2019 -0500
@@ -21,6 +21,7 @@
fncache
generaldelta
revlogv1
+ sparserevlog
store
testonly-simplestore (reposimplestore !)
@@ -37,6 +38,7 @@
fncache
generaldelta
revlogv1
+ sparserevlog
store
testonly-simplestore (reposimplestore !)
@@ -55,6 +57,7 @@
fncache
generaldelta
revlogv1
+ sparserevlog
store
testonly-simplestore (reposimplestore !)
--- a/tests/test-sqlitestore.t Wed Jan 09 20:00:35 2019 -0800
+++ b/tests/test-sqlitestore.t Fri Jan 18 13:28:22 2019 -0500
@@ -13,6 +13,7 @@
fncache
generaldelta
revlogv1
+ sparserevlog
store
storage.new-repo-backend=sqlite is recognized
@@ -26,6 +27,7 @@
fncache
generaldelta
revlogv1
+ sparserevlog
store
$ cat >> $HGRCPATH << EOF
@@ -43,6 +45,7 @@
fncache
generaldelta
revlogv1
+ sparserevlog
store
Can force compression to none
@@ -55,6 +58,7 @@
fncache
generaldelta
revlogv1
+ sparserevlog
store
Can make a local commit
--- a/tests/test-ssh-bundle1.t Wed Jan 09 20:00:35 2019 -0800
+++ b/tests/test-ssh-bundle1.t Fri Jan 18 13:28:22 2019 -0500
@@ -482,9 +482,9 @@
sending upgrade request: * proto=exp-ssh-v2-0003 (glob) (sshv2 !)
sending hello command
sending between command
- remote: 427 (sshv1 !)
+ remote: 440 (sshv1 !)
protocol upgraded to exp-ssh-v2-0003 (sshv2 !)
- remote: capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
+ remote: capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
remote: 1 (sshv1 !)
sending protocaps command
preparing listkeys for "bookmarks"
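
The changed byte counts in this and the following ssh tests track the
capabilities line exactly: appending ",sparserevlog" to streamreqs
lengthens it by 13 bytes, so the sshv1 frame grows from 427 to 440 and the
post-upgrade sshv2 frame from 426 to 439 (one byte less because, as the
read(1) lines show, v2 reads the trailing newline separately). A quick
check:

  # Sanity-check the frame lengths that change throughout the ssh tests.
  assert len(",sparserevlog") == 13
  assert 427 + 13 == 440   # sshv1: count includes the trailing newline
  assert 426 + 13 == 439   # sshv2: trailing newline is read separately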
--- a/tests/test-ssh-proto-unbundle.t Wed Jan 09 20:00:35 2019 -0800
+++ b/tests/test-ssh-proto-unbundle.t Fri Jan 18 13:28:22 2019 -0500
@@ -56,9 +56,9 @@
i> 0000000000000000000000000000000000000000-0000000000000000000000000000000000000000
i> flush() -> None
o> readline() -> 4:
- o> 427\n
- o> readline() -> 427:
- o> capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash\n
+ o> 440\n
+ o> readline() -> 440:
+ o> capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash\n
o> readline() -> 2:
o> 1\n
o> readline() -> 1:
@@ -109,8 +109,8 @@
o> readline() -> 62:
o> upgraded * exp-ssh-v2-0003\n (glob)
o> readline() -> 4:
- o> 426\n
- o> read(426) -> 426: capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
+ o> 439\n
+ o> read(439) -> 439: capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
o> read(1) -> 1:
o> \n
sending unbundle command
@@ -235,9 +235,9 @@
i> 0000000000000000000000000000000000000000-0000000000000000000000000000000000000000
i> flush() -> None
o> readline() -> 4:
- o> 427\n
- o> readline() -> 427:
- o> capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash\n
+ o> 440\n
+ o> readline() -> 440:
+ o> capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash\n
o> readline() -> 2:
o> 1\n
o> readline() -> 1:
@@ -294,8 +294,8 @@
o> readline() -> 62:
o> upgraded * exp-ssh-v2-0003\n (glob)
o> readline() -> 4:
- o> 426\n
- o> read(426) -> 426: capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
+ o> 439\n
+ o> read(439) -> 439: capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
o> read(1) -> 1:
o> \n
sending unbundle command
@@ -361,9 +361,9 @@
i> 0000000000000000000000000000000000000000-0000000000000000000000000000000000000000
i> flush() -> None
o> readline() -> 4:
- o> 427\n
- o> readline() -> 427:
- o> capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash\n
+ o> 440\n
+ o> readline() -> 440:
+ o> capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash\n
o> readline() -> 2:
o> 1\n
o> readline() -> 1:
@@ -421,8 +421,8 @@
o> readline() -> 62:
o> upgraded * exp-ssh-v2-0003\n (glob)
o> readline() -> 4:
- o> 426\n
- o> read(426) -> 426: capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
+ o> 439\n
+ o> read(439) -> 439: capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
o> read(1) -> 1:
o> \n
sending unbundle command
@@ -489,9 +489,9 @@
i> 0000000000000000000000000000000000000000-0000000000000000000000000000000000000000
i> flush() -> None
o> readline() -> 4:
- o> 427\n
- o> readline() -> 427:
- o> capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash\n
+ o> 440\n
+ o> readline() -> 440:
+ o> capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash\n
o> readline() -> 2:
o> 1\n
o> readline() -> 1:
@@ -548,8 +548,8 @@
o> readline() -> 62:
o> upgraded * exp-ssh-v2-0003\n (glob)
o> readline() -> 4:
- o> 426\n
- o> read(426) -> 426: capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
+ o> 439\n
+ o> read(439) -> 439: capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
o> read(1) -> 1:
o> \n
sending unbundle command
@@ -615,9 +615,9 @@
i> 0000000000000000000000000000000000000000-0000000000000000000000000000000000000000
i> flush() -> None
o> readline() -> 4:
- o> 427\n
- o> readline() -> 427:
- o> capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash\n
+ o> 440\n
+ o> readline() -> 440:
+ o> capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash\n
o> readline() -> 2:
o> 1\n
o> readline() -> 1:
@@ -675,8 +675,8 @@
o> readline() -> 62:
o> upgraded * exp-ssh-v2-0003\n (glob)
o> readline() -> 4:
- o> 426\n
- o> read(426) -> 426: capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
+ o> 439\n
+ o> read(439) -> 439: capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
o> read(1) -> 1:
o> \n
sending unbundle command
@@ -743,9 +743,9 @@
i> 0000000000000000000000000000000000000000-0000000000000000000000000000000000000000
i> flush() -> None
o> readline() -> 4:
- o> 427\n
- o> readline() -> 427:
- o> capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash\n
+ o> 440\n
+ o> readline() -> 440:
+ o> capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash\n
o> readline() -> 2:
o> 1\n
o> readline() -> 1:
@@ -805,8 +805,8 @@
o> readline() -> 62:
o> upgraded * exp-ssh-v2-0003\n (glob)
o> readline() -> 4:
- o> 426\n
- o> read(426) -> 426: capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
+ o> 439\n
+ o> read(439) -> 439: capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
o> read(1) -> 1:
o> \n
sending unbundle command
@@ -875,9 +875,9 @@
i> 0000000000000000000000000000000000000000-0000000000000000000000000000000000000000
i> flush() -> None
o> readline() -> 4:
- o> 427\n
- o> readline() -> 427:
- o> capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash\n
+ o> 440\n
+ o> readline() -> 440:
+ o> capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash\n
o> readline() -> 2:
o> 1\n
o> readline() -> 1:
@@ -934,8 +934,8 @@
o> readline() -> 62:
o> upgraded * exp-ssh-v2-0003\n (glob)
o> readline() -> 4:
- o> 426\n
- o> read(426) -> 426: capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
+ o> 439\n
+ o> read(439) -> 439: capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
o> read(1) -> 1:
o> \n
sending unbundle command
@@ -1001,9 +1001,9 @@
i> 0000000000000000000000000000000000000000-0000000000000000000000000000000000000000
i> flush() -> None
o> readline() -> 4:
- o> 427\n
- o> readline() -> 427:
- o> capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash\n
+ o> 440\n
+ o> readline() -> 440:
+ o> capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash\n
o> readline() -> 2:
o> 1\n
o> readline() -> 1:
@@ -1063,8 +1063,8 @@
o> readline() -> 62:
o> upgraded * exp-ssh-v2-0003\n (glob)
o> readline() -> 4:
- o> 426\n
- o> read(426) -> 426: capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
+ o> 439\n
+ o> read(439) -> 439: capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
o> read(1) -> 1:
o> \n
sending unbundle command
@@ -1133,9 +1133,9 @@
i> 0000000000000000000000000000000000000000-0000000000000000000000000000000000000000
i> flush() -> None
o> readline() -> 4:
- o> 427\n
- o> readline() -> 427:
- o> capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash\n
+ o> 440\n
+ o> readline() -> 440:
+ o> capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash\n
o> readline() -> 2:
o> 1\n
o> readline() -> 1:
@@ -1195,8 +1195,8 @@
o> readline() -> 62:
o> upgraded * exp-ssh-v2-0003\n (glob)
o> readline() -> 4:
- o> 426\n
- o> read(426) -> 426: capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
+ o> 439\n
+ o> read(439) -> 439: capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
o> read(1) -> 1:
o> \n
sending unbundle command
@@ -1271,9 +1271,9 @@
i> 0000000000000000000000000000000000000000-0000000000000000000000000000000000000000
i> flush() -> None
o> readline() -> 4:
- o> 427\n
- o> readline() -> 427:
- o> capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash\n
+ o> 440\n
+ o> readline() -> 440:
+ o> capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash\n
o> readline() -> 2:
o> 1\n
o> readline() -> 1:
@@ -1331,8 +1331,8 @@
o> readline() -> 62:
o> upgraded * exp-ssh-v2-0003\n (glob)
o> readline() -> 4:
- o> 426\n
- o> read(426) -> 426: capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
+ o> 439\n
+ o> read(439) -> 439: capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
o> read(1) -> 1:
o> \n
sending unbundle command
@@ -1400,9 +1400,9 @@
i> 0000000000000000000000000000000000000000-0000000000000000000000000000000000000000
i> flush() -> None
o> readline() -> 4:
- o> 427\n
- o> readline() -> 427:
- o> capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash\n
+ o> 440\n
+ o> readline() -> 440:
+ o> capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash\n
o> readline() -> 2:
o> 1\n
o> readline() -> 1:
@@ -1460,8 +1460,8 @@
o> readline() -> 62:
o> upgraded * exp-ssh-v2-0003\n (glob)
o> readline() -> 4:
- o> 426\n
- o> read(426) -> 426: capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
+ o> 439\n
+ o> read(439) -> 439: capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
o> read(1) -> 1:
o> \n
sending unbundle command
@@ -1531,9 +1531,9 @@
i> 0000000000000000000000000000000000000000-0000000000000000000000000000000000000000
i> flush() -> None
o> readline() -> 4:
- o> 427\n
- o> readline() -> 427:
- o> capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash\n
+ o> 440\n
+ o> readline() -> 440:
+ o> capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash\n
o> readline() -> 2:
o> 1\n
o> readline() -> 1:
@@ -1593,8 +1593,8 @@
o> readline() -> 62:
o> upgraded * exp-ssh-v2-0003\n (glob)
o> readline() -> 4:
- o> 426\n
- o> read(426) -> 426: capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
+ o> 439\n
+ o> read(439) -> 439: capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
o> read(1) -> 1:
o> \n
sending unbundle command
@@ -1672,9 +1672,9 @@
i> 0000000000000000000000000000000000000000-0000000000000000000000000000000000000000
i> flush() -> None
o> readline() -> 4:
- o> 427\n
- o> readline() -> 427:
- o> capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash\n
+ o> 440\n
+ o> readline() -> 440:
+ o> capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash\n
o> readline() -> 2:
o> 1\n
o> readline() -> 1:
@@ -1738,8 +1738,8 @@
o> readline() -> 62:
o> upgraded * exp-ssh-v2-0003\n (glob)
o> readline() -> 4:
- o> 426\n
- o> read(426) -> 426: capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
+ o> 439\n
+ o> read(439) -> 439: capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
o> read(1) -> 1:
o> \n
sending unbundle command
@@ -1812,9 +1812,9 @@
i> 0000000000000000000000000000000000000000-0000000000000000000000000000000000000000
i> flush() -> None
o> readline() -> 4:
- o> 427\n
- o> readline() -> 427:
- o> capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash\n
+ o> 440\n
+ o> readline() -> 440:
+ o> capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash\n
o> readline() -> 2:
o> 1\n
o> readline() -> 1:
@@ -1867,8 +1867,8 @@
o> readline() -> 62:
o> upgraded * exp-ssh-v2-0003\n (glob)
o> readline() -> 4:
- o> 426\n
- o> read(426) -> 426: capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
+ o> 439\n
+ o> read(439) -> 439: capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
o> read(1) -> 1:
o> \n
sending unbundle command
@@ -1942,9 +1942,9 @@
i> 0000000000000000000000000000000000000000-0000000000000000000000000000000000000000
i> flush() -> None
o> readline() -> 4:
- o> 427\n
- o> readline() -> 427:
- o> capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash\n
+ o> 440\n
+ o> readline() -> 440:
+ o> capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash\n
o> readline() -> 2:
o> 1\n
o> readline() -> 1:
@@ -2001,8 +2001,8 @@
o> readline() -> 62:
o> upgraded * exp-ssh-v2-0003\n (glob)
o> readline() -> 4:
- o> 426\n
- o> read(426) -> 426: capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
+ o> 439\n
+ o> read(439) -> 439: capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
o> read(1) -> 1:
o> \n
sending unbundle command
--- a/tests/test-ssh-proto.t Wed Jan 09 20:00:35 2019 -0800
+++ b/tests/test-ssh-proto.t Fri Jan 18 13:28:22 2019 -0500
@@ -64,8 +64,8 @@
devel-peer-request: pairs: 81 bytes
sending hello command
sending between command
- remote: 427
- remote: capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
+ remote: 440
+ remote: capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
remote: 1
devel-peer-request: protocaps
devel-peer-request: caps: * bytes (glob)
@@ -86,9 +86,9 @@
i> write(6) -> 6:
i> hello\n
o> readline() -> 4:
- o> 427\n
- o> readline() -> 427:
- o> capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash\n
+ o> 440\n
+ o> readline() -> 440:
+ o> capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash\n
`hg debugserve --sshstdio` works
@@ -96,8 +96,8 @@
$ hg debugserve --sshstdio << EOF
> hello
> EOF
- 427
- capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
+ 440
+ capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
I/O logging works
@@ -105,24 +105,24 @@
> hello
> EOF
o> write(4) -> 4:
- o> 427\n
- o> write(427) -> 427:
- o> capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash\n
- 427
- capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
+ o> 440\n
+ o> write(440) -> 440:
+ o> capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash\n
+ 440
+ capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
o> flush() -> None
$ hg debugserve --sshstdio --logiofile $TESTTMP/io << EOF
> hello
> EOF
- 427
- capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
+ 440
+ capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
$ cat $TESTTMP/io
o> write(4) -> 4:
- o> 427\n
- o> write(427) -> 427:
- o> capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash\n
+ o> 440\n
+ o> write(440) -> 440:
+ o> capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash\n
o> flush() -> None
$ cd ..
@@ -147,9 +147,9 @@
i> write(6) -> 6:
i> hello\n
o> readline() -> 4:
- o> 427\n
- o> readline() -> 427:
- o> capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash\n
+ o> 440\n
+ o> readline() -> 440:
+ o> capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash\n
i> write(98) -> 98:
i> between\n
i> pairs 81\n
@@ -185,8 +185,8 @@
remote: banner: line 7
remote: banner: line 8
remote: banner: line 9
- remote: 427
- remote: capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
+ remote: 440
+ remote: capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
remote: 1
devel-peer-request: protocaps
devel-peer-request: caps: * bytes (glob)
@@ -243,9 +243,9 @@
o> readline() -> 15:
o> banner: line 9\n
o> readline() -> 4:
- o> 427\n
- o> readline() -> 427:
- o> capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash\n
+ o> 440\n
+ o> readline() -> 440:
+ o> capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash\n
i> write(98) -> 98:
i> between\n
i> pairs 81\n
@@ -295,13 +295,13 @@
i> write(6) -> 6:
i> hello\n
o> readline() -> 4:
- o> 427\n
+ o> 440\n
i> write(98) -> 98:
i> between\n
i> pairs 81\n
i> 0000000000000000000000000000000000000000-0000000000000000000000000000000000000000
- o> readline() -> 427:
- o> capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash\n
+ o> readline() -> 440:
+ o> capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash\n
o> readline() -> 2:
o> 1\n
@@ -314,8 +314,8 @@
sending hello command
sending between command
remote: 0
- remote: 427
- remote: capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
+ remote: 440
+ remote: capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
remote: 1
devel-peer-request: protocaps
devel-peer-request: caps: * bytes (glob)
@@ -363,9 +363,9 @@
i> write(6) -> 6:
i> hello\n
o> readline() -> 4:
- o> 427\n
- o> readline() -> 427:
- o> capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash\n
+ o> 440\n
+ o> readline() -> 440:
+ o> capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash\n
i> write(98) -> 98:
i> between\n
i> pairs 81\n
@@ -388,8 +388,8 @@
remote: 0
remote: 0
remote: 0
- remote: 427
- remote: capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
+ remote: 440
+ remote: capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
remote: 1
devel-peer-request: protocaps
devel-peer-request: caps: * bytes (glob)
@@ -445,9 +445,9 @@
i> write(6) -> 6:
i> hello\n
o> readline() -> 4:
- o> 427\n
- o> readline() -> 427:
- o> capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash\n
+ o> 440\n
+ o> readline() -> 440:
+ o> capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash\n
i> write(98) -> 98:
i> between\n
i> pairs 81\n
@@ -492,9 +492,9 @@
i> write(6) -> 6:
i> hello\n
o> readline() -> 4:
- o> 427\n
- o> readline() -> 427:
- o> capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash\n
+ o> 440\n
+ o> readline() -> 440:
+ o> capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash\n
i> write(98) -> 98:
i> between\n
i> pairs 81\n
@@ -537,9 +537,9 @@
i> write(6) -> 6:
i> hello\n
o> readline() -> 4:
- o> 427\n
- o> readline() -> 427:
- o> capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash\n
+ o> 440\n
+ o> readline() -> 440:
+ o> capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash\n
i> write(98) -> 98:
i> between\n
i> pairs 81\n
@@ -607,9 +607,9 @@
i> write(6) -> 6:
i> hello\n
o> readline() -> 4:
- o> 427\n
- o> readline() -> 427:
- o> capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash\n
+ o> 440\n
+ o> readline() -> 440:
+ o> capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash\n
Incomplete dictionary send
@@ -689,9 +689,9 @@
i> write(6) -> 6:
i> hello\n
o> readline() -> 4:
- o> 427\n
- o> readline() -> 427:
- o> capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash\n
+ o> 440\n
+ o> readline() -> 440:
+ o> capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash\n
i> write(98) -> 98:
i> between\n
i> pairs 81\n
@@ -723,9 +723,9 @@
i> write(6) -> 6:
i> hello\n
o> readline() -> 4:
- o> 427\n
- o> readline() -> 427:
- o> capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash\n
+ o> 440\n
+ o> readline() -> 440:
+ o> capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash\n
i> write(98) -> 98:
i> between\n
i> pairs 81\n
@@ -766,9 +766,9 @@
i> write(6) -> 6:
i> hello\n
o> readline() -> 4:
- o> 427\n
- o> readline() -> 427:
- o> capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash\n
+ o> 440\n
+ o> readline() -> 440:
+ o> capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash\n
i> write(98) -> 98:
i> between\n
i> pairs 81\n
@@ -795,9 +795,9 @@
i> write(6) -> 6:
i> hello\n
o> readline() -> 4:
- o> 427\n
- o> readline() -> 427:
- o> capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash\n
+ o> 440\n
+ o> readline() -> 440:
+ o> capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash\n
i> write(105) -> 105:
i> between\n
i> pairs 81\n
@@ -836,9 +836,9 @@
i> pairs 81\n
i> 0000000000000000000000000000000000000000-0000000000000000000000000000000000000000
o> readline() -> 4:
- o> 427\n
- o> readline() -> 427:
- o> capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash\n
+ o> 440\n
+ o> readline() -> 440:
+ o> capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash\n
o> readline() -> 2:
o> 1\n
o> readline() -> 1:
@@ -885,9 +885,9 @@
o> readline() -> 41:
o> 68986213bd4485ea51533535e3fc9e78007a711f\n
o> readline() -> 4:
- o> 427\n
- o> readline() -> 427:
- o> capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash\n
+ o> 440\n
+ o> readline() -> 440:
+ o> capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash\n
o> readline() -> 2:
o> 1\n
@@ -912,7 +912,7 @@
o> readline() -> 41:
o> 68986213bd4485ea51533535e3fc9e78007a711f\n
o> readline() -> 4:
- o> 427\n
+ o> 440\n
Send an upgrade request to a server that doesn't support that command
@@ -941,9 +941,9 @@
i> pairs 81\n
i> 0000000000000000000000000000000000000000-0000000000000000000000000000000000000000
o> readline() -> 4:
- o> 427\n
- o> readline() -> 427:
- o> capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash\n
+ o> 440\n
+ o> readline() -> 440:
+ o> capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash\n
o> readline() -> 2:
o> 1\n
o> readline() -> 1:
@@ -960,8 +960,8 @@
sending hello command
sending between command
remote: 0
- remote: 427
- remote: capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
+ remote: 440
+ remote: capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
remote: 1
devel-peer-request: protocaps
devel-peer-request: caps: * bytes (glob)
@@ -1003,9 +1003,9 @@
o> readline() -> 44:
o> upgraded this-is-some-token exp-ssh-v2-0003\n
o> readline() -> 4:
- o> 426\n
- o> readline() -> 427:
- o> capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash\n
+ o> 439\n
+ o> readline() -> 440:
+ o> capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash\n
$ cd ..
@@ -1018,7 +1018,7 @@
sending hello command
sending between command
protocol upgraded to exp-ssh-v2-0003
- remote: capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
+ remote: capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
devel-peer-request: protocaps
devel-peer-request: caps: * bytes (glob)
sending protocaps command
@@ -1037,7 +1037,7 @@
sending hello command
sending between command
protocol upgraded to exp-ssh-v2-0003
- remote: capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
+ remote: capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
devel-peer-request: protocaps
devel-peer-request: caps: * bytes (glob)
sending protocaps command
@@ -1051,7 +1051,7 @@
lookup
protocaps
pushkey
- streamreqs=generaldelta,revlogv1
+ streamreqs=generaldelta,revlogv1,sparserevlog
unbundle=HG10GZ,HG10BZ,HG10UN
unbundlehash
Bundle2 capabilities:
@@ -1110,15 +1110,15 @@
o> readline() -> 44:
o> upgraded this-is-some-token exp-ssh-v2-0003\n
o> readline() -> 4:
- o> 426\n
- o> readline() -> 427:
- o> capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash\n
+ o> 439\n
+ o> readline() -> 440:
+ o> capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash\n
i> write(6) -> 6:
i> hello\n
o> readline() -> 4:
- o> 411\n
- o> readline() -> 411:
- o> capabilities: branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash\n
+ o> 424\n
+ o> readline() -> 424:
+ o> capabilities: branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash\n
Multiple upgrades is not allowed
@@ -1148,9 +1148,9 @@
o> readline() -> 44:
o> upgraded this-is-some-token exp-ssh-v2-0003\n
o> readline() -> 4:
- o> 426\n
- o> readline() -> 427:
- o> capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash\n
+ o> 439\n
+ o> readline() -> 440:
+ o> capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash\n
i> write(45) -> 45:
i> upgrade another-token proto=irrelevant\n
i> hello\n
@@ -1220,9 +1220,9 @@
i> write(6) -> 6:
i> hello\n
o> readline() -> 4:
- o> 427\n
- o> readline() -> 427:
- o> capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash\n
+ o> 440\n
+ o> readline() -> 440:
+ o> capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash\n
i> write(98) -> 98:
i> between\n
i> pairs 81\n
@@ -1339,9 +1339,9 @@
i> 0000000000000000000000000000000000000000-0000000000000000000000000000000000000000
i> flush() -> None
o> readline() -> 4:
- o> 427\n
- o> readline() -> 427:
- o> capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash\n
+ o> 440\n
+ o> readline() -> 440:
+ o> capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash\n
o> readline() -> 2:
o> 1\n
o> readline() -> 1:
@@ -1377,8 +1377,8 @@
o> readline() -> 62:
o> upgraded * exp-ssh-v2-0003\n (glob)
o> readline() -> 4:
- o> 426\n
- o> read(426) -> 426: capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
+ o> 439\n
+ o> read(439) -> 439: capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
o> read(1) -> 1:
o> \n
sending listkeys command
@@ -1427,9 +1427,9 @@
i> 0000000000000000000000000000000000000000-0000000000000000000000000000000000000000
i> flush() -> None
o> readline() -> 4:
- o> 427\n
- o> readline() -> 427:
- o> capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash\n
+ o> 440\n
+ o> readline() -> 440:
+ o> capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash\n
o> readline() -> 2:
o> 1\n
o> readline() -> 1:
@@ -1457,8 +1457,8 @@
o> readline() -> 62:
o> upgraded * exp-ssh-v2-0003\n (glob)
o> readline() -> 4:
- o> 426\n
- o> read(426) -> 426: capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
+ o> 439\n
+ o> read(439) -> 439: capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
o> read(1) -> 1:
o> \n
sending listkeys command
@@ -1488,9 +1488,9 @@
i> 0000000000000000000000000000000000000000-0000000000000000000000000000000000000000
i> flush() -> None
o> readline() -> 4:
- o> 427\n
- o> readline() -> 427:
- o> capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash\n
+ o> 440\n
+ o> readline() -> 440:
+ o> capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash\n
o> readline() -> 2:
o> 1\n
o> readline() -> 1:
@@ -1521,8 +1521,8 @@
o> readline() -> 62:
o> upgraded * exp-ssh-v2-0003\n (glob)
o> readline() -> 4:
- o> 426\n
- o> read(426) -> 426: capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
+ o> 439\n
+ o> read(439) -> 439: capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
o> read(1) -> 1:
o> \n
sending listkeys command
@@ -1555,9 +1555,9 @@
i> 0000000000000000000000000000000000000000-0000000000000000000000000000000000000000
i> flush() -> None
o> readline() -> 4:
- o> 427\n
- o> readline() -> 427:
- o> capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash\n
+ o> 440\n
+ o> readline() -> 440:
+ o> capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash\n
o> readline() -> 2:
o> 1\n
o> readline() -> 1:
@@ -1591,8 +1591,8 @@
o> readline() -> 62:
o> upgraded * exp-ssh-v2-0003\n (glob)
o> readline() -> 4:
- o> 426\n
- o> read(426) -> 426: capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
+ o> 439\n
+ o> read(439) -> 439: capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
o> read(1) -> 1:
o> \n
sending listkeys command
@@ -1630,9 +1630,9 @@
i> 0000000000000000000000000000000000000000-0000000000000000000000000000000000000000
i> flush() -> None
o> readline() -> 4:
- o> 427\n
- o> readline() -> 427:
- o> capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash\n
+ o> 440\n
+ o> readline() -> 440:
+ o> capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash\n
o> readline() -> 2:
o> 1\n
o> readline() -> 1:
@@ -1670,8 +1670,8 @@
o> readline() -> 62:
o> upgraded * exp-ssh-v2-0003\n (glob)
o> readline() -> 4:
- o> 426\n
- o> read(426) -> 426: capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
+ o> 439\n
+ o> read(439) -> 439: capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
o> read(1) -> 1:
o> \n
sending pushkey command
@@ -1722,9 +1722,9 @@
i> 0000000000000000000000000000000000000000-0000000000000000000000000000000000000000
i> flush() -> None
o> readline() -> 4:
- o> 427\n
- o> readline() -> 427:
- o> capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash\n
+ o> 440\n
+ o> readline() -> 440:
+ o> capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash\n
o> readline() -> 2:
o> 1\n
o> readline() -> 1:
@@ -1755,8 +1755,8 @@
o> readline() -> 62:
o> upgraded * exp-ssh-v2-0003\n (glob)
o> readline() -> 4:
- o> 426\n
- o> read(426) -> 426: capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
+ o> 439\n
+ o> read(439) -> 439: capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
o> read(1) -> 1:
o> \n
sending listkeys command
@@ -1805,9 +1805,9 @@
i> 0000000000000000000000000000000000000000-0000000000000000000000000000000000000000
i> flush() -> None
o> readline() -> 4:
- o> 427\n
- o> readline() -> 427:
- o> capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash\n
+ o> 440\n
+ o> readline() -> 440:
+ o> capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash\n
o> readline() -> 2:
o> 1\n
o> readline() -> 1:
@@ -1843,8 +1843,8 @@
o> readline() -> 62:
o> upgraded * exp-ssh-v2-0003\n (glob)
o> readline() -> 4:
- o> 426\n
- o> read(426) -> 426: capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
+ o> 439\n
+ o> read(439) -> 439: capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
o> read(1) -> 1:
o> \n
sending listkeys command
@@ -1882,9 +1882,9 @@
i> 0000000000000000000000000000000000000000-0000000000000000000000000000000000000000
i> flush() -> None
o> readline() -> 4:
- o> 427\n
- o> readline() -> 427:
- o> capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash\n
+ o> 440\n
+ o> readline() -> 440:
+ o> capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash\n
o> readline() -> 2:
o> 1\n
o> readline() -> 1:
@@ -1918,8 +1918,8 @@
o> readline() -> 62:
o> upgraded * exp-ssh-v2-0003\n (glob)
o> readline() -> 4:
- o> 426\n
- o> read(426) -> 426: capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
+ o> 439\n
+ o> read(439) -> 439: capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
o> read(1) -> 1:
o> \n
sending listkeys command
@@ -1955,9 +1955,9 @@
i> 0000000000000000000000000000000000000000-0000000000000000000000000000000000000000
i> flush() -> None
o> readline() -> 4:
- o> 427\n
- o> readline() -> 427:
- o> capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash\n
+ o> 440\n
+ o> readline() -> 440:
+ o> capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash\n
o> readline() -> 2:
o> 1\n
o> readline() -> 1:
@@ -1988,8 +1988,8 @@
o> readline() -> 62:
o> upgraded * exp-ssh-v2-0003\n (glob)
o> readline() -> 4:
- o> 426\n
- o> read(426) -> 426: capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
+ o> 439\n
+ o> read(439) -> 439: capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
o> read(1) -> 1:
o> \n
sending listkeys command
@@ -2026,9 +2026,9 @@
i> 0000000000000000000000000000000000000000-0000000000000000000000000000000000000000
i> flush() -> None
o> readline() -> 4:
- o> 427\n
- o> readline() -> 427:
- o> capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash\n
+ o> 440\n
+ o> readline() -> 440:
+ o> capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash\n
o> readline() -> 2:
o> 1\n
o> readline() -> 1:
@@ -2067,8 +2067,8 @@
o> readline() -> 62:
o> upgraded * exp-ssh-v2-0003\n (glob)
o> readline() -> 4:
- o> 426\n
- o> read(426) -> 426: capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
+ o> 439\n
+ o> read(439) -> 439: capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
o> read(1) -> 1:
o> \n
sending pushkey command
@@ -2133,9 +2133,9 @@
i> 0000000000000000000000000000000000000000-0000000000000000000000000000000000000000
i> flush() -> None
o> readline() -> 4:
- o> 427\n
- o> readline() -> 427:
- o> capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash\n
+ o> 440\n
+ o> readline() -> 440:
+ o> capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash\n
o> readline() -> 2:
o> 1\n
o> readline() -> 1:
@@ -2173,8 +2173,8 @@
o> readline() -> 62:
o> upgraded * exp-ssh-v2-0003\n (glob)
o> readline() -> 4:
- o> 426\n
- o> read(426) -> 426: capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
+ o> 439\n
+ o> read(439) -> 439: capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
o> read(1) -> 1:
o> \n
sending batch with 3 sub-commands
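
The byte-count churn in the wire-protocol tests above is mechanical:
advertising sparse revlogs appends ",sparserevlog" (13 bytes) to the
streamreqs capability, so every hello-response length grows by exactly 13
(427 to 440, 426 to 439 after the ssh-v2 upgrade, and 411 to 424 for the
shorter capability string without batch and protocaps). A quick check in
plain Python:

  >>> len(",sparserevlog")
  13
  >>> [n + 13 for n in (427, 426, 411)]
  [440, 439, 424]
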
--- a/tests/test-ssh.t Wed Jan 09 20:00:35 2019 -0800
+++ b/tests/test-ssh.t Fri Jan 18 13:28:22 2019 -0500
@@ -513,9 +513,9 @@
devel-peer-request: pairs: 81 bytes
sending hello command
sending between command
- remote: 427 (sshv1 !)
+ remote: 440 (sshv1 !)
protocol upgraded to exp-ssh-v2-0003 (sshv2 !)
- remote: capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
+ remote: capabilities: batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset getbundle known lookup protocaps pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
remote: 1 (sshv1 !)
devel-peer-request: protocaps
devel-peer-request: caps: * bytes (glob)
--- a/tests/test-sshserver.py Wed Jan 09 20:00:35 2019 -0800
+++ b/tests/test-sshserver.py Fri Jan 18 13:28:22 2019 -0500
@@ -47,6 +47,12 @@
self.fout = io.BytesIO()
self.ferr = io.BytesIO()
+ def protectfinout(self):
+ return self.fin, self.fout
+
+ def restorefinout(self, fin, fout):
+ pass
+
if __name__ == '__main__':
# Don't call into msvcrt to set BytesIO to binary mode
procutil.setbinary = lambda fp: True
--- a/tests/test-stream-bundle-v2.t Wed Jan 09 20:00:35 2019 -0800
+++ b/tests/test-stream-bundle-v2.t Fri Jan 18 13:28:22 2019 -0500
@@ -46,9 +46,9 @@
$ hg bundle -a --type="none-v2;stream=v2" bundle.hg
$ hg debugbundle bundle.hg
Stream params: {}
- stream2 -- {bytecount: 1693, filecount: 11, requirements: dotencode%2Cfncache%2Cgeneraldelta%2Crevlogv1%2Cstore} (mandatory: True)
+ stream2 -- {bytecount: 1693, filecount: 11, requirements: dotencode%2Cfncache%2Cgeneraldelta%2Crevlogv1%2Csparserevlog%2Cstore} (mandatory: True)
$ hg debugbundle --spec bundle.hg
- none-v2;stream=v2;requirements%3Ddotencode%2Cfncache%2Cgeneraldelta%2Crevlogv1%2Cstore
+ none-v2;stream=v2;requirements%3Ddotencode%2Cfncache%2Cgeneraldelta%2Crevlogv1%2Csparserevlog%2Cstore
Test that we can apply the bundle as a stream clone bundle
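
The requirements list inside a stream bundle spec is percent-encoded, which
is why sparserevlog surfaces as "%2Csparserevlog" above: "," becomes %2C and
"=" becomes %3D. The same encoding can be reproduced with Python 3's
standard library (a stand-in here, not the helper Mercurial itself uses):

  >>> from urllib.parse import quote
  >>> quote("dotencode,fncache,generaldelta,revlogv1,sparserevlog,store",
  ...       safe='')
  'dotencode%2Cfncache%2Cgeneraldelta%2Crevlogv1%2Csparserevlog%2Cstore'
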
--- a/tests/test-subrepo-recursion.t Wed Jan 09 20:00:35 2019 -0800
+++ b/tests/test-subrepo-recursion.t Fri Jan 18 13:28:22 2019 -0500
@@ -559,17 +559,18 @@
commit: (clean)
update: 4 new changesets (update)
-Sharing a local repo without the locally referenced subrepo (i.e. it was never
-updated from null), fails the same as a clone operation.
+Sharing a local repo with missing local subrepos (i.e. they were never
+updated from null) works because the default path is copied from the source
+repo, whereas a clone should fail.
$ hg --config progress.disable=True clone -U ../empty ../empty2
$ hg --config extensions.share= --config progress.disable=True \
> share ../empty2 ../empty_share
updating working directory
- sharing subrepo foo from $TESTTMP/empty2/foo
- abort: repository $TESTTMP/empty2/foo not found!
- [255]
+ sharing subrepo foo from $TESTTMP/empty/foo
+ sharing subrepo foo/bar from $TESTTMP/empty/foo/bar
+ 3 files updated, 0 files merged, 0 files removed, 0 files unresolved
$ hg --config progress.disable=True clone ../empty2 ../empty_clone
updating to branch default
--- a/tests/test-subrepo.t Wed Jan 09 20:00:35 2019 -0800
+++ b/tests/test-subrepo.t Fri Jan 18 13:28:22 2019 -0500
@@ -1242,19 +1242,21 @@
../shared/subrepo-1/.hg/hgrc
../shared/subrepo-1/.hg/requires
../shared/subrepo-1/.hg/sharedpath
+ ../shared/subrepo-1/.hg/wcache
../shared/subrepo-2
../shared/subrepo-2/.hg
../shared/subrepo-2/.hg/branch
../shared/subrepo-2/.hg/cache
- ../shared/subrepo-2/.hg/cache/checkisexec (execbit !)
- ../shared/subrepo-2/.hg/cache/checklink (symlink !)
- ../shared/subrepo-2/.hg/cache/checklink-target (symlink !)
../shared/subrepo-2/.hg/cache/storehash
../shared/subrepo-2/.hg/cache/storehash/* (glob)
../shared/subrepo-2/.hg/dirstate
../shared/subrepo-2/.hg/hgrc
../shared/subrepo-2/.hg/requires
../shared/subrepo-2/.hg/sharedpath
+ ../shared/subrepo-2/.hg/wcache
+ ../shared/subrepo-2/.hg/wcache/checkisexec (execbit !)
+ ../shared/subrepo-2/.hg/wcache/checklink (symlink !)
+ ../shared/subrepo-2/.hg/wcache/checklink-target (symlink !)
../shared/subrepo-2/file
$ hg -R ../shared in
abort: repository default not found!
--- a/tests/test-tags.t Wed Jan 09 20:00:35 2019 -0800
+++ b/tests/test-tags.t Fri Jan 18 13:28:22 2019 -0500
@@ -4,6 +4,8 @@
> [extensions]
> blackbox=
> mock=$TESTDIR/mockblackbox.py
+ > [blackbox]
+ > track = command, commandfinish, tagscache
> EOF
Helper functions:
@@ -698,9 +700,6 @@
$ ls tagsclient/.hg/cache
branch2-base
- checkisexec (execbit !)
- checklink (symlink !)
- checklink-target (symlink !)
hgtagsfnodes1
rbc-names-v1
rbc-revs-v1
@@ -725,9 +724,6 @@
$ ls tagsclient/.hg/cache
branch2-base
- checkisexec (execbit !)
- checklink (symlink !)
- checklink-target (symlink !)
hgtagsfnodes1
rbc-names-v1
rbc-revs-v1
--- a/tests/test-template-basic.t Wed Jan 09 20:00:35 2019 -0800
+++ b/tests/test-template-basic.t Fri Jan 18 13:28:22 2019 -0500
@@ -188,7 +188,8 @@
$ hg debugtemplate '{" "|separate}'
$ hg debugtemplate '{("not", "an", "argument", "list")|separate}'
- hg: parse error: unknown method 'list'
+ hg: parse error: can't use a list in this context
+ (check place of comma and parens)
[255]
Second branch starting at nullrev:
@@ -859,7 +860,7 @@
$ hg log -R a -r 8 --template '{join(files, ifeq(branch, "default", r"\x5c\x786e"))}\n'
fourth\x5c\x786esecond\x5c\x786ethird
- $ hg log -R a -r 3:4 --template '{rev}:{sub(if("1", "\x6e"), ifeq(branch, "foo", r"\x5c\x786e", "\x5c\x786e"), desc)}\n'
+ $ hg log -R a -r 3:4 --template '{rev}:{sub(if("1", "\x6e"), ifeq(branch, "foo", r"\\x5c\\x786e", "\x5c\x5c\x786e"), desc)}\n'
3:\x6eo user, \x6eo domai\x6e
4:\x5c\x786eew bra\x5c\x786ech
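
The doubled backslashes in the corrected sub() call follow Python's re.sub
replacement rules: in a replacement string a literal backslash must itself
be escaped, otherwise it starts a group reference. The expected output for
rev 4 (description "new branch") can be reproduced with re directly; this is
a simplification, since the templater adds its own layer of string decoding:

  >>> import re
  >>> print(re.sub('n', r'\\x5c\\x786e', 'new branch'))
  \x5c\x786eew bra\x5c\x786ech
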
--- a/tests/test-template-functions.t Wed Jan 09 20:00:35 2019 -0800
+++ b/tests/test-template-functions.t Fri Jan 18 13:28:22 2019 -0500
@@ -603,6 +603,51 @@
$ hg debugtemplate '{ifeq(0, 0, "", count(0))}'
$ hg debugtemplate '{ifeq(0, 1, count(0), "")}'
+Test search() function:
+
+ $ hg log -R a -r2 -T '{desc}\n'
+ no person
+
+ $ hg log -R a -r2 -T '{search(r"p.*", desc)}\n'
+ person
+
+ as bool
+
+ $ hg log -R a -r2 -T '{if(search(r"p.*", desc), "", "not ")}found\n'
+ found
+ $ hg log -R a -r2 -T '{if(search(r"q", desc), "", "not ")}found\n'
+ not found
+
+ match as json
+
+ $ hg log -R a -r2 -T '{search(r"(no) p.*", desc)|json}\n'
+ {"0": "no person", "1": "no"}
+ $ hg log -R a -r2 -T '{search(r"q", desc)|json}\n'
+ null
+
+ group reference
+
+ $ hg log -R a -r2 -T '{search(r"(no) (p.*)", desc) % "{1|upper} {2|hex}"}\n'
+ NO 706572736f6e
+ $ hg log -R a -r2 -T '{search(r"(?P<foo>[a-z]*)", desc) % "{foo}"}\n'
+ no
+ $ hg log -R a -r2 -T '{search(r"(?P<foo>[a-z]*)", desc).foo}\n'
+ no
+
+ group reference with no match
+
+ $ hg log -R a -r2 -T '{search(r"q", desc) % "match: {0}"}\n'
+
+
+ bad group names
+
+ $ hg log -R a -r2 -T '{search(r"(?P<0>.)", desc) % "{0}"}\n'
+ hg: parse error: search got an invalid pattern: (?P<0>.)
+ [255]
+ $ hg log -R a -r2 -T '{search(r"(?P<repo>.)", desc) % "{repo}"}\n'
+ hg: parse error: invalid group 'repo' in search pattern: (?P<repo>.)
+ [255]
+
Test the sub function of templating for expansion:
$ hg log -R latesttag -r 10 --template '{sub("[0-9]", "x", "{rev}")}\n'
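
A minimal sketch of the search() semantics exercised above, in plain Python
rather than Mercurial's implementation: the whole match is exposed as group
"0", positional groups by number and named groups by name; no match yields a
null-like value (rendered empty, or null through |json); and a pattern the
regexp engine rejects becomes a parse error. The extra check that a named
group does not collide with an existing template keyword (the 'repo' case)
is omitted here:

  import re

  def search(pattern, text):
      # sketch only: mirrors the behaviour shown in the tests above
      try:
          m = re.search(pattern, text)
      except re.error:
          raise ValueError("search got an invalid pattern: %s" % pattern)
      if m is None:
          return None  # renders as nothing; |json emits null
      mapping = {'0': m.group(0)}
      for i, group in enumerate(m.groups(), 1):
          mapping[str(i)] = group
      mapping.update(m.groupdict())
      return mapping
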
--- a/tests/test-template-keywords.t Wed Jan 09 20:00:35 2019 -0800
+++ b/tests/test-template-keywords.t Fri Jan 18 13:28:22 2019 -0500
@@ -836,6 +836,25 @@
0 default
1 foo
+p1/p2 keywords:
+
+ $ hg log -r4:7 -GT '{rev} p1:{p1} p2:{p2} p1.rev:{p1.rev} p2.node:{p2.node}\n'
+ o 7 p1:-1:000000000000 p2:-1:000000000000 p1.rev:-1 p2.node:0000000000000000000000000000000000000000
+
+ o 6 p1:5:13207e5a10d9 p2:4:bbe44766e73d p1.rev:5 p2.node:bbe44766e73d5f11ed2177f1838de10c53ef3e74
+ |\
+ | o 5 p1:3:10e46f2dcbf4 p2:-1:000000000000 p1.rev:3 p2.node:0000000000000000000000000000000000000000
+ | |
+ | ~
+ o 4 p1:3:10e46f2dcbf4 p2:-1:000000000000 p1.rev:3 p2.node:0000000000000000000000000000000000000000
+ |
+ ~
+
+TODO: no idea what should be displayed as a JSON representation
+ $ hg log -r6 -T 'p1:{p1|json}\np2:{p2|json}\n'
+ p1:{}
+ p2:{}
+
ui verbosity:
$ hg log -l1 -T '{verbosity}\n'
--- a/tests/test-treemanifest.t Wed Jan 09 20:00:35 2019 -0800
+++ b/tests/test-treemanifest.t Fri Jan 18 13:28:22 2019 -0500
@@ -833,9 +833,9 @@
Packed bundle
$ hg -R deeprepo debugcreatestreamclonebundle repo-packed.hg
writing 5330 bytes for 18 files
- bundle requirements: generaldelta, revlogv1, treemanifest
+ bundle requirements: generaldelta, revlogv1, sparserevlog, treemanifest
$ hg debugbundle --spec repo-packed.hg
- none-packed1;requirements%3Dgeneraldelta%2Crevlogv1%2Ctreemanifest
+ none-packed1;requirements%3Dgeneraldelta%2Crevlogv1%2Csparserevlog%2Ctreemanifest
#endif
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/tests/test-update-atomic.t Fri Jan 18 13:28:22 2019 -0500
@@ -0,0 +1,142 @@
+#require execbit unix-permissions
+
+Checking that experimental.update.atomic-file works.
+
+ $ cat > $TESTTMP/show_mode.py <<EOF
+ > from __future__ import print_function
+ > import sys
+ > import os
+ > from stat import ST_MODE
+ >
+ > for file_path in sys.argv[1:]:
+ > file_stat = os.stat(file_path)
+ > octal_mode = oct(file_stat[ST_MODE] & 0o777)
+ > print("%s:%s" % (file_path, octal_mode))
+ >
+ > EOF
+
+ $ hg init repo
+ $ cd repo
+
+ $ cat > .hg/showwrites.py <<EOF
+ > def uisetup(ui):
+ > from mercurial import vfs
+ > class newvfs(vfs.vfs):
+ > def __call__(self, *args, **kwargs):
+ > print('vfs open', args, sorted(list(kwargs.items())))
+ > return super(newvfs, self).__call__(*args, **kwargs)
+ > vfs.vfs = newvfs
+ > EOF
+
+ $ for v in a1 a2 b1 b2 c ro; do echo $v > $v; done
+ $ chmod +x b*
+ $ hg commit -Aqm _
+
+# We check that
+# - the changes are actually atomic
+# - that permissions are correct (all 4 cases of (executable before) * (executable after))
+# - that renames work, though they should be atomic anyway
+# - that it works when source files are read-only (but directories are read-write still)
+
+ $ for v in a1 a2 b1 b2 ro; do echo changed-$v > $v; done
+ $ chmod -x *1; chmod +x *2
+ $ hg rename c d
+ $ hg commit -qm _
+
+Check behavior without update.atomic-file
+
+ $ hg update -r 0 -q
+ $ hg update -r 1 --config extensions.showwrites=.hg/showwrites.py 2>&1 | grep "a1'.*wb"
+ ('vfs open', ('a1', 'wb'), [('atomictemp', False), ('backgroundclose', True)])
+
+ $ python $TESTTMP/show_mode.py *
+ a1:0644
+ a2:0755
+ b1:0644
+ b2:0755
+ d:0644
+ ro:0644
+
+Add a second revision for the ro file so we can test update when the file is
+present or not
+
+ $ echo "ro" > ro
+
+ $ hg commit -qm _
+
+Check behavior without update.atomic-file first
+
+ $ hg update -C -r 0 -q
+
+ $ hg update -r 1
+ 6 files updated, 0 files merged, 1 files removed, 0 files unresolved
+
+ $ python $TESTTMP/show_mode.py *
+ a1:0644
+ a2:0755
+ b1:0644
+ b2:0755
+ d:0644
+ ro:0644
+
+Manually reset the mode of the read-only file
+
+ $ chmod a-w ro
+
+ $ python $TESTTMP/show_mode.py ro
+ ro:0444
+
+Now that the file is present, try to update and check the file's permissions
+
+ $ hg up -r 2
+ 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
+
+ $ python $TESTTMP/show_mode.py ro
+ ro:0644
+
+# The file that was read-only is now writable under the default behavior
+
+Check behavior with update.atomic-file
+
+
+ $ cat >> .hg/hgrc <<EOF
+ > [experimental]
+ > update.atomic-file = true
+ > EOF
+
+ $ hg update -C -r 0 -q
+ $ hg update -r 1 --config extensions.showwrites=.hg/showwrites.py 2>&1 | grep "a1'.*wb"
+ ('vfs open', ('a1', 'wb'), [('atomictemp', True), ('backgroundclose', True)])
+ $ hg st -A --rev 1
+ C a1
+ C a2
+ C b1
+ C b2
+ C d
+ C ro
+
+Check the file permission after update
+ $ python $TESTTMP/show_mode.py *
+ a1:0644
+ a2:0755
+ b1:0644
+ b2:0755
+ d:0644
+ ro:0644
+
+Manually reset the mode of the read-only file
+
+ $ chmod a-w ro
+
+ $ python $TESTTMP/show_mode.py ro
+ ro:0444
+
+Now that the file is present, try to update and check the file's permissions
+
+ $ hg update -r 2 --traceback
+ 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
+
+ $ python $TESTTMP/show_mode.py ro
+ ro:0644
+
+# The behavior is the same as without atomic update
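
What the new test pins down: with experimental.update.atomic-file enabled,
working-copy files are written with atomictemp=True, i.e. to a temporary
file that is then renamed over the destination. Renaming needs write access
to the directory only, which is why the read-only 'ro' file is replaced
without error and comes back with fresh 0644 permissions in both modes. A
minimal sketch of that write pattern, assuming POSIX rename semantics (this
is not Mercurial's vfs code):

  import os
  import tempfile

  def atomic_write(path, data, mode=0o644):
      # write to a temp file in the same directory, then rename into place;
      # rename only needs directory write access, so a read-only target is
      # replaced, and the new file carries whatever mode we set here
      dirname = os.path.dirname(path) or '.'
      fd, tmp = tempfile.mkstemp(prefix='.atomic-', dir=dirname)
      try:
          with os.fdopen(fd, 'wb') as fp:
              fp.write(data)
          os.chmod(tmp, mode)
          os.rename(tmp, path)
      except Exception:
          os.unlink(tmp)
          raise
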
--- a/tests/test-upgrade-repo.t Wed Jan 09 20:00:35 2019 -0800
+++ b/tests/test-upgrade-repo.t Fri Jan 18 13:28:22 2019 -0500
@@ -56,7 +56,7 @@
fncache: yes
dotencode: yes
generaldelta: yes
- sparserevlog: no
+ sparserevlog: yes
plain-cl-delta: yes
compression: zlib
$ hg debugformat --verbose
@@ -64,23 +64,23 @@
fncache: yes yes yes
dotencode: yes yes yes
generaldelta: yes yes yes
- sparserevlog: no no no
+ sparserevlog: yes yes yes
plain-cl-delta: yes yes yes
compression: zlib zlib zlib
- $ hg debugformat --verbose --config format.usegfncache=no
+ $ hg debugformat --verbose --config format.usefncache=no
format-variant repo config default
- fncache: yes yes yes
- dotencode: yes yes yes
+ fncache: yes no yes
+ dotencode: yes no yes
generaldelta: yes yes yes
- sparserevlog: no no no
+ sparserevlog: yes yes yes
plain-cl-delta: yes yes yes
compression: zlib zlib zlib
- $ hg debugformat --verbose --config format.usegfncache=no --color=debug
+ $ hg debugformat --verbose --config format.usefncache=no --color=debug
format-variant repo config default
- [formatvariant.name.uptodate|fncache: ][formatvariant.repo.uptodate| yes][formatvariant.config.default| yes][formatvariant.default| yes]
- [formatvariant.name.uptodate|dotencode: ][formatvariant.repo.uptodate| yes][formatvariant.config.default| yes][formatvariant.default| yes]
+ [formatvariant.name.mismatchconfig|fncache: ][formatvariant.repo.mismatchconfig| yes][formatvariant.config.special| no][formatvariant.default| yes]
+ [formatvariant.name.mismatchconfig|dotencode: ][formatvariant.repo.mismatchconfig| yes][formatvariant.config.special| no][formatvariant.default| yes]
[formatvariant.name.uptodate|generaldelta: ][formatvariant.repo.uptodate| yes][formatvariant.config.default| yes][formatvariant.default| yes]
- [formatvariant.name.uptodate|sparserevlog: ][formatvariant.repo.uptodate| no][formatvariant.config.default| no][formatvariant.default| no]
+ [formatvariant.name.uptodate|sparserevlog: ][formatvariant.repo.uptodate| yes][formatvariant.config.default| yes][formatvariant.default| yes]
[formatvariant.name.uptodate|plain-cl-delta:][formatvariant.repo.uptodate| yes][formatvariant.config.default| yes][formatvariant.default| yes]
[formatvariant.name.uptodate|compression: ][formatvariant.repo.uptodate| zlib][formatvariant.config.default| zlib][formatvariant.default| zlib]
$ hg debugformat -Tjson
@@ -104,10 +104,10 @@
"repo": true
},
{
- "config": false,
- "default": false,
+ "config": true,
+ "default": true,
"name": "sparserevlog",
- "repo": false
+ "repo": true
},
{
"config": true,
@@ -127,21 +127,21 @@
performing an upgrade with "--run" will make the following changes:
requirements
- preserved: dotencode, fncache, generaldelta, revlogv1, store
+ preserved: dotencode, fncache, generaldelta, revlogv1, sparserevlog, store
additional optimizations are available by specifying "--optimize <name>":
- redeltaparent
+ re-delta-parent
deltas within internal storage will be recalculated to choose an optimal base revision where this was not already done; the size of the repository may shrink and various operations may become faster; the first time this optimization is performed could slow down upgrade execution considerably; subsequent invocations should not run noticeably slower
- redeltamultibase
+ re-delta-multibase
deltas within internal storage will be recalculated against multiple base revision and the smallest difference will be used; the size of the repository may shrink significantly when there are many merges; this optimization will slow down execution in proportion to the number of merges in the repository and the amount of files in the repository; this slow down should not be significant unless there are tens of thousands of files and thousands of merges
- redeltaall
+ re-delta-all
deltas within internal storage will always be recalculated without reusing prior deltas; this will likely make execution run several times slower; this optimization is typically not needed
- redeltafulladd
- every revision will be re-added as if it was new content. It will go through the full storage mechanism giving extensions a chance to process it (eg. lfs). This is similar to "redeltaall" but even slower since more logic is involved.
+ re-delta-fulladd
+ every revision will be re-added as if it was new content. It will go through the full storage mechanism giving extensions a chance to process it (eg. lfs). This is similar to "re-delta-all" but even slower since more logic is involved.
--optimize can be used to add optimizations
@@ -151,22 +151,53 @@
performing an upgrade with "--run" will make the following changes:
requirements
- preserved: dotencode, fncache, generaldelta, revlogv1, store
+ preserved: dotencode, fncache, generaldelta, revlogv1, sparserevlog, store
- redeltaparent
+ re-delta-parent
deltas within internal storage will choose a new base revision if needed
additional optimizations are available by specifying "--optimize <name>":
- redeltamultibase
+ re-delta-multibase
deltas within internal storage will be recalculated against multiple base revision and the smallest difference will be used; the size of the repository may shrink significantly when there are many merges; this optimization will slow down execution in proportion to the number of merges in the repository and the amount of files in the repository; this slow down should not be significant unless there are tens of thousands of files and thousands of merges
- redeltaall
+ re-delta-all
deltas within internal storage will always be recalculated without reusing prior deltas; this will likely make execution run several times slower; this optimization is typically not needed
- redeltafulladd
- every revision will be re-added as if it was new content. It will go through the full storage mechanism giving extensions a chance to process it (eg. lfs). This is similar to "redeltaall" but even slower since more logic is involved.
+ re-delta-fulladd
+ every revision will be re-added as if it was new content. It will go through the full storage mechanism giving extensions a chance to process it (eg. lfs). This is similar to "re-delta-all" but even slower since more logic is involved.
+
+
+modern form of the option
+
+ $ hg debugupgrade --optimize re-delta-parent
+ (no feature deficiencies found in existing repository)
+ performing an upgrade with "--run" will make the following changes:
+
+ requirements
+ preserved: dotencode, fncache, generaldelta, revlogv1, sparserevlog, store
+
+ re-delta-parent
+ deltas within internal storage will choose a new base revision if needed
+ additional optimizations are available by specifying "--optimize <name>":
+
+ re-delta-multibase
+ deltas within internal storage will be recalculated against multiple base revision and the smallest difference will be used; the size of the repository may shrink significantly when there are many merges; this optimization will slow down execution in proportion to the number of merges in the repository and the amount of files in the repository; this slow down should not be significant unless there are tens of thousands of files and thousands of merges
+
+ re-delta-all
+ deltas within internal storage will always be recalculated without reusing prior deltas; this will likely make execution run several times slower; this optimization is typically not needed
+
+ re-delta-fulladd
+ every revision will be re-added as if it was new content. It will go through the full storage mechanism giving extensions a chance to process it (eg. lfs). This is similar to "re-delta-all" but even slower since more logic is involved.
+
+
+unknown optimization:
+
+ $ hg debugupgrade --optimize foobar
+ abort: unknown optimization action requested: foobar
+ (run without arguments to see valid optimizations)
+ [255]
Various sub-optimal detections work
@@ -188,7 +219,7 @@
fncache: no yes yes
dotencode: no yes yes
generaldelta: no yes yes
- sparserevlog: no no no
+ sparserevlog: no yes yes
plain-cl-delta: yes yes yes
compression: zlib zlib zlib
$ hg debugformat --verbose --config format.usegeneraldelta=no
@@ -196,7 +227,7 @@
fncache: no yes yes
dotencode: no yes yes
generaldelta: no no yes
- sparserevlog: no no no
+ sparserevlog: no no yes
plain-cl-delta: yes yes yes
compression: zlib zlib zlib
$ hg debugformat --verbose --config format.usegeneraldelta=no --color=debug
@@ -204,7 +235,7 @@
[formatvariant.name.mismatchconfig|fncache: ][formatvariant.repo.mismatchconfig| no][formatvariant.config.default| yes][formatvariant.default| yes]
[formatvariant.name.mismatchconfig|dotencode: ][formatvariant.repo.mismatchconfig| no][formatvariant.config.default| yes][formatvariant.default| yes]
[formatvariant.name.mismatchdefault|generaldelta: ][formatvariant.repo.mismatchdefault| no][formatvariant.config.special| no][formatvariant.default| yes]
- [formatvariant.name.uptodate|sparserevlog: ][formatvariant.repo.uptodate| no][formatvariant.config.default| no][formatvariant.default| no]
+ [formatvariant.name.mismatchdefault|sparserevlog: ][formatvariant.repo.mismatchdefault| no][formatvariant.config.special| no][formatvariant.default| yes]
[formatvariant.name.uptodate|plain-cl-delta:][formatvariant.repo.uptodate| yes][formatvariant.config.default| yes][formatvariant.default| yes]
[formatvariant.name.uptodate|compression: ][formatvariant.repo.uptodate| zlib][formatvariant.config.default| zlib][formatvariant.default| zlib]
$ hg debugupgraderepo
@@ -219,12 +250,15 @@
generaldelta
deltas within internal storage are unable to choose optimal revisions; repository is larger and slower than it could be; interaction with other repositories may require extra network and CPU resources, making "hg push" and "hg pull" slower
+ sparserevlog
+ in order to limit disk reading and memory usage on older version, the span of a delta chain from its root to its end is limited, whatever the relevant data in this span. This can severly limit Mercurial ability to build good chain of delta resulting is much more storage space being taken and limit reusability of on disk delta during exchange.
+
performing an upgrade with "--run" will make the following changes:
requirements
preserved: revlogv1, store
- added: dotencode, fncache, generaldelta
+ added: dotencode, fncache, generaldelta, sparserevlog
fncache
repository will be more resilient to storing certain paths and performance of certain operations should be improved
@@ -235,19 +269,22 @@
generaldelta
repository storage will be able to create optimal deltas; new repository data will be smaller and read times should decrease; interacting with other repositories using this storage model should require less network and CPU resources, making "hg push" and "hg pull" faster
+ sparserevlog
+ Revlog supports delta chain with more unused data between payload. These gaps will be skipped at read time. This allows for better delta chains, making a better compression and faster exchange with server.
+
additional optimizations are available by specifying "--optimize <name>":
- redeltaparent
+ re-delta-parent
deltas within internal storage will be recalculated to choose an optimal base revision where this was not already done; the size of the repository may shrink and various operations may become faster; the first time this optimization is performed could slow down upgrade execution considerably; subsequent invocations should not run noticeably slower
- redeltamultibase
+ re-delta-multibase
deltas within internal storage will be recalculated against multiple base revision and the smallest difference will be used; the size of the repository may shrink significantly when there are many merges; this optimization will slow down execution in proportion to the number of merges in the repository and the amount of files in the repository; this slow down should not be significant unless there are tens of thousands of files and thousands of merges
- redeltaall
+ re-delta-all
deltas within internal storage will always be recalculated without reusing prior deltas; this will likely make execution run several times slower; this optimization is typically not needed
- redeltafulladd
- every revision will be re-added as if it was new content. It will go through the full storage mechanism giving extensions a chance to process it (eg. lfs). This is similar to "redeltaall" but even slower since more logic is involved.
+ re-delta-fulladd
+ every revision will be re-added as if it was new content. It will go through the full storage mechanism giving extensions a chance to process it (eg. lfs). This is similar to "re-delta-all" but even slower since more logic is involved.
$ hg --config format.dotencode=false debugupgraderepo
@@ -259,6 +296,9 @@
generaldelta
deltas within internal storage are unable to choose optimal revisions; repository is larger and slower than it could be; interaction with other repositories may require extra network and CPU resources, making "hg push" and "hg pull" slower
+ sparserevlog
+ in order to limit disk reading and memory usage on older version, the span of a delta chain from its root to its end is limited, whatever the relevant data in this span. This can severly limit Mercurial ability to build good chain of delta resulting is much more storage space being taken and limit reusability of on disk delta during exchange.
+
repository lacks features used by the default config options:
dotencode
@@ -269,7 +309,7 @@
requirements
preserved: revlogv1, store
- added: fncache, generaldelta
+ added: fncache, generaldelta, sparserevlog
fncache
repository will be more resilient to storing certain paths and performance of certain operations should be improved
@@ -277,19 +317,22 @@
generaldelta
repository storage will be able to create optimal deltas; new repository data will be smaller and read times should decrease; interacting with other repositories using this storage model should require less network and CPU resources, making "hg push" and "hg pull" faster
+ sparserevlog
+ Revlog supports delta chain with more unused data between payload. These gaps will be skipped at read time. This allows for better delta chains, making a better compression and faster exchange with server.
+
additional optimizations are available by specifying "--optimize <name>":
- redeltaparent
+ re-delta-parent
deltas within internal storage will be recalculated to choose an optimal base revision where this was not already done; the size of the repository may shrink and various operations may become faster; the first time this optimization is performed could slow down upgrade execution considerably; subsequent invocations should not run noticeably slower
- redeltamultibase
+ re-delta-multibase
deltas within internal storage will be recalculated against multiple base revision and the smallest difference will be used; the size of the repository may shrink significantly when there are many merges; this optimization will slow down execution in proportion to the number of merges in the repository and the amount of files in the repository; this slow down should not be significant unless there are tens of thousands of files and thousands of merges
- redeltaall
+ re-delta-all
deltas within internal storage will always be recalculated without reusing prior deltas; this will likely make execution run several times slower; this optimization is typically not needed
- redeltafulladd
- every revision will be re-added as if it was new content. It will go through the full storage mechanism giving extensions a chance to process it (eg. lfs). This is similar to "redeltaall" but even slower since more logic is involved.
+ re-delta-fulladd
+ every revision will be re-added as if it was new content. It will go through the full storage mechanism giving extensions a chance to process it (eg. lfs). This is similar to "re-delta-all" but even slower since more logic is involved.
$ cd ..
@@ -301,7 +344,7 @@
upgrade will perform the following actions:
requirements
- preserved: dotencode, fncache, generaldelta, revlogv1, store
+ preserved: dotencode, fncache, generaldelta, revlogv1, sparserevlog, store
beginning upgrade...
repository locked and read-only
@@ -330,7 +373,7 @@
$ touch f2
$ hg -q commit -A -m 'add f2'
- $ hg debugupgraderepo --run
+ $ hg debugupgraderepo --run --config format.sparse-revlog=false
upgrade will perform the following actions:
requirements
@@ -410,6 +453,8 @@
old store should be backed up
+ $ ls -d .hg/upgradebackup.*/
+ .hg/upgradebackup.*/ (glob)
$ ls .hg/upgradebackup.*/store
00changelog.i
00manifest.i
@@ -421,8 +466,47 @@
undo.backupfiles
undo.phaseroots
+unless --no-backup is passed
+
+ $ rm -rf .hg/upgradebackup.*/
+ $ hg debugupgraderepo --run --no-backup
+ upgrade will perform the following actions:
+
+ requirements
+ preserved: dotencode, fncache, generaldelta, revlogv1, store
+ added: sparserevlog
+
+ sparserevlog
+ Revlog supports delta chain with more unused data between payload. These gaps will be skipped at read time. This allows for better delta chains, making a better compression and faster exchange with server.
+
+ beginning upgrade...
+ repository locked and read-only
+ creating temporary repository to stage migrated data: $TESTTMP/upgradegd/.hg/upgrade.* (glob)
+ (it is safe to interrupt this process any time before data migration completes)
+ migrating 9 total revisions (3 in filelogs, 3 in manifests, 3 in changelog)
+ migrating 917 bytes in store; 401 bytes tracked data
+ migrating 3 filelogs containing 3 revisions (192 bytes in store; 0 bytes tracked data)
+ finished migrating 3 filelog revisions across 3 filelogs; change in size: 0 bytes
+ migrating 1 manifests containing 3 revisions (349 bytes in store; 220 bytes tracked data)
+ finished migrating 3 manifest revisions across 1 manifests; change in size: 0 bytes
+ migrating changelog containing 3 revisions (376 bytes in store; 181 bytes tracked data)
+ finished migrating 3 changelog revisions; change in size: 0 bytes
+ finished migrating 9 total revisions; total change in store size: 0 bytes
+ copying phaseroots
+ data fully migrated to temporary repository
+ marking source repository as being upgraded; clients will be unable to read from repository
+ starting in-place swap of repository data
+ replaced files will be backed up at $TESTTMP/upgradegd/.hg/upgradebackup.* (glob)
+ replacing store...
+ store replacement complete; repository was inconsistent for 0.0s
+ finalizing requirements file and making repository readable again
+ removing old repository content$TESTTMP/upgradegd/.hg/upgradebackup.* (glob)
+ removing temporary repository $TESTTMP/upgradegd/.hg/upgrade.* (glob)
+ $ ls -1 .hg/ | grep upgradebackup
+ [1]
$ cd ..
+
store files with special filenames aren't encoded during copy
$ hg init store-filenames
@@ -435,7 +519,7 @@
upgrade will perform the following actions:
requirements
- preserved: dotencode, fncache, generaldelta, revlogv1, store
+ preserved: dotencode, fncache, generaldelta, revlogv1, sparserevlog, store
beginning upgrade...
repository locked and read-only
@@ -466,9 +550,9 @@
upgrade will perform the following actions:
requirements
- preserved: dotencode, fncache, generaldelta, revlogv1, store
+ preserved: dotencode, fncache, generaldelta, revlogv1, sparserevlog, store
- redeltafulladd
+ re-delta-fulladd
each revision will be added as new content to the internal storage; this will likely drastically slow down execution time, but some extensions might need it
beginning upgrade...
@@ -497,6 +581,11 @@
copy of old repository backed up at $TESTTMP/store-filenames/.hg/upgradebackup.* (glob)
the old repository will not be deleted; remove it to free up disk space once the upgraded repository is verified
+fncache is valid after upgrade
+
+ $ hg debugrebuildfncache
+ fncache already up to date
+
$ cd ..
Check upgrading a large file repository
@@ -518,13 +607,14 @@
generaldelta
largefiles
revlogv1
+ sparserevlog
store
$ hg debugupgraderepo --run
upgrade will perform the following actions:
requirements
- preserved: dotencode, fncache, generaldelta, largefiles, revlogv1, store
+ preserved: dotencode, fncache, generaldelta, largefiles, revlogv1, sparserevlog, store
beginning upgrade...
repository locked and read-only
@@ -556,6 +646,7 @@
generaldelta
largefiles
revlogv1
+ sparserevlog
store
$ cat << EOF >> .hg/hgrc
@@ -576,7 +667,7 @@
upgrade will perform the following actions:
requirements
- preserved: dotencode, fncache, generaldelta, largefiles, lfs, revlogv1, store
+ preserved: dotencode, fncache, generaldelta, largefiles, lfs, revlogv1, sparserevlog, store
beginning upgrade...
repository locked and read-only
@@ -662,18 +753,18 @@
$ hg config format
format.maxchainlen=9001
$ hg debugdeltachain file
- rev chain# chainlen prev delta size rawsize chainsize ratio lindist extradist extraratio
- 0 1 1 -1 base 77 182 77 0.42308 77 0 0.00000
- 1 1 2 0 p1 21 191 98 0.51309 98 0 0.00000
- 2 2 1 -1 base 84 200 84 0.42000 84 0 0.00000
+ rev chain# chainlen prev delta size rawsize chainsize ratio lindist extradist extraratio readsize largestblk rddensity srchunks
+ 0 1 1 -1 base 77 182 77 0.42308 77 0 0.00000 77 77 1.00000 1
+ 1 1 2 0 p1 21 191 98 0.51309 98 0 0.00000 98 98 1.00000 1
+ 2 1 2 0 other 30 200 107 0.53500 128 21 0.19626 128 128 0.83594 1
$ hg debugupgraderepo --run --optimize redeltaall
upgrade will perform the following actions:
requirements
- preserved: dotencode, fncache, generaldelta, revlogv1, store
+ preserved: dotencode, fncache, generaldelta, revlogv1, sparserevlog, store
- redeltaall
+ re-delta-all
deltas within internal storage will be fully recomputed; this will likely drastically slow down execution time
beginning upgrade...
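
The four columns added to the debugdeltachain output describe read efficiency: readsize is the total number of bytes that must be read from disk to reconstruct a revision, largestblk the largest contiguous block among those reads, rddensity the share of the bytes read that actually belong to the delta chain, and srchunks the number of separate read chunks. Note also that rev 2 now stores its delta against a base that is neither a parent nor the previous revision (reported as 'other'), which is exactly what sparse-revlog permits. The density figure is easy to verify from the table:

    # rddensity is chainsize / readsize; 1.0 means no wasted reads
    chainsize, readsize = 107, 128   # rev 2 in the table above
    assert round(chainsize / readsize, 5) == 0.83594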
@@ -681,14 +772,14 @@
creating temporary repository to stage migrated data: $TESTTMP/localconfig/.hg/upgrade.* (glob)
(it is safe to interrupt this process any time before data migration completes)
migrating 9 total revisions (3 in filelogs, 3 in manifests, 3 in changelog)
- migrating 1.05 KB in store; 882 bytes tracked data
- migrating 1 filelogs containing 3 revisions (374 bytes in store; 573 bytes tracked data)
- finished migrating 3 filelog revisions across 1 filelogs; change in size: -63 bytes
+ migrating 1019 bytes in store; 882 bytes tracked data
+ migrating 1 filelogs containing 3 revisions (320 bytes in store; 573 bytes tracked data)
+ finished migrating 3 filelog revisions across 1 filelogs; change in size: -9 bytes
migrating 1 manifests containing 3 revisions (333 bytes in store; 138 bytes tracked data)
finished migrating 3 manifest revisions across 1 manifests; change in size: 0 bytes
migrating changelog containing 3 revisions (366 bytes in store; 171 bytes tracked data)
finished migrating 3 changelog revisions; change in size: 0 bytes
- finished migrating 9 total revisions; total change in store size: -63 bytes
+ finished migrating 9 total revisions; total change in store size: -9 bytes
copying phaseroots
data fully migrated to temporary repository
marking source repository as being upgraded; clients will be unable to read from repository
@@ -701,10 +792,10 @@
copy of old repository backed up at $TESTTMP/localconfig/.hg/upgradebackup.* (glob)
the old repository will not be deleted; remove it to free up disk space once the upgraded repository is verified
$ hg debugdeltachain file
- rev chain# chainlen prev delta size rawsize chainsize ratio lindist extradist extraratio
- 0 1 1 -1 base 77 182 77 0.42308 77 0 0.00000
- 1 1 2 0 p1 21 191 98 0.51309 98 0 0.00000
- 2 1 3 1 p1 21 200 119 0.59500 119 0 0.00000
+ rev chain# chainlen prev delta size rawsize chainsize ratio lindist extradist extraratio readsize largestblk rddensity srchunks
+ 0 1 1 -1 base 77 182 77 0.42308 77 0 0.00000 77 77 1.00000 1
+ 1 1 2 0 p1 21 191 98 0.51309 98 0 0.00000 98 98 1.00000 1
+ 2 1 3 1 p1 21 200 119 0.59500 119 0 0.00000 119 119 1.00000 1
$ cd ..
$ cat << EOF >> $HGRCPATH
@@ -715,7 +806,7 @@
Check upgrading a sparse-revlog repository
------------------------------------------
- $ hg init sparserevlogrepo
+ $ hg init sparserevlogrepo --config format.sparse-revlog=no
$ cd sparserevlogrepo
$ touch foo
$ hg add foo
--- a/tests/test-walk.t Wed Jan 09 20:00:35 2019 -0800
+++ b/tests/test-walk.t Fri Jan 18 13:28:22 2019 -0500
@@ -46,7 +46,7 @@
f mammals/skunk mammals/skunk
$ hg debugwalk -v -I.
* matcher:
- <includematcher includes='(?:)'>
+ <includematcher includes=''>
f beans/black beans/black
f beans/borlotti beans/borlotti
f beans/kidney beans/kidney
@@ -82,7 +82,7 @@
* matcher:
<differencematcher
m1=<alwaysmatcher>,
- m2=<includematcher includes='(?:beans(?:/|$))'>>
+ m2=<includematcher includes='beans(?:/|$)'>>
f fennel ../fennel
f fenugreek ../fenugreek
f fiddlehead ../fiddlehead
@@ -92,39 +92,39 @@
f mammals/skunk skunk
$ hg debugwalk -v -I '*k'
* matcher:
- <includematcher includes='(?:mammals/[^/]*k(?:/|$))'>
+ <includematcher includes='mammals/[^/]*k(?:/|$)'>
f mammals/skunk skunk
$ hg debugwalk -v -I 'glob:*k'
* matcher:
- <includematcher includes='(?:mammals/[^/]*k(?:/|$))'>
+ <includematcher includes='mammals/[^/]*k(?:/|$)'>
f mammals/skunk skunk
$ hg debugwalk -v -I 'relglob:*k'
* matcher:
- <includematcher includes='(?:(?:|.*/)[^/]*k(?:/|$))'>
+ <includematcher includes='(?:|.*/)[^/]*k(?:/|$)'>
f beans/black ../beans/black
f fenugreek ../fenugreek
f mammals/skunk skunk
$ hg debugwalk -v -I 'relglob:*k' .
* matcher:
<intersectionmatcher
- m1=<patternmatcher patterns='(?:mammals(?:/|$))'>,
- m2=<includematcher includes='(?:(?:|.*/)[^/]*k(?:/|$))'>>
+ m1=<patternmatcher patterns='mammals(?:/|$)'>,
+ m2=<includematcher includes='(?:|.*/)[^/]*k(?:/|$)'>>
f mammals/skunk skunk
$ hg debugwalk -v -I 're:.*k$'
* matcher:
- <includematcher includes='(?:.*k$)'>
+ <includematcher includes='.*k$'>
f beans/black ../beans/black
f fenugreek ../fenugreek
f mammals/skunk skunk
$ hg debugwalk -v -I 'relre:.*k$'
* matcher:
- <includematcher includes='(?:.*.*k$)'>
+ <includematcher includes='.*.*k$'>
f beans/black ../beans/black
f fenugreek ../fenugreek
f mammals/skunk skunk
$ hg debugwalk -v -I 'path:beans'
* matcher:
- <includematcher includes='(?:beans(?:/|$))'>
+ <includematcher includes='beans(?:/|$)'>
f beans/black ../beans/black
f beans/borlotti ../beans/borlotti
f beans/kidney ../beans/kidney
@@ -133,7 +133,7 @@
f beans/turtle ../beans/turtle
$ hg debugwalk -v -I 'relpath:detour/../../beans'
* matcher:
- <includematcher includes='(?:beans(?:/|$))'>
+ <includematcher includes='beans(?:/|$)'>
f beans/black ../beans/black
f beans/borlotti ../beans/borlotti
f beans/kidney ../beans/kidney
@@ -246,21 +246,21 @@
$ hg debugwalk -v .
* matcher:
- <patternmatcher patterns='(?:mammals(?:/|$))'>
+ <patternmatcher patterns='mammals(?:/|$)'>
f mammals/Procyonidae/cacomistle Procyonidae/cacomistle
f mammals/Procyonidae/coatimundi Procyonidae/coatimundi
f mammals/Procyonidae/raccoon Procyonidae/raccoon
f mammals/skunk skunk
$ hg debugwalk -v -I.
* matcher:
- <includematcher includes='(?:mammals(?:/|$))'>
+ <includematcher includes='mammals(?:/|$)'>
f mammals/Procyonidae/cacomistle Procyonidae/cacomistle
f mammals/Procyonidae/coatimundi Procyonidae/coatimundi
f mammals/Procyonidae/raccoon Procyonidae/raccoon
f mammals/skunk skunk
$ hg debugwalk -v Procyonidae
* matcher:
- <patternmatcher patterns='(?:mammals/Procyonidae(?:/|$))'>
+ <patternmatcher patterns='mammals/Procyonidae(?:/|$)'>
f mammals/Procyonidae/cacomistle Procyonidae/cacomistle
f mammals/Procyonidae/coatimundi Procyonidae/coatimundi
f mammals/Procyonidae/raccoon Procyonidae/raccoon
@@ -268,13 +268,13 @@
$ cd Procyonidae
$ hg debugwalk -v .
* matcher:
- <patternmatcher patterns='(?:mammals/Procyonidae(?:/|$))'>
+ <patternmatcher patterns='mammals/Procyonidae(?:/|$)'>
f mammals/Procyonidae/cacomistle cacomistle
f mammals/Procyonidae/coatimundi coatimundi
f mammals/Procyonidae/raccoon raccoon
$ hg debugwalk -v ..
* matcher:
- <patternmatcher patterns='(?:mammals(?:/|$))'>
+ <patternmatcher patterns='mammals(?:/|$)'>
f mammals/Procyonidae/cacomistle cacomistle
f mammals/Procyonidae/coatimundi coatimundi
f mammals/Procyonidae/raccoon raccoon
@@ -283,7 +283,7 @@
$ hg debugwalk -v ../beans
* matcher:
- <patternmatcher patterns='(?:beans(?:/|$))'>
+ <patternmatcher patterns='beans(?:/|$)'>
f beans/black ../beans/black
f beans/borlotti ../beans/borlotti
f beans/kidney ../beans/kidney
@@ -292,7 +292,7 @@
f beans/turtle ../beans/turtle
$ hg debugwalk -v .
* matcher:
- <patternmatcher patterns='(?:mammals(?:/|$))'>
+ <patternmatcher patterns='mammals(?:/|$)'>
f mammals/Procyonidae/cacomistle Procyonidae/cacomistle
f mammals/Procyonidae/coatimundi Procyonidae/coatimundi
f mammals/Procyonidae/raccoon Procyonidae/raccoon
@@ -307,7 +307,7 @@
$ hg debugwalk -v -Ibeans
* matcher:
- <includematcher includes='(?:beans(?:/|$))'>
+ <includematcher includes='beans(?:/|$)'>
f beans/black beans/black
f beans/borlotti beans/borlotti
f beans/kidney beans/kidney
@@ -316,39 +316,39 @@
f beans/turtle beans/turtle
$ hg debugwalk -v -I '{*,{b,m}*/*}k'
* matcher:
- <includematcher includes='(?:(?:[^/]*|(?:b|m)[^/]*/[^/]*)k(?:/|$))'>
+ <includematcher includes='(?:[^/]*|(?:b|m)[^/]*/[^/]*)k(?:/|$)'>
f beans/black beans/black
f fenugreek fenugreek
f mammals/skunk mammals/skunk
$ hg debugwalk -v -Ibeans mammals
* matcher:
<intersectionmatcher
- m1=<patternmatcher patterns='(?:mammals(?:/|$))'>,
- m2=<includematcher includes='(?:beans(?:/|$))'>>
+ m1=<patternmatcher patterns='mammals(?:/|$)'>,
+ m2=<includematcher includes='beans(?:/|$)'>>
$ hg debugwalk -v -Inon-existent
* matcher:
- <includematcher includes='(?:non\\-existent(?:/|$))'>
+ <includematcher includes='non\\-existent(?:/|$)'>
$ hg debugwalk -v -Inon-existent -Ibeans/black
* matcher:
- <includematcher includes='(?:non\\-existent(?:/|$)|beans/black(?:/|$))'>
+ <includematcher includes='non\\-existent(?:/|$)|beans/black(?:/|$)'>
f beans/black beans/black
$ hg debugwalk -v -Ibeans beans/black
* matcher:
<intersectionmatcher
- m1=<patternmatcher patterns='(?:beans/black(?:/|$))'>,
- m2=<includematcher includes='(?:beans(?:/|$))'>>
+ m1=<patternmatcher patterns='beans/black(?:/|$)'>,
+ m2=<includematcher includes='beans(?:/|$)'>>
f beans/black beans/black exact
$ hg debugwalk -v -Ibeans/black beans
* matcher:
<intersectionmatcher
- m1=<patternmatcher patterns='(?:beans(?:/|$))'>,
- m2=<includematcher includes='(?:beans/black(?:/|$))'>>
+ m1=<patternmatcher patterns='beans(?:/|$)'>,
+ m2=<includematcher includes='beans/black(?:/|$)'>>
f beans/black beans/black
$ hg debugwalk -v -Xbeans/black beans
* matcher:
<differencematcher
- m1=<patternmatcher patterns='(?:beans(?:/|$))'>,
- m2=<includematcher includes='(?:beans/black(?:/|$))'>>
+ m1=<patternmatcher patterns='beans(?:/|$)'>,
+ m2=<includematcher includes='beans/black(?:/|$)'>>
f beans/borlotti beans/borlotti
f beans/kidney beans/kidney
f beans/navy beans/navy
@@ -357,8 +357,8 @@
$ hg debugwalk -v -Xbeans/black -Ibeans
* matcher:
<differencematcher
- m1=<includematcher includes='(?:beans(?:/|$))'>,
- m2=<includematcher includes='(?:beans/black(?:/|$))'>>
+ m1=<includematcher includes='beans(?:/|$)'>,
+ m2=<includematcher includes='beans/black(?:/|$)'>>
f beans/borlotti beans/borlotti
f beans/kidney beans/kidney
f beans/navy beans/navy
@@ -367,37 +367,37 @@
$ hg debugwalk -v -Xbeans/black beans/black
* matcher:
<differencematcher
- m1=<patternmatcher patterns='(?:beans/black(?:/|$))'>,
- m2=<includematcher includes='(?:beans/black(?:/|$))'>>
+ m1=<patternmatcher patterns='beans/black(?:/|$)'>,
+ m2=<includematcher includes='beans/black(?:/|$)'>>
$ hg debugwalk -v -Xbeans/black -Ibeans/black
* matcher:
<differencematcher
- m1=<includematcher includes='(?:beans/black(?:/|$))'>,
- m2=<includematcher includes='(?:beans/black(?:/|$))'>>
+ m1=<includematcher includes='beans/black(?:/|$)'>,
+ m2=<includematcher includes='beans/black(?:/|$)'>>
$ hg debugwalk -v -Xbeans beans/black
* matcher:
<differencematcher
- m1=<patternmatcher patterns='(?:beans/black(?:/|$))'>,
- m2=<includematcher includes='(?:beans(?:/|$))'>>
+ m1=<patternmatcher patterns='beans/black(?:/|$)'>,
+ m2=<includematcher includes='beans(?:/|$)'>>
$ hg debugwalk -v -Xbeans -Ibeans/black
* matcher:
<differencematcher
- m1=<includematcher includes='(?:beans/black(?:/|$))'>,
- m2=<includematcher includes='(?:beans(?:/|$))'>>
+ m1=<includematcher includes='beans/black(?:/|$)'>,
+ m2=<includematcher includes='beans(?:/|$)'>>
$ hg debugwalk -v 'glob:mammals/../beans/b*'
* matcher:
- <patternmatcher patterns='(?:beans/b[^/]*$)'>
+ <patternmatcher patterns='beans/b[^/]*$'>
f beans/black beans/black
f beans/borlotti beans/borlotti
$ hg debugwalk -v '-X*/Procyonidae' mammals
* matcher:
<differencematcher
- m1=<patternmatcher patterns='(?:mammals(?:/|$))'>,
- m2=<includematcher includes='(?:[^/]*/Procyonidae(?:/|$))'>>
+ m1=<patternmatcher patterns='mammals(?:/|$)'>,
+ m2=<includematcher includes='[^/]*/Procyonidae(?:/|$)'>>
f mammals/skunk mammals/skunk
$ hg debugwalk -v path:mammals
* matcher:
- <patternmatcher patterns='(?:mammals(?:/|$))'>
+ <patternmatcher patterns='mammals(?:/|$)'>
f mammals/Procyonidae/cacomistle mammals/Procyonidae/cacomistle
f mammals/Procyonidae/coatimundi mammals/Procyonidae/coatimundi
f mammals/Procyonidae/raccoon mammals/Procyonidae/raccoon
@@ -426,29 +426,29 @@
$ hg debugwalk -v fennel -X fennel
* matcher:
<differencematcher
- m1=<patternmatcher patterns='(?:fennel(?:/|$))'>,
- m2=<includematcher includes='(?:fennel(?:/|$))'>>
+ m1=<patternmatcher patterns='fennel(?:/|$)'>,
+ m2=<includematcher includes='fennel(?:/|$)'>>
$ hg debugwalk -v fennel -X 'f*'
* matcher:
<differencematcher
- m1=<patternmatcher patterns='(?:fennel(?:/|$))'>,
- m2=<includematcher includes='(?:f[^/]*(?:/|$))'>>
+ m1=<patternmatcher patterns='fennel(?:/|$)'>,
+ m2=<includematcher includes='f[^/]*(?:/|$)'>>
$ hg debugwalk -v beans/black -X 'path:beans'
* matcher:
<differencematcher
- m1=<patternmatcher patterns='(?:beans/black(?:/|$))'>,
- m2=<includematcher includes='(?:beans(?:/|$))'>>
+ m1=<patternmatcher patterns='beans/black(?:/|$)'>,
+ m2=<includematcher includes='beans(?:/|$)'>>
$ hg debugwalk -v -I 'path:beans/black' -X 'path:beans'
* matcher:
<differencematcher
- m1=<includematcher includes='(?:beans/black(?:/|$))'>,
- m2=<includematcher includes='(?:beans(?:/|$))'>>
+ m1=<includematcher includes='beans/black(?:/|$)'>,
+ m2=<includematcher includes='beans(?:/|$)'>>
Test absolute paths:
$ hg debugwalk -v `pwd`/beans
* matcher:
- <patternmatcher patterns='(?:beans(?:/|$))'>
+ <patternmatcher patterns='beans(?:/|$)'>
f beans/black beans/black
f beans/borlotti beans/borlotti
f beans/kidney beans/kidney
@@ -463,7 +463,7 @@
$ hg debugwalk -v glob:\*
* matcher:
- <patternmatcher patterns='(?:[^/]*$)'>
+ <patternmatcher patterns='[^/]*$'>
f fennel fennel
f fenugreek fenugreek
f fiddlehead fiddlehead
@@ -474,22 +474,22 @@
warning: filename contains ':', which is reserved on Windows: 'glob:glob'
$ hg debugwalk -v glob:\*
* matcher:
- <patternmatcher patterns='(?:[^/]*$)'>
+ <patternmatcher patterns='[^/]*$'>
f fennel fennel
f fenugreek fenugreek
f fiddlehead fiddlehead
f glob:glob glob:glob
$ hg debugwalk -v glob:glob
* matcher:
- <patternmatcher patterns='(?:glob$)'>
+ <patternmatcher patterns='glob$'>
glob: $ENOENT$
$ hg debugwalk -v glob:glob:glob
* matcher:
- <patternmatcher patterns='(?:glob:glob$)'>
+ <patternmatcher patterns='glob:glob$'>
f glob:glob glob:glob exact
$ hg debugwalk -v path:glob:glob
* matcher:
- <patternmatcher patterns='(?:glob:glob(?:/|$))'>
+ <patternmatcher patterns='glob:glob(?:/|$)'>
f glob:glob glob:glob exact
$ rm glob:glob
$ hg addremove
@@ -498,45 +498,45 @@
$ hg debugwalk -v 'glob:**e'
* matcher:
- <patternmatcher patterns='(?:.*e$)'>
+ <patternmatcher patterns='.*e$'>
f beans/turtle beans/turtle
f mammals/Procyonidae/cacomistle mammals/Procyonidae/cacomistle
$ hg debugwalk -v 're:.*[kb]$'
* matcher:
- <patternmatcher patterns='(?:.*[kb]$)'>
+ <patternmatcher patterns='.*[kb]$'>
f beans/black beans/black
f fenugreek fenugreek
f mammals/skunk mammals/skunk
$ hg debugwalk -v path:beans/black
* matcher:
- <patternmatcher patterns='(?:beans/black(?:/|$))'>
+ <patternmatcher patterns='beans/black(?:/|$)'>
f beans/black beans/black exact
$ hg debugwalk -v path:beans//black
* matcher:
- <patternmatcher patterns='(?:beans/black(?:/|$))'>
+ <patternmatcher patterns='beans/black(?:/|$)'>
f beans/black beans/black exact
$ hg debugwalk -v relglob:Procyonidae
* matcher:
- <patternmatcher patterns='(?:(?:|.*/)Procyonidae$)'>
+ <patternmatcher patterns='(?:|.*/)Procyonidae$'>
$ hg debugwalk -v 'relglob:Procyonidae/**'
* matcher:
- <patternmatcher patterns='(?:(?:|.*/)Procyonidae/.*$)'>
+ <patternmatcher patterns='(?:|.*/)Procyonidae/.*$'>
f mammals/Procyonidae/cacomistle mammals/Procyonidae/cacomistle
f mammals/Procyonidae/coatimundi mammals/Procyonidae/coatimundi
f mammals/Procyonidae/raccoon mammals/Procyonidae/raccoon
$ hg debugwalk -v 'relglob:Procyonidae/**' fennel
* matcher:
- <patternmatcher patterns='(?:(?:|.*/)Procyonidae/.*$|fennel(?:/|$))'>
+ <patternmatcher patterns='(?:|.*/)Procyonidae/.*$|fennel(?:/|$)'>
f fennel fennel exact
f mammals/Procyonidae/cacomistle mammals/Procyonidae/cacomistle
f mammals/Procyonidae/coatimundi mammals/Procyonidae/coatimundi
f mammals/Procyonidae/raccoon mammals/Procyonidae/raccoon
$ hg debugwalk -v beans 'glob:beans/*'
* matcher:
- <patternmatcher patterns='(?:beans(?:/|$)|beans/[^/]*$)'>
+ <patternmatcher patterns='beans(?:/|$)|beans/[^/]*$'>
f beans/black beans/black
f beans/borlotti beans/borlotti
f beans/kidney beans/kidney
@@ -545,14 +545,14 @@
f beans/turtle beans/turtle
$ hg debugwalk -v 'glob:mamm**'
* matcher:
- <patternmatcher patterns='(?:mamm.*$)'>
+ <patternmatcher patterns='mamm.*$'>
f mammals/Procyonidae/cacomistle mammals/Procyonidae/cacomistle
f mammals/Procyonidae/coatimundi mammals/Procyonidae/coatimundi
f mammals/Procyonidae/raccoon mammals/Procyonidae/raccoon
f mammals/skunk mammals/skunk
$ hg debugwalk -v 'glob:mamm**' fennel
* matcher:
- <patternmatcher patterns='(?:mamm.*$|fennel(?:/|$))'>
+ <patternmatcher patterns='mamm.*$|fennel(?:/|$)'>
f fennel fennel exact
f mammals/Procyonidae/cacomistle mammals/Procyonidae/cacomistle
f mammals/Procyonidae/coatimundi mammals/Procyonidae/coatimundi
@@ -560,34 +560,34 @@
f mammals/skunk mammals/skunk
$ hg debugwalk -v 'glob:j*'
* matcher:
- <patternmatcher patterns='(?:j[^/]*$)'>
+ <patternmatcher patterns='j[^/]*$'>
$ hg debugwalk -v NOEXIST
* matcher:
- <patternmatcher patterns='(?:NOEXIST(?:/|$))'>
+ <patternmatcher patterns='NOEXIST(?:/|$)'>
NOEXIST: * (glob)
#if fifo
$ mkfifo fifo
$ hg debugwalk -v fifo
* matcher:
- <patternmatcher patterns='(?:fifo(?:/|$))'>
+ <patternmatcher patterns='fifo(?:/|$)'>
fifo: unsupported file type (type is fifo)
#endif
$ rm fenugreek
$ hg debugwalk -v fenugreek
* matcher:
- <patternmatcher patterns='(?:fenugreek(?:/|$))'>
+ <patternmatcher patterns='fenugreek(?:/|$)'>
f fenugreek fenugreek exact
$ hg rm fenugreek
$ hg debugwalk -v fenugreek
* matcher:
- <patternmatcher patterns='(?:fenugreek(?:/|$))'>
+ <patternmatcher patterns='fenugreek(?:/|$)'>
f fenugreek fenugreek exact
$ touch new
$ hg debugwalk -v new
* matcher:
- <patternmatcher patterns='(?:new(?:/|$))'>
+ <patternmatcher patterns='new(?:/|$)'>
f new new exact
$ mkdir ignored
@@ -595,10 +595,10 @@
$ echo '^ignored$' > .hgignore
$ hg debugwalk -v ignored
* matcher:
- <patternmatcher patterns='(?:ignored(?:/|$))'>
+ <patternmatcher patterns='ignored(?:/|$)'>
$ hg debugwalk -v ignored/file
* matcher:
- <patternmatcher patterns='(?:ignored/file(?:/|$))'>
+ <patternmatcher patterns='ignored/file(?:/|$)'>
f ignored/file ignored/file exact
Test listfile and listfile0
@@ -606,13 +606,13 @@
$ "$PYTHON" -c "open('listfile0', 'wb').write(b'fenugreek\0new\0')"
$ hg debugwalk -v -I 'listfile0:listfile0'
* matcher:
- <includematcher includes='(?:fenugreek(?:/|$)|new(?:/|$))'>
+ <includematcher includes='fenugreek(?:/|$)|new(?:/|$)'>
f fenugreek fenugreek
f new new
$ "$PYTHON" -c "open('listfile', 'wb').write(b'fenugreek\nnew\r\nmammals/skunk\n')"
$ hg debugwalk -v -I 'listfile:listfile'
* matcher:
- <includematcher includes='(?:fenugreek(?:/|$)|new(?:/|$)|mammals/skunk(?:/|$))'>
+ <includematcher includes='fenugreek(?:/|$)|new(?:/|$)|mammals/skunk(?:/|$)'>
f fenugreek fenugreek
f mammals/skunk mammals/skunk
f new new
@@ -620,17 +620,17 @@
$ cd ..
$ hg debugwalk -v -R t t/mammals/skunk
* matcher:
- <patternmatcher patterns='(?:mammals/skunk(?:/|$))'>
+ <patternmatcher patterns='mammals/skunk(?:/|$)'>
f mammals/skunk t/mammals/skunk exact
$ mkdir t2
$ cd t2
$ hg debugwalk -v -R ../t ../t/mammals/skunk
* matcher:
- <patternmatcher patterns='(?:mammals/skunk(?:/|$))'>
+ <patternmatcher patterns='mammals/skunk(?:/|$)'>
f mammals/skunk ../t/mammals/skunk exact
$ hg debugwalk -v --cwd ../t mammals/skunk
* matcher:
- <patternmatcher patterns='(?:mammals/skunk(?:/|$))'>
+ <patternmatcher patterns='mammals/skunk(?:/|$)'>
f mammals/skunk mammals/skunk exact
$ cd ..
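
The long run of test-walk.t churn above is cosmetic: matcher reprs no longer wrap their translated regex in an outer '(?:...)' group. The wrapper is redundant because every translated piece already keeps its alternation inside its own group, so pieces can be joined with a bare top-level '|' without changing what any of them matches. A small sketch of that invariant, reusing two patterns from the output above:

    import re

    # each piece confines '|' to a non-capturing group, so a top-level
    # join cannot bleed one alternative into another
    pieces = [r'non\-existent(?:/|$)', r'beans/black(?:/|$)']
    rx = re.compile('|'.join(pieces))

    assert rx.match('beans/black')
    assert rx.match('beans/black/extra')
    assert not rx.match('beans/borlotti')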
--- a/tests/test-wireproto-caching.t Wed Jan 09 20:00:35 2019 -0800
+++ b/tests/test-wireproto-caching.t Fri Jan 18 13:28:22 2019 -0500
@@ -432,7 +432,8 @@
]),
b'rawrepoformats': [
b'generaldelta',
- b'revlogv1'
+ b'revlogv1',
+ b'sparserevlog'
]
}
]
--- a/tests/test-wireproto-clientreactor.py Wed Jan 09 20:00:35 2019 -0800
+++ b/tests/test-wireproto-clientreactor.py Fri Jan 18 13:28:22 2019 -0500
@@ -1,5 +1,6 @@
from __future__ import absolute_import
+import sys
import unittest
import zlib
@@ -600,5 +601,10 @@
self.assertEqual(meta[b'data'], response2)
if __name__ == '__main__':
+ if (3, 6, 0) <= sys.version_info < (3, 6, 4):
+ # Python 3.6.0 through 3.6.3 inclusive shipped with
+ # https://bugs.python.org/issue31825 and we can't run these
+ # tests on those specific versions of Python. Sigh.
+ sys.exit(80)
import silenttestrunner
silenttestrunner.main(__name__)
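
The sys.exit(80) above is how a Python-based test opts out under Mercurial's harness: run-tests.py treats exit status 80 as "skipped" rather than "failed". The same gate, reduced to its essentials (a sketch of the pattern, not the test's literal code):

    import sys

    SKIPPED_STATUS = 80  # exit code run-tests.py reports as a skip

    if (3, 6, 0) <= sys.version_info < (3, 6, 4):
        # interpreter builds affected by the bug referenced above
        sys.exit(SKIPPED_STATUS)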
--- a/tests/test-wireproto-command-capabilities.t Wed Jan 09 20:00:35 2019 -0800
+++ b/tests/test-wireproto-command-capabilities.t Fri Jan 18 13:28:22 2019 -0500
@@ -34,7 +34,7 @@
s> Content-Type: application/mercurial-0.1\r\n
s> Content-Length: *\r\n (glob)
s> \r\n
- s> batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset compression=$BUNDLE2_COMPRESSIONS$ getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx known lookup pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
+ s> batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset compression=$BUNDLE2_COMPRESSIONS$ getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx known lookup pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
A proper request without the API server enabled returns the legacy response
@@ -59,7 +59,7 @@
s> Content-Type: application/mercurial-0.1\r\n
s> Content-Length: *\r\n (glob)
s> \r\n
- s> batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset compression=$BUNDLE2_COMPRESSIONS$ getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx known lookup pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
+ s> batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset compression=$BUNDLE2_COMPRESSIONS$ getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx known lookup pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
Restart with just API server enabled. This enables serving the new format.
@@ -95,7 +95,7 @@
s> Content-Type: application/mercurial-0.1\r\n
s> Content-Length: *\r\n (glob)
s> \r\n
- s> batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset compression=$BUNDLE2_COMPRESSIONS$ getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx known lookup pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
+ s> batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset compression=$BUNDLE2_COMPRESSIONS$ getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx known lookup pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
X-HgUpgrade-<N> without known serialization in X-HgProto-<N> uses legacy response
@@ -120,7 +120,7 @@
s> Content-Type: application/mercurial-0.1\r\n
s> Content-Length: *\r\n (glob)
s> \r\n
- s> batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset compression=$BUNDLE2_COMPRESSIONS$ getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx known lookup pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
+ s> batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset compression=$BUNDLE2_COMPRESSIONS$ getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx known lookup pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
X-HgUpgrade-<N> + X-HgProto-<N> headers trigger new response format
@@ -145,12 +145,12 @@
s> Content-Type: application/mercurial-cbor\r\n
s> Content-Length: *\r\n (glob)
s> \r\n
- s> \xa3GapibaseDapi/Dapis\xa0Nv1capabilitiesY\x01\xd3batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset compression=$BUNDLE2_COMPRESSIONS$ getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx known lookup pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
+ s> \xa3GapibaseDapi/Dapis\xa0Nv1capabilitiesY\x01\xe0batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset compression=$BUNDLE2_COMPRESSIONS$ getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx known lookup pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
cbor> [
{
b'apibase': b'api/',
b'apis': {},
- b'v1capabilities': b'batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset compression=$BUNDLE2_COMPRESSIONS$ getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx known lookup pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash'
+ b'v1capabilities': b'batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset compression=$BUNDLE2_COMPRESSIONS$ getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx known lookup pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash'
}
]
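
The opaque-looking hunk above is CBOR length bookkeeping: 0x59 ('Y') opens a byte string with a two-byte big-endian length, and the v1capabilities blob grew from 0x01d3 = 467 bytes to 0x01e0 = 480 bytes, exactly the 13 characters of ',sparserevlog' appended to streamreqs. A quick check:

    # 'Y' (0x59) is CBOR major type 2 (byte string) with a 16-bit length
    old = int.from_bytes(b'\x01\xd3', 'big')   # 467
    new = int.from_bytes(b'\x01\xe0', 'big')   # 480
    assert new - old == len(b',sparserevlog') == 13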
@@ -184,12 +184,12 @@
s> Content-Type: application/mercurial-cbor\r\n
s> Content-Length: *\r\n (glob)
s> \r\n
- s> \xa3GapibaseDapi/Dapis\xa0Nv1capabilitiesY\x01\xd3batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset compression=$BUNDLE2_COMPRESSIONS$ getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx known lookup pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
+ s> \xa3GapibaseDapi/Dapis\xa0Nv1capabilitiesY\x01\xe0batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset compression=$BUNDLE2_COMPRESSIONS$ getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx known lookup pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
cbor> [
{
b'apibase': b'api/',
b'apis': {},
- b'v1capabilities': b'batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset compression=$BUNDLE2_COMPRESSIONS$ getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx known lookup pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash'
+ b'v1capabilities': b'batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset compression=$BUNDLE2_COMPRESSIONS$ getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx known lookup pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash'
}
]
@@ -216,7 +216,7 @@
s> Content-Type: application/mercurial-cbor\r\n
s> Content-Length: *\r\n (glob)
s> \r\n
- s> \xa3GapibaseDapi/Dapis\xa1Pexp-http-v2-0003\xa4Hcommands\xacIbranchmap\xa2Dargs\xa0Kpermissions\x81DpullLcapabilities\xa2Dargs\xa0Kpermissions\x81DpullMchangesetdata\xa2Dargs\xa2Ffields\xa4Gdefault\xd9\x01\x02\x80Hrequired\xf4DtypeCsetKvalidvalues\xd9\x01\x02\x84IbookmarksGparentsEphaseHrevisionIrevisions\xa2Hrequired\xf5DtypeDlistKpermissions\x81DpullHfiledata\xa2Dargs\xa4Ffields\xa4Gdefault\xd9\x01\x02\x80Hrequired\xf4DtypeCsetKvalidvalues\xd9\x01\x02\x83HlinknodeGparentsHrevisionKhaveparents\xa3Gdefault\xf4Hrequired\xf4DtypeDboolEnodes\xa2Hrequired\xf5DtypeDlistDpath\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpullIfilesdata\xa3Dargs\xa4Ffields\xa4Gdefault\xd9\x01\x02\x80Hrequired\xf4DtypeCsetKvalidvalues\xd9\x01\x02\x84NfirstchangesetHlinknodeGparentsHrevisionKhaveparents\xa3Gdefault\xf4Hrequired\xf4DtypeDboolJpathfilter\xa3Gdefault\xf6Hrequired\xf4DtypeDdictIrevisions\xa2Hrequired\xf5DtypeDlistKpermissions\x81DpullTrecommendedbatchsize\x19\xc3PEheads\xa2Dargs\xa1Jpubliconly\xa3Gdefault\xf4Hrequired\xf4DtypeDboolKpermissions\x81DpullEknown\xa2Dargs\xa1Enodes\xa3Gdefault\x80Hrequired\xf4DtypeDlistKpermissions\x81DpullHlistkeys\xa2Dargs\xa1Inamespace\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpullFlookup\xa2Dargs\xa1Ckey\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpullLmanifestdata\xa3Dargs\xa4Ffields\xa4Gdefault\xd9\x01\x02\x80Hrequired\xf4DtypeCsetKvalidvalues\xd9\x01\x02\x82GparentsHrevisionKhaveparents\xa3Gdefault\xf4Hrequired\xf4DtypeDboolEnodes\xa2Hrequired\xf5DtypeDlistDtree\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpullTrecommendedbatchsize\x1a\x00\x01\x86\xa0Gpushkey\xa2Dargs\xa4Ckey\xa2Hrequired\xf5DtypeEbytesInamespace\xa2Hrequired\xf5DtypeEbytesCnew\xa2Hrequired\xf5DtypeEbytesCold\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpushPrawstorefiledata\xa2Dargs\xa2Efiles\xa2Hrequired\xf5DtypeDlistJpathfilter\xa3Gdefault\xf6Hrequired\xf4DtypeDlistKpermissions\x81DpullQframingmediatypes\x81X&application/mercurial-exp-framing-0006Rpathfilterprefixes\xd9\x01\x02\x82Epath:Lrootfilesin:Nrawrepoformats\x82LgeneraldeltaHrevlogv1Nv1capabilitiesY\x01\xd3batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset compression=$BUNDLE2_COMPRESSIONS$ getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx known lookup pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
+ s> \xa3GapibaseDapi/Dapis\xa1Pexp-http-v2-0003\xa4Hcommands\xacIbranchmap\xa2Dargs\xa0Kpermissions\x81DpullLcapabilities\xa2Dargs\xa0Kpermissions\x81DpullMchangesetdata\xa2Dargs\xa2Ffields\xa4Gdefault\xd9\x01\x02\x80Hrequired\xf4DtypeCsetKvalidvalues\xd9\x01\x02\x84IbookmarksGparentsEphaseHrevisionIrevisions\xa2Hrequired\xf5DtypeDlistKpermissions\x81DpullHfiledata\xa2Dargs\xa4Ffields\xa4Gdefault\xd9\x01\x02\x80Hrequired\xf4DtypeCsetKvalidvalues\xd9\x01\x02\x83HlinknodeGparentsHrevisionKhaveparents\xa3Gdefault\xf4Hrequired\xf4DtypeDboolEnodes\xa2Hrequired\xf5DtypeDlistDpath\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpullIfilesdata\xa3Dargs\xa4Ffields\xa4Gdefault\xd9\x01\x02\x80Hrequired\xf4DtypeCsetKvalidvalues\xd9\x01\x02\x84NfirstchangesetHlinknodeGparentsHrevisionKhaveparents\xa3Gdefault\xf4Hrequired\xf4DtypeDboolJpathfilter\xa3Gdefault\xf6Hrequired\xf4DtypeDdictIrevisions\xa2Hrequired\xf5DtypeDlistKpermissions\x81DpullTrecommendedbatchsize\x19\xc3PEheads\xa2Dargs\xa1Jpubliconly\xa3Gdefault\xf4Hrequired\xf4DtypeDboolKpermissions\x81DpullEknown\xa2Dargs\xa1Enodes\xa3Gdefault\x80Hrequired\xf4DtypeDlistKpermissions\x81DpullHlistkeys\xa2Dargs\xa1Inamespace\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpullFlookup\xa2Dargs\xa1Ckey\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpullLmanifestdata\xa3Dargs\xa4Ffields\xa4Gdefault\xd9\x01\x02\x80Hrequired\xf4DtypeCsetKvalidvalues\xd9\x01\x02\x82GparentsHrevisionKhaveparents\xa3Gdefault\xf4Hrequired\xf4DtypeDboolEnodes\xa2Hrequired\xf5DtypeDlistDtree\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpullTrecommendedbatchsize\x1a\x00\x01\x86\xa0Gpushkey\xa2Dargs\xa4Ckey\xa2Hrequired\xf5DtypeEbytesInamespace\xa2Hrequired\xf5DtypeEbytesCnew\xa2Hrequired\xf5DtypeEbytesCold\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpushPrawstorefiledata\xa2Dargs\xa2Efiles\xa2Hrequired\xf5DtypeDlistJpathfilter\xa3Gdefault\xf6Hrequired\xf4DtypeDlistKpermissions\x81DpullQframingmediatypes\x81X&application/mercurial-exp-framing-0006Rpathfilterprefixes\xd9\x01\x02\x82Epath:Lrootfilesin:Nrawrepoformats\x83LgeneraldeltaHrevlogv1LsparserevlogNv1capabilitiesY\x01\xe0batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset compression=$BUNDLE2_COMPRESSIONS$ getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx known lookup pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
cbor> [
{
b'apibase': b'api/',
@@ -445,11 +445,12 @@
]),
b'rawrepoformats': [
b'generaldelta',
- b'revlogv1'
+ b'revlogv1',
+ b'sparserevlog'
]
}
},
- b'v1capabilities': b'batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset compression=$BUNDLE2_COMPRESSIONS$ getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx known lookup pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash'
+ b'v1capabilities': b'batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset compression=$BUNDLE2_COMPRESSIONS$ getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx known lookup pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash'
}
]
@@ -475,7 +476,7 @@
s> Content-Type: application/mercurial-cbor\r\n
s> Content-Length: *\r\n (glob)
s> \r\n
- s> \xa3GapibaseDapi/Dapis\xa1Pexp-http-v2-0003\xa4Hcommands\xacIbranchmap\xa2Dargs\xa0Kpermissions\x81DpullLcapabilities\xa2Dargs\xa0Kpermissions\x81DpullMchangesetdata\xa2Dargs\xa2Ffields\xa4Gdefault\xd9\x01\x02\x80Hrequired\xf4DtypeCsetKvalidvalues\xd9\x01\x02\x84IbookmarksGparentsEphaseHrevisionIrevisions\xa2Hrequired\xf5DtypeDlistKpermissions\x81DpullHfiledata\xa2Dargs\xa4Ffields\xa4Gdefault\xd9\x01\x02\x80Hrequired\xf4DtypeCsetKvalidvalues\xd9\x01\x02\x83HlinknodeGparentsHrevisionKhaveparents\xa3Gdefault\xf4Hrequired\xf4DtypeDboolEnodes\xa2Hrequired\xf5DtypeDlistDpath\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpullIfilesdata\xa3Dargs\xa4Ffields\xa4Gdefault\xd9\x01\x02\x80Hrequired\xf4DtypeCsetKvalidvalues\xd9\x01\x02\x84NfirstchangesetHlinknodeGparentsHrevisionKhaveparents\xa3Gdefault\xf4Hrequired\xf4DtypeDboolJpathfilter\xa3Gdefault\xf6Hrequired\xf4DtypeDdictIrevisions\xa2Hrequired\xf5DtypeDlistKpermissions\x81DpullTrecommendedbatchsize\x19\xc3PEheads\xa2Dargs\xa1Jpubliconly\xa3Gdefault\xf4Hrequired\xf4DtypeDboolKpermissions\x81DpullEknown\xa2Dargs\xa1Enodes\xa3Gdefault\x80Hrequired\xf4DtypeDlistKpermissions\x81DpullHlistkeys\xa2Dargs\xa1Inamespace\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpullFlookup\xa2Dargs\xa1Ckey\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpullLmanifestdata\xa3Dargs\xa4Ffields\xa4Gdefault\xd9\x01\x02\x80Hrequired\xf4DtypeCsetKvalidvalues\xd9\x01\x02\x82GparentsHrevisionKhaveparents\xa3Gdefault\xf4Hrequired\xf4DtypeDboolEnodes\xa2Hrequired\xf5DtypeDlistDtree\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpullTrecommendedbatchsize\x1a\x00\x01\x86\xa0Gpushkey\xa2Dargs\xa4Ckey\xa2Hrequired\xf5DtypeEbytesInamespace\xa2Hrequired\xf5DtypeEbytesCnew\xa2Hrequired\xf5DtypeEbytesCold\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpushPrawstorefiledata\xa2Dargs\xa2Efiles\xa2Hrequired\xf5DtypeDlistJpathfilter\xa3Gdefault\xf6Hrequired\xf4DtypeDlistKpermissions\x81DpullQframingmediatypes\x81X&application/mercurial-exp-framing-0006Rpathfilterprefixes\xd9\x01\x02\x82Epath:Lrootfilesin:Nrawrepoformats\x82LgeneraldeltaHrevlogv1Nv1capabilitiesY\x01\xd3batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset compression=$BUNDLE2_COMPRESSIONS$ getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx known lookup pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
+ s> \xa3GapibaseDapi/Dapis\xa1Pexp-http-v2-0003\xa4Hcommands\xacIbranchmap\xa2Dargs\xa0Kpermissions\x81DpullLcapabilities\xa2Dargs\xa0Kpermissions\x81DpullMchangesetdata\xa2Dargs\xa2Ffields\xa4Gdefault\xd9\x01\x02\x80Hrequired\xf4DtypeCsetKvalidvalues\xd9\x01\x02\x84IbookmarksGparentsEphaseHrevisionIrevisions\xa2Hrequired\xf5DtypeDlistKpermissions\x81DpullHfiledata\xa2Dargs\xa4Ffields\xa4Gdefault\xd9\x01\x02\x80Hrequired\xf4DtypeCsetKvalidvalues\xd9\x01\x02\x83HlinknodeGparentsHrevisionKhaveparents\xa3Gdefault\xf4Hrequired\xf4DtypeDboolEnodes\xa2Hrequired\xf5DtypeDlistDpath\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpullIfilesdata\xa3Dargs\xa4Ffields\xa4Gdefault\xd9\x01\x02\x80Hrequired\xf4DtypeCsetKvalidvalues\xd9\x01\x02\x84NfirstchangesetHlinknodeGparentsHrevisionKhaveparents\xa3Gdefault\xf4Hrequired\xf4DtypeDboolJpathfilter\xa3Gdefault\xf6Hrequired\xf4DtypeDdictIrevisions\xa2Hrequired\xf5DtypeDlistKpermissions\x81DpullTrecommendedbatchsize\x19\xc3PEheads\xa2Dargs\xa1Jpubliconly\xa3Gdefault\xf4Hrequired\xf4DtypeDboolKpermissions\x81DpullEknown\xa2Dargs\xa1Enodes\xa3Gdefault\x80Hrequired\xf4DtypeDlistKpermissions\x81DpullHlistkeys\xa2Dargs\xa1Inamespace\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpullFlookup\xa2Dargs\xa1Ckey\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpullLmanifestdata\xa3Dargs\xa4Ffields\xa4Gdefault\xd9\x01\x02\x80Hrequired\xf4DtypeCsetKvalidvalues\xd9\x01\x02\x82GparentsHrevisionKhaveparents\xa3Gdefault\xf4Hrequired\xf4DtypeDboolEnodes\xa2Hrequired\xf5DtypeDlistDtree\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpullTrecommendedbatchsize\x1a\x00\x01\x86\xa0Gpushkey\xa2Dargs\xa4Ckey\xa2Hrequired\xf5DtypeEbytesInamespace\xa2Hrequired\xf5DtypeEbytesCnew\xa2Hrequired\xf5DtypeEbytesCold\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpushPrawstorefiledata\xa2Dargs\xa2Efiles\xa2Hrequired\xf5DtypeDlistJpathfilter\xa3Gdefault\xf6Hrequired\xf4DtypeDlistKpermissions\x81DpullQframingmediatypes\x81X&application/mercurial-exp-framing-0006Rpathfilterprefixes\xd9\x01\x02\x82Epath:Lrootfilesin:Nrawrepoformats\x83LgeneraldeltaHrevlogv1LsparserevlogNv1capabilitiesY\x01\xe0batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset compression=$BUNDLE2_COMPRESSIONS$ getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx known lookup pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
sending capabilities command
s> POST /api/exp-http-v2-0003/ro/capabilities HTTP/1.1\r\n
s> Accept-Encoding: identity\r\n
@@ -503,11 +504,11 @@
s> \xa1FstatusBok
s> \r\n
received frame(size=11; request=1; stream=2; streamflags=encoded; type=command-response; flags=continuation)
- s> 651\r\n
- s> I\x06\x00\x01\x00\x02\x041
- s> \xa4Hcommands\xacIbranchmap\xa2Dargs\xa0Kpermissions\x81DpullLcapabilities\xa2Dargs\xa0Kpermissions\x81DpullMchangesetdata\xa2Dargs\xa2Ffields\xa4Gdefault\xd9\x01\x02\x80Hrequired\xf4DtypeCsetKvalidvalues\xd9\x01\x02\x84IbookmarksGparentsEphaseHrevisionIrevisions\xa2Hrequired\xf5DtypeDlistKpermissions\x81DpullHfiledata\xa2Dargs\xa4Ffields\xa4Gdefault\xd9\x01\x02\x80Hrequired\xf4DtypeCsetKvalidvalues\xd9\x01\x02\x83HlinknodeGparentsHrevisionKhaveparents\xa3Gdefault\xf4Hrequired\xf4DtypeDboolEnodes\xa2Hrequired\xf5DtypeDlistDpath\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpullIfilesdata\xa3Dargs\xa4Ffields\xa4Gdefault\xd9\x01\x02\x80Hrequired\xf4DtypeCsetKvalidvalues\xd9\x01\x02\x84NfirstchangesetHlinknodeGparentsHrevisionKhaveparents\xa3Gdefault\xf4Hrequired\xf4DtypeDboolJpathfilter\xa3Gdefault\xf6Hrequired\xf4DtypeDdictIrevisions\xa2Hrequired\xf5DtypeDlistKpermissions\x81DpullTrecommendedbatchsize\x19\xc3PEheads\xa2Dargs\xa1Jpubliconly\xa3Gdefault\xf4Hrequired\xf4DtypeDboolKpermissions\x81DpullEknown\xa2Dargs\xa1Enodes\xa3Gdefault\x80Hrequired\xf4DtypeDlistKpermissions\x81DpullHlistkeys\xa2Dargs\xa1Inamespace\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpullFlookup\xa2Dargs\xa1Ckey\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpullLmanifestdata\xa3Dargs\xa4Ffields\xa4Gdefault\xd9\x01\x02\x80Hrequired\xf4DtypeCsetKvalidvalues\xd9\x01\x02\x82GparentsHrevisionKhaveparents\xa3Gdefault\xf4Hrequired\xf4DtypeDboolEnodes\xa2Hrequired\xf5DtypeDlistDtree\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpullTrecommendedbatchsize\x1a\x00\x01\x86\xa0Gpushkey\xa2Dargs\xa4Ckey\xa2Hrequired\xf5DtypeEbytesInamespace\xa2Hrequired\xf5DtypeEbytesCnew\xa2Hrequired\xf5DtypeEbytesCold\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpushPrawstorefiledata\xa2Dargs\xa2Efiles\xa2Hrequired\xf5DtypeDlistJpathfilter\xa3Gdefault\xf6Hrequired\xf4DtypeDlistKpermissions\x81DpullQframingmediatypes\x81X&application/mercurial-exp-framing-0006Rpathfilterprefixes\xd9\x01\x02\x82Epath:Lrootfilesin:Nrawrepoformats\x82LgeneraldeltaHrevlogv1
+ s> 65e\r\n
+ s> V\x06\x00\x01\x00\x02\x041
+ s> \xa4Hcommands\xacIbranchmap\xa2Dargs\xa0Kpermissions\x81DpullLcapabilities\xa2Dargs\xa0Kpermissions\x81DpullMchangesetdata\xa2Dargs\xa2Ffields\xa4Gdefault\xd9\x01\x02\x80Hrequired\xf4DtypeCsetKvalidvalues\xd9\x01\x02\x84IbookmarksGparentsEphaseHrevisionIrevisions\xa2Hrequired\xf5DtypeDlistKpermissions\x81DpullHfiledata\xa2Dargs\xa4Ffields\xa4Gdefault\xd9\x01\x02\x80Hrequired\xf4DtypeCsetKvalidvalues\xd9\x01\x02\x83HlinknodeGparentsHrevisionKhaveparents\xa3Gdefault\xf4Hrequired\xf4DtypeDboolEnodes\xa2Hrequired\xf5DtypeDlistDpath\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpullIfilesdata\xa3Dargs\xa4Ffields\xa4Gdefault\xd9\x01\x02\x80Hrequired\xf4DtypeCsetKvalidvalues\xd9\x01\x02\x84NfirstchangesetHlinknodeGparentsHrevisionKhaveparents\xa3Gdefault\xf4Hrequired\xf4DtypeDboolJpathfilter\xa3Gdefault\xf6Hrequired\xf4DtypeDdictIrevisions\xa2Hrequired\xf5DtypeDlistKpermissions\x81DpullTrecommendedbatchsize\x19\xc3PEheads\xa2Dargs\xa1Jpubliconly\xa3Gdefault\xf4Hrequired\xf4DtypeDboolKpermissions\x81DpullEknown\xa2Dargs\xa1Enodes\xa3Gdefault\x80Hrequired\xf4DtypeDlistKpermissions\x81DpullHlistkeys\xa2Dargs\xa1Inamespace\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpullFlookup\xa2Dargs\xa1Ckey\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpullLmanifestdata\xa3Dargs\xa4Ffields\xa4Gdefault\xd9\x01\x02\x80Hrequired\xf4DtypeCsetKvalidvalues\xd9\x01\x02\x82GparentsHrevisionKhaveparents\xa3Gdefault\xf4Hrequired\xf4DtypeDboolEnodes\xa2Hrequired\xf5DtypeDlistDtree\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpullTrecommendedbatchsize\x1a\x00\x01\x86\xa0Gpushkey\xa2Dargs\xa4Ckey\xa2Hrequired\xf5DtypeEbytesInamespace\xa2Hrequired\xf5DtypeEbytesCnew\xa2Hrequired\xf5DtypeEbytesCold\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpushPrawstorefiledata\xa2Dargs\xa2Efiles\xa2Hrequired\xf5DtypeDlistJpathfilter\xa3Gdefault\xf6Hrequired\xf4DtypeDlistKpermissions\x81DpullQframingmediatypes\x81X&application/mercurial-exp-framing-0006Rpathfilterprefixes\xd9\x01\x02\x82Epath:Lrootfilesin:Nrawrepoformats\x83LgeneraldeltaHrevlogv1Lsparserevlog
s> \r\n
- received frame(size=1609; request=1; stream=2; streamflags=encoded; type=command-response; flags=continuation)
+ received frame(size=1622; request=1; stream=2; streamflags=encoded; type=command-response; flags=continuation)
s> 8\r\n
s> \x00\x00\x00\x01\x00\x02\x002
s> \r\n
@@ -739,7 +740,8 @@
]),
b'rawrepoformats': [
b'generaldelta',
- b'revlogv1'
+ b'revlogv1',
+ b'sparserevlog'
]
}
]
--- a/tests/test-wireproto-command-filedata.t Wed Jan 09 20:00:35 2019 -0800
+++ b/tests/test-wireproto-command-filedata.t Fri Jan 18 13:28:22 2019 -0500
@@ -19,19 +19,36 @@
$ echo a1 >> a
$ echo d1 > dir0/d
$ hg commit -m 'commit 1'
- $ echo f0 > dir0/child1/f
+ $ echo f1 > dir0/child1/f
$ hg commit -m 'commit 2'
- nothing changed
- [1]
$ hg -q up -r 0
$ echo a2 >> a
$ hg commit -m 'commit 3'
created new head
+Create multiple heads introducing the same file node
+
+ $ hg -q up -r 0
+ $ echo foo > dupe-file
+ $ hg commit -Am 'dupe 1'
+ adding dupe-file
+ created new head
+ $ hg -q up -r 0
+ $ echo foo > dupe-file
+ $ hg commit -Am 'dupe 2'
+ adding dupe-file
+ created new head
+
$ hg log -G -T '{rev}:{node} {desc}\n'
- @ 2:5ce944d7fece1252dae06c34422b573c191b9489 commit 3
+ @ 5:732c3dd7bee94242de656000e5f458e7ccfe2828 dupe 2
|
+ | o 4:4334f10897d13c3e8beb4b636f7272b4ec2d0322 dupe 1
+ |/
+ | o 3:5ce944d7fece1252dae06c34422b573c191b9489 commit 3
+ |/
+ | o 2:b3c27db01410dae01e5485d425b1440078df540c commit 2
+ | |
| o 1:3ef5e551f219ba505481d34d6b0316b017fa3f00 commit 1
|/
o 0:91b232a2253ce0638496f67bdfd7a4933fb51b25 commit 0
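
'dupe 1' and 'dupe 2' are distinct changesets, yet they introduce an identical file revision: a filelog node is the SHA-1 of the sorted parent nodes followed by the file content, so the same bytes with the same parents hash to the same node no matter which head adds them. That is why the dupe-file filelog below holds a single revision shared by both heads. A sketch of the hash, assuming null parents:

    import hashlib

    NULLID = b'\x00' * 20

    def filenode(text, p1=NULLID, p2=NULLID):
        # revlog hash: sha1(min(p1, p2) + max(p1, p2) + text)
        a, b = sorted((p1, p2))
        return hashlib.sha1(a + b + text).hexdigest()

    # both heads add b'foo\n' with null parents, so the node is identical
    assert filenode(b'foo\n') == filenode(b'foo\n')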
@@ -41,12 +58,16 @@
rev linkrev nodeid p1 p2
0 0 649d149df43d83882523b7fb1e6a3af6f1907b39 0000000000000000000000000000000000000000 0000000000000000000000000000000000000000
1 1 0a86321f1379d1a9ecd0579a22977af7a5acaf11 649d149df43d83882523b7fb1e6a3af6f1907b39 0000000000000000000000000000000000000000
- 2 2 7e5801b6d5f03a5a54f3c47b583f7567aad43e5b 649d149df43d83882523b7fb1e6a3af6f1907b39 0000000000000000000000000000000000000000
+ 2 3 7e5801b6d5f03a5a54f3c47b583f7567aad43e5b 649d149df43d83882523b7fb1e6a3af6f1907b39 0000000000000000000000000000000000000000
$ hg --debug debugindex dir0/child0/e
rev linkrev nodeid p1 p2
0 0 bbba6c06b30f443d34ff841bc985c4d0827c6be4 0000000000000000000000000000000000000000 0000000000000000000000000000000000000000
+ $ hg --debug debugindex dupe-file
+ rev linkrev nodeid p1 p2
+ 0 4 2ed2a3912a0b24502043eae84ee4b279c18b90dd 0000000000000000000000000000000000000000 0000000000000000000000000000000000000000
+
$ hg serve -p $HGPORT -d --pid-file hg.pid -E error.log
$ cat hg.pid > $DAEMON_PIDS
@@ -309,4 +330,35 @@
b'a0\n00000000000000000000000000000000000000\n11111111111111111111111111111111111111\na2\n'
]
+Linknode for the duplicate file revision is the changeset that first introduced it
+
+ $ sendhttpv2peer << EOF
+ > command filedata
+ > nodes eval:[b'\x2e\xd2\xa3\x91\x2a\x0b\x24\x50\x20\x43\xea\xe8\x4e\xe4\xb2\x79\xc1\x8b\x90\xdd']
+ > path eval:b'dupe-file'
+ > fields eval:[b'linknode', b'parents', b'revision']
+ > EOF
+ creating http peer for wire protocol version 2
+ sending filedata command
+ response: gen[
+ {
+ b'totalitems': 1
+ },
+ {
+ b'fieldsfollowing': [
+ [
+ b'revision',
+ 4
+ ]
+ ],
+ b'linknode': b'C4\xf1\x08\x97\xd1<>\x8b\xebKcorr\xb4\xec-\x03"',
+ b'node': b'.\xd2\xa3\x91*\x0b$P C\xea\xe8N\xe4\xb2y\xc1\x8b\x90\xdd',
+ b'parents': [
+ b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00',
+ b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
+ ]
+ },
+ b'foo\n'
+ ]
+
$ cat error.log
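
The escaped linknode in the response above is just the 20-byte binary form of the 'dupe 1' changeset 4334f10897d13c3e8beb4b636f7272b4ec2d0322: printable bytes render as themselves ('C' is 0x43, '4' is 0x34, and so on), the rest as \x escapes. The server reports the changeset that first introduced the file node ('dupe 1'), even though a second head ('dupe 2') carries the same revision. A round-trip check:

    node = bytes.fromhex('4334f10897d13c3e8beb4b636f7272b4ec2d0322')
    assert node == b'C4\xf1\x08\x97\xd1<>\x8b\xebKcorr\xb4\xec-\x03"'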
--- a/tests/test-wireproto-command-filesdata.t Wed Jan 09 20:00:35 2019 -0800
+++ b/tests/test-wireproto-command-filesdata.t Fri Jan 18 13:28:22 2019 -0500
@@ -34,9 +34,26 @@
$ hg commit -m 'commit 3'
created new head
+Create multiple heads introducing the same file node
+
+ $ hg -q up -r 0
+ $ echo foo > dupe-file
+ $ hg commit -Am 'dupe 1'
+ adding dupe-file
+ created new head
+ $ hg -q up -r 0
+ $ echo foo > dupe-file
+ $ hg commit -Am 'dupe 2'
+ adding dupe-file
+ created new head
+
$ hg log -G -T '{rev}:{node} {desc}\n'
- @ 3:476fbf122cd82f6726f0191ff146f67140946abc commit 3
+ @ 5:47fc30580911232cb264675b402819deddf6c6f0 dupe 2
|
+ | o 4:b16cce2967c1749ef4f4e3086a806cfbad8a3af7 dupe 1
+ |/
+ | o 3:476fbf122cd82f6726f0191ff146f67140946abc commit 3
+ |/
| o 2:b91c03cbba3519ab149b6cd0a0afbdb5cf1b5c8a commit 2
| |
| o 1:5b0b1a23577e205ea240e39c9704e28d7697cbd8 commit 1
@@ -1161,4 +1178,121 @@
}
]
+Test behavior where a file node is introduced in 2 DAG heads
+
+Request for changeset introducing filenode returns linknode as self
+
+ $ sendhttpv2peer << EOF
+ > command filesdata
+ > revisions eval:[{
+ > b'type': b'changesetexplicit',
+ > b'nodes': [
+ > b'\xb1\x6c\xce\x29\x67\xc1\x74\x9e\xf4\xf4\xe3\x08\x6a\x80\x6c\xfb\xad\x8a\x3a\xf7',
+ > ]}]
+ > fields eval:[b'linknode']
+ > pathfilter eval:{b'include': [b'path:dupe-file']}
+ > EOF
+ creating http peer for wire protocol version 2
+ sending filesdata command
+ response: gen[
+ {
+ b'totalitems': 1,
+ b'totalpaths': 1
+ },
+ {
+ b'path': b'dupe-file',
+ b'totalitems': 1
+ },
+ {
+ b'linknode': b'\xb1l\xce)g\xc1t\x9e\xf4\xf4\xe3\x08j\x80l\xfb\xad\x8a:\xf7',
+ b'node': b'.\xd2\xa3\x91*\x0b$P C\xea\xe8N\xe4\xb2y\xc1\x8b\x90\xdd'
+ }
+ ]
+
+ $ sendhttpv2peer << EOF
+ > command filesdata
+ > revisions eval:[{
+ > b'type': b'changesetexplicit',
+ > b'nodes': [
+ > b'\xb1\x6c\xce\x29\x67\xc1\x74\x9e\xf4\xf4\xe3\x08\x6a\x80\x6c\xfb\xad\x8a\x3a\xf7',
+ > ]}]
+ > fields eval:[b'linknode']
+ > haveparents eval:True
+ > pathfilter eval:{b'include': [b'path:dupe-file']}
+ > EOF
+ creating http peer for wire protocol version 2
+ sending filesdata command
+ response: gen[
+ {
+ b'totalitems': 1,
+ b'totalpaths': 1
+ },
+ {
+ b'path': b'dupe-file',
+ b'totalitems': 1
+ },
+ {
+ b'linknode': b'\xb1l\xce)g\xc1t\x9e\xf4\xf4\xe3\x08j\x80l\xfb\xad\x8a:\xf7',
+ b'node': b'.\xd2\xa3\x91*\x0b$P C\xea\xe8N\xe4\xb2y\xc1\x8b\x90\xdd'
+ }
+ ]
+
+Requesting a changeset whose recorded linknode isn't in its DAG ancestry gets
+the linknode rewritten accordingly
+
+ $ sendhttpv2peer << EOF
+ > command filesdata
+ > revisions eval:[{
+ > b'type': b'changesetexplicit',
+ > b'nodes': [
+ > b'\x47\xfc\x30\x58\x09\x11\x23\x2c\xb2\x64\x67\x5b\x40\x28\x19\xde\xdd\xf6\xc6\xf0',
+ > ]}]
+ > fields eval:[b'linknode']
+ > pathfilter eval:{b'include': [b'path:dupe-file']}
+ > EOF
+ creating http peer for wire protocol version 2
+ sending filesdata command
+ response: gen[
+ {
+ b'totalitems': 1,
+ b'totalpaths': 1
+ },
+ {
+ b'path': b'dupe-file',
+ b'totalitems': 1
+ },
+ {
+ b'linknode': b'G\xfc0X\t\x11#,\xb2dg[@(\x19\xde\xdd\xf6\xc6\xf0',
+ b'node': b'.\xd2\xa3\x91*\x0b$P C\xea\xe8N\xe4\xb2y\xc1\x8b\x90\xdd'
+ }
+ ]
+
+ $ sendhttpv2peer << EOF
+ > command filesdata
+ > revisions eval:[{
+ > b'type': b'changesetexplicit',
+ > b'nodes': [
+ > b'\x47\xfc\x30\x58\x09\x11\x23\x2c\xb2\x64\x67\x5b\x40\x28\x19\xde\xdd\xf6\xc6\xf0',
+ > ]}]
+ > fields eval:[b'linknode']
+ > haveparents eval:True
+ > pathfilter eval:{b'include': [b'path:dupe-file']}
+ > EOF
+ creating http peer for wire protocol version 2
+ sending filesdata command
+ response: gen[
+ {
+ b'totalitems': 1,
+ b'totalpaths': 1
+ },
+ {
+ b'path': b'dupe-file',
+ b'totalitems': 1
+ },
+ {
+ b'linknode': b'G\xfc0X\t\x11#,\xb2dg[@(\x19\xde\xdd\xf6\xc6\xf0',
+ b'node': b'.\xd2\xa3\x91*\x0b$P C\xea\xe8N\xe4\xb2y\xc1\x8b\x90\xdd'
+ }
+ ]
+
$ cat error.log
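
The last two requests above pin down the rewrite rule: the filelog records 'dupe 1' as the introducing changeset, but 'dupe 1' is not an ancestor of 'dupe 2', so a client that only pulled 'dupe 2' could never resolve that linknode. In that case the server substitutes the requested changeset itself. A minimal sketch of the decision, with hypothetical names (the real logic lives in Mercurial's wireproto server):

    def reported_linknode(stored, requested, is_ancestor):
        """Pick a linknode the requesting client is guaranteed to know.

        is_ancestor(a, b) is a hypothetical predicate returning True if
        changeset a is an ancestor of (or equal to) changeset b.
        """
        if is_ancestor(stored, requested):
            return stored      # normal case: recorded linknode is reachable
        return requested       # fall back to the changeset being served

    # 'dupe 1' is not an ancestor of 'dupe 2', so 'dupe 2' gets itself back
    assert reported_linknode('dupe1', 'dupe2', lambda a, b: a == b) == 'dupe2'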
--- a/tests/test-wireproto-content-redirects.t Wed Jan 09 20:00:35 2019 -0800
+++ b/tests/test-wireproto-content-redirects.t Fri Jan 18 13:28:22 2019 -0500
@@ -65,9 +65,9 @@
s> Server: testing stub value\r\n
s> Date: $HTTP_DATE$\r\n
s> Content-Type: application/mercurial-cbor\r\n
- s> Content-Length: 2259\r\n
+ s> Content-Length: 2285\r\n
s> \r\n
- s> \xa3GapibaseDapi/Dapis\xa1Pexp-http-v2-0003\xa5Hcommands\xacIbranchmap\xa2Dargs\xa0Kpermissions\x81DpullLcapabilities\xa2Dargs\xa0Kpermissions\x81DpullMchangesetdata\xa2Dargs\xa2Ffields\xa4Gdefault\xd9\x01\x02\x80Hrequired\xf4DtypeCsetKvalidvalues\xd9\x01\x02\x84IbookmarksGparentsEphaseHrevisionIrevisions\xa2Hrequired\xf5DtypeDlistKpermissions\x81DpullHfiledata\xa2Dargs\xa4Ffields\xa4Gdefault\xd9\x01\x02\x80Hrequired\xf4DtypeCsetKvalidvalues\xd9\x01\x02\x83HlinknodeGparentsHrevisionKhaveparents\xa3Gdefault\xf4Hrequired\xf4DtypeDboolEnodes\xa2Hrequired\xf5DtypeDlistDpath\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpullIfilesdata\xa3Dargs\xa4Ffields\xa4Gdefault\xd9\x01\x02\x80Hrequired\xf4DtypeCsetKvalidvalues\xd9\x01\x02\x84NfirstchangesetHlinknodeGparentsHrevisionKhaveparents\xa3Gdefault\xf4Hrequired\xf4DtypeDboolJpathfilter\xa3Gdefault\xf6Hrequired\xf4DtypeDdictIrevisions\xa2Hrequired\xf5DtypeDlistKpermissions\x81DpullTrecommendedbatchsize\x19\xc3PEheads\xa2Dargs\xa1Jpubliconly\xa3Gdefault\xf4Hrequired\xf4DtypeDboolKpermissions\x81DpullEknown\xa2Dargs\xa1Enodes\xa3Gdefault\x80Hrequired\xf4DtypeDlistKpermissions\x81DpullHlistkeys\xa2Dargs\xa1Inamespace\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpullFlookup\xa2Dargs\xa1Ckey\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpullLmanifestdata\xa3Dargs\xa4Ffields\xa4Gdefault\xd9\x01\x02\x80Hrequired\xf4DtypeCsetKvalidvalues\xd9\x01\x02\x82GparentsHrevisionKhaveparents\xa3Gdefault\xf4Hrequired\xf4DtypeDboolEnodes\xa2Hrequired\xf5DtypeDlistDtree\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpullTrecommendedbatchsize\x1a\x00\x01\x86\xa0Gpushkey\xa2Dargs\xa4Ckey\xa2Hrequired\xf5DtypeEbytesInamespace\xa2Hrequired\xf5DtypeEbytesCnew\xa2Hrequired\xf5DtypeEbytesCold\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpushPrawstorefiledata\xa2Dargs\xa2Efiles\xa2Hrequired\xf5DtypeDlistJpathfilter\xa3Gdefault\xf6Hrequired\xf4DtypeDlistKpermissions\x81DpullQframingmediatypes\x81X&application/mercurial-exp-framing-0006Rpathfilterprefixes\xd9\x01\x02\x82Epath:Lrootfilesin:Nrawrepoformats\x82LgeneraldeltaHrevlogv1Hredirect\xa2Fhashes\x82Fsha256Dsha1Gtargets\x81\xa5DnameHtarget-aHprotocolDhttpKsnirequired\xf4Ktlsversions\x82C1.2C1.3Duris\x81Shttp://example.com/Nv1capabilitiesY\x01\xd3batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset compression=$BUNDLE2_COMPRESSIONS$ getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx known lookup pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
+ s> \xa3GapibaseDapi/Dapis\xa1Pexp-http-v2-0003\xa5Hcommands\xacIbranchmap\xa2Dargs\xa0Kpermissions\x81DpullLcapabilities\xa2Dargs\xa0Kpermissions\x81DpullMchangesetdata\xa2Dargs\xa2Ffields\xa4Gdefault\xd9\x01\x02\x80Hrequired\xf4DtypeCsetKvalidvalues\xd9\x01\x02\x84IbookmarksGparentsEphaseHrevisionIrevisions\xa2Hrequired\xf5DtypeDlistKpermissions\x81DpullHfiledata\xa2Dargs\xa4Ffields\xa4Gdefault\xd9\x01\x02\x80Hrequired\xf4DtypeCsetKvalidvalues\xd9\x01\x02\x83HlinknodeGparentsHrevisionKhaveparents\xa3Gdefault\xf4Hrequired\xf4DtypeDboolEnodes\xa2Hrequired\xf5DtypeDlistDpath\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpullIfilesdata\xa3Dargs\xa4Ffields\xa4Gdefault\xd9\x01\x02\x80Hrequired\xf4DtypeCsetKvalidvalues\xd9\x01\x02\x84NfirstchangesetHlinknodeGparentsHrevisionKhaveparents\xa3Gdefault\xf4Hrequired\xf4DtypeDboolJpathfilter\xa3Gdefault\xf6Hrequired\xf4DtypeDdictIrevisions\xa2Hrequired\xf5DtypeDlistKpermissions\x81DpullTrecommendedbatchsize\x19\xc3PEheads\xa2Dargs\xa1Jpubliconly\xa3Gdefault\xf4Hrequired\xf4DtypeDboolKpermissions\x81DpullEknown\xa2Dargs\xa1Enodes\xa3Gdefault\x80Hrequired\xf4DtypeDlistKpermissions\x81DpullHlistkeys\xa2Dargs\xa1Inamespace\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpullFlookup\xa2Dargs\xa1Ckey\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpullLmanifestdata\xa3Dargs\xa4Ffields\xa4Gdefault\xd9\x01\x02\x80Hrequired\xf4DtypeCsetKvalidvalues\xd9\x01\x02\x82GparentsHrevisionKhaveparents\xa3Gdefault\xf4Hrequired\xf4DtypeDboolEnodes\xa2Hrequired\xf5DtypeDlistDtree\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpullTrecommendedbatchsize\x1a\x00\x01\x86\xa0Gpushkey\xa2Dargs\xa4Ckey\xa2Hrequired\xf5DtypeEbytesInamespace\xa2Hrequired\xf5DtypeEbytesCnew\xa2Hrequired\xf5DtypeEbytesCold\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpushPrawstorefiledata\xa2Dargs\xa2Efiles\xa2Hrequired\xf5DtypeDlistJpathfilter\xa3Gdefault\xf6Hrequired\xf4DtypeDlistKpermissions\x81DpullQframingmediatypes\x81X&application/mercurial-exp-framing-0006Rpathfilterprefixes\xd9\x01\x02\x82Epath:Lrootfilesin:Nrawrepoformats\x83LgeneraldeltaHrevlogv1LsparserevlogHredirect\xa2Fhashes\x82Fsha256Dsha1Gtargets\x81\xa5DnameHtarget-aHprotocolDhttpKsnirequired\xf4Ktlsversions\x82C1.2C1.3Duris\x81Shttp://example.com/Nv1capabilitiesY\x01\xe0batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset compression=$BUNDLE2_COMPRESSIONS$ getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx known lookup pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
(remote redirect target target-a is compatible) (tls1.2 !)
(remote redirect target target-a requires unsupported TLS versions: 1.2, 1.3) (no-tls1.2 !)
sending capabilities command
@@ -99,9 +99,675 @@
s> \xa1FstatusBok
s> \r\n
received frame(size=11; request=1; stream=2; streamflags=encoded; type=command-response; flags=continuation)
+ s> 6de\r\n
+ s> \xd6\x06\x00\x01\x00\x02\x041
+ s> \xa5Hcommands\xacIbranchmap\xa2Dargs\xa0Kpermissions\x81DpullLcapabilities\xa2Dargs\xa0Kpermissions\x81DpullMchangesetdata\xa2Dargs\xa2Ffields\xa4Gdefault\xd9\x01\x02\x80Hrequired\xf4DtypeCsetKvalidvalues\xd9\x01\x02\x84IbookmarksGparentsEphaseHrevisionIrevisions\xa2Hrequired\xf5DtypeDlistKpermissions\x81DpullHfiledata\xa2Dargs\xa4Ffields\xa4Gdefault\xd9\x01\x02\x80Hrequired\xf4DtypeCsetKvalidvalues\xd9\x01\x02\x83HlinknodeGparentsHrevisionKhaveparents\xa3Gdefault\xf4Hrequired\xf4DtypeDboolEnodes\xa2Hrequired\xf5DtypeDlistDpath\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpullIfilesdata\xa3Dargs\xa4Ffields\xa4Gdefault\xd9\x01\x02\x80Hrequired\xf4DtypeCsetKvalidvalues\xd9\x01\x02\x84NfirstchangesetHlinknodeGparentsHrevisionKhaveparents\xa3Gdefault\xf4Hrequired\xf4DtypeDboolJpathfilter\xa3Gdefault\xf6Hrequired\xf4DtypeDdictIrevisions\xa2Hrequired\xf5DtypeDlistKpermissions\x81DpullTrecommendedbatchsize\x19\xc3PEheads\xa2Dargs\xa1Jpubliconly\xa3Gdefault\xf4Hrequired\xf4DtypeDboolKpermissions\x81DpullEknown\xa2Dargs\xa1Enodes\xa3Gdefault\x80Hrequired\xf4DtypeDlistKpermissions\x81DpullHlistkeys\xa2Dargs\xa1Inamespace\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpullFlookup\xa2Dargs\xa1Ckey\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpullLmanifestdata\xa3Dargs\xa4Ffields\xa4Gdefault\xd9\x01\x02\x80Hrequired\xf4DtypeCsetKvalidvalues\xd9\x01\x02\x82GparentsHrevisionKhaveparents\xa3Gdefault\xf4Hrequired\xf4DtypeDboolEnodes\xa2Hrequired\xf5DtypeDlistDtree\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpullTrecommendedbatchsize\x1a\x00\x01\x86\xa0Gpushkey\xa2Dargs\xa4Ckey\xa2Hrequired\xf5DtypeEbytesInamespace\xa2Hrequired\xf5DtypeEbytesCnew\xa2Hrequired\xf5DtypeEbytesCold\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpushPrawstorefiledata\xa2Dargs\xa2Efiles\xa2Hrequired\xf5DtypeDlistJpathfilter\xa3Gdefault\xf6Hrequired\xf4DtypeDlistKpermissions\x81DpullQframingmediatypes\x81X&application/mercurial-exp-framing-0006Rpathfilterprefixes\xd9\x01\x02\x82Epath:Lrootfilesin:Nrawrepoformats\x83LgeneraldeltaHrevlogv1LsparserevlogHredirect\xa2Fhashes\x82Fsha256Dsha1Gtargets\x81\xa5DnameHtarget-aHprotocolDhttpKsnirequired\xf4Ktlsversions\x82C1.2C1.3Duris\x81Shttp://example.com/
+ s> \r\n
+ received frame(size=1750; request=1; stream=2; streamflags=encoded; type=command-response; flags=continuation)
+ s> 8\r\n
+ s> \x00\x00\x00\x01\x00\x02\x002
+ s> \r\n
+ s> 0\r\n
+ s> \r\n
+ received frame(size=0; request=1; stream=2; streamflags=; type=command-response; flags=eos)
+ response: gen[
+ {
+ b'commands': {
+ b'branchmap': {
+ b'args': {},
+ b'permissions': [
+ b'pull'
+ ]
+ },
+ b'capabilities': {
+ b'args': {},
+ b'permissions': [
+ b'pull'
+ ]
+ },
+ b'changesetdata': {
+ b'args': {
+ b'fields': {
+ b'default': set([]),
+ b'required': False,
+ b'type': b'set',
+ b'validvalues': set([
+ b'bookmarks',
+ b'parents',
+ b'phase',
+ b'revision'
+ ])
+ },
+ b'revisions': {
+ b'required': True,
+ b'type': b'list'
+ }
+ },
+ b'permissions': [
+ b'pull'
+ ]
+ },
+ b'filedata': {
+ b'args': {
+ b'fields': {
+ b'default': set([]),
+ b'required': False,
+ b'type': b'set',
+ b'validvalues': set([
+ b'linknode',
+ b'parents',
+ b'revision'
+ ])
+ },
+ b'haveparents': {
+ b'default': False,
+ b'required': False,
+ b'type': b'bool'
+ },
+ b'nodes': {
+ b'required': True,
+ b'type': b'list'
+ },
+ b'path': {
+ b'required': True,
+ b'type': b'bytes'
+ }
+ },
+ b'permissions': [
+ b'pull'
+ ]
+ },
+ b'filesdata': {
+ b'args': {
+ b'fields': {
+ b'default': set([]),
+ b'required': False,
+ b'type': b'set',
+ b'validvalues': set([
+ b'firstchangeset',
+ b'linknode',
+ b'parents',
+ b'revision'
+ ])
+ },
+ b'haveparents': {
+ b'default': False,
+ b'required': False,
+ b'type': b'bool'
+ },
+ b'pathfilter': {
+ b'default': None,
+ b'required': False,
+ b'type': b'dict'
+ },
+ b'revisions': {
+ b'required': True,
+ b'type': b'list'
+ }
+ },
+ b'permissions': [
+ b'pull'
+ ],
+ b'recommendedbatchsize': 50000
+ },
+ b'heads': {
+ b'args': {
+ b'publiconly': {
+ b'default': False,
+ b'required': False,
+ b'type': b'bool'
+ }
+ },
+ b'permissions': [
+ b'pull'
+ ]
+ },
+ b'known': {
+ b'args': {
+ b'nodes': {
+ b'default': [],
+ b'required': False,
+ b'type': b'list'
+ }
+ },
+ b'permissions': [
+ b'pull'
+ ]
+ },
+ b'listkeys': {
+ b'args': {
+ b'namespace': {
+ b'required': True,
+ b'type': b'bytes'
+ }
+ },
+ b'permissions': [
+ b'pull'
+ ]
+ },
+ b'lookup': {
+ b'args': {
+ b'key': {
+ b'required': True,
+ b'type': b'bytes'
+ }
+ },
+ b'permissions': [
+ b'pull'
+ ]
+ },
+ b'manifestdata': {
+ b'args': {
+ b'fields': {
+ b'default': set([]),
+ b'required': False,
+ b'type': b'set',
+ b'validvalues': set([
+ b'parents',
+ b'revision'
+ ])
+ },
+ b'haveparents': {
+ b'default': False,
+ b'required': False,
+ b'type': b'bool'
+ },
+ b'nodes': {
+ b'required': True,
+ b'type': b'list'
+ },
+ b'tree': {
+ b'required': True,
+ b'type': b'bytes'
+ }
+ },
+ b'permissions': [
+ b'pull'
+ ],
+ b'recommendedbatchsize': 100000
+ },
+ b'pushkey': {
+ b'args': {
+ b'key': {
+ b'required': True,
+ b'type': b'bytes'
+ },
+ b'namespace': {
+ b'required': True,
+ b'type': b'bytes'
+ },
+ b'new': {
+ b'required': True,
+ b'type': b'bytes'
+ },
+ b'old': {
+ b'required': True,
+ b'type': b'bytes'
+ }
+ },
+ b'permissions': [
+ b'push'
+ ]
+ },
+ b'rawstorefiledata': {
+ b'args': {
+ b'files': {
+ b'required': True,
+ b'type': b'list'
+ },
+ b'pathfilter': {
+ b'default': None,
+ b'required': False,
+ b'type': b'list'
+ }
+ },
+ b'permissions': [
+ b'pull'
+ ]
+ }
+ },
+ b'framingmediatypes': [
+ b'application/mercurial-exp-framing-0006'
+ ],
+ b'pathfilterprefixes': set([
+ b'path:',
+ b'rootfilesin:'
+ ]),
+ b'rawrepoformats': [
+ b'generaldelta',
+ b'revlogv1',
+ b'sparserevlog'
+ ],
+ b'redirect': {
+ b'hashes': [
+ b'sha256',
+ b'sha1'
+ ],
+ b'targets': [
+ {
+ b'name': b'target-a',
+ b'protocol': b'http',
+ b'snirequired': False,
+ b'tlsversions': [
+ b'1.2',
+ b'1.3'
+ ],
+ b'uris': [
+ b'http://example.com/'
+ ]
+ }
+ ]
+ }
+ }
+ ]
+ (sent 2 HTTP requests and * bytes; received * bytes in responses) (glob)
+
+Unknown protocol is filtered from compatible targets
+
+ $ cat > redirects.py << EOF
+ > [
+ > {
+ > b'name': b'target-a',
+ > b'protocol': b'http',
+ > b'uris': [b'http://example.com/'],
+ > },
+ > {
+ > b'name': b'target-b',
+ > b'protocol': b'unknown',
+ > b'uris': [b'unknown://example.com/'],
+ > },
+ > ]
+ > EOF
+
+ $ sendhttpv2peerhandshake << EOF
+ > command capabilities
+ > EOF
+ creating http peer for wire protocol version 2
+ s> GET /?cmd=capabilities HTTP/1.1\r\n
+ s> Accept-Encoding: identity\r\n
+ s> vary: X-HgProto-1,X-HgUpgrade-1\r\n
+ s> x-hgproto-1: cbor\r\n
+ s> x-hgupgrade-1: exp-http-v2-0003\r\n
+ s> accept: application/mercurial-0.1\r\n
+ s> host: $LOCALIP:$HGPORT\r\n (glob)
+ s> user-agent: Mercurial debugwireproto\r\n
+ s> \r\n
+ s> makefile('rb', None)
+ s> HTTP/1.1 200 OK\r\n
+ s> Server: testing stub value\r\n
+ s> Date: $HTTP_DATE$\r\n
+ s> Content-Type: application/mercurial-cbor\r\n
+ s> Content-Length: 2312\r\n
+ s> \r\n
+ s> \xa3GapibaseDapi/Dapis\xa1Pexp-http-v2-0003\xa5Hcommands\xacIbranchmap\xa2Dargs\xa0Kpermissions\x81DpullLcapabilities\xa2Dargs\xa0Kpermissions\x81DpullMchangesetdata\xa2Dargs\xa2Ffields\xa4Gdefault\xd9\x01\x02\x80Hrequired\xf4DtypeCsetKvalidvalues\xd9\x01\x02\x84IbookmarksGparentsEphaseHrevisionIrevisions\xa2Hrequired\xf5DtypeDlistKpermissions\x81DpullHfiledata\xa2Dargs\xa4Ffields\xa4Gdefault\xd9\x01\x02\x80Hrequired\xf4DtypeCsetKvalidvalues\xd9\x01\x02\x83HlinknodeGparentsHrevisionKhaveparents\xa3Gdefault\xf4Hrequired\xf4DtypeDboolEnodes\xa2Hrequired\xf5DtypeDlistDpath\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpullIfilesdata\xa3Dargs\xa4Ffields\xa4Gdefault\xd9\x01\x02\x80Hrequired\xf4DtypeCsetKvalidvalues\xd9\x01\x02\x84NfirstchangesetHlinknodeGparentsHrevisionKhaveparents\xa3Gdefault\xf4Hrequired\xf4DtypeDboolJpathfilter\xa3Gdefault\xf6Hrequired\xf4DtypeDdictIrevisions\xa2Hrequired\xf5DtypeDlistKpermissions\x81DpullTrecommendedbatchsize\x19\xc3PEheads\xa2Dargs\xa1Jpubliconly\xa3Gdefault\xf4Hrequired\xf4DtypeDboolKpermissions\x81DpullEknown\xa2Dargs\xa1Enodes\xa3Gdefault\x80Hrequired\xf4DtypeDlistKpermissions\x81DpullHlistkeys\xa2Dargs\xa1Inamespace\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpullFlookup\xa2Dargs\xa1Ckey\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpullLmanifestdata\xa3Dargs\xa4Ffields\xa4Gdefault\xd9\x01\x02\x80Hrequired\xf4DtypeCsetKvalidvalues\xd9\x01\x02\x82GparentsHrevisionKhaveparents\xa3Gdefault\xf4Hrequired\xf4DtypeDboolEnodes\xa2Hrequired\xf5DtypeDlistDtree\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpullTrecommendedbatchsize\x1a\x00\x01\x86\xa0Gpushkey\xa2Dargs\xa4Ckey\xa2Hrequired\xf5DtypeEbytesInamespace\xa2Hrequired\xf5DtypeEbytesCnew\xa2Hrequired\xf5DtypeEbytesCold\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpushPrawstorefiledata\xa2Dargs\xa2Efiles\xa2Hrequired\xf5DtypeDlistJpathfilter\xa3Gdefault\xf6Hrequired\xf4DtypeDlistKpermissions\x81DpullQframingmediatypes\x81X&application/mercurial-exp-framing-0006Rpathfilterprefixes\xd9\x01\x02\x82Epath:Lrootfilesin:Nrawrepoformats\x83LgeneraldeltaHrevlogv1LsparserevlogHredirect\xa2Fhashes\x82Fsha256Dsha1Gtargets\x82\xa3DnameHtarget-aHprotocolDhttpDuris\x81Shttp://example.com/\xa3DnameHtarget-bHprotocolGunknownDuris\x81Vunknown://example.com/Nv1capabilitiesY\x01\xe0batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset compression=$BUNDLE2_COMPRESSIONS$ getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx known lookup pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
+ (remote redirect target target-a is compatible)
+ (remote redirect target target-b uses unsupported protocol: unknown)
+ sending capabilities command
+ s> POST /api/exp-http-v2-0003/ro/capabilities HTTP/1.1\r\n
+ s> Accept-Encoding: identity\r\n
+ s> accept: application/mercurial-exp-framing-0006\r\n
+ s> content-type: application/mercurial-exp-framing-0006\r\n
+ s> content-length: 111\r\n
+ s> host: $LOCALIP:$HGPORT\r\n (glob)
+ s> user-agent: Mercurial debugwireproto\r\n
+ s> \r\n
+ s> \x1c\x00\x00\x01\x00\x01\x01\x82\xa1Pcontentencodings\x81HidentityC\x00\x00\x01\x00\x01\x00\x11\xa2DnameLcapabilitiesHredirect\xa2Fhashes\x82Fsha256Dsha1Gtargets\x81Htarget-a
+ s> makefile('rb', None)
+ s> HTTP/1.1 200 OK\r\n
+ s> Server: testing stub value\r\n
+ s> Date: $HTTP_DATE$\r\n
+ s> Content-Type: application/mercurial-exp-framing-0006\r\n
+ s> Transfer-Encoding: chunked\r\n
+ s> \r\n
+ s> 11\r\n
+ s> \t\x00\x00\x01\x00\x02\x01\x92
+ s> Hidentity
+ s> \r\n
+ received frame(size=9; request=1; stream=2; streamflags=stream-begin; type=stream-settings; flags=eos)
+ s> 13\r\n
+ s> \x0b\x00\x00\x01\x00\x02\x041
+ s> \xa1FstatusBok
+ s> \r\n
+ received frame(size=11; request=1; stream=2; streamflags=encoded; type=command-response; flags=continuation)
+ s> 6f9\r\n
+ s> \xf1\x06\x00\x01\x00\x02\x041
+ s> \xa5Hcommands\xacIbranchmap\xa2Dargs\xa0Kpermissions\x81DpullLcapabilities\xa2Dargs\xa0Kpermissions\x81DpullMchangesetdata\xa2Dargs\xa2Ffields\xa4Gdefault\xd9\x01\x02\x80Hrequired\xf4DtypeCsetKvalidvalues\xd9\x01\x02\x84IbookmarksGparentsEphaseHrevisionIrevisions\xa2Hrequired\xf5DtypeDlistKpermissions\x81DpullHfiledata\xa2Dargs\xa4Ffields\xa4Gdefault\xd9\x01\x02\x80Hrequired\xf4DtypeCsetKvalidvalues\xd9\x01\x02\x83HlinknodeGparentsHrevisionKhaveparents\xa3Gdefault\xf4Hrequired\xf4DtypeDboolEnodes\xa2Hrequired\xf5DtypeDlistDpath\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpullIfilesdata\xa3Dargs\xa4Ffields\xa4Gdefault\xd9\x01\x02\x80Hrequired\xf4DtypeCsetKvalidvalues\xd9\x01\x02\x84NfirstchangesetHlinknodeGparentsHrevisionKhaveparents\xa3Gdefault\xf4Hrequired\xf4DtypeDboolJpathfilter\xa3Gdefault\xf6Hrequired\xf4DtypeDdictIrevisions\xa2Hrequired\xf5DtypeDlistKpermissions\x81DpullTrecommendedbatchsize\x19\xc3PEheads\xa2Dargs\xa1Jpubliconly\xa3Gdefault\xf4Hrequired\xf4DtypeDboolKpermissions\x81DpullEknown\xa2Dargs\xa1Enodes\xa3Gdefault\x80Hrequired\xf4DtypeDlistKpermissions\x81DpullHlistkeys\xa2Dargs\xa1Inamespace\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpullFlookup\xa2Dargs\xa1Ckey\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpullLmanifestdata\xa3Dargs\xa4Ffields\xa4Gdefault\xd9\x01\x02\x80Hrequired\xf4DtypeCsetKvalidvalues\xd9\x01\x02\x82GparentsHrevisionKhaveparents\xa3Gdefault\xf4Hrequired\xf4DtypeDboolEnodes\xa2Hrequired\xf5DtypeDlistDtree\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpullTrecommendedbatchsize\x1a\x00\x01\x86\xa0Gpushkey\xa2Dargs\xa4Ckey\xa2Hrequired\xf5DtypeEbytesInamespace\xa2Hrequired\xf5DtypeEbytesCnew\xa2Hrequired\xf5DtypeEbytesCold\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpushPrawstorefiledata\xa2Dargs\xa2Efiles\xa2Hrequired\xf5DtypeDlistJpathfilter\xa3Gdefault\xf6Hrequired\xf4DtypeDlistKpermissions\x81DpullQframingmediatypes\x81X&application/mercurial-exp-framing-0006Rpathfilterprefixes\xd9\x01\x02\x82Epath:Lrootfilesin:Nrawrepoformats\x83LgeneraldeltaHrevlogv1LsparserevlogHredirect\xa2Fhashes\x82Fsha256Dsha1Gtargets\x82\xa3DnameHtarget-aHprotocolDhttpDuris\x81Shttp://example.com/\xa3DnameHtarget-bHprotocolGunknownDuris\x81Vunknown://example.com/
+ s> \r\n
+ received frame(size=1777; request=1; stream=2; streamflags=encoded; type=command-response; flags=continuation)
+ s> 8\r\n
+ s> \x00\x00\x00\x01\x00\x02\x002
+ s> \r\n
+ s> 0\r\n
+ s> \r\n
+ received frame(size=0; request=1; stream=2; streamflags=; type=command-response; flags=eos)
+ response: gen[
+ {
+ b'commands': {
+ b'branchmap': {
+ b'args': {},
+ b'permissions': [
+ b'pull'
+ ]
+ },
+ b'capabilities': {
+ b'args': {},
+ b'permissions': [
+ b'pull'
+ ]
+ },
+ b'changesetdata': {
+ b'args': {
+ b'fields': {
+ b'default': set([]),
+ b'required': False,
+ b'type': b'set',
+ b'validvalues': set([
+ b'bookmarks',
+ b'parents',
+ b'phase',
+ b'revision'
+ ])
+ },
+ b'revisions': {
+ b'required': True,
+ b'type': b'list'
+ }
+ },
+ b'permissions': [
+ b'pull'
+ ]
+ },
+ b'filedata': {
+ b'args': {
+ b'fields': {
+ b'default': set([]),
+ b'required': False,
+ b'type': b'set',
+ b'validvalues': set([
+ b'linknode',
+ b'parents',
+ b'revision'
+ ])
+ },
+ b'haveparents': {
+ b'default': False,
+ b'required': False,
+ b'type': b'bool'
+ },
+ b'nodes': {
+ b'required': True,
+ b'type': b'list'
+ },
+ b'path': {
+ b'required': True,
+ b'type': b'bytes'
+ }
+ },
+ b'permissions': [
+ b'pull'
+ ]
+ },
+ b'filesdata': {
+ b'args': {
+ b'fields': {
+ b'default': set([]),
+ b'required': False,
+ b'type': b'set',
+ b'validvalues': set([
+ b'firstchangeset',
+ b'linknode',
+ b'parents',
+ b'revision'
+ ])
+ },
+ b'haveparents': {
+ b'default': False,
+ b'required': False,
+ b'type': b'bool'
+ },
+ b'pathfilter': {
+ b'default': None,
+ b'required': False,
+ b'type': b'dict'
+ },
+ b'revisions': {
+ b'required': True,
+ b'type': b'list'
+ }
+ },
+ b'permissions': [
+ b'pull'
+ ],
+ b'recommendedbatchsize': 50000
+ },
+ b'heads': {
+ b'args': {
+ b'publiconly': {
+ b'default': False,
+ b'required': False,
+ b'type': b'bool'
+ }
+ },
+ b'permissions': [
+ b'pull'
+ ]
+ },
+ b'known': {
+ b'args': {
+ b'nodes': {
+ b'default': [],
+ b'required': False,
+ b'type': b'list'
+ }
+ },
+ b'permissions': [
+ b'pull'
+ ]
+ },
+ b'listkeys': {
+ b'args': {
+ b'namespace': {
+ b'required': True,
+ b'type': b'bytes'
+ }
+ },
+ b'permissions': [
+ b'pull'
+ ]
+ },
+ b'lookup': {
+ b'args': {
+ b'key': {
+ b'required': True,
+ b'type': b'bytes'
+ }
+ },
+ b'permissions': [
+ b'pull'
+ ]
+ },
+ b'manifestdata': {
+ b'args': {
+ b'fields': {
+ b'default': set([]),
+ b'required': False,
+ b'type': b'set',
+ b'validvalues': set([
+ b'parents',
+ b'revision'
+ ])
+ },
+ b'haveparents': {
+ b'default': False,
+ b'required': False,
+ b'type': b'bool'
+ },
+ b'nodes': {
+ b'required': True,
+ b'type': b'list'
+ },
+ b'tree': {
+ b'required': True,
+ b'type': b'bytes'
+ }
+ },
+ b'permissions': [
+ b'pull'
+ ],
+ b'recommendedbatchsize': 100000
+ },
+ b'pushkey': {
+ b'args': {
+ b'key': {
+ b'required': True,
+ b'type': b'bytes'
+ },
+ b'namespace': {
+ b'required': True,
+ b'type': b'bytes'
+ },
+ b'new': {
+ b'required': True,
+ b'type': b'bytes'
+ },
+ b'old': {
+ b'required': True,
+ b'type': b'bytes'
+ }
+ },
+ b'permissions': [
+ b'push'
+ ]
+ },
+ b'rawstorefiledata': {
+ b'args': {
+ b'files': {
+ b'required': True,
+ b'type': b'list'
+ },
+ b'pathfilter': {
+ b'default': None,
+ b'required': False,
+ b'type': b'list'
+ }
+ },
+ b'permissions': [
+ b'pull'
+ ]
+ }
+ },
+ b'framingmediatypes': [
+ b'application/mercurial-exp-framing-0006'
+ ],
+ b'pathfilterprefixes': set([
+ b'path:',
+ b'rootfilesin:'
+ ]),
+ b'rawrepoformats': [
+ b'generaldelta',
+ b'revlogv1',
+ b'sparserevlog'
+ ],
+ b'redirect': {
+ b'hashes': [
+ b'sha256',
+ b'sha1'
+ ],
+ b'targets': [
+ {
+ b'name': b'target-a',
+ b'protocol': b'http',
+ b'uris': [
+ b'http://example.com/'
+ ]
+ },
+ {
+ b'name': b'target-b',
+ b'protocol': b'unknown',
+ b'uris': [
+ b'unknown://example.com/'
+ ]
+ }
+ ]
+ }
+ }
+ ]
+ (sent 2 HTTP requests and * bytes; received * bytes in responses) (glob)
+
+Missing SNI support filters targets that require SNI
+
+ $ cat > nosni.py << EOF
+ > from mercurial import sslutil
+ > sslutil.hassni = False
+ > EOF
+ $ cat >> $HGRCPATH << EOF
+ > [extensions]
+ > nosni=`pwd`/nosni.py
+ > EOF
+
+ $ cat > redirects.py << EOF
+ > [
+ > {
+ > b'name': b'target-bad-tls',
+ > b'protocol': b'https',
+ > b'uris': [b'https://example.com/'],
+ > b'snirequired': True,
+ > },
+ > ]
+ > EOF
+
+ $ sendhttpv2peerhandshake << EOF
+ > command capabilities
+ > EOF
+ creating http peer for wire protocol version 2
+ s> GET /?cmd=capabilities HTTP/1.1\r\n
+ s> Accept-Encoding: identity\r\n
+ s> vary: X-HgProto-1,X-HgUpgrade-1\r\n
+ s> x-hgproto-1: cbor\r\n
+ s> x-hgupgrade-1: exp-http-v2-0003\r\n
+ s> accept: application/mercurial-0.1\r\n
+ s> host: $LOCALIP:$HGPORT\r\n (glob)
+ s> user-agent: Mercurial debugwireproto\r\n
+ s> \r\n
+ s> makefile('rb', None)
+ s> HTTP/1.1 200 OK\r\n
+ s> Server: testing stub value\r\n
+ s> Date: $HTTP_DATE$\r\n
+ s> Content-Type: application/mercurial-cbor\r\n
+ s> Content-Length: 2272\r\n
+ s> \r\n
+ s> \xa3GapibaseDapi/Dapis\xa1Pexp-http-v2-0003\xa5Hcommands\xacIbranchmap\xa2Dargs\xa0Kpermissions\x81DpullLcapabilities\xa2Dargs\xa0Kpermissions\x81DpullMchangesetdata\xa2Dargs\xa2Ffields\xa4Gdefault\xd9\x01\x02\x80Hrequired\xf4DtypeCsetKvalidvalues\xd9\x01\x02\x84IbookmarksGparentsEphaseHrevisionIrevisions\xa2Hrequired\xf5DtypeDlistKpermissions\x81DpullHfiledata\xa2Dargs\xa4Ffields\xa4Gdefault\xd9\x01\x02\x80Hrequired\xf4DtypeCsetKvalidvalues\xd9\x01\x02\x83HlinknodeGparentsHrevisionKhaveparents\xa3Gdefault\xf4Hrequired\xf4DtypeDboolEnodes\xa2Hrequired\xf5DtypeDlistDpath\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpullIfilesdata\xa3Dargs\xa4Ffields\xa4Gdefault\xd9\x01\x02\x80Hrequired\xf4DtypeCsetKvalidvalues\xd9\x01\x02\x84NfirstchangesetHlinknodeGparentsHrevisionKhaveparents\xa3Gdefault\xf4Hrequired\xf4DtypeDboolJpathfilter\xa3Gdefault\xf6Hrequired\xf4DtypeDdictIrevisions\xa2Hrequired\xf5DtypeDlistKpermissions\x81DpullTrecommendedbatchsize\x19\xc3PEheads\xa2Dargs\xa1Jpubliconly\xa3Gdefault\xf4Hrequired\xf4DtypeDboolKpermissions\x81DpullEknown\xa2Dargs\xa1Enodes\xa3Gdefault\x80Hrequired\xf4DtypeDlistKpermissions\x81DpullHlistkeys\xa2Dargs\xa1Inamespace\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpullFlookup\xa2Dargs\xa1Ckey\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpullLmanifestdata\xa3Dargs\xa4Ffields\xa4Gdefault\xd9\x01\x02\x80Hrequired\xf4DtypeCsetKvalidvalues\xd9\x01\x02\x82GparentsHrevisionKhaveparents\xa3Gdefault\xf4Hrequired\xf4DtypeDboolEnodes\xa2Hrequired\xf5DtypeDlistDtree\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpullTrecommendedbatchsize\x1a\x00\x01\x86\xa0Gpushkey\xa2Dargs\xa4Ckey\xa2Hrequired\xf5DtypeEbytesInamespace\xa2Hrequired\xf5DtypeEbytesCnew\xa2Hrequired\xf5DtypeEbytesCold\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpushPrawstorefiledata\xa2Dargs\xa2Efiles\xa2Hrequired\xf5DtypeDlistJpathfilter\xa3Gdefault\xf6Hrequired\xf4DtypeDlistKpermissions\x81DpullQframingmediatypes\x81X&application/mercurial-exp-framing-0006Rpathfilterprefixes\xd9\x01\x02\x82Epath:Lrootfilesin:Nrawrepoformats\x83LgeneraldeltaHrevlogv1LsparserevlogHredirect\xa2Fhashes\x82Fsha256Dsha1Gtargets\x81\xa4DnameNtarget-bad-tlsHprotocolEhttpsKsnirequired\xf5Duris\x81Thttps://example.com/Nv1capabilitiesY\x01\xe0batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset compression=$BUNDLE2_COMPRESSIONS$ getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx known lookup pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
+ (redirect target target-bad-tls requires SNI, which is unsupported)
+ sending capabilities command
+ s> POST /api/exp-http-v2-0003/ro/capabilities HTTP/1.1\r\n
+ s> Accept-Encoding: identity\r\n
+ s> accept: application/mercurial-exp-framing-0006\r\n
+ s> content-type: application/mercurial-exp-framing-0006\r\n
+ s> content-length: 102\r\n
+ s> host: $LOCALIP:$HGPORT\r\n (glob)
+ s> user-agent: Mercurial debugwireproto\r\n
+ s> \r\n
+ s> \x1c\x00\x00\x01\x00\x01\x01\x82\xa1Pcontentencodings\x81Hidentity:\x00\x00\x01\x00\x01\x00\x11\xa2DnameLcapabilitiesHredirect\xa2Fhashes\x82Fsha256Dsha1Gtargets\x80
+ s> makefile('rb', None)
+ s> HTTP/1.1 200 OK\r\n
+ s> Server: testing stub value\r\n
+ s> Date: $HTTP_DATE$\r\n
+ s> Content-Type: application/mercurial-exp-framing-0006\r\n
+ s> Transfer-Encoding: chunked\r\n
+ s> \r\n
+ s> 11\r\n
+ s> \t\x00\x00\x01\x00\x02\x01\x92
+ s> Hidentity
+ s> \r\n
+ received frame(size=9; request=1; stream=2; streamflags=stream-begin; type=stream-settings; flags=eos)
+ s> 13\r\n
+ s> \x0b\x00\x00\x01\x00\x02\x041
+ s> \xa1FstatusBok
+ s> \r\n
+ received frame(size=11; request=1; stream=2; streamflags=encoded; type=command-response; flags=continuation)
s> 6d1\r\n
s> \xc9\x06\x00\x01\x00\x02\x041
- s> \xa5Hcommands\xacIbranchmap\xa2Dargs\xa0Kpermissions\x81DpullLcapabilities\xa2Dargs\xa0Kpermissions\x81DpullMchangesetdata\xa2Dargs\xa2Ffields\xa4Gdefault\xd9\x01\x02\x80Hrequired\xf4DtypeCsetKvalidvalues\xd9\x01\x02\x84IbookmarksGparentsEphaseHrevisionIrevisions\xa2Hrequired\xf5DtypeDlistKpermissions\x81DpullHfiledata\xa2Dargs\xa4Ffields\xa4Gdefault\xd9\x01\x02\x80Hrequired\xf4DtypeCsetKvalidvalues\xd9\x01\x02\x83HlinknodeGparentsHrevisionKhaveparents\xa3Gdefault\xf4Hrequired\xf4DtypeDboolEnodes\xa2Hrequired\xf5DtypeDlistDpath\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpullIfilesdata\xa3Dargs\xa4Ffields\xa4Gdefault\xd9\x01\x02\x80Hrequired\xf4DtypeCsetKvalidvalues\xd9\x01\x02\x84NfirstchangesetHlinknodeGparentsHrevisionKhaveparents\xa3Gdefault\xf4Hrequired\xf4DtypeDboolJpathfilter\xa3Gdefault\xf6Hrequired\xf4DtypeDdictIrevisions\xa2Hrequired\xf5DtypeDlistKpermissions\x81DpullTrecommendedbatchsize\x19\xc3PEheads\xa2Dargs\xa1Jpubliconly\xa3Gdefault\xf4Hrequired\xf4DtypeDboolKpermissions\x81DpullEknown\xa2Dargs\xa1Enodes\xa3Gdefault\x80Hrequired\xf4DtypeDlistKpermissions\x81DpullHlistkeys\xa2Dargs\xa1Inamespace\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpullFlookup\xa2Dargs\xa1Ckey\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpullLmanifestdata\xa3Dargs\xa4Ffields\xa4Gdefault\xd9\x01\x02\x80Hrequired\xf4DtypeCsetKvalidvalues\xd9\x01\x02\x82GparentsHrevisionKhaveparents\xa3Gdefault\xf4Hrequired\xf4DtypeDboolEnodes\xa2Hrequired\xf5DtypeDlistDtree\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpullTrecommendedbatchsize\x1a\x00\x01\x86\xa0Gpushkey\xa2Dargs\xa4Ckey\xa2Hrequired\xf5DtypeEbytesInamespace\xa2Hrequired\xf5DtypeEbytesCnew\xa2Hrequired\xf5DtypeEbytesCold\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpushPrawstorefiledata\xa2Dargs\xa2Efiles\xa2Hrequired\xf5DtypeDlistJpathfilter\xa3Gdefault\xf6Hrequired\xf4DtypeDlistKpermissions\x81DpullQframingmediatypes\x81X&application/mercurial-exp-framing-0006Rpathfilterprefixes\xd9\x01\x02\x82Epath:Lrootfilesin:Nrawrepoformats\x82LgeneraldeltaHrevlogv1Hredirect\xa2Fhashes\x82Fsha256Dsha1Gtargets\x81\xa5DnameHtarget-aHprotocolDhttpKsnirequired\xf4Ktlsversions\x82C1.2C1.3Duris\x81Shttp://example.com/
+ s> \xa5Hcommands\xacIbranchmap\xa2Dargs\xa0Kpermissions\x81DpullLcapabilities\xa2Dargs\xa0Kpermissions\x81DpullMchangesetdata\xa2Dargs\xa2Ffields\xa4Gdefault\xd9\x01\x02\x80Hrequired\xf4DtypeCsetKvalidvalues\xd9\x01\x02\x84IbookmarksGparentsEphaseHrevisionIrevisions\xa2Hrequired\xf5DtypeDlistKpermissions\x81DpullHfiledata\xa2Dargs\xa4Ffields\xa4Gdefault\xd9\x01\x02\x80Hrequired\xf4DtypeCsetKvalidvalues\xd9\x01\x02\x83HlinknodeGparentsHrevisionKhaveparents\xa3Gdefault\xf4Hrequired\xf4DtypeDboolEnodes\xa2Hrequired\xf5DtypeDlistDpath\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpullIfilesdata\xa3Dargs\xa4Ffields\xa4Gdefault\xd9\x01\x02\x80Hrequired\xf4DtypeCsetKvalidvalues\xd9\x01\x02\x84NfirstchangesetHlinknodeGparentsHrevisionKhaveparents\xa3Gdefault\xf4Hrequired\xf4DtypeDboolJpathfilter\xa3Gdefault\xf6Hrequired\xf4DtypeDdictIrevisions\xa2Hrequired\xf5DtypeDlistKpermissions\x81DpullTrecommendedbatchsize\x19\xc3PEheads\xa2Dargs\xa1Jpubliconly\xa3Gdefault\xf4Hrequired\xf4DtypeDboolKpermissions\x81DpullEknown\xa2Dargs\xa1Enodes\xa3Gdefault\x80Hrequired\xf4DtypeDlistKpermissions\x81DpullHlistkeys\xa2Dargs\xa1Inamespace\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpullFlookup\xa2Dargs\xa1Ckey\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpullLmanifestdata\xa3Dargs\xa4Ffields\xa4Gdefault\xd9\x01\x02\x80Hrequired\xf4DtypeCsetKvalidvalues\xd9\x01\x02\x82GparentsHrevisionKhaveparents\xa3Gdefault\xf4Hrequired\xf4DtypeDboolEnodes\xa2Hrequired\xf5DtypeDlistDtree\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpullTrecommendedbatchsize\x1a\x00\x01\x86\xa0Gpushkey\xa2Dargs\xa4Ckey\xa2Hrequired\xf5DtypeEbytesInamespace\xa2Hrequired\xf5DtypeEbytesCnew\xa2Hrequired\xf5DtypeEbytesCold\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpushPrawstorefiledata\xa2Dargs\xa2Efiles\xa2Hrequired\xf5DtypeDlistJpathfilter\xa3Gdefault\xf6Hrequired\xf4DtypeDlistKpermissions\x81DpullQframingmediatypes\x81X&application/mercurial-exp-framing-0006Rpathfilterprefixes\xd9\x01\x02\x82Epath:Lrootfilesin:Nrawrepoformats\x83LgeneraldeltaHrevlogv1LsparserevlogHredirect\xa2Fhashes\x82Fsha256Dsha1Gtargets\x81\xa4DnameNtarget-bad-tlsHprotocolEhttpsKsnirequired\xf5Duris\x81Thttps://example.com/
s> \r\n
received frame(size=1737; request=1; stream=2; streamflags=encoded; type=command-response; flags=continuation)
s> 8\r\n
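
Each `received frame(...)` line above is the decoded form of the 8-byte header that starts every chunk body: a 24-bit little-endian payload length, a 16-bit little-endian request ID, a one-byte stream ID, a one-byte stream-flags field, and a final byte whose high nibble is the frame type and low nibble the frame flags. A minimal decoder sketch; the symbolic names and numeric values are inferred from the dumps in this test, not Mercurial's canonical constants:

    # Values inferred from the frame dumps above; treat as assumptions.
    STREAM_FLAGS = {0x01: 'stream-begin', 0x04: 'encoded'}
    FRAME_TYPES = {0x03: 'command-response', 0x09: 'stream-settings'}
    FRAME_FLAGS = {0x01: 'continuation', 0x02: 'eos'}

    def parse_frame_header(header):
        """Decode the 8-byte header preceding each frame payload."""
        assert len(header) == 8
        size = header[0] | header[1] << 8 | header[2] << 16  # 24-bit LE length
        requestid = header[3] | header[4] << 8               # 16-bit LE
        streamid = header[5]
        streamflags = header[6]
        frametype = header[7] >> 4
        frameflags = header[7] & 0x0f
        return size, requestid, streamid, streamflags, frametype, frameflags

    # parse_frame_header(b'\xd6\x06\x00\x01\x00\x02\x041')
    # -> (1750, 1, 2, 0x04, 0x03, 0x01), i.e. size=1750; request=1;
    # stream=2; streamflags=encoded; type=command-response;
    # flags=continuation -- matching the dump above.
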
@@ -335,7 +1001,8 @@
]),
b'rawrepoformats': [
b'generaldelta',
- b'revlogv1'
+ b'revlogv1',
+ b'sparserevlog'
],
b'redirect': {
b'hashes': [
@@ -344,15 +1011,11 @@
],
b'targets': [
{
- b'name': b'target-a',
- b'protocol': b'http',
- b'snirequired': False,
- b'tlsversions': [
- b'1.2',
- b'1.3'
- ],
+ b'name': b'target-bad-tls',
+ b'protocol': b'https',
+ b'snirequired': True,
b'uris': [
- b'http://example.com/'
+ b'https://example.com/'
]
}
]
@@ -361,19 +1024,20 @@
]
(sent 2 HTTP requests and * bytes; received * bytes in responses) (glob)
-Unknown protocol is filtered from compatible targets
+ $ cat >> $HGRCPATH << EOF
+ > [extensions]
+ > nosni=!
+ > EOF
+
+Unknown TLS value is filtered from compatible targets
$ cat > redirects.py << EOF
> [
> {
- > b'name': b'target-a',
- > b'protocol': b'http',
- > b'uris': [b'http://example.com/'],
- > },
- > {
- > b'name': b'target-b',
- > b'protocol': b'unknown',
- > b'uris': [b'unknown://example.com/'],
+ > b'name': b'target-bad-tls',
+ > b'protocol': b'https',
+ > b'uris': [b'https://example.com/'],
+ > b'tlsversions': [b'42', b'39'],
> },
> ]
> EOF
@@ -396,21 +1060,20 @@
s> Server: testing stub value\r\n
s> Date: $HTTP_DATE$\r\n
s> Content-Type: application/mercurial-cbor\r\n
- s> Content-Length: 2286\r\n
+ s> Content-Length: 2278\r\n
s> \r\n
- s> \xa3GapibaseDapi/Dapis\xa1Pexp-http-v2-0003\xa5Hcommands\xacIbranchmap\xa2Dargs\xa0Kpermissions\x81DpullLcapabilities\xa2Dargs\xa0Kpermissions\x81DpullMchangesetdata\xa2Dargs\xa2Ffields\xa4Gdefault\xd9\x01\x02\x80Hrequired\xf4DtypeCsetKvalidvalues\xd9\x01\x02\x84IbookmarksGparentsEphaseHrevisionIrevisions\xa2Hrequired\xf5DtypeDlistKpermissions\x81DpullHfiledata\xa2Dargs\xa4Ffields\xa4Gdefault\xd9\x01\x02\x80Hrequired\xf4DtypeCsetKvalidvalues\xd9\x01\x02\x83HlinknodeGparentsHrevisionKhaveparents\xa3Gdefault\xf4Hrequired\xf4DtypeDboolEnodes\xa2Hrequired\xf5DtypeDlistDpath\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpullIfilesdata\xa3Dargs\xa4Ffields\xa4Gdefault\xd9\x01\x02\x80Hrequired\xf4DtypeCsetKvalidvalues\xd9\x01\x02\x84NfirstchangesetHlinknodeGparentsHrevisionKhaveparents\xa3Gdefault\xf4Hrequired\xf4DtypeDboolJpathfilter\xa3Gdefault\xf6Hrequired\xf4DtypeDdictIrevisions\xa2Hrequired\xf5DtypeDlistKpermissions\x81DpullTrecommendedbatchsize\x19\xc3PEheads\xa2Dargs\xa1Jpubliconly\xa3Gdefault\xf4Hrequired\xf4DtypeDboolKpermissions\x81DpullEknown\xa2Dargs\xa1Enodes\xa3Gdefault\x80Hrequired\xf4DtypeDlistKpermissions\x81DpullHlistkeys\xa2Dargs\xa1Inamespace\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpullFlookup\xa2Dargs\xa1Ckey\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpullLmanifestdata\xa3Dargs\xa4Ffields\xa4Gdefault\xd9\x01\x02\x80Hrequired\xf4DtypeCsetKvalidvalues\xd9\x01\x02\x82GparentsHrevisionKhaveparents\xa3Gdefault\xf4Hrequired\xf4DtypeDboolEnodes\xa2Hrequired\xf5DtypeDlistDtree\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpullTrecommendedbatchsize\x1a\x00\x01\x86\xa0Gpushkey\xa2Dargs\xa4Ckey\xa2Hrequired\xf5DtypeEbytesInamespace\xa2Hrequired\xf5DtypeEbytesCnew\xa2Hrequired\xf5DtypeEbytesCold\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpushPrawstorefiledata\xa2Dargs\xa2Efiles\xa2Hrequired\xf5DtypeDlistJpathfilter\xa3Gdefault\xf6Hrequired\xf4DtypeDlistKpermissions\x81DpullQframingmediatypes\x81X&application/mercurial-exp-framing-0006Rpathfilterprefixes\xd9\x01\x02\x82Epath:Lrootfilesin:Nrawrepoformats\x82LgeneraldeltaHrevlogv1Hredirect\xa2Fhashes\x82Fsha256Dsha1Gtargets\x82\xa3DnameHtarget-aHprotocolDhttpDuris\x81Shttp://example.com/\xa3DnameHtarget-bHprotocolGunknownDuris\x81Vunknown://example.com/Nv1capabilitiesY\x01\xd3batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset compression=$BUNDLE2_COMPRESSIONS$ getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx known lookup pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
- (remote redirect target target-a is compatible)
- (remote redirect target target-b uses unsupported protocol: unknown)
+ s> \xa3GapibaseDapi/Dapis\xa1Pexp-http-v2-0003\xa5Hcommands\xacIbranchmap\xa2Dargs\xa0Kpermissions\x81DpullLcapabilities\xa2Dargs\xa0Kpermissions\x81DpullMchangesetdata\xa2Dargs\xa2Ffields\xa4Gdefault\xd9\x01\x02\x80Hrequired\xf4DtypeCsetKvalidvalues\xd9\x01\x02\x84IbookmarksGparentsEphaseHrevisionIrevisions\xa2Hrequired\xf5DtypeDlistKpermissions\x81DpullHfiledata\xa2Dargs\xa4Ffields\xa4Gdefault\xd9\x01\x02\x80Hrequired\xf4DtypeCsetKvalidvalues\xd9\x01\x02\x83HlinknodeGparentsHrevisionKhaveparents\xa3Gdefault\xf4Hrequired\xf4DtypeDboolEnodes\xa2Hrequired\xf5DtypeDlistDpath\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpullIfilesdata\xa3Dargs\xa4Ffields\xa4Gdefault\xd9\x01\x02\x80Hrequired\xf4DtypeCsetKvalidvalues\xd9\x01\x02\x84NfirstchangesetHlinknodeGparentsHrevisionKhaveparents\xa3Gdefault\xf4Hrequired\xf4DtypeDboolJpathfilter\xa3Gdefault\xf6Hrequired\xf4DtypeDdictIrevisions\xa2Hrequired\xf5DtypeDlistKpermissions\x81DpullTrecommendedbatchsize\x19\xc3PEheads\xa2Dargs\xa1Jpubliconly\xa3Gdefault\xf4Hrequired\xf4DtypeDboolKpermissions\x81DpullEknown\xa2Dargs\xa1Enodes\xa3Gdefault\x80Hrequired\xf4DtypeDlistKpermissions\x81DpullHlistkeys\xa2Dargs\xa1Inamespace\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpullFlookup\xa2Dargs\xa1Ckey\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpullLmanifestdata\xa3Dargs\xa4Ffields\xa4Gdefault\xd9\x01\x02\x80Hrequired\xf4DtypeCsetKvalidvalues\xd9\x01\x02\x82GparentsHrevisionKhaveparents\xa3Gdefault\xf4Hrequired\xf4DtypeDboolEnodes\xa2Hrequired\xf5DtypeDlistDtree\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpullTrecommendedbatchsize\x1a\x00\x01\x86\xa0Gpushkey\xa2Dargs\xa4Ckey\xa2Hrequired\xf5DtypeEbytesInamespace\xa2Hrequired\xf5DtypeEbytesCnew\xa2Hrequired\xf5DtypeEbytesCold\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpushPrawstorefiledata\xa2Dargs\xa2Efiles\xa2Hrequired\xf5DtypeDlistJpathfilter\xa3Gdefault\xf6Hrequired\xf4DtypeDlistKpermissions\x81DpullQframingmediatypes\x81X&application/mercurial-exp-framing-0006Rpathfilterprefixes\xd9\x01\x02\x82Epath:Lrootfilesin:Nrawrepoformats\x83LgeneraldeltaHrevlogv1LsparserevlogHredirect\xa2Fhashes\x82Fsha256Dsha1Gtargets\x81\xa4DnameNtarget-bad-tlsHprotocolEhttpsKtlsversions\x82B42B39Duris\x81Thttps://example.com/Nv1capabilitiesY\x01\xe0batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset compression=$BUNDLE2_COMPRESSIONS$ getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx known lookup pushkey streamreqs=generaldelta,revlogv1,sparserevlog unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
+ (remote redirect target target-bad-tls requires unsupported TLS versions: 39, 42)
sending capabilities command
s> POST /api/exp-http-v2-0003/ro/capabilities HTTP/1.1\r\n
s> Accept-Encoding: identity\r\n
s> accept: application/mercurial-exp-framing-0006\r\n
s> content-type: application/mercurial-exp-framing-0006\r\n
- s> content-length: 111\r\n
+ s> content-length: 102\r\n
s> host: $LOCALIP:$HGPORT\r\n (glob)
s> user-agent: Mercurial debugwireproto\r\n
s> \r\n
- s> \x1c\x00\x00\x01\x00\x01\x01\x82\xa1Pcontentencodings\x81HidentityC\x00\x00\x01\x00\x01\x00\x11\xa2DnameLcapabilitiesHredirect\xa2Fhashes\x82Fsha256Dsha1Gtargets\x81Htarget-a
+ s> \x1c\x00\x00\x01\x00\x01\x01\x82\xa1Pcontentencodings\x81Hidentity:\x00\x00\x01\x00\x01\x00\x11\xa2DnameLcapabilitiesHredirect\xa2Fhashes\x82Fsha256Dsha1Gtargets\x80
s> makefile('rb', None)
s> HTTP/1.1 200 OK\r\n
s> Server: testing stub value\r\n
@@ -428,11 +1091,11 @@
s> \xa1FstatusBok
s> \r\n
received frame(size=11; request=1; stream=2; streamflags=encoded; type=command-response; flags=continuation)
- s> 6ec\r\n
- s> \xe4\x06\x00\x01\x00\x02\x041
- s> \xa5Hcommands\xacIbranchmap\xa2Dargs\xa0Kpermissions\x81DpullLcapabilities\xa2Dargs\xa0Kpermissions\x81DpullMchangesetdata\xa2Dargs\xa2Ffields\xa4Gdefault\xd9\x01\x02\x80Hrequired\xf4DtypeCsetKvalidvalues\xd9\x01\x02\x84IbookmarksGparentsEphaseHrevisionIrevisions\xa2Hrequired\xf5DtypeDlistKpermissions\x81DpullHfiledata\xa2Dargs\xa4Ffields\xa4Gdefault\xd9\x01\x02\x80Hrequired\xf4DtypeCsetKvalidvalues\xd9\x01\x02\x83HlinknodeGparentsHrevisionKhaveparents\xa3Gdefault\xf4Hrequired\xf4DtypeDboolEnodes\xa2Hrequired\xf5DtypeDlistDpath\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpullIfilesdata\xa3Dargs\xa4Ffields\xa4Gdefault\xd9\x01\x02\x80Hrequired\xf4DtypeCsetKvalidvalues\xd9\x01\x02\x84NfirstchangesetHlinknodeGparentsHrevisionKhaveparents\xa3Gdefault\xf4Hrequired\xf4DtypeDboolJpathfilter\xa3Gdefault\xf6Hrequired\xf4DtypeDdictIrevisions\xa2Hrequired\xf5DtypeDlistKpermissions\x81DpullTrecommendedbatchsize\x19\xc3PEheads\xa2Dargs\xa1Jpubliconly\xa3Gdefault\xf4Hrequired\xf4DtypeDboolKpermissions\x81DpullEknown\xa2Dargs\xa1Enodes\xa3Gdefault\x80Hrequired\xf4DtypeDlistKpermissions\x81DpullHlistkeys\xa2Dargs\xa1Inamespace\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpullFlookup\xa2Dargs\xa1Ckey\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpullLmanifestdata\xa3Dargs\xa4Ffields\xa4Gdefault\xd9\x01\x02\x80Hrequired\xf4DtypeCsetKvalidvalues\xd9\x01\x02\x82GparentsHrevisionKhaveparents\xa3Gdefault\xf4Hrequired\xf4DtypeDboolEnodes\xa2Hrequired\xf5DtypeDlistDtree\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpullTrecommendedbatchsize\x1a\x00\x01\x86\xa0Gpushkey\xa2Dargs\xa4Ckey\xa2Hrequired\xf5DtypeEbytesInamespace\xa2Hrequired\xf5DtypeEbytesCnew\xa2Hrequired\xf5DtypeEbytesCold\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpushPrawstorefiledata\xa2Dargs\xa2Efiles\xa2Hrequired\xf5DtypeDlistJpathfilter\xa3Gdefault\xf6Hrequired\xf4DtypeDlistKpermissions\x81DpullQframingmediatypes\x81X&application/mercurial-exp-framing-0006Rpathfilterprefixes\xd9\x01\x02\x82Epath:Lrootfilesin:Nrawrepoformats\x82LgeneraldeltaHrevlogv1Hredirect\xa2Fhashes\x82Fsha256Dsha1Gtargets\x82\xa3DnameHtarget-aHprotocolDhttpDuris\x81Shttp://example.com/\xa3DnameHtarget-bHprotocolGunknownDuris\x81Vunknown://example.com/
+ s> 6d7\r\n
+ s> \xcf\x06\x00\x01\x00\x02\x041
+ s> \xa5Hcommands\xacIbranchmap\xa2Dargs\xa0Kpermissions\x81DpullLcapabilities\xa2Dargs\xa0Kpermissions\x81DpullMchangesetdata\xa2Dargs\xa2Ffields\xa4Gdefault\xd9\x01\x02\x80Hrequired\xf4DtypeCsetKvalidvalues\xd9\x01\x02\x84IbookmarksGparentsEphaseHrevisionIrevisions\xa2Hrequired\xf5DtypeDlistKpermissions\x81DpullHfiledata\xa2Dargs\xa4Ffields\xa4Gdefault\xd9\x01\x02\x80Hrequired\xf4DtypeCsetKvalidvalues\xd9\x01\x02\x83HlinknodeGparentsHrevisionKhaveparents\xa3Gdefault\xf4Hrequired\xf4DtypeDboolEnodes\xa2Hrequired\xf5DtypeDlistDpath\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpullIfilesdata\xa3Dargs\xa4Ffields\xa4Gdefault\xd9\x01\x02\x80Hrequired\xf4DtypeCsetKvalidvalues\xd9\x01\x02\x84NfirstchangesetHlinknodeGparentsHrevisionKhaveparents\xa3Gdefault\xf4Hrequired\xf4DtypeDboolJpathfilter\xa3Gdefault\xf6Hrequired\xf4DtypeDdictIrevisions\xa2Hrequired\xf5DtypeDlistKpermissions\x81DpullTrecommendedbatchsize\x19\xc3PEheads\xa2Dargs\xa1Jpubliconly\xa3Gdefault\xf4Hrequired\xf4DtypeDboolKpermissions\x81DpullEknown\xa2Dargs\xa1Enodes\xa3Gdefault\x80Hrequired\xf4DtypeDlistKpermissions\x81DpullHlistkeys\xa2Dargs\xa1Inamespace\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpullFlookup\xa2Dargs\xa1Ckey\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpullLmanifestdata\xa3Dargs\xa4Ffields\xa4Gdefault\xd9\x01\x02\x80Hrequired\xf4DtypeCsetKvalidvalues\xd9\x01\x02\x82GparentsHrevisionKhaveparents\xa3Gdefault\xf4Hrequired\xf4DtypeDboolEnodes\xa2Hrequired\xf5DtypeDlistDtree\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpullTrecommendedbatchsize\x1a\x00\x01\x86\xa0Gpushkey\xa2Dargs\xa4Ckey\xa2Hrequired\xf5DtypeEbytesInamespace\xa2Hrequired\xf5DtypeEbytesCnew\xa2Hrequired\xf5DtypeEbytesCold\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpushPrawstorefiledata\xa2Dargs\xa2Efiles\xa2Hrequired\xf5DtypeDlistJpathfilter\xa3Gdefault\xf6Hrequired\xf4DtypeDlistKpermissions\x81DpullQframingmediatypes\x81X&application/mercurial-exp-framing-0006Rpathfilterprefixes\xd9\x01\x02\x82Epath:Lrootfilesin:Nrawrepoformats\x83LgeneraldeltaHrevlogv1LsparserevlogHredirect\xa2Fhashes\x82Fsha256Dsha1Gtargets\x81\xa4DnameNtarget-bad-tlsHprotocolEhttpsKtlsversions\x82B42B39Duris\x81Thttps://example.com/
s> \r\n
- received frame(size=1764; request=1; stream=2; streamflags=encoded; type=command-response; flags=continuation)
+ received frame(size=1743; request=1; stream=2; streamflags=encoded; type=command-response; flags=continuation)
s> 8\r\n
s> \x00\x00\x00\x01\x00\x02\x002
s> \r\n
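
The long `\xa3Gapibase...` byte strings throughout these exchanges are CBOR; the indented `response: gen[...]` blocks are simply their decoded rendering. The leading byte 0xa3 is a three-entry map, and `Gapibase` is the 7-byte bytestring key b'apibase'. A round-trip with a generic CBOR library (cbor2 here as a stand-in; Mercurial vendors its own decoder) reproduces that framing on a one-entry map:

    import cbor2  # assumption: stand-in for Mercurial's vendored CBOR code

    # Mirror the head of the handshake body: 0xa1 = one-entry map,
    # 0x47 ('G') = 7-byte bytestring, 0x44 ('D') = 4-byte bytestring.
    blob = cbor2.dumps({b'apibase': b'api/'})
    assert blob == b'\xa1GapibaseDapi/'
    assert cbor2.loads(blob) == {b'apibase': b'api/'}
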
@@ -664,667 +1327,8 @@
]),
b'rawrepoformats': [
b'generaldelta',
- b'revlogv1'
- ],
- b'redirect': {
- b'hashes': [
- b'sha256',
- b'sha1'
- ],
- b'targets': [
- {
- b'name': b'target-a',
- b'protocol': b'http',
- b'uris': [
- b'http://example.com/'
- ]
- },
- {
- b'name': b'target-b',
- b'protocol': b'unknown',
- b'uris': [
- b'unknown://example.com/'
- ]
- }
- ]
- }
- }
- ]
- (sent 2 HTTP requests and * bytes; received * bytes in responses) (glob)
-
-Missing SNI support filters targets that require SNI
-
- $ cat > nosni.py << EOF
- > from mercurial import sslutil
- > sslutil.hassni = False
- > EOF
- $ cat >> $HGRCPATH << EOF
- > [extensions]
- > nosni=`pwd`/nosni.py
- > EOF
-
- $ cat > redirects.py << EOF
- > [
- > {
- > b'name': b'target-bad-tls',
- > b'protocol': b'https',
- > b'uris': [b'https://example.com/'],
- > b'snirequired': True,
- > },
- > ]
- > EOF
-
- $ sendhttpv2peerhandshake << EOF
- > command capabilities
- > EOF
- creating http peer for wire protocol version 2
- s> GET /?cmd=capabilities HTTP/1.1\r\n
- s> Accept-Encoding: identity\r\n
- s> vary: X-HgProto-1,X-HgUpgrade-1\r\n
- s> x-hgproto-1: cbor\r\n
- s> x-hgupgrade-1: exp-http-v2-0003\r\n
- s> accept: application/mercurial-0.1\r\n
- s> host: $LOCALIP:$HGPORT\r\n (glob)
- s> user-agent: Mercurial debugwireproto\r\n
- s> \r\n
- s> makefile('rb', None)
- s> HTTP/1.1 200 OK\r\n
- s> Server: testing stub value\r\n
- s> Date: $HTTP_DATE$\r\n
- s> Content-Type: application/mercurial-cbor\r\n
- s> Content-Length: 2246\r\n
- s> \r\n
- s> \xa3GapibaseDapi/Dapis\xa1Pexp-http-v2-0003\xa5Hcommands\xacIbranchmap\xa2Dargs\xa0Kpermissions\x81DpullLcapabilities\xa2Dargs\xa0Kpermissions\x81DpullMchangesetdata\xa2Dargs\xa2Ffields\xa4Gdefault\xd9\x01\x02\x80Hrequired\xf4DtypeCsetKvalidvalues\xd9\x01\x02\x84IbookmarksGparentsEphaseHrevisionIrevisions\xa2Hrequired\xf5DtypeDlistKpermissions\x81DpullHfiledata\xa2Dargs\xa4Ffields\xa4Gdefault\xd9\x01\x02\x80Hrequired\xf4DtypeCsetKvalidvalues\xd9\x01\x02\x83HlinknodeGparentsHrevisionKhaveparents\xa3Gdefault\xf4Hrequired\xf4DtypeDboolEnodes\xa2Hrequired\xf5DtypeDlistDpath\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpullIfilesdata\xa3Dargs\xa4Ffields\xa4Gdefault\xd9\x01\x02\x80Hrequired\xf4DtypeCsetKvalidvalues\xd9\x01\x02\x84NfirstchangesetHlinknodeGparentsHrevisionKhaveparents\xa3Gdefault\xf4Hrequired\xf4DtypeDboolJpathfilter\xa3Gdefault\xf6Hrequired\xf4DtypeDdictIrevisions\xa2Hrequired\xf5DtypeDlistKpermissions\x81DpullTrecommendedbatchsize\x19\xc3PEheads\xa2Dargs\xa1Jpubliconly\xa3Gdefault\xf4Hrequired\xf4DtypeDboolKpermissions\x81DpullEknown\xa2Dargs\xa1Enodes\xa3Gdefault\x80Hrequired\xf4DtypeDlistKpermissions\x81DpullHlistkeys\xa2Dargs\xa1Inamespace\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpullFlookup\xa2Dargs\xa1Ckey\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpullLmanifestdata\xa3Dargs\xa4Ffields\xa4Gdefault\xd9\x01\x02\x80Hrequired\xf4DtypeCsetKvalidvalues\xd9\x01\x02\x82GparentsHrevisionKhaveparents\xa3Gdefault\xf4Hrequired\xf4DtypeDboolEnodes\xa2Hrequired\xf5DtypeDlistDtree\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpullTrecommendedbatchsize\x1a\x00\x01\x86\xa0Gpushkey\xa2Dargs\xa4Ckey\xa2Hrequired\xf5DtypeEbytesInamespace\xa2Hrequired\xf5DtypeEbytesCnew\xa2Hrequired\xf5DtypeEbytesCold\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpushPrawstorefiledata\xa2Dargs\xa2Efiles\xa2Hrequired\xf5DtypeDlistJpathfilter\xa3Gdefault\xf6Hrequired\xf4DtypeDlistKpermissions\x81DpullQframingmediatypes\x81X&application/mercurial-exp-framing-0006Rpathfilterprefixes\xd9\x01\x02\x82Epath:Lrootfilesin:Nrawrepoformats\x82LgeneraldeltaHrevlogv1Hredirect\xa2Fhashes\x82Fsha256Dsha1Gtargets\x81\xa4DnameNtarget-bad-tlsHprotocolEhttpsKsnirequired\xf5Duris\x81Thttps://example.com/Nv1capabilitiesY\x01\xd3batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset compression=$BUNDLE2_COMPRESSIONS$ getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx known lookup pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
- (redirect target target-bad-tls requires SNI, which is unsupported)
- sending capabilities command
- s> POST /api/exp-http-v2-0003/ro/capabilities HTTP/1.1\r\n
- s> Accept-Encoding: identity\r\n
- s> accept: application/mercurial-exp-framing-0006\r\n
- s> content-type: application/mercurial-exp-framing-0006\r\n
- s> content-length: 102\r\n
- s> host: $LOCALIP:$HGPORT\r\n (glob)
- s> user-agent: Mercurial debugwireproto\r\n
- s> \r\n
- s> \x1c\x00\x00\x01\x00\x01\x01\x82\xa1Pcontentencodings\x81Hidentity:\x00\x00\x01\x00\x01\x00\x11\xa2DnameLcapabilitiesHredirect\xa2Fhashes\x82Fsha256Dsha1Gtargets\x80
- s> makefile('rb', None)
- s> HTTP/1.1 200 OK\r\n
- s> Server: testing stub value\r\n
- s> Date: $HTTP_DATE$\r\n
- s> Content-Type: application/mercurial-exp-framing-0006\r\n
- s> Transfer-Encoding: chunked\r\n
- s> \r\n
- s> 11\r\n
- s> \t\x00\x00\x01\x00\x02\x01\x92
- s> Hidentity
- s> \r\n
- received frame(size=9; request=1; stream=2; streamflags=stream-begin; type=stream-settings; flags=eos)
- s> 13\r\n
- s> \x0b\x00\x00\x01\x00\x02\x041
- s> \xa1FstatusBok
- s> \r\n
- received frame(size=11; request=1; stream=2; streamflags=encoded; type=command-response; flags=continuation)
- s> 6c4\r\n
- s> \xbc\x06\x00\x01\x00\x02\x041
- s> \xa5Hcommands\xacIbranchmap\xa2Dargs\xa0Kpermissions\x81DpullLcapabilities\xa2Dargs\xa0Kpermissions\x81DpullMchangesetdata\xa2Dargs\xa2Ffields\xa4Gdefault\xd9\x01\x02\x80Hrequired\xf4DtypeCsetKvalidvalues\xd9\x01\x02\x84IbookmarksGparentsEphaseHrevisionIrevisions\xa2Hrequired\xf5DtypeDlistKpermissions\x81DpullHfiledata\xa2Dargs\xa4Ffields\xa4Gdefault\xd9\x01\x02\x80Hrequired\xf4DtypeCsetKvalidvalues\xd9\x01\x02\x83HlinknodeGparentsHrevisionKhaveparents\xa3Gdefault\xf4Hrequired\xf4DtypeDboolEnodes\xa2Hrequired\xf5DtypeDlistDpath\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpullIfilesdata\xa3Dargs\xa4Ffields\xa4Gdefault\xd9\x01\x02\x80Hrequired\xf4DtypeCsetKvalidvalues\xd9\x01\x02\x84NfirstchangesetHlinknodeGparentsHrevisionKhaveparents\xa3Gdefault\xf4Hrequired\xf4DtypeDboolJpathfilter\xa3Gdefault\xf6Hrequired\xf4DtypeDdictIrevisions\xa2Hrequired\xf5DtypeDlistKpermissions\x81DpullTrecommendedbatchsize\x19\xc3PEheads\xa2Dargs\xa1Jpubliconly\xa3Gdefault\xf4Hrequired\xf4DtypeDboolKpermissions\x81DpullEknown\xa2Dargs\xa1Enodes\xa3Gdefault\x80Hrequired\xf4DtypeDlistKpermissions\x81DpullHlistkeys\xa2Dargs\xa1Inamespace\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpullFlookup\xa2Dargs\xa1Ckey\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpullLmanifestdata\xa3Dargs\xa4Ffields\xa4Gdefault\xd9\x01\x02\x80Hrequired\xf4DtypeCsetKvalidvalues\xd9\x01\x02\x82GparentsHrevisionKhaveparents\xa3Gdefault\xf4Hrequired\xf4DtypeDboolEnodes\xa2Hrequired\xf5DtypeDlistDtree\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpullTrecommendedbatchsize\x1a\x00\x01\x86\xa0Gpushkey\xa2Dargs\xa4Ckey\xa2Hrequired\xf5DtypeEbytesInamespace\xa2Hrequired\xf5DtypeEbytesCnew\xa2Hrequired\xf5DtypeEbytesCold\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpushPrawstorefiledata\xa2Dargs\xa2Efiles\xa2Hrequired\xf5DtypeDlistJpathfilter\xa3Gdefault\xf6Hrequired\xf4DtypeDlistKpermissions\x81DpullQframingmediatypes\x81X&application/mercurial-exp-framing-0006Rpathfilterprefixes\xd9\x01\x02\x82Epath:Lrootfilesin:Nrawrepoformats\x82LgeneraldeltaHrevlogv1Hredirect\xa2Fhashes\x82Fsha256Dsha1Gtargets\x81\xa4DnameNtarget-bad-tlsHprotocolEhttpsKsnirequired\xf5Duris\x81Thttps://example.com/
- s> \r\n
- received frame(size=1724; request=1; stream=2; streamflags=encoded; type=command-response; flags=continuation)
- s> 8\r\n
- s> \x00\x00\x00\x01\x00\x02\x002
- s> \r\n
- s> 0\r\n
- s> \r\n
- received frame(size=0; request=1; stream=2; streamflags=; type=command-response; flags=eos)
- response: gen[
- {
- b'commands': {
- b'branchmap': {
- b'args': {},
- b'permissions': [
- b'pull'
- ]
- },
- b'capabilities': {
- b'args': {},
- b'permissions': [
- b'pull'
- ]
- },
- b'changesetdata': {
- b'args': {
- b'fields': {
- b'default': set([]),
- b'required': False,
- b'type': b'set',
- b'validvalues': set([
- b'bookmarks',
- b'parents',
- b'phase',
- b'revision'
- ])
- },
- b'revisions': {
- b'required': True,
- b'type': b'list'
- }
- },
- b'permissions': [
- b'pull'
- ]
- },
- b'filedata': {
- b'args': {
- b'fields': {
- b'default': set([]),
- b'required': False,
- b'type': b'set',
- b'validvalues': set([
- b'linknode',
- b'parents',
- b'revision'
- ])
- },
- b'haveparents': {
- b'default': False,
- b'required': False,
- b'type': b'bool'
- },
- b'nodes': {
- b'required': True,
- b'type': b'list'
- },
- b'path': {
- b'required': True,
- b'type': b'bytes'
- }
- },
- b'permissions': [
- b'pull'
- ]
- },
- b'filesdata': {
- b'args': {
- b'fields': {
- b'default': set([]),
- b'required': False,
- b'type': b'set',
- b'validvalues': set([
- b'firstchangeset',
- b'linknode',
- b'parents',
- b'revision'
- ])
- },
- b'haveparents': {
- b'default': False,
- b'required': False,
- b'type': b'bool'
- },
- b'pathfilter': {
- b'default': None,
- b'required': False,
- b'type': b'dict'
- },
- b'revisions': {
- b'required': True,
- b'type': b'list'
- }
- },
- b'permissions': [
- b'pull'
- ],
- b'recommendedbatchsize': 50000
- },
- b'heads': {
- b'args': {
- b'publiconly': {
- b'default': False,
- b'required': False,
- b'type': b'bool'
- }
- },
- b'permissions': [
- b'pull'
- ]
- },
- b'known': {
- b'args': {
- b'nodes': {
- b'default': [],
- b'required': False,
- b'type': b'list'
- }
- },
- b'permissions': [
- b'pull'
- ]
- },
- b'listkeys': {
- b'args': {
- b'namespace': {
- b'required': True,
- b'type': b'bytes'
- }
- },
- b'permissions': [
- b'pull'
- ]
- },
- b'lookup': {
- b'args': {
- b'key': {
- b'required': True,
- b'type': b'bytes'
- }
- },
- b'permissions': [
- b'pull'
- ]
- },
- b'manifestdata': {
- b'args': {
- b'fields': {
- b'default': set([]),
- b'required': False,
- b'type': b'set',
- b'validvalues': set([
- b'parents',
- b'revision'
- ])
- },
- b'haveparents': {
- b'default': False,
- b'required': False,
- b'type': b'bool'
- },
- b'nodes': {
- b'required': True,
- b'type': b'list'
- },
- b'tree': {
- b'required': True,
- b'type': b'bytes'
- }
- },
- b'permissions': [
- b'pull'
- ],
- b'recommendedbatchsize': 100000
- },
- b'pushkey': {
- b'args': {
- b'key': {
- b'required': True,
- b'type': b'bytes'
- },
- b'namespace': {
- b'required': True,
- b'type': b'bytes'
- },
- b'new': {
- b'required': True,
- b'type': b'bytes'
- },
- b'old': {
- b'required': True,
- b'type': b'bytes'
- }
- },
- b'permissions': [
- b'push'
- ]
- },
- b'rawstorefiledata': {
- b'args': {
- b'files': {
- b'required': True,
- b'type': b'list'
- },
- b'pathfilter': {
- b'default': None,
- b'required': False,
- b'type': b'list'
- }
- },
- b'permissions': [
- b'pull'
- ]
- }
- },
- b'framingmediatypes': [
- b'application/mercurial-exp-framing-0006'
- ],
- b'pathfilterprefixes': set([
- b'path:',
- b'rootfilesin:'
- ]),
- b'rawrepoformats': [
- b'generaldelta',
- b'revlogv1'
- ],
- b'redirect': {
- b'hashes': [
- b'sha256',
- b'sha1'
- ],
- b'targets': [
- {
- b'name': b'target-bad-tls',
- b'protocol': b'https',
- b'snirequired': True,
- b'uris': [
- b'https://example.com/'
- ]
- }
- ]
- }
- }
- ]
- (sent 2 HTTP requests and * bytes; received * bytes in responses) (glob)
-
- $ cat >> $HGRCPATH << EOF
- > [extensions]
- > nosni=!
- > EOF
-
-Unknown tls value is filtered from compatible targets
-
- $ cat > redirects.py << EOF
- > [
- > {
- > b'name': b'target-bad-tls',
- > b'protocol': b'https',
- > b'uris': [b'https://example.com/'],
- > b'tlsversions': [b'42', b'39'],
- > },
- > ]
- > EOF
-
- $ sendhttpv2peerhandshake << EOF
- > command capabilities
- > EOF
- creating http peer for wire protocol version 2
- s> GET /?cmd=capabilities HTTP/1.1\r\n
- s> Accept-Encoding: identity\r\n
- s> vary: X-HgProto-1,X-HgUpgrade-1\r\n
- s> x-hgproto-1: cbor\r\n
- s> x-hgupgrade-1: exp-http-v2-0003\r\n
- s> accept: application/mercurial-0.1\r\n
- s> host: $LOCALIP:$HGPORT\r\n (glob)
- s> user-agent: Mercurial debugwireproto\r\n
- s> \r\n
- s> makefile('rb', None)
- s> HTTP/1.1 200 OK\r\n
- s> Server: testing stub value\r\n
- s> Date: $HTTP_DATE$\r\n
- s> Content-Type: application/mercurial-cbor\r\n
- s> Content-Length: 2252\r\n
- s> \r\n
- s> \xa3GapibaseDapi/Dapis\xa1Pexp-http-v2-0003\xa5Hcommands\xacIbranchmap\xa2Dargs\xa0Kpermissions\x81DpullLcapabilities\xa2Dargs\xa0Kpermissions\x81DpullMchangesetdata\xa2Dargs\xa2Ffields\xa4Gdefault\xd9\x01\x02\x80Hrequired\xf4DtypeCsetKvalidvalues\xd9\x01\x02\x84IbookmarksGparentsEphaseHrevisionIrevisions\xa2Hrequired\xf5DtypeDlistKpermissions\x81DpullHfiledata\xa2Dargs\xa4Ffields\xa4Gdefault\xd9\x01\x02\x80Hrequired\xf4DtypeCsetKvalidvalues\xd9\x01\x02\x83HlinknodeGparentsHrevisionKhaveparents\xa3Gdefault\xf4Hrequired\xf4DtypeDboolEnodes\xa2Hrequired\xf5DtypeDlistDpath\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpullIfilesdata\xa3Dargs\xa4Ffields\xa4Gdefault\xd9\x01\x02\x80Hrequired\xf4DtypeCsetKvalidvalues\xd9\x01\x02\x84NfirstchangesetHlinknodeGparentsHrevisionKhaveparents\xa3Gdefault\xf4Hrequired\xf4DtypeDboolJpathfilter\xa3Gdefault\xf6Hrequired\xf4DtypeDdictIrevisions\xa2Hrequired\xf5DtypeDlistKpermissions\x81DpullTrecommendedbatchsize\x19\xc3PEheads\xa2Dargs\xa1Jpubliconly\xa3Gdefault\xf4Hrequired\xf4DtypeDboolKpermissions\x81DpullEknown\xa2Dargs\xa1Enodes\xa3Gdefault\x80Hrequired\xf4DtypeDlistKpermissions\x81DpullHlistkeys\xa2Dargs\xa1Inamespace\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpullFlookup\xa2Dargs\xa1Ckey\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpullLmanifestdata\xa3Dargs\xa4Ffields\xa4Gdefault\xd9\x01\x02\x80Hrequired\xf4DtypeCsetKvalidvalues\xd9\x01\x02\x82GparentsHrevisionKhaveparents\xa3Gdefault\xf4Hrequired\xf4DtypeDboolEnodes\xa2Hrequired\xf5DtypeDlistDtree\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpullTrecommendedbatchsize\x1a\x00\x01\x86\xa0Gpushkey\xa2Dargs\xa4Ckey\xa2Hrequired\xf5DtypeEbytesInamespace\xa2Hrequired\xf5DtypeEbytesCnew\xa2Hrequired\xf5DtypeEbytesCold\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpushPrawstorefiledata\xa2Dargs\xa2Efiles\xa2Hrequired\xf5DtypeDlistJpathfilter\xa3Gdefault\xf6Hrequired\xf4DtypeDlistKpermissions\x81DpullQframingmediatypes\x81X&application/mercurial-exp-framing-0006Rpathfilterprefixes\xd9\x01\x02\x82Epath:Lrootfilesin:Nrawrepoformats\x82LgeneraldeltaHrevlogv1Hredirect\xa2Fhashes\x82Fsha256Dsha1Gtargets\x81\xa4DnameNtarget-bad-tlsHprotocolEhttpsKtlsversions\x82B42B39Duris\x81Thttps://example.com/Nv1capabilitiesY\x01\xd3batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset compression=$BUNDLE2_COMPRESSIONS$ getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx known lookup pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
- (remote redirect target target-bad-tls requires unsupported TLS versions: 39, 42)
- sending capabilities command
- s> POST /api/exp-http-v2-0003/ro/capabilities HTTP/1.1\r\n
- s> Accept-Encoding: identity\r\n
- s> accept: application/mercurial-exp-framing-0006\r\n
- s> content-type: application/mercurial-exp-framing-0006\r\n
- s> content-length: 102\r\n
- s> host: $LOCALIP:$HGPORT\r\n (glob)
- s> user-agent: Mercurial debugwireproto\r\n
- s> \r\n
- s> \x1c\x00\x00\x01\x00\x01\x01\x82\xa1Pcontentencodings\x81Hidentity:\x00\x00\x01\x00\x01\x00\x11\xa2DnameLcapabilitiesHredirect\xa2Fhashes\x82Fsha256Dsha1Gtargets\x80
- s> makefile('rb', None)
- s> HTTP/1.1 200 OK\r\n
- s> Server: testing stub value\r\n
- s> Date: $HTTP_DATE$\r\n
- s> Content-Type: application/mercurial-exp-framing-0006\r\n
- s> Transfer-Encoding: chunked\r\n
- s> \r\n
- s> 11\r\n
- s> \t\x00\x00\x01\x00\x02\x01\x92
- s> Hidentity
- s> \r\n
- received frame(size=9; request=1; stream=2; streamflags=stream-begin; type=stream-settings; flags=eos)
- s> 13\r\n
- s> \x0b\x00\x00\x01\x00\x02\x041
- s> \xa1FstatusBok
- s> \r\n
- received frame(size=11; request=1; stream=2; streamflags=encoded; type=command-response; flags=continuation)
- s> 6ca\r\n
- s> \xc2\x06\x00\x01\x00\x02\x041
- s> \xa5Hcommands\xacIbranchmap\xa2Dargs\xa0Kpermissions\x81DpullLcapabilities\xa2Dargs\xa0Kpermissions\x81DpullMchangesetdata\xa2Dargs\xa2Ffields\xa4Gdefault\xd9\x01\x02\x80Hrequired\xf4DtypeCsetKvalidvalues\xd9\x01\x02\x84IbookmarksGparentsEphaseHrevisionIrevisions\xa2Hrequired\xf5DtypeDlistKpermissions\x81DpullHfiledata\xa2Dargs\xa4Ffields\xa4Gdefault\xd9\x01\x02\x80Hrequired\xf4DtypeCsetKvalidvalues\xd9\x01\x02\x83HlinknodeGparentsHrevisionKhaveparents\xa3Gdefault\xf4Hrequired\xf4DtypeDboolEnodes\xa2Hrequired\xf5DtypeDlistDpath\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpullIfilesdata\xa3Dargs\xa4Ffields\xa4Gdefault\xd9\x01\x02\x80Hrequired\xf4DtypeCsetKvalidvalues\xd9\x01\x02\x84NfirstchangesetHlinknodeGparentsHrevisionKhaveparents\xa3Gdefault\xf4Hrequired\xf4DtypeDboolJpathfilter\xa3Gdefault\xf6Hrequired\xf4DtypeDdictIrevisions\xa2Hrequired\xf5DtypeDlistKpermissions\x81DpullTrecommendedbatchsize\x19\xc3PEheads\xa2Dargs\xa1Jpubliconly\xa3Gdefault\xf4Hrequired\xf4DtypeDboolKpermissions\x81DpullEknown\xa2Dargs\xa1Enodes\xa3Gdefault\x80Hrequired\xf4DtypeDlistKpermissions\x81DpullHlistkeys\xa2Dargs\xa1Inamespace\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpullFlookup\xa2Dargs\xa1Ckey\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpullLmanifestdata\xa3Dargs\xa4Ffields\xa4Gdefault\xd9\x01\x02\x80Hrequired\xf4DtypeCsetKvalidvalues\xd9\x01\x02\x82GparentsHrevisionKhaveparents\xa3Gdefault\xf4Hrequired\xf4DtypeDboolEnodes\xa2Hrequired\xf5DtypeDlistDtree\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpullTrecommendedbatchsize\x1a\x00\x01\x86\xa0Gpushkey\xa2Dargs\xa4Ckey\xa2Hrequired\xf5DtypeEbytesInamespace\xa2Hrequired\xf5DtypeEbytesCnew\xa2Hrequired\xf5DtypeEbytesCold\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpushPrawstorefiledata\xa2Dargs\xa2Efiles\xa2Hrequired\xf5DtypeDlistJpathfilter\xa3Gdefault\xf6Hrequired\xf4DtypeDlistKpermissions\x81DpullQframingmediatypes\x81X&application/mercurial-exp-framing-0006Rpathfilterprefixes\xd9\x01\x02\x82Epath:Lrootfilesin:Nrawrepoformats\x82LgeneraldeltaHrevlogv1Hredirect\xa2Fhashes\x82Fsha256Dsha1Gtargets\x81\xa4DnameNtarget-bad-tlsHprotocolEhttpsKtlsversions\x82B42B39Duris\x81Thttps://example.com/
- s> \r\n
- received frame(size=1730; request=1; stream=2; streamflags=encoded; type=command-response; flags=continuation)
- s> 8\r\n
- s> \x00\x00\x00\x01\x00\x02\x002
- s> \r\n
- s> 0\r\n
- s> \r\n
- received frame(size=0; request=1; stream=2; streamflags=; type=command-response; flags=eos)
- response: gen[
- {
- b'commands': {
- b'branchmap': {
- b'args': {},
- b'permissions': [
- b'pull'
- ]
- },
- b'capabilities': {
- b'args': {},
- b'permissions': [
- b'pull'
- ]
- },
- b'changesetdata': {
- b'args': {
- b'fields': {
- b'default': set([]),
- b'required': False,
- b'type': b'set',
- b'validvalues': set([
- b'bookmarks',
- b'parents',
- b'phase',
- b'revision'
- ])
- },
- b'revisions': {
- b'required': True,
- b'type': b'list'
- }
- },
- b'permissions': [
- b'pull'
- ]
- },
- b'filedata': {
- b'args': {
- b'fields': {
- b'default': set([]),
- b'required': False,
- b'type': b'set',
- b'validvalues': set([
- b'linknode',
- b'parents',
- b'revision'
- ])
- },
- b'haveparents': {
- b'default': False,
- b'required': False,
- b'type': b'bool'
- },
- b'nodes': {
- b'required': True,
- b'type': b'list'
- },
- b'path': {
- b'required': True,
- b'type': b'bytes'
- }
- },
- b'permissions': [
- b'pull'
- ]
- },
- b'filesdata': {
- b'args': {
- b'fields': {
- b'default': set([]),
- b'required': False,
- b'type': b'set',
- b'validvalues': set([
- b'firstchangeset',
- b'linknode',
- b'parents',
- b'revision'
- ])
- },
- b'haveparents': {
- b'default': False,
- b'required': False,
- b'type': b'bool'
- },
- b'pathfilter': {
- b'default': None,
- b'required': False,
- b'type': b'dict'
- },
- b'revisions': {
- b'required': True,
- b'type': b'list'
- }
- },
- b'permissions': [
- b'pull'
- ],
- b'recommendedbatchsize': 50000
- },
- b'heads': {
- b'args': {
- b'publiconly': {
- b'default': False,
- b'required': False,
- b'type': b'bool'
- }
- },
- b'permissions': [
- b'pull'
- ]
- },
- b'known': {
- b'args': {
- b'nodes': {
- b'default': [],
- b'required': False,
- b'type': b'list'
- }
- },
- b'permissions': [
- b'pull'
- ]
- },
- b'listkeys': {
- b'args': {
- b'namespace': {
- b'required': True,
- b'type': b'bytes'
- }
- },
- b'permissions': [
- b'pull'
- ]
- },
- b'lookup': {
- b'args': {
- b'key': {
- b'required': True,
- b'type': b'bytes'
- }
- },
- b'permissions': [
- b'pull'
- ]
- },
- b'manifestdata': {
- b'args': {
- b'fields': {
- b'default': set([]),
- b'required': False,
- b'type': b'set',
- b'validvalues': set([
- b'parents',
- b'revision'
- ])
- },
- b'haveparents': {
- b'default': False,
- b'required': False,
- b'type': b'bool'
- },
- b'nodes': {
- b'required': True,
- b'type': b'list'
- },
- b'tree': {
- b'required': True,
- b'type': b'bytes'
- }
- },
- b'permissions': [
- b'pull'
- ],
- b'recommendedbatchsize': 100000
- },
- b'pushkey': {
- b'args': {
- b'key': {
- b'required': True,
- b'type': b'bytes'
- },
- b'namespace': {
- b'required': True,
- b'type': b'bytes'
- },
- b'new': {
- b'required': True,
- b'type': b'bytes'
- },
- b'old': {
- b'required': True,
- b'type': b'bytes'
- }
- },
- b'permissions': [
- b'push'
- ]
- },
- b'rawstorefiledata': {
- b'args': {
- b'files': {
- b'required': True,
- b'type': b'list'
- },
- b'pathfilter': {
- b'default': None,
- b'required': False,
- b'type': b'list'
- }
- },
- b'permissions': [
- b'pull'
- ]
- }
- },
- b'framingmediatypes': [
- b'application/mercurial-exp-framing-0006'
- ],
- b'pathfilterprefixes': set([
- b'path:',
- b'rootfilesin:'
- ]),
- b'rawrepoformats': [
- b'generaldelta',
- b'revlogv1'
+ b'revlogv1',
+ b'sparserevlog'
],
b'redirect': {
b'hashes': [
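(The `s>` lines above are raw framing-protocol chunks; the payload of the command-response frames is CBOR, which the test harness pretty-prints as the `response:` block. As a minimal sketch, one might decode such a payload with Mercurial's own cborutil helpers; the `payload` value below is a stand-in built locally, not a real wire capture:

    from mercurial.utils import cborutil

    # Stand-in for the CBOR bytes carried by the command-response
    # frames; a real payload would be reassembled from the frames
    # read off the wire.
    payload = b''.join(cborutil.streamencode({b'status': b'ok'}))

    # decodeall() returns every top-level CBOR value in the buffer.
    print(cborutil.decodeall(payload))  # [{b'status': b'ok'}]

Note the `\xd9\x01\x02` sequences in the dump: that is CBOR tag 258, which cborutil uses to encode sets such as `validvalues` and `pathfilterprefixes`.)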
--- a/tests/test-wireproto-exchangev2-shallow.t Wed Jan 09 20:00:35 2019 -0800
+++ b/tests/test-wireproto-exchangev2-shallow.t Fri Jan 18 13:28:22 2019 -0500
@@ -196,7 +196,7 @@
node = I\x1d\xa1\xbb\x89\xeax\xc0\xc0\xa2s[\x16\xce}\x93\x1d\xc8\xe2\r (esc)
p1rev = -1
p2rev = -1
- linkrev = 4
+ linkrev = 5
flags = 2
id = 4
@@ -205,7 +205,7 @@
node = S\x82\x06\xdc\x97\x1eR\x15@\xd6\x84:\xbf\xe6\xd1`2\xf6\xd4& (esc)
p1rev = -1
p2rev = -1
- linkrev = 1
+ linkrev = 5
flags = 0
id = 5
@@ -214,7 +214,7 @@
node = ]\xf3\xac\xd8\xd0\xc7\xfaP\x98\xd0'\x9a\x044\xc3\x02\x9e+x\xe1 (esc)
p1rev = -1
p2rev = -1
- linkrev = 4
+ linkrev = 5
flags = 2
id = 6
@@ -223,7 +223,7 @@
node = (\xc7v\xae\x08\xd0\xd5^\xb4\x06H\xb4\x01\xb9\x0f\xf5DH4\x8e (esc)
p1rev = -1
p2rev = -1
- linkrev = 4
+ linkrev = 5
flags = 2
Test a shallow clone with only some files
@@ -342,7 +342,7 @@
node = I\x1d\xa1\xbb\x89\xeax\xc0\xc0\xa2s[\x16\xce}\x93\x1d\xc8\xe2\r (esc)
p1rev = -1
p2rev = -1
- linkrev = 4
+ linkrev = 5
flags = 2
id = 2
@@ -351,7 +351,7 @@
node = S\x82\x06\xdc\x97\x1eR\x15@\xd6\x84:\xbf\xe6\xd1`2\xf6\xd4& (esc)
p1rev = -1
p2rev = -1
- linkrev = 1
+ linkrev = 5
flags = 0
Cloning an old revision with depth=1 works
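(The linkrev bumps above, 4 to 5 and 1 to 5, come from shallow cloning: when only part of the changelog is transferred, a file revision's original introducing changeset may not exist locally, so the stored linkrev must be adjusted to a changeset the shallow clone actually has. A rough illustrative sketch of the rule follows; `adjustlinkrev`, `candidates`, and `localrevs` are hypothetical names, not Mercurial's implementation:

    def adjustlinkrev(candidates, localrevs):
        """Pick a linkrev that exists in the local (shallow) repo.

        candidates: changelog revisions introducing this file revision
        on the server; localrevs: revisions present locally.
        """
        present = sorted(r for r in candidates if r in localrevs)
        if present:
            # earliest local changeset carrying this file revision
            return present[0]
        # otherwise fall back to the changeset that delivered the data
        return max(localrevs)

)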
--- a/tests/test-wireproto-exchangev2.t Wed Jan 09 20:00:35 2019 -0800
+++ b/tests/test-wireproto-exchangev2.t Fri Jan 18 13:28:22 2019 -0500
@@ -1236,3 +1236,70 @@
(sent 5 HTTP requests and * bytes; received * bytes in responses) (glob)
abort: revlog storage does not support missing parents write mode
[255]
+
+ $ killdaemons.py
+
+Repo with 2 DAG branches introducing the same filenode, to test linknode adjustment
+
+ $ hg init server-linknode
+ $ enablehttpv2 server-linknode
+ $ cd server-linknode
+ $ touch foo
+ $ hg -q commit -Am initial
+ $ echo foo > dupe-file
+ $ hg commit -Am 'dupe 1'
+ adding dupe-file
+ $ hg -q up -r 0
+ $ echo foo > dupe-file
+ $ hg commit -Am 'dupe 2'
+ adding dupe-file
+ created new head
+ $ hg serve -p $HGPORT -d --pid-file hg.pid -E error.log
+ $ cat hg.pid > $DAEMON_PIDS
+ $ cd ..
+
+Perform an incremental pull of both heads and ensure the linkrev is written out properly
+
+ $ hg clone -r 96ee1d7354c4 http://localhost:$HGPORT client-linknode-1
+ new changesets 96ee1d7354c4
+ updating to branch default
+ 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
+ $ cd client-linknode-1
+ $ touch extra
+ $ hg commit -Am extra
+ adding extra
+ $ cd ..
+
+ $ hg clone -r 96ee1d7354c4 http://localhost:$HGPORT client-linknode-2
+ new changesets 96ee1d7354c4
+ updating to branch default
+ 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
+ $ cd client-linknode-2
+ $ touch extra
+ $ hg commit -Am extra
+ adding extra
+ $ cd ..
+
+ $ hg -R client-linknode-1 pull -r 1681c33f9f80
+ pulling from http://localhost:$HGPORT/
+ searching for changes
+ new changesets 1681c33f9f80
+ (run 'hg update' to get a working copy)
+
+#if reporevlogstore
+ $ hg -R client-linknode-1 debugrevlogindex dupe-file
+ rev linkrev nodeid p1 p2
+ 0 2 2ed2a3912a0b 000000000000 000000000000
+#endif
+
+ $ hg -R client-linknode-2 pull -r 639c8990d6a5
+ pulling from http://localhost:$HGPORT/
+ searching for changes
+ new changesets 639c8990d6a5
+ (run 'hg update' to get a working copy)
+
+#if reporevlogstore
+ $ hg -R client-linknode-2 debugrevlogindex dupe-file
+ rev linkrev nodeid p1 p2
+ 0 2 2ed2a3912a0b 000000000000 000000000000
+#endif
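(Both clients end up recording linkrev 2 for dupe-file, i.e. the changeset that introduced the filenode locally, even though the two clients pulled different heads; this is the client-side linknode adjustment the test above exercises.

The next diff drops the vendored `mercurial.thirdparty.cbor` module from the server-reactor tests in favor of `cborutil.streamencode`. `streamencode()` is a generator yielding encoded chunks, so values can be emitted incrementally into protocol frames, and its map encoding is deterministic, which is presumably why the explicit `canonical=True` flag disappears. A minimal sketch of the equivalence:

    from mercurial.utils import cborutil

    obj = {b'name': b'command1', b'args': {b'foo': b'bar'}}

    # Generator of CBOR chunks; joining them yields a one-shot byte
    # string like the old cbor.dumps(obj, canonical=True) produced.
    encoded = b''.join(cborutil.streamencode(obj))

)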
--- a/tests/test-wireproto-serverreactor.py Wed Jan 09 20:00:35 2019 -0800
+++ b/tests/test-wireproto-serverreactor.py Fri Jan 18 13:28:22 2019 -0500
@@ -2,9 +2,6 @@
import unittest
-from mercurial.thirdparty import (
- cbor,
-)
from mercurial import (
ui as uimod,
util,
@@ -16,7 +13,7 @@
ffs = framing.makeframefromhumanstring
-OK = cbor.dumps({b'status': b'ok'})
+OK = b''.join(cborutil.streamencode({b'status': b'ok'}))
def makereactor(deferoutput=False):
ui = uimod.ui()
@@ -270,20 +267,20 @@
})
def testinterleavedcommands(self):
- cbor1 = cbor.dumps({
+ cbor1 = b''.join(cborutil.streamencode({
b'name': b'command1',
b'args': {
b'foo': b'bar',
b'key1': b'val',
}
- }, canonical=True)
- cbor3 = cbor.dumps({
+ }))
+ cbor3 = b''.join(cborutil.streamencode({
b'name': b'command3',
b'args': {
b'biz': b'baz',
b'key': b'val',
},
- }, canonical=True)
+ }))
results = list(sendframes(makereactor(), [
ffs(b'1 1 stream-begin command-request new|more %s' % cbor1[0:6]),