--- a/.clang-format Fri Feb 23 17:57:04 2018 -0800
+++ b/.clang-format Sat Feb 24 17:49:10 2018 -0600
@@ -6,3 +6,8 @@
IndentCaseLabels: false
AllowShortBlocksOnASingleLine: false
AllowShortFunctionsOnASingleLine: false
+IncludeCategories:
+ - Regex: '^<'
+ Priority: 1
+ - Regex: '^"'
+ Priority: 2
--- a/Makefile Fri Feb 23 17:57:04 2018 -0800
+++ b/Makefile Sat Feb 24 17:49:10 2018 -0600
@@ -234,18 +234,6 @@
docker-ubuntu-xenial-ppa: contrib/docker/ubuntu-xenial
contrib/dockerdeb ubuntu xenial --source-only
-docker-ubuntu-yakkety: contrib/docker/ubuntu-yakkety
- contrib/dockerdeb ubuntu yakkety
-
-docker-ubuntu-yakkety-ppa: contrib/docker/ubuntu-yakkety
- contrib/dockerdeb ubuntu yakkety --source-only
-
-docker-ubuntu-zesty: contrib/docker/ubuntu-zesty
- contrib/dockerdeb ubuntu zesty
-
-docker-ubuntu-zesty-ppa: contrib/docker/ubuntu-zesty
- contrib/dockerdeb ubuntu zesty --source-only
-
docker-ubuntu-artful: contrib/docker/ubuntu-artful
contrib/dockerdeb ubuntu artful
@@ -318,8 +306,6 @@
osx deb ppa docker-debian-jessie docker-debian-stretch \
docker-ubuntu-trusty docker-ubuntu-trusty-ppa \
docker-ubuntu-xenial docker-ubuntu-xenial-ppa \
- docker-ubuntu-yakkety docker-ubuntu-yakkety-ppa \
- docker-ubuntu-zesty docker-ubuntu-zesty-ppa \
docker-ubuntu-artful docker-ubuntu-artful-ppa \
fedora20 docker-fedora20 fedora21 docker-fedora21 \
centos5 docker-centos5 centos6 docker-centos6 centos7 docker-centos7 \
--- a/contrib/Makefile.python Fri Feb 23 17:57:04 2018 -0800
+++ b/contrib/Makefile.python Sat Feb 24 17:49:10 2018 -0600
@@ -1,4 +1,4 @@
-PYTHONVER=2.7.10
+PYTHONVER=2.7.14
PYTHONNAME=python-
PREFIX=$(HOME)/bin/prefix-$(PYTHONNAME)$(PYTHONVER)
SYMLINKDIR=$(HOME)/bin
--- a/contrib/buildrpm Fri Feb 23 17:57:04 2018 -0800
+++ b/contrib/buildrpm Sat Feb 24 17:49:10 2018 -0600
@@ -20,8 +20,8 @@
;;
--withpython | --with-python)
shift
- PYTHONVER=2.7.10
- PYTHONMD5=d7547558fd673bd9d38e2108c6b42521
+ PYTHONVER=2.7.14
+ PYTHONMD5=cee2e4b33ad3750da77b2e85f2f8b724
;;
--rpmbuilddir )
shift
--- a/contrib/check-code.py Fri Feb 23 17:57:04 2018 -0800
+++ b/contrib/check-code.py Sat Feb 24 17:49:10 2018 -0600
@@ -150,6 +150,7 @@
(r'grep.* -[ABC]', "don't use grep's context flags"),
(r'find.*-printf',
"don't use 'find -printf', it doesn't exist on BSD find(1)"),
+ (r'\$RANDOM ', "don't use bash-only $RANDOM to generate random values"),
],
# warnings
[
--- a/contrib/check-config.py Fri Feb 23 17:57:04 2018 -0800
+++ b/contrib/check-config.py Sat Feb 24 17:49:10 2018 -0600
@@ -15,7 +15,7 @@
documented = {}
allowinconsistent = set()
-configre = re.compile(r'''
+configre = re.compile(br'''
# Function call
ui\.config(?P<ctype>|int|bool|list)\(
# First argument.
@@ -25,7 +25,7 @@
(?:default=)?(?P<default>\S+?))?
\)''', re.VERBOSE | re.MULTILINE)
-configwithre = re.compile('''
+configwithre = re.compile(b'''
ui\.config(?P<ctype>with)\(
# First argument is callback function. This doesn't parse robustly
# if it is e.g. a function call.
@@ -35,57 +35,57 @@
(?:default=)?(?P<default>\S+?))?
\)''', re.VERBOSE | re.MULTILINE)
-configpartialre = (r"""ui\.config""")
+configpartialre = (br"""ui\.config""")
-ignorere = re.compile(r'''
+ignorere = re.compile(br'''
\#\s(?P<reason>internal|experimental|deprecated|developer|inconsistent)\s
config:\s(?P<config>\S+\.\S+)$
''', re.VERBOSE | re.MULTILINE)
def main(args):
for f in args:
- sect = ''
- prevname = ''
- confsect = ''
- carryover = ''
+ sect = b''
+ prevname = b''
+ confsect = b''
+ carryover = b''
linenum = 0
- for l in open(f):
+ for l in open(f, 'rb'):
linenum += 1
# check topic-like bits
- m = re.match('\s*``(\S+)``', l)
+ m = re.match(b'\s*``(\S+)``', l)
if m:
prevname = m.group(1)
- if re.match('^\s*-+$', l):
+ if re.match(b'^\s*-+$', l):
sect = prevname
- prevname = ''
+ prevname = b''
if sect and prevname:
- name = sect + '.' + prevname
+ name = sect + b'.' + prevname
documented[name] = 1
# check docstring bits
- m = re.match(r'^\s+\[(\S+)\]', l)
+ m = re.match(br'^\s+\[(\S+)\]', l)
if m:
confsect = m.group(1)
continue
- m = re.match(r'^\s+(?:#\s*)?(\S+) = ', l)
+ m = re.match(br'^\s+(?:#\s*)?(\S+) = ', l)
if m:
- name = confsect + '.' + m.group(1)
+ name = confsect + b'.' + m.group(1)
documented[name] = 1
# like the bugzilla extension
- m = re.match(r'^\s*(\S+\.\S+)$', l)
+ m = re.match(br'^\s*(\S+\.\S+)$', l)
if m:
documented[m.group(1)] = 1
# like convert
- m = re.match(r'^\s*:(\S+\.\S+):\s+', l)
+ m = re.match(br'^\s*:(\S+\.\S+):\s+', l)
if m:
documented[m.group(1)] = 1
# quoted in help or docstrings
- m = re.match(r'.*?``(\S+\.\S+)``', l)
+ m = re.match(br'.*?``(\S+\.\S+)``', l)
if m:
documented[m.group(1)] = 1
@@ -108,7 +108,7 @@
default = m.group('default')
if default in (None, 'False', 'None', '0', '[]', '""', "''"):
default = ''
- if re.match('[a-z.]+$', default):
+ if re.match(b'[a-z.]+$', default):
default = '<variable>'
if (name in foundopts and (ctype, default) != foundopts[name]
and name not in allowinconsistent):
--- a/contrib/chg/chg.c Fri Feb 23 17:57:04 2018 -0800
+++ b/contrib/chg/chg.c Sat Feb 24 17:49:10 2018 -0600
@@ -38,11 +38,13 @@
const char **args;
};
-static void initcmdserveropts(struct cmdserveropts *opts) {
+static void initcmdserveropts(struct cmdserveropts *opts)
+{
memset(opts, 0, sizeof(struct cmdserveropts));
}
-static void freecmdserveropts(struct cmdserveropts *opts) {
+static void freecmdserveropts(struct cmdserveropts *opts)
+{
free(opts->args);
opts->args = NULL;
opts->argsize = 0;
@@ -59,12 +61,8 @@
const char *name;
size_t narg;
} flags[] = {
- {"--config", 1},
- {"--cwd", 1},
- {"--repo", 1},
- {"--repository", 1},
- {"--traceback", 0},
- {"-R", 1},
+ {"--config", 1}, {"--cwd", 1}, {"--repo", 1},
+ {"--repository", 1}, {"--traceback", 0}, {"-R", 1},
};
size_t i;
for (i = 0; i < sizeof(flags) / sizeof(flags[0]); ++i) {
@@ -89,21 +87,21 @@
/*
* Parse argv[] and put sensitive flags to opts->args
*/
-static void setcmdserverargs(struct cmdserveropts *opts,
- int argc, const char *argv[])
+static void setcmdserverargs(struct cmdserveropts *opts, int argc,
+ const char *argv[])
{
size_t i, step;
opts->argsize = 0;
for (i = 0, step = 1; i < (size_t)argc; i += step, step = 1) {
if (!argv[i])
- continue; /* pass clang-analyse */
+ continue; /* pass clang-analyse */
if (strcmp(argv[i], "--") == 0)
break;
size_t n = testsensitiveflag(argv[i]);
if (n == 0 || i + n > (size_t)argc)
continue;
- opts->args = reallocx(opts->args,
- (n + opts->argsize) * sizeof(char *));
+ opts->args =
+ reallocx(opts->args, (n + opts->argsize) * sizeof(char *));
memcpy(opts->args + opts->argsize, argv + i,
sizeof(char *) * n);
opts->argsize += n;
@@ -180,8 +178,8 @@
r = snprintf(opts->sockname, sizeof(opts->sockname), sockfmt, basename);
if (r < 0 || (size_t)r >= sizeof(opts->sockname))
abortmsg("too long TMPDIR or CHGSOCKNAME (r = %d)", r);
- r = snprintf(opts->initsockname, sizeof(opts->initsockname),
- "%s.%u", opts->sockname, (unsigned)getpid());
+ r = snprintf(opts->initsockname, sizeof(opts->initsockname), "%s.%u",
+ opts->sockname, (unsigned)getpid());
if (r < 0 || (size_t)r >= sizeof(opts->initsockname))
abortmsg("too long TMPDIR or CHGSOCKNAME (r = %d)", r);
}
@@ -208,11 +206,14 @@
const char *hgcmd = gethgcmd();
const char *baseargv[] = {
- hgcmd,
- "serve",
- "--cmdserver", "chgunix",
- "--address", opts->initsockname,
- "--daemon-postexec", "chdir:/",
+ hgcmd,
+ "serve",
+ "--cmdserver",
+ "chgunix",
+ "--address",
+ opts->initsockname,
+ "--daemon-postexec",
+ "chdir:/",
};
size_t baseargvsize = sizeof(baseargv) / sizeof(baseargv[0]);
size_t argsize = baseargvsize + opts->argsize + 1;
@@ -237,7 +238,7 @@
debugmsg("try connect to %s repeatedly", opts->initsockname);
- unsigned int timeoutsec = 60; /* default: 60 seconds */
+ unsigned int timeoutsec = 60; /* default: 60 seconds */
const char *timeoutenv = getenv("CHGTIMEOUT");
if (timeoutenv)
sscanf(timeoutenv, "%u", &timeoutsec);
@@ -246,7 +247,7 @@
hgclient_t *hgc = hgc_open(opts->initsockname);
if (hgc) {
debugmsg("rename %s to %s", opts->initsockname,
- opts->sockname);
+ opts->sockname);
int r = rename(opts->initsockname, opts->sockname);
if (r != 0)
abortmsgerrno("cannot rename");
@@ -270,7 +271,7 @@
if (WIFEXITED(pst)) {
if (WEXITSTATUS(pst) == 0)
abortmsg("could not connect to cmdserver "
- "(exited with status 0)");
+ "(exited with status 0)");
debugmsg("cmdserver exited with status %d", WEXITSTATUS(pst));
exit(WEXITSTATUS(pst));
} else if (WIFSIGNALED(pst)) {
@@ -284,8 +285,8 @@
/* Connect to a cmdserver. Will start a new server on demand. */
static hgclient_t *connectcmdserver(struct cmdserveropts *opts)
{
- const char *sockname = opts->redirectsockname[0] ?
- opts->redirectsockname : opts->sockname;
+ const char *sockname =
+ opts->redirectsockname[0] ? opts->redirectsockname : opts->sockname;
debugmsg("try connect to %s", sockname);
hgclient_t *hgc = hgc_open(sockname);
if (hgc)
@@ -339,8 +340,8 @@
unlink(*pinst + 7);
} else if (strncmp(*pinst, "redirect ", 9) == 0) {
int r = snprintf(opts->redirectsockname,
- sizeof(opts->redirectsockname),
- "%s", *pinst + 9);
+ sizeof(opts->redirectsockname), "%s",
+ *pinst + 9);
if (r < 0 || r >= (int)sizeof(opts->redirectsockname))
abortmsg("redirect path is too long (%d)", r);
needreconnect = 1;
@@ -365,10 +366,9 @@
*/
static int isunsupported(int argc, const char *argv[])
{
- enum {
- SERVE = 1,
- DAEMON = 2,
- SERVEDAEMON = SERVE | DAEMON,
+ enum { SERVE = 1,
+ DAEMON = 2,
+ SERVEDAEMON = SERVE | DAEMON,
};
unsigned int state = 0;
int i;
@@ -378,7 +378,7 @@
if (i == 0 && strcmp("serve", argv[i]) == 0)
state |= SERVE;
else if (strcmp("-d", argv[i]) == 0 ||
- strcmp("--daemon", argv[i]) == 0)
+ strcmp("--daemon", argv[i]) == 0)
state |= DAEMON;
}
return (state & SERVEDAEMON) == SERVEDAEMON;
@@ -401,9 +401,9 @@
if (getenv("CHGINTERNALMARK"))
abortmsg("chg started by chg detected.\n"
- "Please make sure ${HG:-hg} is not a symlink or "
- "wrapper to chg. Alternatively, set $CHGHG to the "
- "path of real hg.");
+ "Please make sure ${HG:-hg} is not a symlink or "
+ "wrapper to chg. Alternatively, set $CHGHG to the "
+ "path of real hg.");
if (isunsupported(argc - 1, argv + 1))
execoriginalhg(argv);
@@ -435,11 +435,11 @@
hgc_close(hgc);
if (++retry > 10)
abortmsg("too many redirections.\n"
- "Please make sure %s is not a wrapper which "
- "changes sensitive environment variables "
- "before executing hg. If you have to use a "
- "wrapper, wrap chg instead of hg.",
- gethgcmd());
+ "Please make sure %s is not a wrapper which "
+ "changes sensitive environment variables "
+ "before executing hg. If you have to use a "
+ "wrapper, wrap chg instead of hg.",
+ gethgcmd());
}
setupsignalhandler(hgc_peerpid(hgc), hgc_peerpgid(hgc));
--- a/contrib/chg/hgclient.c Fri Feb 23 17:57:04 2018 -0800
+++ b/contrib/chg/hgclient.c Sat Feb 24 17:49:10 2018 -0600
@@ -7,7 +7,7 @@
* GNU General Public License version 2 or any later version.
*/
-#include <arpa/inet.h> /* for ntohl(), htonl() */
+#include <arpa/inet.h> /* for ntohl(), htonl() */
#include <assert.h>
#include <ctype.h>
#include <errno.h>
@@ -26,16 +26,15 @@
#include "procutil.h"
#include "util.h"
-enum {
- CAP_GETENCODING = 0x0001,
- CAP_RUNCOMMAND = 0x0002,
- /* cHg extension: */
- CAP_ATTACHIO = 0x0100,
- CAP_CHDIR = 0x0200,
- CAP_SETENV = 0x0800,
- CAP_SETUMASK = 0x1000,
- CAP_VALIDATE = 0x2000,
- CAP_SETPROCNAME = 0x4000,
+enum { CAP_GETENCODING = 0x0001,
+ CAP_RUNCOMMAND = 0x0002,
+ /* cHg extension: */
+ CAP_ATTACHIO = 0x0100,
+ CAP_CHDIR = 0x0200,
+ CAP_SETENV = 0x0800,
+ CAP_SETUMASK = 0x1000,
+ CAP_VALIDATE = 0x2000,
+ CAP_SETPROCNAME = 0x4000,
};
typedef struct {
@@ -44,15 +43,15 @@
} cappair_t;
static const cappair_t captable[] = {
- {"getencoding", CAP_GETENCODING},
- {"runcommand", CAP_RUNCOMMAND},
- {"attachio", CAP_ATTACHIO},
- {"chdir", CAP_CHDIR},
- {"setenv", CAP_SETENV},
- {"setumask", CAP_SETUMASK},
- {"validate", CAP_VALIDATE},
- {"setprocname", CAP_SETPROCNAME},
- {NULL, 0}, /* terminator */
+ {"getencoding", CAP_GETENCODING},
+ {"runcommand", CAP_RUNCOMMAND},
+ {"attachio", CAP_ATTACHIO},
+ {"chdir", CAP_CHDIR},
+ {"setenv", CAP_SETENV},
+ {"setumask", CAP_SETUMASK},
+ {"validate", CAP_VALIDATE},
+ {"setprocname", CAP_SETPROCNAME},
+ {NULL, 0}, /* terminator */
};
typedef struct {
@@ -88,8 +87,8 @@
if (newsize <= ctx->maxdatasize)
return;
- newsize = defaultdatasize
- * ((newsize + defaultdatasize - 1) / defaultdatasize);
+ newsize = defaultdatasize *
+ ((newsize + defaultdatasize - 1) / defaultdatasize);
ctx->data = reallocx(ctx->data, newsize);
ctx->maxdatasize = newsize;
debugmsg("enlarge context buffer to %zu", ctx->maxdatasize);
@@ -126,12 +125,12 @@
enlargecontext(&hgc->ctx, hgc->ctx.datasize);
if (isupper(hgc->ctx.ch) && hgc->ctx.ch != 'S')
- return; /* assumes input request */
+ return; /* assumes input request */
size_t cursize = 0;
while (cursize < hgc->ctx.datasize) {
rsize = recv(hgc->sockfd, hgc->ctx.data + cursize,
- hgc->ctx.datasize - cursize, 0);
+ hgc->ctx.datasize - cursize, 0);
if (rsize < 1)
abortmsg("failed to read data block");
cursize += rsize;
@@ -176,19 +175,19 @@
/* Build '\0'-separated list of args. argsize < 0 denotes that args are
* terminated by NULL. */
static void packcmdargs(context_t *ctx, const char *const args[],
- ssize_t argsize)
+ ssize_t argsize)
{
ctx->datasize = 0;
const char *const *const end = (argsize >= 0) ? args + argsize : NULL;
for (const char *const *it = args; it != end && *it; ++it) {
- const size_t n = strlen(*it) + 1; /* include '\0' */
+ const size_t n = strlen(*it) + 1; /* include '\0' */
enlargecontext(ctx, ctx->datasize + n);
memcpy(ctx->data + ctx->datasize, *it, n);
ctx->datasize += n;
}
if (ctx->datasize > 0)
- --ctx->datasize; /* strip last '\0' */
+ --ctx->datasize; /* strip last '\0' */
}
/* Extract '\0'-separated list of args to new buffer, terminated by NULL */
@@ -199,7 +198,7 @@
const char *s = ctx->data;
const char *e = ctx->data + ctx->datasize;
for (;;) {
- if (nargs + 1 >= maxnargs) { /* including last NULL */
+ if (nargs + 1 >= maxnargs) { /* including last NULL */
maxnargs += 256;
args = reallocx(args, maxnargs * sizeof(args[0]));
}
@@ -237,7 +236,7 @@
{
context_t *ctx = &hgc->ctx;
enlargecontext(ctx, ctx->datasize + 1);
- ctx->data[ctx->datasize] = '\0'; /* terminate last string */
+ ctx->data[ctx->datasize] = '\0'; /* terminate last string */
const char **args = unpackcmdargsnul(ctx);
if (!args[0] || !args[1] || !args[2])
@@ -269,8 +268,8 @@
for (;;) {
readchannel(hgc);
context_t *ctx = &hgc->ctx;
- debugmsg("response read from channel %c, size %zu",
- ctx->ch, ctx->datasize);
+ debugmsg("response read from channel %c, size %zu", ctx->ch,
+ ctx->datasize);
switch (ctx->ch) {
case 'o':
fwrite(ctx->data, sizeof(ctx->data[0]), ctx->datasize,
@@ -299,7 +298,7 @@
default:
if (isupper(ctx->ch))
abortmsg("cannot handle response (ch = %c)",
- ctx->ch);
+ ctx->ch);
}
}
}
@@ -366,8 +365,8 @@
static void updateprocname(hgclient_t *hgc)
{
- int r = snprintf(hgc->ctx.data, hgc->ctx.maxdatasize,
- "chg[worker/%d]", (int)getpid());
+ int r = snprintf(hgc->ctx.data, hgc->ctx.maxdatasize, "chg[worker/%d]",
+ (int)getpid());
if (r < 0 || (size_t)r >= hgc->ctx.maxdatasize)
abortmsg("insufficient buffer to write procname (r = %d)", r);
hgc->ctx.datasize = (size_t)r;
@@ -387,7 +386,7 @@
static const int fds[3] = {STDIN_FILENO, STDOUT_FILENO, STDERR_FILENO};
struct msghdr msgh;
memset(&msgh, 0, sizeof(msgh));
- struct iovec iov = {ctx->data, ctx->datasize}; /* dummy payload */
+ struct iovec iov = {ctx->data, ctx->datasize}; /* dummy payload */
msgh.msg_iov = &iov;
msgh.msg_iovlen = 1;
char fdbuf[CMSG_SPACE(sizeof(fds))];
@@ -552,7 +551,7 @@
* the last string is guaranteed to be NULL.
*/
const char **hgc_validate(hgclient_t *hgc, const char *const args[],
- size_t argsize)
+ size_t argsize)
{
assert(hgc);
if (!(hgc->capflags & CAP_VALIDATE))
--- a/contrib/chg/hgclient.h Fri Feb 23 17:57:04 2018 -0800
+++ b/contrib/chg/hgclient.h Sat Feb 24 17:49:10 2018 -0600
@@ -22,9 +22,9 @@
pid_t hgc_peerpid(const hgclient_t *hgc);
const char **hgc_validate(hgclient_t *hgc, const char *const args[],
- size_t argsize);
+ size_t argsize);
int hgc_runcommand(hgclient_t *hgc, const char *const args[], size_t argsize);
void hgc_attachio(hgclient_t *hgc);
void hgc_setenv(hgclient_t *hgc, const char *const envp[]);
-#endif /* HGCLIENT_H_ */
+#endif /* HGCLIENT_H_ */
--- a/contrib/chg/procutil.c Fri Feb 23 17:57:04 2018 -0800
+++ b/contrib/chg/procutil.c Sat Feb 24 17:49:10 2018 -0600
@@ -54,7 +54,7 @@
goto error;
forwardsignal(sig);
- if (raise(sig) < 0) /* resend to self */
+ if (raise(sig) < 0) /* resend to self */
goto error;
if (sigaction(sig, &sa, &oldsa) < 0)
goto error;
@@ -205,8 +205,8 @@
close(pipefds[0]);
close(pipefds[1]);
- int r = execle("/bin/sh", "/bin/sh", "-c", pagercmd, NULL,
- envp);
+ int r =
+ execle("/bin/sh", "/bin/sh", "-c", pagercmd, NULL, envp);
if (r < 0) {
abortmsgerrno("cannot start pager '%s'", pagercmd);
}
--- a/contrib/chg/util.c Fri Feb 23 17:57:04 2018 -0800
+++ b/contrib/chg/util.c Sat Feb 24 17:49:10 2018 -0600
@@ -62,7 +62,8 @@
static int debugmsgenabled = 0;
static double debugstart = 0;
-static double now() {
+static double now()
+{
struct timeval t;
gettimeofday(&t, NULL);
return t.tv_usec / 1e6 + t.tv_sec;
--- a/contrib/chg/util.h Fri Feb 23 17:57:04 2018 -0800
+++ b/contrib/chg/util.h Sat Feb 24 17:49:10 2018 -0600
@@ -32,4 +32,4 @@
int runshellcmd(const char *cmd, const char *envp[], const char *cwd);
-#endif /* UTIL_H_ */
+#endif /* UTIL_H_ */
--- a/contrib/clang-format-blacklist Fri Feb 23 17:57:04 2018 -0800
+++ b/contrib/clang-format-blacklist Sat Feb 24 17:49:10 2018 -0600
@@ -1,23 +1,8 @@
# Files that just need to be migrated to the formatter.
# Do not add new files here!
-contrib/chg/chg.c
-contrib/chg/hgclient.c
-contrib/chg/hgclient.h
-contrib/chg/procutil.c
-contrib/chg/procutil.h
-contrib/chg/util.c
-contrib/chg/util.h
-contrib/hgsh/hgsh.c
-mercurial/cext/base85.c
-mercurial/cext/bdiff.c
-mercurial/cext/charencode.c
-mercurial/cext/charencode.h
-mercurial/cext/diffhelpers.c
mercurial/cext/dirs.c
mercurial/cext/manifest.c
-mercurial/cext/mpatch.c
mercurial/cext/osutil.c
-mercurial/cext/pathencode.c
mercurial/cext/revlog.c
# Vendored code that we should never format:
contrib/python-zstandard/c-ext/bufferutil.c
--- a/contrib/dirstatenonnormalcheck.py Fri Feb 23 17:57:04 2018 -0800
+++ b/contrib/dirstatenonnormalcheck.py Sat Feb 24 17:49:10 2018 -0600
@@ -17,7 +17,7 @@
"""Compute nonnormal entries from dirstate's dmap"""
res = set()
for f, e in dmap.iteritems():
- if e[0] != 'n' or e[3] == -1:
+ if e[0] != b'n' or e[3] == -1:
res.add(f)
return res
@@ -25,24 +25,25 @@
"""Compute nonnormalset from dmap, check that it matches _nonnormalset"""
nonnormalcomputedmap = nonnormalentries(dmap)
if _nonnormalset != nonnormalcomputedmap:
- ui.develwarn("%s call to %s\n" % (label, orig), config='dirstate')
- ui.develwarn("inconsistency in nonnormalset\n", config='dirstate')
- ui.develwarn("[nonnormalset] %s\n" % _nonnormalset, config='dirstate')
- ui.develwarn("[map] %s\n" % nonnormalcomputedmap, config='dirstate')
+ ui.develwarn(b"%s call to %s\n" % (label, orig), config=b'dirstate')
+ ui.develwarn(b"inconsistency in nonnormalset\n", config=b'dirstate')
+ ui.develwarn(b"[nonnormalset] %s\n" % _nonnormalset, config=b'dirstate')
+ ui.develwarn(b"[map] %s\n" % nonnormalcomputedmap, config=b'dirstate')
def _checkdirstate(orig, self, arg):
"""Check nonnormal set consistency before and after the call to orig"""
checkconsistency(self._ui, orig, self._map, self._map.nonnormalset,
- "before")
+ b"before")
r = orig(self, arg)
- checkconsistency(self._ui, orig, self._map, self._map.nonnormalset, "after")
+ checkconsistency(self._ui, orig, self._map, self._map.nonnormalset,
+ b"after")
return r
def extsetup(ui):
"""Wrap functions modifying dirstate to check nonnormalset consistency"""
dirstatecl = dirstate.dirstate
- devel = ui.configbool('devel', 'all-warnings')
- paranoid = ui.configbool('experimental', 'nonnormalparanoidcheck')
+ devel = ui.configbool(b'devel', b'all-warnings')
+ paranoid = ui.configbool(b'experimental', b'nonnormalparanoidcheck')
if devel:
extensions.wrapfunction(dirstatecl, '_writedirstate', _checkdirstate)
if paranoid:
--- a/contrib/dumprevlog Fri Feb 23 17:57:04 2018 -0800
+++ b/contrib/dumprevlog Sat Feb 24 17:49:10 2018 -0600
@@ -14,8 +14,12 @@
for fp in (sys.stdin, sys.stdout, sys.stderr):
util.setbinary(fp)
+def binopen(path, mode='rb'):
+ if 'b' not in mode:
+ mode = mode + 'b'
+ return open(path, mode)
+
for f in sys.argv[1:]:
- binopen = lambda fn: open(fn, 'rb')
r = revlog.revlog(binopen, f)
print("file:", f)
for i in r:
--- a/contrib/hgsh/hgsh.c Fri Feb 23 17:57:04 2018 -0800
+++ b/contrib/hgsh/hgsh.c Sat Feb 24 17:49:10 2018 -0600
@@ -48,7 +48,7 @@
* have such machine, set to NULL.
*/
#ifndef HG_GATEWAY
-#define HG_GATEWAY "gateway"
+#define HG_GATEWAY "gateway"
#endif
/*
@@ -56,7 +56,7 @@
* NULL.
*/
#ifndef HG_HOST
-#define HG_HOST "mercurial"
+#define HG_HOST "mercurial"
#endif
/*
@@ -64,7 +64,7 @@
* host username are same, set to NULL.
*/
#ifndef HG_USER
-#define HG_USER "hg"
+#define HG_USER "hg"
#endif
/*
@@ -72,14 +72,14 @@
* validate location of repo when someone is try to access, set to NULL.
*/
#ifndef HG_ROOT
-#define HG_ROOT "/home/hg/repos"
+#define HG_ROOT "/home/hg/repos"
#endif
/*
* HG: path to the mercurial executable to run.
*/
#ifndef HG
-#define HG "/home/hg/bin/hg"
+#define HG "/home/hg/bin/hg"
#endif
/*
@@ -88,7 +88,7 @@
* impossible, set to NULL.
*/
#ifndef HG_SHELL
-#define HG_SHELL NULL
+#define HG_SHELL NULL
/* #define HG_SHELL "/bin/bash" */
#endif
@@ -97,7 +97,7 @@
* should not get helpful message, set to NULL.
*/
#ifndef HG_HELP
-#define HG_HELP "please contact support@example.com for help."
+#define HG_HELP "please contact support@example.com for help."
#endif
/*
@@ -106,7 +106,7 @@
* arguments it is called with. see forward_through_gateway.
*/
#ifndef SSH
-#define SSH "/usr/bin/ssh"
+#define SSH "/usr/bin/ssh"
#endif
/*
@@ -249,7 +249,6 @@
hg_serve,
};
-
/*
* attempt to verify that a directory is really a hg repo, by testing
* for the existence of a subdirectory.
@@ -310,8 +309,7 @@
if (sscanf(argv[2], "hg init %as", &repo) == 1) {
cmd = hg_init;
- }
- else if (sscanf(argv[2], "hg -R %as serve --stdio", &repo) == 1) {
+ } else if (sscanf(argv[2], "hg -R %as serve --stdio", &repo) == 1) {
cmd = hg_serve;
} else {
goto badargs;
--- a/contrib/mercurial.spec Fri Feb 23 17:57:04 2018 -0800
+++ b/contrib/mercurial.spec Sat Feb 24 17:49:10 2018 -0600
@@ -6,8 +6,8 @@
%global pythonver %{withpython}
%global pythonname Python-%{withpython}
-%global docutilsname docutils-0.12
-%global docutilsmd5 4622263b62c5c771c03502afa3157768
+%global docutilsname docutils-0.14
+%global docutilsmd5 c53768d63db3873b7d452833553469de
%global pythonhg python-hg
%global hgpyprefix /opt/%{pythonhg}
# byte compilation will fail on some some Python /test/ files
--- a/contrib/perf.py Fri Feb 23 17:57:04 2018 -0800
+++ b/contrib/perf.py Sat Feb 24 17:49:10 2018 -0600
@@ -64,6 +64,12 @@
from mercurial import scmutil # since 1.9 (or 8b252e826c68)
except ImportError:
pass
+try:
+ from mercurial import pycompat
+ getargspec = pycompat.getargspec # added to module after 4.5
+except (ImportError, AttributeError):
+ import inspect
+ getargspec = inspect.getargspec
# for "historical portability":
# define util.safehasattr forcibly, because util.safehasattr has been
@@ -114,9 +120,8 @@
if safehasattr(registrar, 'command'):
command = registrar.command(cmdtable)
elif safehasattr(cmdutil, 'command'):
- import inspect
command = cmdutil.command(cmdtable)
- if 'norepo' not in inspect.getargspec(command)[0]:
+ if 'norepo' not in getargspec(command).args:
# for "historical portability":
# wrap original cmdutil.command, because "norepo" option has
# been available since 3.1 (or 75a96326cecb)
@@ -1031,6 +1036,71 @@
with ready:
ready.notify_all()
+@command('perfunidiff', revlogopts + formatteropts + [
+ ('', 'count', 1, 'number of revisions to test (when using --startrev)'),
+ ('', 'alldata', False, 'test unidiffs for all associated revisions'),
+ ], '-c|-m|FILE REV')
+def perfunidiff(ui, repo, file_, rev=None, count=None, **opts):
+ """benchmark a unified diff between revisions
+
+ This doesn't include any copy tracing - it's just a unified diff
+ of the texts.
+
+ By default, benchmark a diff between its delta parent and itself.
+
+ With ``--count``, benchmark diffs between delta parents and self for N
+ revisions starting at the specified revision.
+
+ With ``--alldata``, assume the requested revision is a changeset and
+ measure diffs for all changes related to that changeset (manifest
+ and filelogs).
+ """
+ if opts['alldata']:
+ opts['changelog'] = True
+
+ if opts.get('changelog') or opts.get('manifest'):
+ file_, rev = None, file_
+ elif rev is None:
+ raise error.CommandError('perfunidiff', 'invalid arguments')
+
+ textpairs = []
+
+ r = cmdutil.openrevlog(repo, 'perfunidiff', file_, opts)
+
+ startrev = r.rev(r.lookup(rev))
+ for rev in range(startrev, min(startrev + count, len(r) - 1)):
+ if opts['alldata']:
+ # Load revisions associated with changeset.
+ ctx = repo[rev]
+ mtext = repo.manifestlog._revlog.revision(ctx.manifestnode())
+ for pctx in ctx.parents():
+ pman = repo.manifestlog._revlog.revision(pctx.manifestnode())
+ textpairs.append((pman, mtext))
+
+ # Load filelog revisions by iterating manifest delta.
+ man = ctx.manifest()
+ pman = ctx.p1().manifest()
+ for filename, change in pman.diff(man).items():
+ fctx = repo.file(filename)
+ f1 = fctx.revision(change[0][0] or -1)
+ f2 = fctx.revision(change[1][0] or -1)
+ textpairs.append((f1, f2))
+ else:
+ dp = r.deltaparent(rev)
+ textpairs.append((r.revision(dp), r.revision(rev)))
+
+ def d():
+ for left, right in textpairs:
+ # The date strings don't matter, so we pass empty strings.
+ headerlines, hunks = mdiff.unidiff(
+ left, '', right, '', 'left', 'right', binary=False)
+ # consume iterators in roughly the way patch.py does
+ b'\n'.join(headerlines)
+ b''.join(sum((list(hlines) for hrange, hlines in hunks), []))
+ timer, fm = gettimer(ui, opts)
+ timer(d)
+ fm.end()
+
@command('perfdiffwd', formatteropts)
def perfdiffwd(ui, repo, **opts):
"""Profile diff of working directory changes"""
@@ -1498,11 +1568,13 @@
('', 'clear-revbranch', False,
'purge the revbranch cache between computation'),
] + formatteropts)
-def perfbranchmap(ui, repo, full=False, clear_revbranch=False, **opts):
+def perfbranchmap(ui, repo, *filternames, **opts):
"""benchmark the update of a branchmap
This benchmarks the full repo.branchmap() call with read and write disabled
"""
+ full = opts.get("full", False)
+ clear_revbranch = opts.get("clear_revbranch", False)
timer, fm = gettimer(ui, opts)
def getbranchmap(filtername):
"""generate a benchmark function for the filtername"""
@@ -1521,6 +1593,8 @@
return d
# add filter in smaller subset to bigger subset
possiblefilters = set(repoview.filtertable)
+ if filternames:
+ possiblefilters &= set(filternames)
subsettable = getbranchmapsubsettable()
allfilters = []
while possiblefilters:
@@ -1537,8 +1611,9 @@
if not full:
for name in allfilters:
repo.filtered(name).branchmap()
- # add unfiltered
- allfilters.append(None)
+ if not filternames or 'unfiltered' in filternames:
+ # add unfiltered
+ allfilters.append(None)
branchcacheread = safeattrsetter(branchmap, 'read')
branchcachewrite = safeattrsetter(branchmap.branchcache, 'write')
@@ -1546,7 +1621,10 @@
branchcachewrite.set(lambda bc, repo: None)
try:
for name in allfilters:
- timer(getbranchmap(name), title=str(name))
+ printname = name
+ if name is None:
+ printname = 'unfiltered'
+ timer(getbranchmap(name), title=str(printname))
finally:
branchcacheread.restore()
branchcachewrite.restore()
--- a/contrib/python3-whitelist Fri Feb 23 17:57:04 2018 -0800
+++ b/contrib/python3-whitelist Sat Feb 24 17:49:10 2018 -0600
@@ -1,16 +1,26 @@
+test-abort-checkin.t
test-add.t
test-addremove-similar.t
test-addremove.t
+test-amend-subrepo.t
test-ancestor.py
+test-annotate.py
test-automv.t
test-backwards-remove.t
test-bheads.t
test-bisect2.t
+test-bookmarks-current.t
test-bookmarks-merge.t
+test-bookmarks-rebase.t
test-bookmarks-strip.t
+test-branch-option.t
test-branch-tag-confict.t
+test-bundle-phases.t
+test-bundle-vs-outgoing.t
+test-cappedreader.py
test-casecollision.t
test-cat.t
+test-censor.t
test-changelog-exec.t
test-check-commit.t
test-check-execute.t
@@ -19,11 +29,25 @@
test-check-pylint.t
test-check-shbang.t
test-children.t
+test-clone-pull-corruption.t
+test-clone-r.t
+test-clone-update-order.t
test-commit-unresolved.t
test-completion.t
+test-confused-revert.t
test-contrib-check-code.t
test-contrib-check-commit.t
+test-convert-authormap.t
+test-convert-clonebranches.t
+test-convert-datesort.t
+test-convert-hg-startrev.t
+test-copy-move-merge.t
+test-copytrace-heuristics.t
+test-debugbuilddag.t
+test-debugindexdot.t
test-debugrename.t
+test-diff-binary-file.t
+test-diff-change.t
test-diff-copy-depth.t
test-diff-hashes.t
test-diff-issue2761.t
@@ -32,42 +56,109 @@
test-diff-subdir.t
test-diffdir.t
test-directaccess.t
+test-dirstate-backup.t
test-dirstate-nonnormalset.t
test-doctest.py
test-double-merge.t
+test-drawdag.t
test-duplicateoptions.py
test-empty-dir.t
test-empty-file.t
+test-empty-group.t
test-empty.t
test-encoding-func.py
+test-eol-add.t
+test-eol-clone.t
+test-eol-tag.t
+test-eol-update.t
test-excessive-merge.t
+test-exchange-obsmarkers-case-A1.t
+test-exchange-obsmarkers-case-A2.t
+test-exchange-obsmarkers-case-A3.t
+test-exchange-obsmarkers-case-A4.t
+test-exchange-obsmarkers-case-A5.t
+test-exchange-obsmarkers-case-A6.t
+test-exchange-obsmarkers-case-A7.t
+test-exchange-obsmarkers-case-B1.t
+test-exchange-obsmarkers-case-B2.t
+test-exchange-obsmarkers-case-B3.t
+test-exchange-obsmarkers-case-B4.t
+test-exchange-obsmarkers-case-B5.t
+test-exchange-obsmarkers-case-B6.t
+test-exchange-obsmarkers-case-B7.t
+test-exchange-obsmarkers-case-C1.t
+test-exchange-obsmarkers-case-C2.t
+test-exchange-obsmarkers-case-C3.t
+test-exchange-obsmarkers-case-C4.t
+test-exchange-obsmarkers-case-D1.t
+test-exchange-obsmarkers-case-D2.t
+test-exchange-obsmarkers-case-D3.t
+test-exchange-obsmarkers-case-D4.t
test-execute-bit.t
+test-extdiff.t
+test-extra-filelog-entry.t
+test-filebranch.t
+test-fileset-generated.t
+test-flags.t
+test-generaldelta.t
+test-git-export.t
+test-glog-topological.t
test-gpg.t
test-hghave.t
+test-histedit-arguments.t
+test-histedit-base.t
+test-histedit-bookmark-motion.t
+test-histedit-commute.t
+test-histedit-drop.t
+test-histedit-edit.t
+test-histedit-fold-non-commute.t
+test-histedit-no-change.t
+test-histedit-non-commute.t
+test-histedit-obsolete.t
+test-histedit-outgoing.t
+test-http-branchmap.t
+test-http-clone-r.t
+test-identify.t
test-imports-checker.t
+test-inherit-mode.t
test-issue1089.t
test-issue1175.t
+test-issue1306.t
+test-issue1438.t
test-issue1502.t
test-issue1802.t
test-issue1877.t
test-issue1993.t
+test-issue3084.t
+test-issue4074.t
test-issue522.t
+test-issue586.t
test-issue612.t
test-issue619.t
test-issue672.t
test-issue842.t
test-journal-exists.t
+test-largefiles-small-disk.t
test-locate.t
+test-logexchange.t
test-lrucachedict.py
+test-mactext.t
+test-manifest-merging.t
test-manifest.py
-test-manifest-merging.t
+test-manifest.t
test-match.py
+test-mdiff.py
+test-merge-closedheads.t
+test-merge-commit.t
+test-merge-criss-cross.t
test-merge-default.t
test-merge-internal-tools-pattern.t
+test-merge-local.t
test-merge-remove.t
test-merge-revert.t
test-merge-revert2.t
test-merge-subrepos.t
+test-merge1.t
test-merge10.t
test-merge2.t
test-merge4.t
@@ -75,9 +166,41 @@
test-merge6.t
test-merge7.t
test-merge8.t
+test-mq-git.t
+test-mq-pull-from-bundle.t
+test-mq-qdiff.t
test-mq-qimport-fail-cleanup.t
+test-mq-qqueue.t
+test-mq-qrefresh.t
+test-mq-qsave.t
+test-narrow-clone-no-ellipsis.t
+test-narrow-clone-nonlinear.t
+test-narrow-clone.t
+test-narrow-copies.t
+test-narrow-debugrebuilddirstate.t
+test-narrow-exchange-merges.t
+test-narrow-merge.t
+test-narrow-patch.t
+test-narrow-patterns.t
+test-narrow-pull.t
+test-narrow-rebase.t
+test-narrow-shallow-merges.t
+test-narrow-update.t
+test-newbranch.t
test-obshistory.t
+test-obsmarkers-effectflag.t
+test-obsolete-bundle-strip.t
+test-obsolete-changeset-exchange.t
+test-obsolete-checkheads.t
+test-obsolete-distributed.t
+test-parents.t
test-permissions.t
+test-pull-branch.t
+test-pull-http.t
+test-pull-permission.t
+test-pull-pull-corruption.t
+test-pull-r.t
+test-pull-update.t
test-push-checkheads-partial-C1.t
test-push-checkheads-partial-C2.t
test-push-checkheads-partial-C3.t
@@ -105,27 +228,63 @@
test-push-checkheads-unpushed-D5.t
test-push-checkheads-unpushed-D6.t
test-push-checkheads-unpushed-D7.t
+test-push-warn.t
+test-rebase-bookmarks.t
+test-rebase-check-restore.t
+test-rebase-dest.t
+test-rebase-emptycommit.t
+test-rebase-inmemory.t
+test-rebase-issue-noparam-single-rev.t
+test-rebase-legacy.t
+test-rebase-named-branches.t
+test-rebase-newancestor.t
+test-rebase-partial.t
+test-rebase-pull.t
+test-rebase-rename.t
+test-rebase-transaction.t
test-record.t
+test-remove.t
+test-rename-after-merge.t
test-rename-dir-merge.t
test-rename-merge1.t
test-rename.t
+test-repair-strip.t
+test-repo-compengines.t
test-revert-flags.t
test-revert-unknown.t
test-revlog-group-emptyiter.t
test-revlog-mmapindex.t
test-revlog-packentry.t
+test-revset-dirstate-parents.t
+test-revset-outgoing.t
test-run-tests.py
+test-serve.t
test-show-stack.t
+test-show.t
test-simple-update.t
+test-single-head.t
test-sparse-clear.t
test-sparse-merges.t
test-sparse-requirement.t
test-sparse-verbose-json.t
+test-ssh-clone-r.t
+test-ssh-proto.t
+test-sshserver.py
+test-status-rev.t
test-status-terse.t
+test-strip-cross.t
+test-strip.t
+test-unamend.t
test-uncommit.t
test-unified-test.t
test-unrelated-pull.t
+test-up-local-change.t
+test-update-branches.t
+test-update-dest.t
test-update-issue1456.t
test-update-names.t
test-update-reverse.t
+test-url-rev.t
+test-username-newline.t
+test-win32text.t
test-xdg.t
--- a/contrib/synthrepo.py Fri Feb 23 17:57:04 2018 -0800
+++ b/contrib/synthrepo.py Sat Feb 24 17:49:10 2018 -0600
@@ -381,7 +381,7 @@
ui.progress(_synthesizing, None)
message = 'synthesized wide repo with %d files' % (len(files),)
mc = context.memctx(repo, [pctx.node(), nullid], message,
- files.iterkeys(), filectxfn, ui.username(),
+ files, filectxfn, ui.username(),
'%d %d' % util.makedate())
initnode = mc.commit()
if ui.debugflag:
--- a/hgext/acl.py Fri Feb 23 17:57:04 2018 -0800
+++ b/hgext/acl.py Sat Feb 24 17:49:10 2018 -0600
@@ -200,6 +200,7 @@
error,
extensions,
match,
+ pycompat,
registrar,
util,
)
@@ -334,13 +335,13 @@
return
user = None
- if source == 'serve' and 'url' in kwargs:
- url = kwargs['url'].split(':')
+ if source == 'serve' and r'url' in kwargs:
+ url = kwargs[r'url'].split(':')
if url[0] == 'remote' and url[1].startswith('http'):
user = urlreq.unquote(url[3])
if user is None:
- user = getpass.getuser()
+ user = pycompat.bytestr(getpass.getuser())
ui.debug('acl: checking access for user "%s"\n' % user)
@@ -355,7 +356,7 @@
allow = buildmatch(ui, repo, user, 'acl.allow')
deny = buildmatch(ui, repo, user, 'acl.deny')
- for rev in xrange(repo[node], len(repo)):
+ for rev in xrange(repo[node].rev(), len(repo)):
ctx = repo[rev]
branch = ctx.branch()
if denybranches and denybranches(branch):
--- a/hgext/bugzilla.py Fri Feb 23 17:57:04 2018 -0800
+++ b/hgext/bugzilla.py Sat Feb 24 17:49:10 2018 -0600
@@ -300,8 +300,8 @@
from mercurial.i18n import _
from mercurial.node import short
from mercurial import (
- cmdutil,
error,
+ logcmdutil,
mail,
registrar,
url,
@@ -1090,9 +1090,8 @@
if not mapfile and not tmpl:
tmpl = _('changeset {node|short} in repo {root} refers '
'to bug {bug}.\ndetails:\n\t{desc|tabindent}')
- spec = cmdutil.logtemplatespec(tmpl, mapfile)
- t = cmdutil.changeset_templater(self.ui, self.repo, spec,
- False, None, False)
+ spec = logcmdutil.templatespec(tmpl, mapfile)
+ t = logcmdutil.changesettemplater(self.ui, self.repo, spec)
self.ui.pushbuffer()
t.show(ctx, changes=ctx.changeset(),
bug=str(bugid),
--- a/hgext/children.py Fri Feb 23 17:57:04 2018 -0800
+++ b/hgext/children.py Sat Feb 24 17:49:10 2018 -0600
@@ -19,6 +19,7 @@
from mercurial.i18n import _
from mercurial import (
cmdutil,
+ logcmdutil,
pycompat,
registrar,
)
@@ -65,7 +66,7 @@
ctx = repo[rev]
childctxs = ctx.children()
- displayer = cmdutil.show_changeset(ui, repo, opts)
+ displayer = logcmdutil.changesetdisplayer(ui, repo, opts)
for cctx in childctxs:
displayer.show(cctx)
displayer.close()
--- a/hgext/churn.py Fri Feb 23 17:57:04 2018 -0800
+++ b/hgext/churn.py Sat Feb 24 17:49:10 2018 -0600
@@ -18,6 +18,7 @@
from mercurial import (
cmdutil,
encoding,
+ logcmdutil,
patch,
pycompat,
registrar,
@@ -54,7 +55,7 @@
return date.strftime(opts['dateformat'])
else:
tmpl = opts.get('oldtemplate') or opts.get('template')
- tmpl = cmdutil.makelogtemplater(ui, repo, tmpl)
+ tmpl = logcmdutil.maketemplater(ui, repo, tmpl)
def getkey(ctx):
ui.pushbuffer()
tmpl.show(ctx)
@@ -170,7 +171,7 @@
ui.warn(_("skipping malformed alias: %s\n") % l)
continue
- rate = countrate(ui, repo, amap, *pats, **opts).items()
+ rate = list(countrate(ui, repo, amap, *pats, **opts).items())
if not rate:
return
--- a/hgext/convert/common.py Fri Feb 23 17:57:04 2018 -0800
+++ b/hgext/convert/common.py Sat Feb 24 17:49:10 2018 -0600
@@ -18,6 +18,7 @@
encoding,
error,
phases,
+ pycompat,
util,
)
@@ -322,6 +323,7 @@
pass
def _cmdline(self, cmd, *args, **kwargs):
+ kwargs = pycompat.byteskwargs(kwargs)
cmdline = [self.command, cmd] + list(args)
for k, v in kwargs.iteritems():
if len(k) == 1:
@@ -416,17 +418,17 @@
def _limit_arglist(self, arglist, cmd, *args, **kwargs):
cmdlen = len(self._cmdline(cmd, *args, **kwargs))
limit = self.argmax - cmdlen
- bytes = 0
+ numbytes = 0
fl = []
for fn in arglist:
b = len(fn) + 3
- if bytes + b < limit or len(fl) == 0:
+ if numbytes + b < limit or len(fl) == 0:
fl.append(fn)
- bytes += b
+ numbytes += b
else:
yield fl
fl = [fn]
- bytes = b
+ numbytes = b
if fl:
yield fl
@@ -447,7 +449,7 @@
if not self.path:
return
try:
- fp = open(self.path, 'r')
+ fp = open(self.path, 'rb')
except IOError as err:
if err.errno != errno.ENOENT:
raise
@@ -471,12 +473,12 @@
def __setitem__(self, key, value):
if self.fp is None:
try:
- self.fp = open(self.path, 'a')
+ self.fp = open(self.path, 'ab')
except IOError as err:
raise error.Abort(
_('could not open map file %r: %s') %
(self.path, encoding.strtolocal(err.strerror)))
- self.fp.write('%s %s\n' % (key, value))
+ self.fp.write(util.tonativeeol('%s %s\n' % (key, value)))
self.fp.flush()
super(mapfile, self).__setitem__(key, value)
--- a/hgext/convert/convcmd.py Fri Feb 23 17:57:04 2018 -0800
+++ b/hgext/convert/convcmd.py Sat Feb 24 17:49:10 2018 -0600
@@ -16,6 +16,7 @@
encoding,
error,
hg,
+ pycompat,
scmutil,
util,
)
@@ -55,9 +56,10 @@
def recode(s):
if isinstance(s, unicode):
- return s.encode(orig_encoding, 'replace')
+ return s.encode(pycompat.sysstr(orig_encoding), 'replace')
else:
- return s.decode('utf-8').encode(orig_encoding, 'replace')
+ return s.decode('utf-8').encode(
+ pycompat.sysstr(orig_encoding), 'replace')
def mapbranch(branch, branchmap):
'''
@@ -202,7 +204,7 @@
return {}
m = {}
try:
- fp = open(path, 'r')
+ fp = open(path, 'rb')
for i, line in enumerate(util.iterfile(fp)):
line = line.splitlines()[0].rstrip()
if not line:
@@ -407,13 +409,14 @@
authorfile = self.authorfile
if authorfile:
self.ui.status(_('writing author map file %s\n') % authorfile)
- ofile = open(authorfile, 'w+')
+ ofile = open(authorfile, 'wb+')
for author in self.authors:
- ofile.write("%s=%s\n" % (author, self.authors[author]))
+ ofile.write(util.tonativeeol("%s=%s\n"
+ % (author, self.authors[author])))
ofile.close()
def readauthormap(self, authorfile):
- afile = open(authorfile, 'r')
+ afile = open(authorfile, 'rb')
for line in afile:
line = line.strip()
@@ -564,6 +567,7 @@
self.map.close()
def convert(ui, src, dest=None, revmapfile=None, **opts):
+ opts = pycompat.byteskwargs(opts)
global orig_encoding
orig_encoding = encoding.encoding
encoding.encoding = 'UTF-8'
--- a/hgext/convert/cvs.py Fri Feb 23 17:57:04 2018 -0800
+++ b/hgext/convert/cvs.py Sat Feb 24 17:49:10 2018 -0600
@@ -46,8 +46,8 @@
self.tags = {}
self.lastbranch = {}
self.socket = None
- self.cvsroot = open(os.path.join(cvs, "Root")).read()[:-1]
- self.cvsrepo = open(os.path.join(cvs, "Repository")).read()[:-1]
+ self.cvsroot = open(os.path.join(cvs, "Root"), 'rb').read()[:-1]
+ self.cvsrepo = open(os.path.join(cvs, "Repository"), 'rb').read()[:-1]
self.encoding = encoding.encoding
self._connect()
@@ -141,7 +141,7 @@
passw = "A"
cvspass = os.path.expanduser("~/.cvspass")
try:
- pf = open(cvspass)
+ pf = open(cvspass, 'rb')
for line in pf.read().splitlines():
part1, part2 = line.split(' ', 1)
# /1 :pserver:user@example.com:2401/cvsroot/foo
@@ -179,7 +179,7 @@
# :ext:user@host/home/user/path/to/cvsroot
if root.startswith(":ext:"):
root = root[5:]
- m = re.match(r'(?:([^@:/]+)@)?([^:/]+):?(.*)', root)
+ m = re.match(br'(?:([^@:/]+)@)?([^:/]+):?(.*)', root)
# Do not take Windows path "c:\foo\bar" for a connection strings
if os.path.isdir(root) or not m:
conntype = "local"
--- a/hgext/convert/cvsps.py Fri Feb 23 17:57:04 2018 -0800
+++ b/hgext/convert/cvsps.py Sat Feb 24 17:49:10 2018 -0600
@@ -132,7 +132,7 @@
# Get the real directory in the repository
try:
- prefix = open(os.path.join('CVS','Repository')).read().strip()
+ prefix = open(os.path.join('CVS','Repository'), 'rb').read().strip()
directory = prefix
if prefix == ".":
prefix = ""
@@ -144,7 +144,7 @@
# Use the Root file in the sandbox, if it exists
try:
- root = open(os.path.join('CVS','Root')).read().strip()
+ root = open(os.path.join('CVS','Root'), 'rb').read().strip()
except IOError:
pass
@@ -177,7 +177,7 @@
if cache == 'update':
try:
ui.note(_('reading cvs log cache %s\n') % cachefile)
- oldlog = pickle.load(open(cachefile))
+ oldlog = pickle.load(open(cachefile, 'rb'))
for e in oldlog:
if not (util.safehasattr(e, 'branchpoints') and
util.safehasattr(e, 'commitid') and
@@ -486,7 +486,7 @@
# write the new cachefile
ui.note(_('writing cvs log cache %s\n') % cachefile)
- pickle.dump(log, open(cachefile, 'w'))
+ pickle.dump(log, open(cachefile, 'wb'))
else:
log = oldlog
@@ -855,6 +855,7 @@
repository, and convert the log to changesets based on matching
commit log entries and dates.
'''
+ opts = pycompat.byteskwargs(opts)
if opts["new_cache"]:
cache = "write"
elif opts["update_cache"]:
--- a/hgext/convert/filemap.py Fri Feb 23 17:57:04 2018 -0800
+++ b/hgext/convert/filemap.py Sat Feb 24 17:49:10 2018 -0600
@@ -71,7 +71,7 @@
(lex.infile, lex.lineno, listname, name))
return 1
return 0
- lex = shlex.shlex(open(path), path, True)
+ lex = shlex.shlex(open(path, 'rb'), path, True)
lex.wordchars += '!@#$%^&*()-=+[]{}|;:,./<>?'
cmd = lex.get_token()
while cmd:
--- a/hgext/convert/git.py Fri Feb 23 17:57:04 2018 -0800
+++ b/hgext/convert/git.py Sat Feb 24 17:49:10 2018 -0600
@@ -168,19 +168,19 @@
raise error.Abort(_('cannot retrieve git head "%s"') % rev)
return heads
- def catfile(self, rev, type):
+ def catfile(self, rev, ftype):
if rev == nodemod.nullhex:
raise IOError
self.catfilepipe[0].write(rev+'\n')
self.catfilepipe[0].flush()
info = self.catfilepipe[1].readline().split()
- if info[1] != type:
- raise error.Abort(_('cannot read %r object at %s') % (type, rev))
+ if info[1] != ftype:
+ raise error.Abort(_('cannot read %r object at %s') % (ftype, rev))
size = int(info[2])
data = self.catfilepipe[1].read(size)
if len(data) < size:
raise error.Abort(_('cannot read %r object at %s: unexpected size')
- % (type, rev))
+ % (ftype, rev))
# read the trailing newline
self.catfilepipe[1].read(1)
return data
--- a/hgext/convert/hg.py Fri Feb 23 17:57:04 2018 -0800
+++ b/hgext/convert/hg.py Sat Feb 24 17:49:10 2018 -0600
@@ -42,7 +42,7 @@
mapfile = common.mapfile
NoRepo = common.NoRepo
-sha1re = re.compile(r'\b[0-9a-f]{12,40}\b')
+sha1re = re.compile(br'\b[0-9a-f]{12,40}\b')
class mercurial_sink(common.converter_sink):
def __init__(self, ui, repotype, path):
@@ -563,12 +563,7 @@
if copysource in self.ignored:
continue
# Ignore copy sources not in parent revisions
- found = False
- for p in parents:
- if copysource in p:
- found = True
- break
- if not found:
+ if not any(copysource in p for p in parents):
continue
copies[name] = copysource
except TypeError:
@@ -625,8 +620,8 @@
def converted(self, rev, destrev):
if self.convertfp is None:
- self.convertfp = open(self.repo.vfs.join('shamap'), 'a')
- self.convertfp.write('%s %s\n' % (destrev, rev))
+ self.convertfp = open(self.repo.vfs.join('shamap'), 'ab')
+ self.convertfp.write(util.tonativeeol('%s %s\n' % (destrev, rev)))
self.convertfp.flush()
def before(self):
--- a/hgext/convert/monotone.py Fri Feb 23 17:57:04 2018 -0800
+++ b/hgext/convert/monotone.py Sat Feb 24 17:49:10 2018 -0600
@@ -13,6 +13,7 @@
from mercurial.i18n import _
from mercurial import (
error,
+ pycompat,
util,
)
@@ -36,7 +37,7 @@
if not os.path.exists(os.path.join(path, '_MTN')):
# Could be a monotone repository (SQLite db file)
try:
- f = file(path, 'rb')
+ f = open(path, 'rb')
header = f.read(16)
f.close()
except IOError:
@@ -45,11 +46,11 @@
raise norepo
# regular expressions for parsing monotone output
- space = r'\s*'
- name = r'\s+"((?:\\"|[^"])*)"\s*'
+ space = br'\s*'
+ name = br'\s+"((?:\\"|[^"])*)"\s*'
value = name
- revision = r'\s+\[(\w+)\]\s*'
- lines = r'(?:.|\n)+'
+ revision = br'\s+\[(\w+)\]\s*'
+ lines = br'(?:.|\n)+'
self.dir_re = re.compile(space + "dir" + name)
self.file_re = re.compile(space + "file" + name +
@@ -84,11 +85,12 @@
return self.mtnrunsingle(*args, **kwargs)
def mtnrunsingle(self, *args, **kwargs):
- kwargs['d'] = self.path
+ kwargs[r'd'] = self.path
return self.run0('automate', *args, **kwargs)
def mtnrunstdio(self, *args, **kwargs):
# Prepare the command in automate stdio format
+ kwargs = pycompat.byteskwargs(kwargs)
command = []
for k, v in kwargs.iteritems():
command.append("%s:%s" % (len(k), k))
--- a/hgext/convert/subversion.py Fri Feb 23 17:57:04 2018 -0800
+++ b/hgext/convert/subversion.py Sat Feb 24 17:49:10 2018 -0600
@@ -231,7 +231,7 @@
def httpcheck(ui, path, proto):
try:
opener = urlreq.buildopener()
- rsp = opener.open('%s://%s/!svn/ver/0/.svn' % (proto, path))
+ rsp = opener.open('%s://%s/!svn/ver/0/.svn' % (proto, path), 'rb')
data = rsp.read()
except urlerr.httperror as inst:
if inst.code != 404:
@@ -384,7 +384,7 @@
def setrevmap(self, revmap):
lastrevs = {}
- for revid in revmap.iterkeys():
+ for revid in revmap:
uuid, module, revnum = revsplit(revid)
lastrevnum = lastrevs.setdefault(module, revnum)
if revnum > lastrevnum:
@@ -639,8 +639,9 @@
return
if self.convertfp is None:
self.convertfp = open(os.path.join(self.wc, '.svn', 'hg-shamap'),
- 'a')
- self.convertfp.write('%s %d\n' % (destrev, self.revnum(rev)))
+ 'ab')
+ self.convertfp.write(util.tonativeeol('%s %d\n'
+ % (destrev, self.revnum(rev))))
self.convertfp.flush()
def revid(self, revnum, module=None):
@@ -1158,7 +1159,7 @@
if created:
hook = os.path.join(created, 'hooks', 'pre-revprop-change')
- fp = open(hook, 'w')
+ fp = open(hook, 'wb')
fp.write(pre_revprop_change)
fp.close()
util.setflags(hook, False, True)
@@ -1308,8 +1309,8 @@
self.setexec = []
fd, messagefile = tempfile.mkstemp(prefix='hg-convert-')
- fp = os.fdopen(fd, pycompat.sysstr('w'))
- fp.write(commit.desc)
+ fp = os.fdopen(fd, pycompat.sysstr('wb'))
+ fp.write(util.tonativeeol(commit.desc))
fp.close()
try:
output = self.run0('commit',
--- a/hgext/extdiff.py Fri Feb 23 17:57:04 2018 -0800
+++ b/hgext/extdiff.py Sat Feb 24 17:49:10 2018 -0600
@@ -88,12 +88,12 @@
configtable = {}
configitem = registrar.configitem(configtable)
-configitem('extdiff', r'opts\..*',
+configitem('extdiff', br'opts\..*',
default='',
generic=True,
)
-configitem('diff-tools', r'.*\.diffargs$',
+configitem('diff-tools', br'.*\.diffargs$',
default=None,
generic=True,
)
@@ -256,8 +256,8 @@
cmdutil.export(repo, [repo[node1a].rev(), repo[node2].rev()],
fntemplate=repo.vfs.reljoin(tmproot, template),
match=matcher)
- label1a = cmdutil.makefilename(repo, template, node1a)
- label2 = cmdutil.makefilename(repo, template, node2)
+ label1a = cmdutil.makefilename(repo[node1a], template)
+ label2 = cmdutil.makefilename(repo[node2], template)
dir1a = repo.vfs.reljoin(tmproot, label1a)
dir2 = repo.vfs.reljoin(tmproot, label2)
dir1b = None
@@ -279,13 +279,13 @@
return pre + util.shellquote(replace[key])
# Match parent2 first, so 'parent1?' will match both parent1 and parent
- regex = (r'''(['"]?)([^\s'"$]*)'''
- r'\$(parent2|parent1?|child|plabel1|plabel2|clabel|root)\1')
+ regex = (br'''(['"]?)([^\s'"$]*)'''
+ br'\$(parent2|parent1?|child|plabel1|plabel2|clabel|root)\1')
if not do3way and not re.search(regex, cmdline):
cmdline += ' $parent1 $child'
cmdline = re.sub(regex, quote, cmdline)
- ui.debug('running %r in %s\n' % (cmdline, tmproot))
+ ui.debug('running %r in %s\n' % (pycompat.bytestr(cmdline), tmproot))
ui.system(cmdline, cwd=tmproot, blockedtag='extdiff')
for copy_fn, working_fn, st in fnsandstat:
@@ -366,7 +366,7 @@
# We can't pass non-ASCII through docstrings (and path is
# in an unknown encoding anyway)
docpath = util.escapestr(path)
- self.__doc__ = self.__doc__ % {'path': util.uirepr(docpath)}
+ self.__doc__ %= {r'path': pycompat.sysstr(util.uirepr(docpath))}
self._cmdline = cmdline
def __call__(self, ui, repo, *pats, **opts):
--- a/hgext/githelp.py Fri Feb 23 17:57:04 2018 -0800
+++ b/hgext/githelp.py Sat Feb 24 17:49:10 2018 -0600
@@ -22,6 +22,7 @@
from mercurial.i18n import _
from mercurial import (
+ encoding,
error,
fancyopts,
registrar,
@@ -109,7 +110,7 @@
self.args = []
self.opts = {}
- def __str__(self):
+ def __bytes__(self):
cmd = "hg " + self.name
if self.opts:
for k, values in sorted(self.opts.iteritems()):
@@ -123,6 +124,8 @@
cmd += " ".join(self.args)
return cmd
+ __str__ = encoding.strmethod(__bytes__)
+
def append(self, value):
self.args.append(value)
@@ -167,14 +170,14 @@
ui.status(_("note: use hg addremove to remove files that have "
"been deleted.\n\n"))
- ui.status((str(cmd)), "\n")
+ ui.status((bytes(cmd)), "\n")
def am(ui, repo, *args, **kwargs):
cmdoptions=[
]
args, opts = parseoptions(ui, cmdoptions, args)
cmd = Command('import')
- ui.status(str(cmd), "\n")
+ ui.status(bytes(cmd), "\n")
def apply(ui, repo, *args, **kwargs):
cmdoptions = [
@@ -187,7 +190,7 @@
cmd['-p'] = opts.get('p')
cmd.extend(args)
- ui.status((str(cmd)), "\n")
+ ui.status((bytes(cmd)), "\n")
def bisect(ui, repo, *args, **kwargs):
ui.status(_("See 'hg help bisect' for how to use bisect.\n\n"))
@@ -198,7 +201,7 @@
args, opts = parseoptions(ui, cmdoptions, args)
cmd = Command('annotate -udl')
cmd.extend([convert(v) for v in args])
- ui.status((str(cmd)), "\n")
+ ui.status((bytes(cmd)), "\n")
def branch(ui, repo, *args, **kwargs):
cmdoptions = [
@@ -239,7 +242,7 @@
cmd.append(args[0])
elif len(args) == 1:
cmd.append(args[0])
- ui.status((str(cmd)), "\n")
+ ui.status((bytes(cmd)), "\n")
def ispath(repo, string):
"""
@@ -330,7 +333,7 @@
else:
raise error.Abort("a commit must be specified")
- ui.status((str(cmd)), "\n")
+ ui.status((bytes(cmd)), "\n")
def cherrypick(ui, repo, *args, **kwargs):
cmdoptions = [
@@ -352,7 +355,7 @@
else:
cmd.extend(args)
- ui.status((str(cmd)), "\n")
+ ui.status((bytes(cmd)), "\n")
def clean(ui, repo, *args, **kwargs):
cmdoptions = [
@@ -367,7 +370,7 @@
cmd['--all'] = None
cmd.extend(args)
- ui.status((str(cmd)), "\n")
+ ui.status((bytes(cmd)), "\n")
def clone(ui, repo, *args, **kwargs):
cmdoptions = [
@@ -397,7 +400,7 @@
cocmd.append(opts.get('branch'))
cmd = cmd & cocmd
- ui.status((str(cmd)), "\n")
+ ui.status((bytes(cmd)), "\n")
def commit(ui, repo, *args, **kwargs):
cmdoptions = [
@@ -445,7 +448,7 @@
cmd.extend(args)
- ui.status((str(cmd)), "\n")
+ ui.status((bytes(cmd)), "\n")
def deprecated(ui, repo, *args, **kwargs):
ui.warn(_('This command has been deprecated in the git project, ' +
@@ -476,7 +479,7 @@
except Exception:
cmd.append(a)
- ui.status((str(cmd)), "\n")
+ ui.status((bytes(cmd)), "\n")
def difftool(ui, repo, *args, **kwargs):
ui.status(_('Mercurial does not enable external difftool by default. You '
@@ -509,7 +512,7 @@
else:
cmd['-r'] = v
- ui.status((str(cmd)), "\n")
+ ui.status((bytes(cmd)), "\n")
def grep(ui, repo, *args, **kwargs):
cmdoptions = [
@@ -522,7 +525,7 @@
# pattern first, followed by paths.
cmd.extend(args)
- ui.status((str(cmd)), "\n")
+ ui.status((bytes(cmd)), "\n")
def init(ui, repo, *args, **kwargs):
cmdoptions = [
@@ -534,7 +537,7 @@
if len(args) > 0:
cmd.append(args[0])
- ui.status((str(cmd)), "\n")
+ ui.status((bytes(cmd)), "\n")
def log(ui, repo, *args, **kwargs):
cmdoptions = [
@@ -588,7 +591,7 @@
del args[0]
cmd.extend(args)
- ui.status((str(cmd)), "\n")
+ ui.status((bytes(cmd)), "\n")
def lsfiles(ui, repo, *args, **kwargs):
cmdoptions = [
@@ -624,7 +627,7 @@
for include in args:
cmd['-I'] = util.shellquote(include)
- ui.status((str(cmd)), "\n")
+ ui.status((bytes(cmd)), "\n")
def merge(ui, repo, *args, **kwargs):
cmdoptions = [
@@ -636,7 +639,7 @@
if len(args) > 0:
cmd.append(args[len(args) - 1])
- ui.status((str(cmd)), "\n")
+ ui.status((bytes(cmd)), "\n")
def mergebase(ui, repo, *args, **kwargs):
cmdoptions = []
@@ -650,7 +653,7 @@
ui.status(_('NOTE: ancestors() is part of the revset language.\n'),
_("Learn more about revsets with 'hg help revsets'\n\n"))
- ui.status((str(cmd)), "\n")
+ ui.status((bytes(cmd)), "\n")
def mergetool(ui, repo, *args, **kwargs):
cmdoptions = []
@@ -661,7 +664,7 @@
if len(args) == 0:
cmd['--all'] = None
cmd.extend(args)
- ui.status((str(cmd)), "\n")
+ ui.status((bytes(cmd)), "\n")
def mv(ui, repo, *args, **kwargs):
cmdoptions = [
@@ -675,7 +678,7 @@
if opts.get('force'):
cmd['-f'] = None
- ui.status((str(cmd)), "\n")
+ ui.status((bytes(cmd)), "\n")
def pull(ui, repo, *args, **kwargs):
cmdoptions = [
@@ -701,7 +704,7 @@
else:
cmd['-r'] = v
- ui.status((str(cmd)), "\n")
+ ui.status((bytes(cmd)), "\n")
def push(ui, repo, *args, **kwargs):
cmdoptions = [
@@ -728,7 +731,7 @@
if opts.get('force'):
cmd['-f'] = None
- ui.status((str(cmd)), "\n")
+ ui.status((bytes(cmd)), "\n")
def rebase(ui, repo, *args, **kwargs):
cmdoptions = [
@@ -748,12 +751,12 @@
if len(args) > 0:
ui.status(_("also note: 'hg histedit' will automatically detect"
" your stack, so no second argument is necessary.\n\n"))
- ui.status((str(cmd)), "\n")
+ ui.status((bytes(cmd)), "\n")
return
if opts.get('skip'):
cmd = Command('revert --all -r .')
- ui.status((str(cmd)), "\n")
+ ui.status((bytes(cmd)), "\n")
cmd = Command('rebase')
@@ -777,7 +780,7 @@
cmd['-d'] = convert(args[0])
cmd['-b'] = convert(args[1])
- ui.status((str(cmd)), "\n")
+ ui.status((bytes(cmd)), "\n")
def reflog(ui, repo, *args, **kwargs):
cmdoptions = [
@@ -791,7 +794,7 @@
if len(args) > 0:
cmd.append(args[0])
- ui.status(str(cmd), "\n\n")
+ ui.status(bytes(cmd), "\n\n")
ui.status(_("note: in hg commits can be deleted from repo but we always"
" have backups.\n"))
@@ -819,7 +822,7 @@
cmd.append(commit)
- ui.status((str(cmd)), "\n")
+ ui.status((bytes(cmd)), "\n")
def revert(ui, repo, *args, **kwargs):
cmdoptions = [
@@ -834,7 +837,7 @@
if args:
cmd.append(args[0])
- ui.status((str(cmd)), "\n")
+ ui.status((bytes(cmd)), "\n")
def revparse(ui, repo, *args, **kwargs):
cmdoptions = [
@@ -847,7 +850,7 @@
cmd = Command('root')
if opts.get('show_cdup'):
ui.status(_("note: hg root prints the root of the repository\n\n"))
- ui.status((str(cmd)), "\n")
+ ui.status((bytes(cmd)), "\n")
else:
ui.status(_("note: see hg help revset for how to refer to commits\n"))
@@ -866,7 +869,7 @@
if opts.get('dry_run'):
cmd['-n'] = None
- ui.status((str(cmd)), "\n")
+ ui.status((bytes(cmd)), "\n")
def show(ui, repo, *args, **kwargs):
cmdoptions = [
@@ -898,7 +901,7 @@
else:
cmd = Command('export')
- ui.status((str(cmd)), "\n")
+ ui.status((bytes(cmd)), "\n")
def stash(ui, repo, *args, **kwargs):
cmdoptions = [
@@ -934,7 +937,7 @@
elif len(args) > 1:
cmd['--name'] = args[1]
- ui.status((str(cmd)), "\n")
+ ui.status((bytes(cmd)), "\n")
def status(ui, repo, *args, **kwargs):
cmdoptions = [
@@ -948,7 +951,7 @@
if opts.get('ignored'):
cmd['-i'] = None
- ui.status((str(cmd)), "\n")
+ ui.status((bytes(cmd)), "\n")
def svn(ui, repo, *args, **kwargs):
svncmd = args[0]
@@ -965,7 +968,7 @@
cmd = Command('push')
- ui.status((str(cmd)), "\n")
+ ui.status((bytes(cmd)), "\n")
def svnfetch(ui, repo, *args, **kwargs):
cmdoptions = [
@@ -975,7 +978,7 @@
cmd = Command('pull')
cmd.append('default-push')
- ui.status((str(cmd)), "\n")
+ ui.status((bytes(cmd)), "\n")
def svnfindrev(ui, repo, *args, **kwargs):
cmdoptions = [
@@ -985,7 +988,7 @@
cmd = Command('log')
cmd['-r'] = args[0]
- ui.status((str(cmd)), "\n")
+ ui.status((bytes(cmd)), "\n")
def svnrebase(ui, repo, *args, **kwargs):
cmdoptions = [
@@ -1000,7 +1003,7 @@
cmd = pullcmd & rebasecmd
- ui.status((str(cmd)), "\n")
+ ui.status((bytes(cmd)), "\n")
def tag(ui, repo, *args, **kwargs):
cmdoptions = [
@@ -1024,7 +1027,7 @@
if opts.get('force'):
cmd['-f'] = None
- ui.status((str(cmd)), "\n")
+ ui.status((bytes(cmd)), "\n")
gitcommands = {
'add': add,
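
The hunks above replace str(cmd) with bytes(cmd) so the suggested hg command stays a byte string when printed through ui.status() on Python 3. Below is a minimal sketch of that __bytes__-first pattern; this Command class is a stand-in for illustration, not the extension's actual implementation, and the decode step merely approximates what Mercurial's encoding helpers do.

# Sketch of the __bytes__-first pattern behind the str(cmd) -> bytes(cmd)
# changes above. This Command class is illustrative only.
class Command(object):
    def __init__(self, name):
        self.name = name
        self.args = []

    def append(self, arg):
        self.args.append(arg)

    def __bytes__(self):
        # the canonical serialization is a byte string
        return b' '.join([b'hg', self.name] + self.args)

    def __str__(self):
        # derive the text form from the bytes form (roughly what
        # Mercurial's encoding helpers provide)
        return self.__bytes__().decode('utf-8')

cmd = Command(b'status')
cmd.append(b'--ignored')
print(bytes(cmd))   # b'hg status --ignored'
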
--- a/hgext/gpg.py Fri Feb 23 17:57:04 2018 -0800
+++ b/hgext/gpg.py Sat Feb 24 17:49:10 2018 -0600
@@ -153,8 +153,7 @@
# warn for expired key and/or sigs
for key in keys:
if key[0] == "ERRSIG":
- ui.write(_("%s Unknown key ID \"%s\"\n")
- % (prefix, shortkey(ui, key[1][:15])))
+ ui.write(_("%s Unknown key ID \"%s\"\n") % (prefix, key[1]))
continue
if key[0] == "BADSIG":
ui.write(_("%s Bad signature from \"%s\"\n") % (prefix, key[2]))
@@ -320,13 +319,6 @@
except ValueError as inst:
raise error.Abort(str(inst))
-def shortkey(ui, key):
- if len(key) != 16:
- ui.debug("key ID \"%s\" format error\n" % key)
- return key
-
- return key[-8:]
-
def node2txt(repo, node, ver):
"""map a manifest into some text"""
if ver == "0":
--- a/hgext/histedit.py Fri Feb 23 17:57:04 2018 -0800
+++ b/hgext/histedit.py Sat Feb 24 17:49:10 2018 -0600
@@ -344,7 +344,7 @@
fp.write('v1\n')
fp.write('%s\n' % node.hex(self.parentctxnode))
fp.write('%s\n' % node.hex(self.topmost))
- fp.write('%s\n' % self.keep)
+ fp.write('%s\n' % ('True' if self.keep else 'False'))
fp.write('%d\n' % len(self.actions))
for action in self.actions:
fp.write('%s\n' % action.tostate())
@@ -491,7 +491,7 @@
repo.dirstate.setbranch(rulectx.branch())
if stats and stats[3] > 0:
buf = repo.ui.popbuffer()
- repo.ui.write(*buf)
+ repo.ui.write(buf)
raise error.InterventionRequired(
_('Fix up the change (%s %s)') %
(self.verb, node.short(self.node)),
@@ -1415,9 +1415,8 @@
# Save edit rules in .hg/histedit-last-edit.txt in case
# the user needs to ask for help after something
# surprising happens.
- f = open(repo.vfs.join('histedit-last-edit.txt'), 'w')
- f.write(rules)
- f.close()
+ with repo.vfs('histedit-last-edit.txt', 'wb') as f:
+ f.write(rules)
return rules
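
Two small porting patterns show up in the histedit hunks: the keep flag is written as an explicit 'True'/'False' token instead of formatting a bool directly into the state file, and histedit-last-edit.txt is written through a context manager so the file is closed even if the write fails. A rough standalone sketch of both follows, with plain open() standing in for repo.vfs and a made-up path and field layout.

# Rough sketch of the two patterns above; plain file I/O stands in for
# repo.vfs, and the path/field layout is illustrative.
def writestate(path, keep, rules):
    keepbytes = b'True' if keep else b'False'   # explicit token, never a bool
    with open(path, 'wb') as fp:                # always closed, even on error
        fp.write(b'v1\n')
        fp.write(b'%s\n' % keepbytes)
        fp.write(rules)

def readkeep(path):
    with open(path, 'rb') as fp:
        assert fp.readline() == b'v1\n'
        return fp.readline().strip() == b'True'

writestate('histedit-state-example', True, b'pick 0123abcd\n')
print(readkeep('histedit-state-example'))   # True
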
--- a/hgext/journal.py Fri Feb 23 17:57:04 2018 -0800
+++ b/hgext/journal.py Sat Feb 24 17:49:10 2018 -0600
@@ -29,14 +29,13 @@
hg,
localrepo,
lock,
+ logcmdutil,
node,
pycompat,
registrar,
util,
)
-from . import share
-
cmdtable = {}
command = registrar.command(cmdtable)
@@ -168,7 +167,7 @@
"""Copy shared journal entries into this repo when unsharing"""
if (repo.path == repopath and repo.shared() and
util.safehasattr(repo, 'journal')):
- sharedrepo = share._getsrcrepo(repo)
+ sharedrepo = hg.sharedreposource(repo)
sharedfeatures = _readsharedfeatures(repo)
if sharedrepo and sharedfeatures > {'journal'}:
# there is a shared repository and there are shared journal entries
@@ -257,7 +256,7 @@
self.sharedfeatures = self.sharedvfs = None
if repo.shared():
features = _readsharedfeatures(repo)
- sharedrepo = share._getsrcrepo(repo)
+ sharedrepo = hg.sharedreposource(repo)
if sharedrepo is not None and 'journal' in features:
self.sharedvfs = sharedrepo.vfs
self.sharedfeatures = features
@@ -478,7 +477,7 @@
displayname = "'%s'" % name
ui.status(_("previous locations of %s:\n") % displayname)
- limit = cmdutil.loglimit(opts)
+ limit = logcmdutil.getlimit(opts)
entry = None
ui.pager('journal')
for count, entry in enumerate(repo.journal.filtered(name=name)):
@@ -502,7 +501,7 @@
fm.write('command', ' %s\n', entry.command)
if opts.get("commits"):
- displayer = cmdutil.show_changeset(ui, repo, opts, buffered=False)
+ displayer = logcmdutil.changesetdisplayer(ui, repo, opts)
for hash in entry.newhashes:
try:
ctx = repo[hash]
--- a/hgext/keyword.py Fri Feb 23 17:57:04 2018 -0800
+++ b/hgext/keyword.py Sat Feb 24 17:49:10 2018 -0600
@@ -101,6 +101,7 @@
extensions,
filelog,
localrepo,
+ logcmdutil,
match,
patch,
pathutil,
@@ -254,7 +255,7 @@
'''Replaces keywords in data with expanded template.'''
def kwsub(mobj):
kw = mobj.group(1)
- ct = cmdutil.makelogtemplater(self.ui, self.repo,
+ ct = logcmdutil.maketemplater(self.ui, self.repo,
self.templates[kw])
self.ui.pushbuffer()
ct.show(ctx, root=self.repo.root, file=path)
--- a/hgext/largefiles/lfcommands.py Fri Feb 23 17:57:04 2018 -0800
+++ b/hgext/largefiles/lfcommands.py Sat Feb 24 17:49:10 2018 -0600
@@ -365,7 +365,7 @@
at = 0
ui.debug("sending statlfile command for %d largefiles\n" % len(files))
retval = store.exists(files)
- files = filter(lambda h: not retval[h], files)
+ files = [h for h in files if not retval[h]]
ui.debug("%d largefiles need to be uploaded\n" % len(files))
for hash in files:
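
The filter() -> list comprehension change matters because filter() returns a lazy iterator on Python 3, so the len(files) in the debug message right below it would raise TypeError. A tiny self-contained illustration (the hashes here are made up):

# filter() is lazy on Python 3; a list comprehension keeps `files` a real
# list so len() and repeated iteration still work. Hashes are made up.
retval = {'a' * 40: True, 'b' * 40: False, 'c' * 40: False}
files = sorted(retval)

files = [h for h in files if not retval[h]]
print('%d largefiles need to be uploaded' % len(files))   # 2 largefiles ...
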
--- a/hgext/largefiles/lfutil.py Fri Feb 23 17:57:04 2018 -0800
+++ b/hgext/largefiles/lfutil.py Sat Feb 24 17:49:10 2018 -0600
@@ -15,6 +15,7 @@
import stat
from mercurial.i18n import _
+from mercurial.node import hex
from mercurial import (
dirstate,
@@ -371,7 +372,7 @@
for data in instream:
hasher.update(data)
outfile.write(data)
- return hasher.hexdigest()
+ return hex(hasher.digest())
def hashfile(file):
if not os.path.exists(file):
@@ -404,7 +405,7 @@
h = hashlib.sha1()
for chunk in util.filechunkiter(fileobj):
h.update(chunk)
- return h.hexdigest()
+ return hex(h.digest())
def httpsendfile(ui, filename):
return httpconnection.httpsendfile(ui, filename, 'rb')
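
The hexsha1()/hashfile() hunks swap hasher.hexdigest() for hex(hasher.digest()) because hexdigest() returns a native (unicode) str on Python 3, while the callers expect a byte string. A quick illustration, using binascii.hexlify as a stand-in for mercurial.node.hex:

import hashlib
from binascii import hexlify   # stand-in for mercurial.node.hex

h = hashlib.sha1(b'some largefile content')
print(type(h.hexdigest()).__name__)        # 'str' on Python 3 (unicode)
print(type(hexlify(h.digest())).__name__)  # 'bytes', what the callers expect
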
--- a/hgext/largefiles/overrides.py Fri Feb 23 17:57:04 2018 -0800
+++ b/hgext/largefiles/overrides.py Sat Feb 24 17:49:10 2018 -0600
@@ -19,6 +19,7 @@
cmdutil,
error,
hg,
+ logcmdutil,
match as matchmod,
pathutil,
pycompat,
@@ -41,7 +42,7 @@
matcher'''
m = copy.copy(match)
lfile = lambda f: lfutil.standin(f) in manifest
- m._files = filter(lfile, m._files)
+ m._files = [lf for lf in m._files if lfile(lf)]
m._fileset = set(m._files)
m.always = lambda: False
origmatchfn = m.matchfn
@@ -56,7 +57,7 @@
m = copy.copy(match)
notlfile = lambda f: not (lfutil.isstandin(f) or lfutil.standin(f) in
manifest or f in excluded)
- m._files = filter(notlfile, m._files)
+ m._files = [lf for lf in m._files if notlfile(lf)]
m._fileset = set(m._files)
m.always = lambda: False
origmatchfn = m.matchfn
@@ -388,20 +389,20 @@
# (2) to determine what files to print out diffs for.
# The magic matchandpats override should be used for case (1) but not for
# case (2).
- def overridemakelogfilematcher(repo, pats, opts, badfn=None):
+ def overridemakefilematcher(repo, pats, opts, badfn=None):
wctx = repo[None]
match, pats = oldmatchandpats(wctx, pats, opts, badfn=badfn)
- return lambda rev: match
+ return lambda ctx: match
oldmatchandpats = installmatchandpatsfn(overridematchandpats)
- oldmakelogfilematcher = cmdutil._makenofollowlogfilematcher
- setattr(cmdutil, '_makenofollowlogfilematcher', overridemakelogfilematcher)
+ oldmakefilematcher = logcmdutil._makenofollowfilematcher
+ setattr(logcmdutil, '_makenofollowfilematcher', overridemakefilematcher)
try:
return orig(ui, repo, *pats, **opts)
finally:
restorematchandpatsfn()
- setattr(cmdutil, '_makenofollowlogfilematcher', oldmakelogfilematcher)
+ setattr(logcmdutil, '_makenofollowfilematcher', oldmakefilematcher)
def overrideverify(orig, ui, repo, *pats, **opts):
large = opts.pop(r'large', False)
@@ -1237,10 +1238,11 @@
matchfn = m.matchfn
m.matchfn = lambda f: f in s.deleted and matchfn(f)
- removelargefiles(repo.ui, repo, True, m, **opts)
+ removelargefiles(repo.ui, repo, True, m, **pycompat.strkwargs(opts))
# Call into the normal add code, and any files that *should* be added as
# largefiles will be
- added, bad = addlargefiles(repo.ui, repo, True, matcher, **opts)
+ added, bad = addlargefiles(repo.ui, repo, True, matcher,
+ **pycompat.strkwargs(opts))
# Now that we've handled largefiles, hand off to the original addremove
# function to take care of the rest. Make sure it doesn't do anything with
# largefiles by passing a matcher that will ignore them.
@@ -1358,8 +1360,7 @@
m.visitdir = lfvisitdirfn
for f in ctx.walk(m):
- with cmdutil.makefileobj(repo, opts.get('output'), ctx.node(),
- pathname=f) as fp:
+ with cmdutil.makefileobj(ctx, opts.get('output'), pathname=f) as fp:
lf = lfutil.splitstandin(f)
if lf is None or origmatchfn(f):
# duplicating unreachable code from commands.cat
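
Several hunks above wrap **opts in pycompat.strkwargs() before re-expanding it. The reason is that ** expansion requires native-str keys on Python 3, while Mercurial keeps option names as bytes internally. The helpers below are simplified stand-ins for pycompat.strkwargs()/byteskwargs(), shown only to illustrate the shape of the conversion; real code should use Mercurial's helpers.

# Simplified stand-ins for pycompat.strkwargs()/byteskwargs().
def strkwargs(opts):
    return {(k.decode('latin-1') if isinstance(k, bytes) else k): v
            for k, v in opts.items()}

def byteskwargs(opts):
    return {(k.encode('latin-1') if not isinstance(k, bytes) else k): v
            for k, v in opts.items()}

def addlargefiles(**opts):               # ** expansion needs str keys on Python 3
    return opts.get('large', False)

opts = {b'large': True}                  # internal, bytes-keyed options
print(addlargefiles(**strkwargs(opts)))  # True
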
--- a/hgext/largefiles/proto.py Fri Feb 23 17:57:04 2018 -0800
+++ b/hgext/largefiles/proto.py Sat Feb 24 17:49:10 2018 -0600
@@ -14,6 +14,7 @@
httppeer,
util,
wireproto,
+ wireprototypes,
)
from . import (
@@ -34,27 +35,26 @@
def putlfile(repo, proto, sha):
'''Server command for putting a largefile into a repository's local store
and into the user cache.'''
- proto.redirect()
-
- path = lfutil.storepath(repo, sha)
- util.makedirs(os.path.dirname(path))
- tmpfp = util.atomictempfile(path, createmode=repo.store.createmode)
+ with proto.mayberedirectstdio() as output:
+ path = lfutil.storepath(repo, sha)
+ util.makedirs(os.path.dirname(path))
+ tmpfp = util.atomictempfile(path, createmode=repo.store.createmode)
- try:
- proto.getfile(tmpfp)
- tmpfp._fp.seek(0)
- if sha != lfutil.hexsha1(tmpfp._fp):
- raise IOError(0, _('largefile contents do not match hash'))
- tmpfp.close()
- lfutil.linktousercache(repo, sha)
- except IOError as e:
- repo.ui.warn(_('largefiles: failed to put %s into store: %s\n') %
- (sha, e.strerror))
- return wireproto.pushres(1)
- finally:
- tmpfp.discard()
+ try:
+ proto.forwardpayload(tmpfp)
+ tmpfp._fp.seek(0)
+ if sha != lfutil.hexsha1(tmpfp._fp):
+ raise IOError(0, _('largefile contents do not match hash'))
+ tmpfp.close()
+ lfutil.linktousercache(repo, sha)
+ except IOError as e:
+ repo.ui.warn(_('largefiles: failed to put %s into store: %s\n') %
+ (sha, e.strerror))
+ return wireproto.pushres(1, output.getvalue() if output else '')
+ finally:
+ tmpfp.discard()
- return wireproto.pushres(0)
+ return wireproto.pushres(0, output.getvalue() if output else '')
def getlfile(repo, proto, sha):
'''Server command for retrieving a largefile from the repository-local
@@ -86,8 +86,8 @@
server side.'''
filename = lfutil.findfile(repo, sha)
if not filename:
- return '2\n'
- return '0\n'
+ return wireprototypes.bytesresponse('2\n')
+ return wireprototypes.bytesresponse('0\n')
def wirereposetup(ui, repo):
class lfileswirerepository(repo.__class__):
@@ -180,7 +180,7 @@
args[r'cmds'] = args[r'cmds'].replace('heads ', 'lheads ')
return ssholdcallstream(self, cmd, **args)
-headsre = re.compile(r'(^|;)heads\b')
+headsre = re.compile(br'(^|;)heads\b')
def httprepocallstream(self, cmd, **args):
if cmd == 'heads' and self.capable('largefiles'):
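
putlfile now runs inside proto.mayberedirectstdio() and hands the captured output back through wireproto.pushres(), instead of the old proto.redirect() call. The sketch below mimics that capture pattern with a plain contextmanager; the name and behaviour are illustrative, not the wireproto API itself.

import contextlib
import io
import sys

@contextlib.contextmanager
def mayberedirectstdio():
    # capture everything written to stdout for the duration of the block
    buf = io.StringIO()
    saved, sys.stdout = sys.stdout, buf
    try:
        yield buf
    finally:
        sys.stdout = saved

with mayberedirectstdio() as output:
    print('hook chatter during the upload')
    result = 0        # 0 = success, 1 = failure, as in pushres()

print((result, output.getvalue() if output else ''))
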
--- a/hgext/largefiles/storefactory.py Fri Feb 23 17:57:04 2018 -0800
+++ b/hgext/largefiles/storefactory.py Sat Feb 24 17:49:10 2018 -0600
@@ -80,7 +80,7 @@
'ssh': [wirestore.wirestore],
}
-_scheme_re = re.compile(r'^([a-zA-Z0-9+-.]+)://')
+_scheme_re = re.compile(br'^([a-zA-Z0-9+-.]+)://')
def getlfile(ui, hash):
return util.chunkbuffer(openstore(ui=ui)._get(hash))
--- a/hgext/largefiles/uisetup.py Fri Feb 23 17:57:04 2018 -0800
+++ b/hgext/largefiles/uisetup.py Sat Feb 24 17:49:10 2018 -0600
@@ -165,13 +165,13 @@
overrides.openlargefile)
# create the new wireproto commands ...
- wireproto.commands['putlfile'] = (proto.putlfile, 'sha')
- wireproto.commands['getlfile'] = (proto.getlfile, 'sha')
- wireproto.commands['statlfile'] = (proto.statlfile, 'sha')
+ wireproto.wireprotocommand('putlfile', 'sha')(proto.putlfile)
+ wireproto.wireprotocommand('getlfile', 'sha')(proto.getlfile)
+ wireproto.wireprotocommand('statlfile', 'sha')(proto.statlfile)
+ wireproto.wireprotocommand('lheads', '')(wireproto.heads)
# ... and wrap some existing ones
- wireproto.commands['heads'] = (proto.heads, '')
- wireproto.commands['lheads'] = (wireproto.heads, '')
+ wireproto.commands['heads'].func = proto.heads
# make putlfile behave the same as push and {get,stat}lfile behave
# the same as pull w.r.t. permissions checks
@@ -185,9 +185,9 @@
# can't do this in reposetup because it needs to have happened before
# wirerepo.__init__ is called
- proto.ssholdcallstream = sshpeer.sshpeer._callstream
+ proto.ssholdcallstream = sshpeer.sshv1peer._callstream
proto.httpoldcallstream = httppeer.httppeer._callstream
- sshpeer.sshpeer._callstream = proto.sshrepocallstream
+ sshpeer.sshv1peer._callstream = proto.sshrepocallstream
httppeer.httppeer._callstream = proto.httprepocallstream
# override some extensions' stuff as well
--- a/hgext/lfs/__init__.py Fri Feb 23 17:57:04 2018 -0800
+++ b/hgext/lfs/__init__.py Sat Feb 24 17:49:10 2018 -0600
@@ -192,6 +192,7 @@
command = registrar.command(cmdtable)
templatekeyword = registrar.templatekeyword()
+filesetpredicate = registrar.filesetpredicate()
def featuresetup(ui, supported):
# don't die on seeing a repo with the lfs requirement
@@ -211,7 +212,7 @@
class lfsrepo(repo.__class__):
@localrepo.unfilteredmethod
def commitctx(self, ctx, error=False):
- repo.svfs.options['lfstrack'] = _trackedmatcher(self, ctx)
+ repo.svfs.options['lfstrack'] = _trackedmatcher(self)
return super(lfsrepo, self).commitctx(ctx, error)
repo.__class__ = lfsrepo
@@ -238,7 +239,7 @@
else:
repo.prepushoutgoinghooks.add('lfs', wrapper.prepush)
-def _trackedmatcher(repo, ctx):
+def _trackedmatcher(repo):
"""Return a function (path, size) -> bool indicating whether or not to
track a given file with lfs."""
if not repo.wvfs.exists('.hglfs'):
@@ -331,6 +332,8 @@
wrapfunction(hg, 'clone', wrapper.hgclone)
wrapfunction(hg, 'postshare', wrapper.hgpostshare)
+ scmutil.fileprefetchhooks.add('lfs', wrapper._prefetchfiles)
+
# Make bundle choose changegroup3 instead of changegroup2. This affects
# "hg bundle" command. Note: it does not cover all bundle formats like
# "packed1". Using "packed1" with lfs will likely cause trouble.
@@ -345,12 +348,21 @@
# when writing a bundle via "hg bundle" command, upload related LFS blobs
wrapfunction(bundle2, 'writenewbundle', wrapper.writenewbundle)
+@filesetpredicate('lfs()', callstatus=True)
+def lfsfileset(mctx, x):
+ """File that uses LFS storage."""
+ # i18n: "lfs" is a keyword
+ fileset.getargs(x, 0, 0, _("lfs takes no arguments"))
+ return [f for f in mctx.subset
+ if wrapper.pointerfromctx(mctx.ctx, f, removed=True) is not None]
+
@templatekeyword('lfs_files')
def lfsfiles(repo, ctx, **args):
- """List of strings. LFS files added or modified by the changeset."""
+ """List of strings. All files modified, added, or removed by this
+ changeset."""
args = pycompat.byteskwargs(args)
- pointers = wrapper.pointersfromctx(ctx) # {path: pointer}
+ pointers = wrapper.pointersfromctx(ctx, removed=True) # {path: pointer}
files = sorted(pointers.keys())
def pointer(v):
@@ -361,7 +373,7 @@
makemap = lambda v: {
'file': v,
- 'lfsoid': pointers[v].oid(),
+ 'lfsoid': pointers[v].oid() if pointers[v] else None,
'lfspointer': templatekw.hybriddict(pointer(v)),
}
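
The new filesetpredicate registration exposes an lfs() fileset, i.e. "give me the files in this context that are backed by LFS pointers". A toy model of that selection logic follows, with a fake pointerfromctx() standing in for the wrapper module's function.

# Toy model of the lfs() fileset: keep only files whose entry resolves to an
# LFS pointer. The fake pointerfromctx() below is illustrative only.
def lfsfileset(subset, ctx, pointerfromctx):
    return [f for f in subset
            if pointerfromctx(ctx, f, removed=True) is not None]

pointers = {'big.bin': {'oid': 'deadbeef'}, 'small.txt': None}
fakepointerfromctx = lambda ctx, f, removed=False: pointers.get(f)
print(lfsfileset(['big.bin', 'small.txt'], None, fakepointerfromctx))
# ['big.bin']

From the command line this should be reachable as an ordinary fileset pattern, e.g. something like hg files "set:lfs()", assuming the extension is enabled.
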
--- a/hgext/lfs/blobstore.py Fri Feb 23 17:57:04 2018 -0800
+++ b/hgext/lfs/blobstore.py Sat Feb 24 17:49:10 2018 -0600
@@ -194,11 +194,11 @@
def writebatch(self, pointers, fromstore):
"""Batch upload from local to remote blobstore."""
- self._batch(pointers, fromstore, 'upload')
+ self._batch(_deduplicate(pointers), fromstore, 'upload')
def readbatch(self, pointers, tostore):
"""Batch download from remote to local blostore."""
- self._batch(pointers, tostore, 'download')
+ self._batch(_deduplicate(pointers), tostore, 'download')
def _batchrequest(self, pointers, action):
"""Get metadata about objects pointed by pointers for given action
@@ -366,12 +366,23 @@
oids = transfer(sorted(objects, key=lambda o: o.get('oid')))
processed = 0
+ blobs = 0
for _one, oid in oids:
processed += sizes[oid]
+ blobs += 1
self.ui.progress(topic, processed, total=total)
self.ui.note(_('lfs: processed: %s\n') % oid)
self.ui.progress(topic, pos=None, total=total)
+ if blobs > 0:
+ if action == 'upload':
+ self.ui.status(_('lfs: uploaded %d files (%s)\n')
+ % (blobs, util.bytecount(processed)))
+ # TODO: coalesce the download requests, and comment this in
+ #elif action == 'download':
+ # self.ui.status(_('lfs: downloaded %d files (%s)\n')
+ # % (blobs, util.bytecount(processed)))
+
def __del__(self):
# copied from mercurial/httppeer.py
urlopener = getattr(self, 'urlopener', None)
@@ -388,13 +399,13 @@
self.vfs = lfsvfs(fullpath)
def writebatch(self, pointers, fromstore):
- for p in pointers:
+ for p in _deduplicate(pointers):
content = fromstore.read(p.oid(), verify=True)
with self.vfs(p.oid(), 'wb', atomictemp=True) as fp:
fp.write(content)
def readbatch(self, pointers, tostore):
- for p in pointers:
+ for p in _deduplicate(pointers):
with self.vfs(p.oid(), 'rb') as fp:
tostore.download(p.oid(), fp)
@@ -433,6 +444,13 @@
None: _promptremote,
}
+def _deduplicate(pointers):
+ """Remove any duplicate oids that exist in the list"""
+ reduced = util.sortdict()
+ for p in pointers:
+ reduced[p.oid()] = p
+ return reduced.values()
+
def _verify(oid, content):
realoid = hashlib.sha256(content).hexdigest()
if realoid != oid:
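
_deduplicate() collapses the batch to one pointer per oid while keeping the first-seen order, via util.sortdict. The same idea, with collections.OrderedDict standing in for sortdict:

import collections

def deduplicate(pointers):
    # one entry per oid, ordered by first appearance; a later duplicate
    # overwrites the stored pointer but keeps the original position
    reduced = collections.OrderedDict()
    for p in pointers:
        reduced[p['oid']] = p
    return list(reduced.values())

batch = [{'oid': 'aa', 'path': 'x'},
         {'oid': 'bb', 'path': 'y'},
         {'oid': 'aa', 'path': 'z'}]
print([p['oid'] for p in deduplicate(batch)])   # ['aa', 'bb']
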
--- a/hgext/lfs/wrapper.py Fri Feb 23 17:57:04 2018 -0800
+++ b/hgext/lfs/wrapper.py Sat Feb 24 17:49:10 2018 -0600
@@ -249,6 +249,21 @@
if 'lfs' in destrepo.requirements:
destrepo.vfs.append('hgrc', util.tonativeeol('\n[extensions]\nlfs=\n'))
+def _prefetchfiles(repo, ctx, files):
+ """Ensure that required LFS blobs are present, fetching them as a group if
+ needed."""
+ pointers = []
+ localstore = repo.svfs.lfslocalblobstore
+
+ for f in files:
+ p = pointerfromctx(ctx, f)
+ if p and not localstore.has(p.oid()):
+ p.filename = f
+ pointers.append(p)
+
+ if pointers:
+ repo.svfs.lfsremoteblobstore.readbatch(pointers, localstore)
+
def _canskipupload(repo):
# if remotestore is a null store, upload is a no-op and can be skipped
return isinstance(repo.svfs.lfsremoteblobstore, blobstore._nullremote)
@@ -307,20 +322,47 @@
pointers[p.oid()] = p
return sorted(pointers.values())
-def pointersfromctx(ctx):
- """return a dict {path: pointer} for given single changectx"""
+def pointerfromctx(ctx, f, removed=False):
+ """return a pointer for the named file from the given changectx, or None if
+ the file isn't LFS.
+
+ Optionally, the pointer for a file deleted from the context can be returned.
+    Since no such pointer is actually stored, and to distinguish from a non-LFS
+ file, this pointer is represented by an empty dict.
+ """
+ _ctx = ctx
+ if f not in ctx:
+ if not removed:
+ return None
+ if f in ctx.p1():
+ _ctx = ctx.p1()
+ elif f in ctx.p2():
+ _ctx = ctx.p2()
+ else:
+ return None
+ fctx = _ctx[f]
+ if not _islfs(fctx.filelog(), fctx.filenode()):
+ return None
+ try:
+ p = pointer.deserialize(fctx.rawdata())
+ if ctx == _ctx:
+ return p
+ return {}
+ except pointer.InvalidPointer as ex:
+ raise error.Abort(_('lfs: corrupted pointer (%s@%s): %s\n')
+ % (f, short(_ctx.node()), ex))
+
+def pointersfromctx(ctx, removed=False):
+ """return a dict {path: pointer} for given single changectx.
+
+ If ``removed`` == True and the LFS file was removed from ``ctx``, the value
+ stored for the path is an empty dict.
+ """
result = {}
for f in ctx.files():
- if f not in ctx:
- continue
- fctx = ctx[f]
- if not _islfs(fctx.filelog(), fctx.filenode()):
- continue
- try:
- result[f] = pointer.deserialize(fctx.rawdata())
- except pointer.InvalidPointer as ex:
- raise error.Abort(_('lfs: corrupted pointer (%s@%s): %s\n')
- % (f, short(ctx.node()), ex))
+ p = pointerfromctx(ctx, f, removed=removed)
+ if p is not None:
+ result[f] = p
return result
def uploadblobs(repo, pointers):
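
pointerfromctx() now has a three-way contract: None means the file is not an LFS file, a real pointer means an LFS file present in ctx, and an empty dict means an LFS file removed in ctx (only when removed=True); pointersfromctx() simply collects the non-None results. A toy model of that contract follows, with plain dictionaries standing in for changectx objects.

# Toy model: None = not LFS, {} = LFS file removed here, dict with an 'oid'
# = LFS file present in this changeset. Not Mercurial's real data model.
def pointerfromctx(ctx, f, removed=False):
    if f in ctx['files']:
        return ctx['files'][f]            # a pointer dict, or None for non-LFS
    if removed and f in ctx['parentfiles']:
        return {} if ctx['parentfiles'][f] else None
    return None

def pointersfromctx(ctx, removed=False):
    result = {}
    for f in ctx['changed']:
        p = pointerfromctx(ctx, f, removed=removed)
        if p is not None:
            result[f] = p
    return result

ctx = {'changed': ['big.bin', 'gone.bin', 'small.txt'],
       'files': {'big.bin': {'oid': 'aa'}, 'small.txt': None},
       'parentfiles': {'gone.bin': {'oid': 'bb'}}}
print(pointersfromctx(ctx, removed=True))
# {'big.bin': {'oid': 'aa'}, 'gone.bin': {}}
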
--- a/hgext/mq.py Fri Feb 23 17:57:04 2018 -0800
+++ b/hgext/mq.py Sat Feb 24 17:49:10 2018 -0600
@@ -86,6 +86,7 @@
hg,
localrepo,
lock as lockmod,
+ logcmdutil,
patch as patchmod,
phases,
pycompat,
@@ -93,7 +94,7 @@
revsetlang,
scmutil,
smartset,
- subrepo,
+ subrepoutil,
util,
vfs as vfsmod,
)
@@ -148,9 +149,13 @@
class statusentry(object):
def __init__(self, node, name):
self.node, self.name = node, name
- def __repr__(self):
+
+ def __bytes__(self):
return hex(self.node) + ':' + self.name
+ __str__ = encoding.strmethod(__bytes__)
+ __repr__ = encoding.strmethod(__bytes__)
+
# The order of the headers in 'hg export' HG patches:
HGHEADERS = [
# '# HG changeset patch',
@@ -276,7 +281,7 @@
nodeid = None
diffstart = 0
- for line in file(pf):
+ for line in open(pf, 'rb'):
line = line.rstrip()
if (line.startswith('diff --git')
or (diffstart and line.startswith('+++ '))):
@@ -391,12 +396,14 @@
self.comments.append('')
self.comments.append(message)
- def __str__(self):
+ def __bytes__(self):
s = '\n'.join(self.comments).rstrip()
if not s:
return ''
return s + '\n\n'
+ __str__ = encoding.strmethod(__bytes__)
+
def _delmsg(self):
'''Remove existing message, keeping the rest of the comments fields.
If comments contains 'subject: ', message will prepend
@@ -438,9 +445,9 @@
def __init__(self, ui, baseui, path, patchdir=None):
self.basepath = path
try:
- fh = open(os.path.join(path, 'patches.queue'))
- cur = fh.read().rstrip()
- fh.close()
+ with open(os.path.join(path, 'patches.queue'), r'rb') as fh:
+ cur = fh.read().rstrip()
+
if not cur:
curpath = os.path.join(path, 'patches')
else:
@@ -546,10 +553,8 @@
for patchfn in patches:
patchf = self.opener(patchfn, 'r')
# if the patch was a git patch, refresh it as a git patch
- for line in patchf:
- if line.startswith('diff --git'):
- diffopts.git = True
- break
+ diffopts.git = any(line.startswith('diff --git')
+ for line in patchf)
patchf.close()
return diffopts
@@ -643,7 +648,7 @@
self.seriesdirty = True
def pushable(self, idx):
- if isinstance(idx, str):
+ if isinstance(idx, bytes):
idx = self.series.index(idx)
patchguards = self.seriesguards[idx]
if not patchguards:
@@ -691,12 +696,12 @@
def savedirty(self):
def writelist(items, path):
- fp = self.opener(path, 'w')
+ fp = self.opener(path, 'wb')
for i in items:
fp.write("%s\n" % i)
fp.close()
if self.applieddirty:
- writelist(map(str, self.applied), self.statuspath)
+ writelist(map(bytes, self.applied), self.statuspath)
self.applieddirty = False
if self.seriesdirty:
writelist(self.fullseries, self.seriespath)
@@ -739,8 +744,8 @@
opts = {}
stat = opts.get('stat')
m = scmutil.match(repo[node1], files, opts)
- cmdutil.diffordiffstat(self.ui, repo, diffopts, node1, node2, m,
- changes, stat, fp)
+ logcmdutil.diffordiffstat(self.ui, repo, diffopts, node1, node2, m,
+ changes, stat, fp)
def mergeone(self, repo, mergeq, head, patch, rev, diffopts):
# first try just applying the patch
@@ -850,7 +855,7 @@
files=files, eolmode=None)
return (True, list(files), fuzz)
except Exception as inst:
- self.ui.note(str(inst) + '\n')
+ self.ui.note(util.forcebytestr(inst) + '\n')
if not self.ui.verbose:
self.ui.warn(_("patch failed, unable to continue (try -v)\n"))
self.ui.traceback()
@@ -963,8 +968,8 @@
wctx = repo[None]
pctx = repo['.']
overwrite = False
- mergedsubstate = subrepo.submerge(repo, pctx, wctx, wctx,
- overwrite)
+ mergedsubstate = subrepoutil.submerge(repo, pctx, wctx, wctx,
+ overwrite)
files += mergedsubstate.keys()
match = scmutil.matchfiles(repo, files or [])
@@ -1178,7 +1183,7 @@
except error.Abort:
pass
i += 1
- name = '%s__%s' % (namebase, i)
+ name = '%s__%d' % (namebase, i)
return name
def checkkeepchanges(self, keepchanges, force):
@@ -1189,6 +1194,7 @@
"""options:
msg: a string or a no-argument function returning a string
"""
+ opts = pycompat.byteskwargs(opts)
msg = opts.get('msg')
edit = opts.get('edit')
editform = opts.get('editform', 'mq.qnew')
@@ -1259,13 +1265,13 @@
if user:
ph.setuser(user)
if date:
- ph.setdate('%s %s' % date)
+ ph.setdate('%d %d' % date)
ph.setparent(hex(nctx.p1().node()))
msg = nctx.description().strip()
if msg == defaultmsg.strip():
msg = ''
ph.setmessage(msg)
- p.write(str(ph))
+ p.write(bytes(ph))
if commitfiles:
parent = self.qparents(repo, n)
if inclsubs:
@@ -1550,12 +1556,8 @@
update = True
else:
parents = [p.node() for p in repo[None].parents()]
- needupdate = False
- for entry in self.applied[start:]:
- if entry.node in parents:
- needupdate = True
- break
- update = needupdate
+ update = any(entry.node in parents
+ for entry in self.applied[start:])
tobackup = set()
if update:
@@ -1632,6 +1634,7 @@
self.printdiff(repo, diffopts, node1, node2, files=pats, opts=opts)
def refresh(self, repo, pats=None, **opts):
+ opts = pycompat.byteskwargs(opts)
if not self.applied:
self.ui.write(_("no patches applied\n"))
return 1
@@ -1846,7 +1849,7 @@
self.putsubstate2changes(substatestate, c)
chunks = patchmod.diff(repo, patchparent,
changes=c, opts=diffopts)
- comments = str(ph)
+ comments = bytes(ph)
if comments:
patchf.write(comments)
for chunk in chunks:
@@ -2260,7 +2263,7 @@
To stop managing a patch and move it into permanent history,
use the :hg:`qfinish` command."""
q = repo.mq
- q.delete(repo, patches, opts)
+ q.delete(repo, patches, pycompat.byteskwargs(opts))
q.savedirty()
return 0
@@ -3189,7 +3192,7 @@
guards[g] += 1
if ui.verbose:
guards['NONE'] = noguards
- guards = guards.items()
+ guards = list(guards.items())
guards.sort(key=lambda x: x[0][1:])
if guards:
ui.note(_('guards in series file:\n'))
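
The mq hunks switch statusentry and the patch-header class to a __bytes__-first serialization, deriving __str__/__repr__ through encoding.strmethod(), so that writelist(map(bytes, ...)) and p.write(bytes(ph)) produce byte strings on both Python versions. A simplified sketch of that pattern follows; strmethod() below is a stand-in for the real helper, and the node handling is reduced to plain bytes.

def strmethod(bytesfunc):
    # simplified stand-in for mercurial.encoding.strmethod(): build a
    # __str__ that decodes whatever __bytes__ produces
    def __str__(self):
        return bytesfunc(self).decode('latin-1')
    return __str__

class statusentry(object):
    def __init__(self, node, name):
        self.node, self.name = node, name

    def __bytes__(self):
        # the real class hex()-ifies a binary node; plain bytes keep this short
        return self.node + b':' + self.name

    __str__ = strmethod(__bytes__)
    __repr__ = strmethod(__bytes__)

e = statusentry(b'0123456789abcdef0123', b'my-patch')
print(bytes(e))   # b'0123456789abcdef0123:my-patch'
print(str(e))     # 0123456789abcdef0123:my-patch
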
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/hgext/narrow/TODO.rst Sat Feb 24 17:49:10 2018 -0600
@@ -0,0 +1,37 @@
+Integration with the share extension needs improvement. Right now
+we've seen some odd bugs, and the way we modify the contents of the
+.hg/shared file is unfortunate. See wrappostshare() and unsharenarrowspec().
+
+Resolve commentary on narrowrepo.wraprepo.narrowrepository.status
+about the filtering of status being done at an awkward layer. This
+came up during the import to hgext, but nobody had concrete improvement
+ideas at the time.
+
+Fold most (or preferably all) of narrowrevlog.py into core.
+
+Address commentary in narrowrevlog.excludedmanifestrevlog.add -
+specifically we should improve the collaboration with core so that
+add() never gets called on an excluded directory and we can improve
+the stand-in to raise a ProgrammingError.
+
+Figure out how to correctly produce narrowmanifestrevlog and
+narrowfilelog instances instead of monkeypatching regular revlogs at
+runtime to our subclass. Even better, merge the narrowing logic
+directly into core.
+
+Reason more completely about rename-filtering logic in
+narrowfilelog. There could be some surprises lurking there.
+
+Formally document the narrowspec format. Unify with sparse, if at all
+possible. For bonus points, unify with the server-specified narrowspec
+format.
+
+narrowrepo.setnarrowpats() or narrowspec.save() need to make sure
+they're holding the wlock.
+
+Implement a simple version of the expandnarrow wireproto command for
+core. Having configurable shorthands for narrowspecs has been useful
+at Google (and sparse has a similar feature from Facebook), so it
+probably makes sense to implement the feature in core. (Google's
+handler is entirely custom to Google, with a custom format related to
+bazel's build language, so it's not in the narrowhg distribution.)
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/hgext/narrow/__init__.py Sat Feb 24 17:49:10 2018 -0600
@@ -0,0 +1,95 @@
+# __init__.py - narrowhg extension
+#
+# Copyright 2017 Google, Inc.
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2 or any later version.
+'''create clones which fetch history data for subset of files (EXPERIMENTAL)'''
+
+from __future__ import absolute_import
+
+# Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for
+# extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
+# be specifying the version(s) of Mercurial they are tested with, or
+# leave the attribute unspecified.
+testedwith = 'ships-with-hg-core'
+
+from mercurial import (
+ extensions,
+ hg,
+ localrepo,
+ registrar,
+ verify as verifymod,
+)
+
+from . import (
+ narrowbundle2,
+ narrowchangegroup,
+ narrowcommands,
+ narrowcopies,
+ narrowdirstate,
+ narrowmerge,
+ narrowpatch,
+ narrowrepo,
+ narrowrevlog,
+ narrowtemplates,
+ narrowwirepeer,
+)
+
+configtable = {}
+configitem = registrar.configitem(configtable)
+# Narrowhg *has* support for serving ellipsis nodes (which are used at
+# least by Google's internal server), but that support is pretty
+# fragile and has a lot of problems on real-world repositories that
+# have complex graph topologies. This could probably be corrected, but
+# absent someone needing the full support for ellipsis nodes in
+# repositories with merges, it's unlikely this work will get done. As
+# of this writing in late 2017, all repositories large enough for
+# ellipsis nodes to be a hard requirement also enforce strictly linear
+# history for other scaling reasons.
+configitem('experimental', 'narrowservebrokenellipses',
+ default=False,
+ alias=[('narrow', 'serveellipses')],
+)
+
+# Export the commands table for Mercurial to see.
+cmdtable = narrowcommands.table
+
+localrepo.localrepository._basesupported.add(narrowrepo.REQUIREMENT)
+
+def uisetup(ui):
+ """Wraps user-facing mercurial commands with narrow-aware versions."""
+ narrowrevlog.setup()
+ narrowbundle2.setup()
+ narrowmerge.setup()
+ narrowcommands.setup()
+ narrowchangegroup.setup()
+ narrowwirepeer.uisetup()
+
+def reposetup(ui, repo):
+ """Wraps local repositories with narrow repo support."""
+ if not isinstance(repo, localrepo.localrepository):
+ return
+
+ if narrowrepo.REQUIREMENT in repo.requirements:
+ narrowrepo.wraprepo(repo, True)
+ narrowcopies.setup(repo)
+ narrowdirstate.setup(repo)
+ narrowpatch.setup(repo)
+ narrowwirepeer.reposetup(repo)
+
+def _verifierinit(orig, self, repo, matcher=None):
+    # The verifier's matcher argument was designed for narrowhg, so it should
+ # be None from core. If another extension passes a matcher (unlikely),
+ # we'll have to fail until matchers can be composed more easily.
+ assert matcher is None
+ matcher = getattr(repo, 'narrowmatch', lambda: None)()
+ orig(self, repo, matcher)
+
+def extsetup(ui):
+ extensions.wrapfunction(verifymod.verifier, '__init__', _verifierinit)
+ extensions.wrapfunction(hg, 'postshare', narrowrepo.wrappostshare)
+ extensions.wrapfunction(hg, 'copystore', narrowrepo.unsharenarrowspec)
+
+templatekeyword = narrowtemplates.templatekeyword
+revsetpredicate = narrowtemplates.revsetpredicate
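
The narrow extension hooks into core almost entirely through extensions.wrapfunction(): the wrapper receives the original callable as its first argument and decides how to delegate, as _verifierinit() and wrappostshare() do above. A generic, self-contained sketch of that shape follows; the wrapfunction() and hgmod below are simplified models, not Mercurial's implementation.

def wrapfunction(container, funcname, wrapper):
    # simplified model of mercurial.extensions.wrapfunction(): the wrapper
    # receives the original callable as its first argument
    orig = getattr(container, funcname)
    def wrapped(*args, **kwargs):
        return wrapper(orig, *args, **kwargs)
    setattr(container, funcname, wrapped)
    return orig

class hgmod(object):                      # stand-in for the mercurial.hg module
    @staticmethod
    def postshare(source, dest):
        return 'shared %s -> %s' % (source, dest)

def wrappostshare(orig, source, dest):
    # narrow-specific bookkeeping would go here, then delegate to core
    return orig(source, dest) + ' (narrowspec copied)'

wrapfunction(hgmod, 'postshare', wrappostshare)
print(hgmod.postshare('src', 'dst'))   # shared src -> dst (narrowspec copied)
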
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/hgext/narrow/narrowbundle2.py Sat Feb 24 17:49:10 2018 -0600
@@ -0,0 +1,494 @@
+# narrowbundle2.py - bundle2 extensions for narrow repository support
+#
+# Copyright 2017 Google, Inc.
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2 or any later version.
+
+from __future__ import absolute_import
+
+import collections
+import errno
+import struct
+
+from mercurial.i18n import _
+from mercurial.node import (
+ bin,
+ nullid,
+ nullrev,
+)
+from mercurial import (
+ bundle2,
+ changegroup,
+ dagutil,
+ error,
+ exchange,
+ extensions,
+ narrowspec,
+ repair,
+ util,
+ wireproto,
+)
+
+from . import (
+ narrowrepo,
+)
+
+NARROWCAP = 'narrow'
+_NARROWACL_SECTION = 'narrowhgacl'
+_CHANGESPECPART = NARROWCAP + ':changespec'
+_SPECPART = NARROWCAP + ':spec'
+_SPECPART_INCLUDE = 'include'
+_SPECPART_EXCLUDE = 'exclude'
+_KILLNODESIGNAL = 'KILL'
+_DONESIGNAL = 'DONE'
+_ELIDEDCSHEADER = '>20s20s20sl' # cset id, p1, p2, len(text)
+_ELIDEDMFHEADER = '>20s20s20s20sl' # manifest id, p1, p2, link id, len(text)
+_CSHEADERSIZE = struct.calcsize(_ELIDEDCSHEADER)
+_MFHEADERSIZE = struct.calcsize(_ELIDEDMFHEADER)
+
+# When advertising capabilities, always include narrow clone support.
+def getrepocaps_narrow(orig, repo, **kwargs):
+ caps = orig(repo, **kwargs)
+ caps[NARROWCAP] = ['v0']
+ return caps
+
+def _computeellipsis(repo, common, heads, known, match, depth=None):
+ """Compute the shape of a narrowed DAG.
+
+ Args:
+ repo: The repository we're transferring.
+ common: The roots of the DAG range we're transferring.
+ May be just [nullid], which means all ancestors of heads.
+ heads: The heads of the DAG range we're transferring.
+ match: The narrowmatcher that allows us to identify relevant changes.
+ depth: If not None, only consider nodes to be full nodes if they are at
+ most depth changesets away from one of heads.
+
+ Returns:
+ A tuple of (visitnodes, relevant_nodes, ellipsisroots) where:
+
+ visitnodes: The list of nodes (either full or ellipsis) which
+ need to be sent to the client.
+ relevant_nodes: The set of changelog nodes which change a file inside
+ the narrowspec. The client needs these as non-ellipsis nodes.
+ ellipsisroots: A dict of {rev: parents} that is used in
+ narrowchangegroup to produce ellipsis nodes with the
+ correct parents.
+ """
+ cl = repo.changelog
+ mfl = repo.manifestlog
+
+ cldag = dagutil.revlogdag(cl)
+ # dagutil does not like nullid/nullrev
+ commonrevs = cldag.internalizeall(common - set([nullid])) | set([nullrev])
+ headsrevs = cldag.internalizeall(heads)
+ if depth:
+ revdepth = {h: 0 for h in headsrevs}
+
+ ellipsisheads = collections.defaultdict(set)
+ ellipsisroots = collections.defaultdict(set)
+
+ def addroot(head, curchange):
+ """Add a root to an ellipsis head, splitting heads with 3 roots."""
+ ellipsisroots[head].add(curchange)
+ # Recursively split ellipsis heads with 3 roots by finding the
+ # roots' youngest common descendant which is an elided merge commit.
+ # That descendant takes 2 of the 3 roots as its own, and becomes a
+ # root of the head.
+ while len(ellipsisroots[head]) > 2:
+ child, roots = splithead(head)
+ splitroots(head, child, roots)
+ head = child # Recurse in case we just added a 3rd root
+
+ def splitroots(head, child, roots):
+ ellipsisroots[head].difference_update(roots)
+ ellipsisroots[head].add(child)
+ ellipsisroots[child].update(roots)
+ ellipsisroots[child].discard(child)
+
+ def splithead(head):
+ r1, r2, r3 = sorted(ellipsisroots[head])
+ for nr1, nr2 in ((r2, r3), (r1, r3), (r1, r2)):
+ mid = repo.revs('sort(merge() & %d::%d & %d::%d, -rev)',
+ nr1, head, nr2, head)
+ for j in mid:
+ if j == nr2:
+ return nr2, (nr1, nr2)
+ if j not in ellipsisroots or len(ellipsisroots[j]) < 2:
+ return j, (nr1, nr2)
+ raise error.Abort('Failed to split up ellipsis node! head: %d, '
+ 'roots: %d %d %d' % (head, r1, r2, r3))
+
+ missing = list(cl.findmissingrevs(common=commonrevs, heads=headsrevs))
+ visit = reversed(missing)
+ relevant_nodes = set()
+ visitnodes = [cl.node(m) for m in missing]
+ required = set(headsrevs) | known
+ for rev in visit:
+ clrev = cl.changelogrevision(rev)
+ ps = cldag.parents(rev)
+ if depth is not None:
+ curdepth = revdepth[rev]
+ for p in ps:
+ revdepth[p] = min(curdepth + 1, revdepth.get(p, depth + 1))
+ needed = False
+ shallow_enough = depth is None or revdepth[rev] <= depth
+ if shallow_enough:
+ curmf = mfl[clrev.manifest].read()
+ if ps:
+ # We choose to not trust the changed files list in
+ # changesets because it's not always correct. TODO: could
+ # we trust it for the non-merge case?
+ p1mf = mfl[cl.changelogrevision(ps[0]).manifest].read()
+ needed = bool(curmf.diff(p1mf, match))
+ if not needed and len(ps) > 1:
+ # For merge changes, the list of changed files is not
+ # helpful, since we need to emit the merge if a file
+ # in the narrow spec has changed on either side of the
+ # merge. As a result, we do a manifest diff to check.
+ p2mf = mfl[cl.changelogrevision(ps[1]).manifest].read()
+ needed = bool(curmf.diff(p2mf, match))
+ else:
+ # For a root node, we need to include the node if any
+ # files in the node match the narrowspec.
+ needed = any(curmf.walk(match))
+
+ if needed:
+ for head in ellipsisheads[rev]:
+ addroot(head, rev)
+ for p in ps:
+ required.add(p)
+ relevant_nodes.add(cl.node(rev))
+ else:
+ if not ps:
+ ps = [nullrev]
+ if rev in required:
+ for head in ellipsisheads[rev]:
+ addroot(head, rev)
+ for p in ps:
+ ellipsisheads[p].add(rev)
+ else:
+ for p in ps:
+ ellipsisheads[p] |= ellipsisheads[rev]
+
+ # add common changesets as roots of their reachable ellipsis heads
+ for c in commonrevs:
+ for head in ellipsisheads[c]:
+ addroot(head, c)
+ return visitnodes, relevant_nodes, ellipsisroots
+
+def _packellipsischangegroup(repo, common, match, relevant_nodes,
+ ellipsisroots, visitnodes, depth, source, version):
+ if version in ('01', '02'):
+ raise error.Abort(
+ 'ellipsis nodes require at least cg3 on client and server, '
+ 'but negotiated version %s' % version)
+ # We wrap cg1packer.revchunk, using a side channel to pass
+ # relevant_nodes into that area. Then if linknode isn't in the
+ # set, we know we have an ellipsis node and we should defer
+ # sending that node's data. We override close() to detect
+ # pending ellipsis nodes and flush them.
+ packer = changegroup.getbundler(version, repo)
+ # Let the packer have access to the narrow matcher so it can
+ # omit filelogs and dirlogs as needed
+ packer._narrow_matcher = lambda : match
+ # Give the packer the list of nodes which should not be
+ # ellipsis nodes. We store this rather than the set of nodes
+ # that should be an ellipsis because for very large histories
+ # we expect this to be significantly smaller.
+ packer.full_nodes = relevant_nodes
+ # Maps ellipsis revs to their roots at the changelog level.
+ packer.precomputed_ellipsis = ellipsisroots
+ # Maps CL revs to per-revlog revisions. Cleared in close() at
+ # the end of each group.
+ packer.clrev_to_localrev = {}
+ packer.next_clrev_to_localrev = {}
+ # Maps changelog nodes to changelog revs. Filled in once
+ # during changelog stage and then left unmodified.
+ packer.clnode_to_rev = {}
+ packer.changelog_done = False
+ # If true, informs the packer that it is serving shallow content and might
+ # need to pack file contents not introduced by the changes being packed.
+ packer.is_shallow = depth is not None
+
+ return packer.generate(common, visitnodes, False, source)
+
+# Serve a changegroup for a client with a narrow clone.
+def getbundlechangegrouppart_narrow(bundler, repo, source,
+ bundlecaps=None, b2caps=None, heads=None,
+ common=None, **kwargs):
+ cgversions = b2caps.get('changegroup')
+ if cgversions: # 3.1 and 3.2 ship with an empty value
+ cgversions = [v for v in cgversions
+ if v in changegroup.supportedoutgoingversions(repo)]
+ if not cgversions:
+ raise ValueError(_('no common changegroup version'))
+ version = max(cgversions)
+ else:
+ raise ValueError(_("server does not advertise changegroup version,"
+ " can't negotiate support for ellipsis nodes"))
+
+ include = sorted(filter(bool, kwargs.get(r'includepats', [])))
+ exclude = sorted(filter(bool, kwargs.get(r'excludepats', [])))
+ newmatch = narrowspec.match(repo.root, include=include, exclude=exclude)
+ if not repo.ui.configbool("experimental", "narrowservebrokenellipses"):
+ outgoing = exchange._computeoutgoing(repo, heads, common)
+ if not outgoing.missing:
+ return
+ def wrappedgetbundler(orig, *args, **kwargs):
+ bundler = orig(*args, **kwargs)
+ bundler._narrow_matcher = lambda : newmatch
+ return bundler
+ with extensions.wrappedfunction(changegroup, 'getbundler',
+ wrappedgetbundler):
+ cg = changegroup.makestream(repo, outgoing, version, source)
+ part = bundler.newpart('changegroup', data=cg)
+ part.addparam('version', version)
+ if 'treemanifest' in repo.requirements:
+ part.addparam('treemanifest', '1')
+
+ if include or exclude:
+ narrowspecpart = bundler.newpart(_SPECPART)
+ if include:
+ narrowspecpart.addparam(
+ _SPECPART_INCLUDE, '\n'.join(include), mandatory=True)
+ if exclude:
+ narrowspecpart.addparam(
+ _SPECPART_EXCLUDE, '\n'.join(exclude), mandatory=True)
+
+ return
+
+ depth = kwargs.get(r'depth', None)
+ if depth is not None:
+ depth = int(depth)
+ if depth < 1:
+ raise error.Abort(_('depth must be positive, got %d') % depth)
+
+ heads = set(heads or repo.heads())
+ common = set(common or [nullid])
+ oldinclude = sorted(filter(bool, kwargs.get(r'oldincludepats', [])))
+ oldexclude = sorted(filter(bool, kwargs.get(r'oldexcludepats', [])))
+ known = {bin(n) for n in kwargs.get(r'known', [])}
+ if known and (oldinclude != include or oldexclude != exclude):
+ # Steps:
+ # 1. Send kill for "$known & ::common"
+ #
+ # 2. Send changegroup for ::common
+ #
+ # 3. Proceed.
+ #
+ # In the future, we can send kills for only the specific
+ # nodes we know should go away or change shape, and then
+ # send a data stream that tells the client something like this:
+ #
+ # a) apply this changegroup
+ # b) apply nodes XXX, YYY, ZZZ that you already have
+ # c) goto a
+ #
+ # until they've built up the full new state.
+ # Convert to revnums and intersect with "common". The client should
+ # have made it a subset of "common" already, but let's be safe.
+ known = set(repo.revs("%ln & ::%ln", known, common))
+ # TODO: we could send only roots() of this set, and the
+ # list of nodes in common, and the client could work out
+ # what to strip, instead of us explicitly sending every
+ # single node.
+ deadrevs = known
+ def genkills():
+ for r in deadrevs:
+ yield _KILLNODESIGNAL
+ yield repo.changelog.node(r)
+ yield _DONESIGNAL
+ bundler.newpart(_CHANGESPECPART, data=genkills())
+ newvisit, newfull, newellipsis = _computeellipsis(
+ repo, set(), common, known, newmatch)
+ if newvisit:
+ cg = _packellipsischangegroup(
+ repo, common, newmatch, newfull, newellipsis,
+ newvisit, depth, source, version)
+ part = bundler.newpart('changegroup', data=cg)
+ part.addparam('version', version)
+ if 'treemanifest' in repo.requirements:
+ part.addparam('treemanifest', '1')
+
+ visitnodes, relevant_nodes, ellipsisroots = _computeellipsis(
+ repo, common, heads, set(), newmatch, depth=depth)
+
+ repo.ui.debug('Found %d relevant revs\n' % len(relevant_nodes))
+ if visitnodes:
+ cg = _packellipsischangegroup(
+ repo, common, newmatch, relevant_nodes, ellipsisroots,
+ visitnodes, depth, source, version)
+ part = bundler.newpart('changegroup', data=cg)
+ part.addparam('version', version)
+ if 'treemanifest' in repo.requirements:
+ part.addparam('treemanifest', '1')
+
+def applyacl_narrow(repo, kwargs):
+ ui = repo.ui
+ username = ui.shortuser(ui.environ.get('REMOTE_USER') or ui.username())
+ user_includes = ui.configlist(
+ _NARROWACL_SECTION, username + '.includes',
+ ui.configlist(_NARROWACL_SECTION, 'default.includes'))
+ user_excludes = ui.configlist(
+ _NARROWACL_SECTION, username + '.excludes',
+ ui.configlist(_NARROWACL_SECTION, 'default.excludes'))
+ if not user_includes:
+ raise error.Abort(_("{} configuration for user {} is empty")
+ .format(_NARROWACL_SECTION, username))
+
+ user_includes = [
+ 'path:.' if p == '*' else 'path:' + p for p in user_includes]
+ user_excludes = [
+ 'path:.' if p == '*' else 'path:' + p for p in user_excludes]
+
+ req_includes = set(kwargs.get(r'includepats', []))
+ req_excludes = set(kwargs.get(r'excludepats', []))
+
+ req_includes, req_excludes, invalid_includes = narrowspec.restrictpatterns(
+ req_includes, req_excludes, user_includes, user_excludes)
+
+ if invalid_includes:
+ raise error.Abort(
+ _("The following includes are not accessible for {}: {}")
+ .format(username, invalid_includes))
+
+ new_args = {}
+ new_args.update(kwargs)
+ new_args['includepats'] = req_includes
+ if req_excludes:
+ new_args['excludepats'] = req_excludes
+ return new_args
+
+@bundle2.parthandler(_SPECPART, (_SPECPART_INCLUDE, _SPECPART_EXCLUDE))
+def _handlechangespec_2(op, inpart):
+ includepats = set(inpart.params.get(_SPECPART_INCLUDE, '').splitlines())
+ excludepats = set(inpart.params.get(_SPECPART_EXCLUDE, '').splitlines())
+ narrowspec.save(op.repo, includepats, excludepats)
+ if not narrowrepo.REQUIREMENT in op.repo.requirements:
+ op.repo.requirements.add(narrowrepo.REQUIREMENT)
+ op.repo._writerequirements()
+ op.repo.invalidate(clearfilecache=True)
+
+@bundle2.parthandler(_CHANGESPECPART)
+def _handlechangespec(op, inpart):
+ repo = op.repo
+ cl = repo.changelog
+
+ # changesets which need to be stripped entirely. either they're no longer
+ # needed in the new narrow spec, or the server is sending a replacement
+ # in the changegroup part.
+ clkills = set()
+
+ # A changespec part contains all the updates to ellipsis nodes
+ # that will happen as a result of widening or narrowing a
+ # repo. All the changes that this block encounters are ellipsis
+ # nodes or flags to kill an existing ellipsis.
+ chunksignal = changegroup.readexactly(inpart, 4)
+ while chunksignal != _DONESIGNAL:
+ if chunksignal == _KILLNODESIGNAL:
+ # a node used to be an ellipsis but isn't anymore
+ ck = changegroup.readexactly(inpart, 20)
+ if cl.hasnode(ck):
+ clkills.add(ck)
+ else:
+ raise error.Abort(
+ _('unexpected changespec node chunk type: %s') % chunksignal)
+ chunksignal = changegroup.readexactly(inpart, 4)
+
+ if clkills:
+ # preserve bookmarks that repair.strip() would otherwise strip
+ bmstore = repo._bookmarks
+ class dummybmstore(dict):
+ def applychanges(self, repo, tr, changes):
+ pass
+ def recordchange(self, tr): # legacy version
+ pass
+ repo._bookmarks = dummybmstore()
+ chgrpfile = repair.strip(op.ui, repo, list(clkills), backup=True,
+ topic='widen')
+ repo._bookmarks = bmstore
+ if chgrpfile:
+ # presence of _widen_bundle attribute activates widen handler later
+ op._widen_bundle = chgrpfile
+ # Set the new narrowspec if we're widening. The setnewnarrowpats() method
+ # will currently always be there when using the core+narrowhg server, but
+ # other servers may include a changespec part even when not widening (e.g.
+ # because we're deepening a shallow repo).
+ if util.safehasattr(repo, 'setnewnarrowpats'):
+ repo.setnewnarrowpats()
+
+def handlechangegroup_widen(op, inpart):
+ """Changegroup exchange handler which restores temporarily-stripped nodes"""
+ # We saved a bundle with stripped node data we must now restore.
+ # This approach is based on mercurial/repair.py@6ee26a53c111.
+ repo = op.repo
+ ui = op.ui
+
+ chgrpfile = op._widen_bundle
+ del op._widen_bundle
+ vfs = repo.vfs
+
+ ui.note(_("adding branch\n"))
+ f = vfs.open(chgrpfile, "rb")
+ try:
+ gen = exchange.readbundle(ui, f, chgrpfile, vfs)
+ if not ui.verbose:
+ # silence internal shuffling chatter
+ ui.pushbuffer()
+ if isinstance(gen, bundle2.unbundle20):
+ with repo.transaction('strip') as tr:
+ bundle2.processbundle(repo, gen, lambda: tr)
+ else:
+ gen.apply(repo, 'strip', 'bundle:' + vfs.join(chgrpfile), True)
+ if not ui.verbose:
+ ui.popbuffer()
+ finally:
+ f.close()
+
+ # remove undo files
+ for undovfs, undofile in repo.undofiles():
+ try:
+ undovfs.unlink(undofile)
+ except OSError as e:
+ if e.errno != errno.ENOENT:
+ ui.warn(_('error removing %s: %s\n') %
+ (undovfs.join(undofile), str(e)))
+
+ # Remove partial backup only if there were no exceptions
+ vfs.unlink(chgrpfile)
+
+def setup():
+ """Enable narrow repo support in bundle2-related extension points."""
+ extensions.wrapfunction(bundle2, 'getrepocaps', getrepocaps_narrow)
+
+ wireproto.gboptsmap['narrow'] = 'boolean'
+ wireproto.gboptsmap['depth'] = 'plain'
+ wireproto.gboptsmap['oldincludepats'] = 'csv'
+ wireproto.gboptsmap['oldexcludepats'] = 'csv'
+ wireproto.gboptsmap['includepats'] = 'csv'
+ wireproto.gboptsmap['excludepats'] = 'csv'
+ wireproto.gboptsmap['known'] = 'csv'
+
+ # Extend changegroup serving to handle requests from narrow clients.
+ origcgfn = exchange.getbundle2partsmapping['changegroup']
+ def wrappedcgfn(*args, **kwargs):
+ repo = args[1]
+ if repo.ui.has_section(_NARROWACL_SECTION):
+ getbundlechangegrouppart_narrow(
+ *args, **applyacl_narrow(repo, kwargs))
+ elif kwargs.get(r'narrow', False):
+ getbundlechangegrouppart_narrow(*args, **kwargs)
+ else:
+ origcgfn(*args, **kwargs)
+ exchange.getbundle2partsmapping['changegroup'] = wrappedcgfn
+
+ # Extend changegroup receiver so client can fixup after widen requests.
+ origcghandler = bundle2.parthandlermapping['changegroup']
+ def wrappedcghandler(op, inpart):
+ origcghandler(op, inpart)
+ if util.safehasattr(op, '_widen_bundle'):
+ handlechangegroup_widen(op, inpart)
+ wrappedcghandler.params = origcghandler.params
+ bundle2.parthandlermapping['changegroup'] = wrappedcghandler
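
The changespec part above streams a sequence of 4-byte signals: each _KILLNODESIGNAL is followed by a 20-byte changelog node to strip, and _DONESIGNAL terminates the stream, which _handlechangespec() consumes with readexactly(). A small standalone sketch of that framing; readexactly() here is a simplified stand-in for changegroup.readexactly().

import io

KILL = b'KILL'   # _KILLNODESIGNAL
DONE = b'DONE'   # _DONESIGNAL

def genkills(nodes):
    # stream layout of the changespec part: a 4-byte signal, then for KILL a
    # 20-byte changelog node; DONE terminates the stream
    for n in nodes:
        yield KILL
        yield n
    yield DONE

def readexactly(stream, n):
    # simplified stand-in for changegroup.readexactly()
    data = stream.read(n)
    assert len(data) == n, 'stream ended unexpectedly'
    return data

payload = io.BytesIO(b''.join(genkills([b'\x11' * 20, b'\x22' * 20])))
kills = []
signal = readexactly(payload, 4)
while signal != DONE:
    assert signal == KILL
    kills.append(readexactly(payload, 20))
    signal = readexactly(payload, 4)
print(len(kills))   # 2
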
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/hgext/narrow/narrowchangegroup.py Sat Feb 24 17:49:10 2018 -0600
@@ -0,0 +1,380 @@
+# narrowchangegroup.py - narrow clone changegroup creation and consumption
+#
+# Copyright 2017 Google, Inc.
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2 or any later version.
+
+from __future__ import absolute_import
+
+from mercurial.i18n import _
+from mercurial import (
+ changegroup,
+ error,
+ extensions,
+ manifest,
+ mdiff,
+ node,
+ revlog,
+ util,
+)
+
+from . import (
+ narrowrepo,
+)
+
+def setup():
+
+ def supportedoutgoingversions(orig, repo):
+ versions = orig(repo)
+ if narrowrepo.REQUIREMENT in repo.requirements:
+ versions.discard('01')
+ versions.discard('02')
+ return versions
+
+ extensions.wrapfunction(changegroup, 'supportedoutgoingversions',
+ supportedoutgoingversions)
+
+ def prune(orig, self, revlog, missing, commonrevs):
+ if isinstance(revlog, manifest.manifestrevlog):
+ matcher = getattr(self._repo, 'narrowmatch',
+ getattr(self, '_narrow_matcher', None))
+ if (matcher is not None and
+ not matcher().visitdir(revlog._dir[:-1] or '.')):
+ return []
+ return orig(self, revlog, missing, commonrevs)
+
+ extensions.wrapfunction(changegroup.cg1packer, 'prune', prune)
+
+ def generatefiles(orig, self, changedfiles, linknodes, commonrevs,
+ source):
+ matcher = getattr(self._repo, 'narrowmatch',
+ getattr(self, '_narrow_matcher', None))
+ if matcher is not None:
+ narrowmatch = matcher()
+ changedfiles = [f for f in changedfiles if narrowmatch(f)]
+ if getattr(self, 'is_shallow', False):
+ # See comment in generate() for why this sadness is a thing.
+ mfdicts = self._mfdicts
+ del self._mfdicts
+ # In a shallow clone, the linknodes callback needs to also include
+ # those file nodes that are in the manifests we sent but weren't
+ # introduced by those manifests.
+ commonctxs = [self._repo[c] for c in commonrevs]
+ oldlinknodes = linknodes
+ clrev = self._repo.changelog.rev
+ def linknodes(flog, fname):
+ for c in commonctxs:
+ try:
+ fnode = c.filenode(fname)
+ self.clrev_to_localrev[c.rev()] = flog.rev(fnode)
+ except error.ManifestLookupError:
+ pass
+ links = oldlinknodes(flog, fname)
+ if len(links) != len(mfdicts):
+ for mf, lr in mfdicts:
+ fnode = mf.get(fname, None)
+ if fnode in links:
+ links[fnode] = min(links[fnode], lr, key=clrev)
+ elif fnode:
+ links[fnode] = lr
+ return links
+ return orig(self, changedfiles, linknodes, commonrevs, source)
+ extensions.wrapfunction(
+ changegroup.cg1packer, 'generatefiles', generatefiles)
+
+ def ellipsisdata(packer, rev, revlog_, p1, p2, data, linknode):
+ n = revlog_.node(rev)
+ p1n, p2n = revlog_.node(p1), revlog_.node(p2)
+ flags = revlog_.flags(rev)
+ flags |= revlog.REVIDX_ELLIPSIS
+ meta = packer.builddeltaheader(
+ n, p1n, p2n, node.nullid, linknode, flags)
+ # TODO: try and actually send deltas for ellipsis data blocks
+ diffheader = mdiff.trivialdiffheader(len(data))
+ l = len(meta) + len(diffheader) + len(data)
+ return ''.join((changegroup.chunkheader(l),
+ meta,
+ diffheader,
+ data))
+
+ def close(orig, self):
+ getattr(self, 'clrev_to_localrev', {}).clear()
+ if getattr(self, 'next_clrev_to_localrev', {}):
+ self.clrev_to_localrev = self.next_clrev_to_localrev
+ del self.next_clrev_to_localrev
+ self.changelog_done = True
+ return orig(self)
+ extensions.wrapfunction(changegroup.cg1packer, 'close', close)
+
+ # In a perfect world, we'd generate better ellipsis-ified graphs
+ # for non-changelog revlogs. In practice, we haven't started doing
+ # that yet, so the resulting DAGs for the manifestlog and filelogs
+ # are actually full of bogus parentage on all the ellipsis
+ # nodes. This has the side effect that, while the contents are
+ # correct, the individual DAGs might be completely out of whack in
+ # a case like 882681bc3166 and its ancestors (back about 10
+ # revisions or so) in the main hg repo.
+ #
+ # The one invariant we *know* holds is that the new (potentially
+ # bogus) DAG shape will be valid if we order the nodes in the
+ # order that they're introduced in dramatis personae by the
+ # changelog, so what we do is we sort the non-changelog histories
+ # by the order in which they are used by the changelog.
+ def _sortgroup(orig, self, revlog, nodelist, lookup):
+ if not util.safehasattr(self, 'full_nodes') or not self.clnode_to_rev:
+ return orig(self, revlog, nodelist, lookup)
+ key = lambda n: self.clnode_to_rev[lookup(n)]
+ return [revlog.rev(n) for n in sorted(nodelist, key=key)]
+
+ extensions.wrapfunction(changegroup.cg1packer, '_sortgroup', _sortgroup)
+
+ def generate(orig, self, commonrevs, clnodes, fastpathlinkrev, source):
+ '''yield a sequence of changegroup chunks (strings)'''
+ # Note: other than delegating to orig, the only deviation in
+ # logic from normal hg's generate is marked with BEGIN/END
+ # NARROW HACK.
+ if not util.safehasattr(self, 'full_nodes'):
+ # not sending a narrow bundle
+ for x in orig(self, commonrevs, clnodes, fastpathlinkrev, source):
+ yield x
+ return
+
+ repo = self._repo
+ cl = repo.changelog
+ mfl = repo.manifestlog
+ mfrevlog = mfl._revlog
+
+ clrevorder = {}
+ mfs = {} # needed manifests
+ fnodes = {} # needed file nodes
+ changedfiles = set()
+
+ # Callback for the changelog, used to collect changed files and manifest
+ # nodes.
+ # Returns the linkrev node (identity in the changelog case).
+ def lookupcl(x):
+ c = cl.read(x)
+ clrevorder[x] = len(clrevorder)
+ # BEGIN NARROW HACK
+ #
+ # Only update mfs if x is going to be sent. Otherwise we
+ # end up with bogus linkrevs specified for manifests and
+ # we skip some manifest nodes that we should otherwise
+ # have sent.
+ if x in self.full_nodes or cl.rev(x) in self.precomputed_ellipsis:
+ n = c[0]
+ # record the first changeset introducing this manifest version
+ mfs.setdefault(n, x)
+ # Set this narrow-specific dict so we have the lowest manifest
+ # revnum to look up for this cl revnum. (Part of mapping
+ # changelog ellipsis parents to manifest ellipsis parents)
+ self.next_clrev_to_localrev.setdefault(cl.rev(x),
+ mfrevlog.rev(n))
+ # We can't trust the changed files list in the changeset if the
+ # client requested a shallow clone.
+ if self.is_shallow:
+ changedfiles.update(mfl[c[0]].read().keys())
+ else:
+ changedfiles.update(c[3])
+ # END NARROW HACK
+ # Record a complete list of potentially-changed files in
+ # this manifest.
+ return x
+
+ self._verbosenote(_('uncompressed size of bundle content:\n'))
+ size = 0
+ for chunk in self.group(clnodes, cl, lookupcl, units=_('changesets')):
+ size += len(chunk)
+ yield chunk
+ self._verbosenote(_('%8.i (changelog)\n') % size)
+
+ # We need to make sure that the linkrev in the changegroup refers to
+ # the first changeset that introduced the manifest or file revision.
+ # The fastpath is usually safer than the slowpath, because the filelogs
+ # are walked in revlog order.
+ #
+ # When taking the slowpath with reorder=None and the manifest revlog
+ # uses generaldelta, the manifest may be walked in the "wrong" order.
+ # Without 'clrevorder', we would get an incorrect linkrev (see fix in
+ # cc0ff93d0c0c).
+ #
+ # When taking the fastpath, we are only vulnerable to reordering
+ # of the changelog itself. The changelog never uses generaldelta, so
+ # it is only reordered when reorder=True. To handle this case, we
+ # simply take the slowpath, which already has the 'clrevorder' logic.
+ # This was also fixed in cc0ff93d0c0c.
+ fastpathlinkrev = fastpathlinkrev and not self._reorder
+ # Treemanifests don't work correctly with fastpathlinkrev
+ # either, because we don't discover which directory nodes to
+ # send along with files. This could probably be fixed.
+ fastpathlinkrev = fastpathlinkrev and (
+ 'treemanifest' not in repo.requirements)
+ # Shallow clones also don't work correctly with fastpathlinkrev
+ # because file nodes may need to be sent for a manifest even if they
+ # weren't introduced by that manifest.
+ fastpathlinkrev = fastpathlinkrev and not self.is_shallow
+
+ for chunk in self.generatemanifests(commonrevs, clrevorder,
+ fastpathlinkrev, mfs, fnodes, source):
+ yield chunk
+ # BEGIN NARROW HACK
+ mfdicts = None
+ if self.is_shallow:
+ mfdicts = [(self._repo.manifestlog[n].read(), lr)
+ for (n, lr) in mfs.iteritems()]
+ # END NARROW HACK
+ mfs.clear()
+ clrevs = set(cl.rev(x) for x in clnodes)
+
+ if not fastpathlinkrev:
+ def linknodes(unused, fname):
+ return fnodes.get(fname, {})
+ else:
+ cln = cl.node
+ def linknodes(filerevlog, fname):
+ llr = filerevlog.linkrev
+ fln = filerevlog.node
+ revs = ((r, llr(r)) for r in filerevlog)
+ return dict((fln(r), cln(lr)) for r, lr in revs if lr in clrevs)
+
+ # BEGIN NARROW HACK
+ #
+ # We need to pass the mfdicts variable down into
+ # generatefiles(), but more than one command might have
+ # wrapped generatefiles so we can't modify the function
+ # signature. Instead, we pass the data to ourselves using an
+ # instance attribute. I'm sorry.
+ self._mfdicts = mfdicts
+ # END NARROW HACK
+ for chunk in self.generatefiles(changedfiles, linknodes, commonrevs,
+ source):
+ yield chunk
+
+ yield self.close()
+
+ if clnodes:
+ repo.hook('outgoing', node=node.hex(clnodes[0]), source=source)
+ extensions.wrapfunction(changegroup.cg1packer, 'generate', generate)
+
+ def revchunk(orig, self, revlog, rev, prev, linknode):
+ if not util.safehasattr(self, 'full_nodes'):
+ # not sending a narrow changegroup
+ for x in orig(self, revlog, rev, prev, linknode):
+ yield x
+ return
+ # build up some mapping information that's useful later. See
+ # the local() nested function below.
+ if not self.changelog_done:
+ self.clnode_to_rev[linknode] = rev
+ linkrev = rev
+ self.clrev_to_localrev[linkrev] = rev
+ else:
+ linkrev = self.clnode_to_rev[linknode]
+ self.clrev_to_localrev[linkrev] = rev
+ # This is a node to send in full, because the changeset it
+ # corresponds to was a full changeset.
+ if linknode in self.full_nodes:
+ for x in orig(self, revlog, rev, prev, linknode):
+ yield x
+ return
+ # At this point, a node can either be one we should skip or an
+ # ellipsis. If it's not an ellipsis, bail immediately.
+ if linkrev not in self.precomputed_ellipsis:
+ return
+ linkparents = self.precomputed_ellipsis[linkrev]
+ def local(clrev):
+ """Turn a changelog revnum into a local revnum.
+
+ The ellipsis dag is stored as revnums on the changelog,
+ but when we're producing ellipsis entries for
+ non-changelog revlogs, we need to turn those numbers into
+ something local. This does that for us, and during the
+ changelog sending phase will also expand the stored
+ mappings as needed.
+ """
+ if clrev == node.nullrev:
+ return node.nullrev
+ if not self.changelog_done:
+ # If we're doing the changelog, it's possible that we
+ # have a parent that is already on the client, and we
+ # need to store some extra mapping information so that
+ # our contained ellipsis nodes will be able to resolve
+ # their parents.
+ if clrev not in self.clrev_to_localrev:
+ clnode = revlog.node(clrev)
+ self.clnode_to_rev[clnode] = clrev
+ return clrev
+ # Walk the ellipsis-ized changelog breadth-first looking for a
+ # change that has been linked from the current revlog.
+ #
+ # For a flat manifest revlog only a single step should be necessary
+ # as all relevant changelog entries are relevant to the flat
+ # manifest.
+ #
+ # For a filelog or tree manifest dirlog however not every changelog
+ # entry will have been relevant, so we need to skip some changelog
+ # nodes even after ellipsis-izing.
+ walk = [clrev]
+ while walk:
+ p = walk[0]
+ walk = walk[1:]
+ if p in self.clrev_to_localrev:
+ return self.clrev_to_localrev[p]
+ elif p in self.full_nodes:
+ walk.extend([pp for pp in self._repo.changelog.parentrevs(p)
+ if pp != node.nullrev])
+ elif p in self.precomputed_ellipsis:
+ walk.extend([pp for pp in self.precomputed_ellipsis[p]
+ if pp != node.nullrev])
+ else:
+ # In this case, we've got an ellipsis with parents
+ # outside the current bundle (likely an
+ # incremental pull). We "know" that we can use the
+ # value of this same revlog at whatever revision
+ # is pointed to by linknode. "Know" is in scare
+ # quotes because I haven't done enough examination
+ # of edge cases to convince myself this is really
+ # a fact - it works for all the (admittedly
+ # thorough) cases in our testsuite, but I would be
+ # somewhat unsurprised to find a case in the wild
+ # where this breaks down a bit. That said, I don't
+ # know if it would hurt anything.
+ for i in xrange(rev, 0, -1):
+ if revlog.linkrev(i) == clrev:
+ return i
+ # We failed to resolve a parent for this node, so
+ # we crash the changegroup construction.
+ raise error.Abort(
+ 'unable to resolve parent while packing %r %r'
+ ' for changeset %r' % (revlog.indexfile, rev, clrev))
+ return node.nullrev
+
+ if not linkparents or (
+ revlog.parentrevs(rev) == (node.nullrev, node.nullrev)):
+ p1, p2 = node.nullrev, node.nullrev
+ elif len(linkparents) == 1:
+ p1, = sorted(local(p) for p in linkparents)
+ p2 = node.nullrev
+ else:
+ p1, p2 = sorted(local(p) for p in linkparents)
+ yield ellipsisdata(
+ self, rev, revlog, p1, p2, revlog.revision(rev), linknode)
+ extensions.wrapfunction(changegroup.cg1packer, 'revchunk', revchunk)
+
+ def deltaparent(orig, self, revlog, rev, p1, p2, prev):
+ if util.safehasattr(self, 'full_nodes'):
+ # TODO: send better deltas when in narrow mode.
+ #
+ # changegroup.group() loops over revisions to send,
+ # including revisions we'll skip. What this means is that
+ # `prev` will be a potentially useless delta base for all
+ # ellipsis nodes, as the client likely won't have it. In
+ # the future we should do bookkeeping about which nodes
+ # have been sent to the client, and try to be
+ # significantly smarter about delta bases. This is
+ # slightly tricky because this same code has to work for
+ # all revlogs, and we don't have the linkrev/linknode here.
+ return p1
+ return orig(self, revlog, rev, p1, p2, prev)
+ extensions.wrapfunction(changegroup.cg2packer, 'deltaparent', deltaparent)
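
All of the overrides in this file follow the same wrapping convention: the replacement receives the original callable as its first argument and decides whether to delegate to it. A minimal self-contained sketch of that convention (plain Python on a dict of functions; this is not Mercurial's extensions.wrapfunction implementation):

    def wrapfunction(namespace, name, wrapper):
        # Replace namespace[name]; the wrapper always gets the original first.
        origfn = namespace[name]
        def wrapped(*args, **kwargs):
            return wrapper(origfn, *args, **kwargs)
        namespace[name] = wrapped

    def deltaparent(rev, p1, p2, prev):
        return prev                      # default: delta against prev

    funcs = {'deltaparent': deltaparent}

    def narrowdeltaparent(orig, rev, p1, p2, prev):
        # an ellipsis client may not have prev, so fall back to p1
        return p1

    wrapfunction(funcs, 'deltaparent', narrowdeltaparent)
    print(funcs['deltaparent'](7, 3, -1, 6))   # prints 3, not 6
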
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/hgext/narrow/narrowcommands.py Sat Feb 24 17:49:10 2018 -0600
@@ -0,0 +1,405 @@
+# narrowcommands.py - command modifications for narrowhg extension
+#
+# Copyright 2017 Google, Inc.
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2 or any later version.
+from __future__ import absolute_import
+
+import itertools
+
+from mercurial.i18n import _
+from mercurial import (
+ cmdutil,
+ commands,
+ discovery,
+ error,
+ exchange,
+ extensions,
+ hg,
+ merge,
+ narrowspec,
+ node,
+ pycompat,
+ registrar,
+ repair,
+ repoview,
+ util,
+)
+
+from . import (
+ narrowbundle2,
+ narrowrepo,
+)
+
+table = {}
+command = registrar.command(table)
+
+def setup():
+ """Wraps user-facing mercurial commands with narrow-aware versions."""
+
+ entry = extensions.wrapcommand(commands.table, 'clone', clonenarrowcmd)
+ entry[1].append(('', 'narrow', None,
+ _("create a narrow clone of select files")))
+ entry[1].append(('', 'depth', '',
+ _("limit the history fetched by distance from heads")))
+ # TODO(durin42): unify sparse/narrow --include/--exclude logic a bit
+ if 'sparse' not in extensions.enabled():
+ entry[1].append(('', 'include', [],
+ _("specifically fetch this file/directory")))
+ entry[1].append(
+ ('', 'exclude', [],
+ _("do not fetch this file/directory, even if included")))
+
+ entry = extensions.wrapcommand(commands.table, 'pull', pullnarrowcmd)
+ entry[1].append(('', 'depth', '',
+ _("limit the history fetched by distance from heads")))
+
+ extensions.wrapcommand(commands.table, 'archive', archivenarrowcmd)
+
+def expandpull(pullop, includepats, excludepats):
+ if not narrowspec.needsexpansion(includepats):
+ return includepats, excludepats
+
+ heads = pullop.heads or pullop.rheads
+ includepats, excludepats = pullop.remote.expandnarrow(
+ includepats, excludepats, heads)
+ pullop.repo.ui.debug('Expanded narrowspec to inc=%s, exc=%s\n' % (
+ includepats, excludepats))
+ return set(includepats), set(excludepats)
+
+def clonenarrowcmd(orig, ui, repo, *args, **opts):
+ """Wraps clone command, so 'hg clone' first wraps localrepo.clone()."""
+ opts = pycompat.byteskwargs(opts)
+ wrappedextraprepare = util.nullcontextmanager()
+ opts_narrow = opts['narrow']
+ if opts_narrow:
+ def pullbundle2extraprepare_widen(orig, pullop, kwargs):
+ # Create narrow spec patterns from clone flags
+ includepats = narrowspec.parsepatterns(opts['include'])
+ excludepats = narrowspec.parsepatterns(opts['exclude'])
+
+ # If necessary, ask the server to expand the narrowspec.
+ includepats, excludepats = expandpull(
+ pullop, includepats, excludepats)
+
+ if not includepats and excludepats:
+ # If nothing was included, we assume the user meant to include
+ # everything, except what they asked to exclude.
+ includepats = {'path:.'}
+
+ narrowspec.save(pullop.repo, includepats, excludepats)
+
+ # This will populate 'includepats' etc with the values from the
+ # narrowspec we just saved.
+ orig(pullop, kwargs)
+
+ if opts.get('depth'):
+ kwargs['depth'] = opts['depth']
+ wrappedextraprepare = extensions.wrappedfunction(exchange,
+ '_pullbundle2extraprepare', pullbundle2extraprepare_widen)
+
+ def pullnarrow(orig, repo, *args, **kwargs):
+ narrowrepo.wraprepo(repo.unfiltered(), opts_narrow)
+ if isinstance(repo, repoview.repoview):
+ repo.__class__.__bases__ = (repo.__class__.__bases__[0],
+ repo.unfiltered().__class__)
+ if opts_narrow:
+ repo.requirements.add(narrowrepo.REQUIREMENT)
+ repo._writerequirements()
+
+ return orig(repo, *args, **kwargs)
+
+ wrappedpull = extensions.wrappedfunction(exchange, 'pull', pullnarrow)
+
+ with wrappedextraprepare, wrappedpull:
+ return orig(ui, repo, *args, **pycompat.strkwargs(opts))
+
+def pullnarrowcmd(orig, ui, repo, *args, **opts):
+ """Wraps pull command to allow modifying narrow spec."""
+ wrappedextraprepare = util.nullcontextmanager()
+ if narrowrepo.REQUIREMENT in repo.requirements:
+
+ def pullbundle2extraprepare_widen(orig, pullop, kwargs):
+ orig(pullop, kwargs)
+ if opts.get('depth'):
+ kwargs['depth'] = opts['depth']
+ wrappedextraprepare = extensions.wrappedfunction(exchange,
+ '_pullbundle2extraprepare', pullbundle2extraprepare_widen)
+
+ with wrappedextraprepare:
+ return orig(ui, repo, *args, **opts)
+
+def archivenarrowcmd(orig, ui, repo, *args, **opts):
+ """Wraps archive command to narrow the default includes."""
+ if narrowrepo.REQUIREMENT in repo.requirements:
+ repo_includes, repo_excludes = repo.narrowpats
+ includes = set(opts.get(r'include', []))
+ excludes = set(opts.get(r'exclude', []))
+ includes, excludes, unused_invalid = narrowspec.restrictpatterns(
+ includes, excludes, repo_includes, repo_excludes)
+ if includes:
+ opts[r'include'] = includes
+ if excludes:
+ opts[r'exclude'] = excludes
+ return orig(ui, repo, *args, **opts)
+
+def pullbundle2extraprepare(orig, pullop, kwargs):
+ repo = pullop.repo
+ if narrowrepo.REQUIREMENT not in repo.requirements:
+ return orig(pullop, kwargs)
+
+ if narrowbundle2.NARROWCAP not in pullop.remotebundle2caps:
+ raise error.Abort(_("server doesn't support narrow clones"))
+ orig(pullop, kwargs)
+ kwargs['narrow'] = True
+ include, exclude = repo.narrowpats
+ kwargs['oldincludepats'] = include
+ kwargs['oldexcludepats'] = exclude
+ kwargs['includepats'] = include
+ kwargs['excludepats'] = exclude
+ kwargs['known'] = [node.hex(ctx.node()) for ctx in
+ repo.set('::%ln', pullop.common)
+ if ctx.node() != node.nullid]
+ if not kwargs['known']:
+ # Mercurial serializes an empty list as '' and deserializes it as
+ # [''], so delete it instead to avoid handling the empty string on the
+ # server.
+ del kwargs['known']
+
+extensions.wrapfunction(exchange, '_pullbundle2extraprepare',
+ pullbundle2extraprepare)
+
+def _narrow(ui, repo, remote, commoninc, oldincludes, oldexcludes,
+ newincludes, newexcludes, force):
+ oldmatch = narrowspec.match(repo.root, oldincludes, oldexcludes)
+ newmatch = narrowspec.match(repo.root, newincludes, newexcludes)
+
+ # This is essentially doing "hg outgoing" to find all local-only
+ # commits. We will then check that the local-only commits don't
+ # have any changes to files that will be untracked.
+ unfi = repo.unfiltered()
+ outgoing = discovery.findcommonoutgoing(unfi, remote,
+ commoninc=commoninc)
+ ui.status(_('looking for local changes to affected paths\n'))
+ localnodes = []
+ for n in itertools.chain(outgoing.missing, outgoing.excluded):
+ if any(oldmatch(f) and not newmatch(f) for f in unfi[n].files()):
+ localnodes.append(n)
+ revstostrip = unfi.revs('descendants(%ln)', localnodes)
+ hiddenrevs = repoview.filterrevs(repo, 'visible')
+ visibletostrip = list(repo.changelog.node(r)
+ for r in (revstostrip - hiddenrevs))
+ if visibletostrip:
+ ui.status(_('The following changeset(s) or their ancestors have '
+ 'local changes not on the remote:\n'))
+ maxnodes = 10
+ if ui.verbose or len(visibletostrip) <= maxnodes:
+ for n in visibletostrip:
+ ui.status('%s\n' % node.short(n))
+ else:
+ for n in visibletostrip[:maxnodes]:
+ ui.status('%s\n' % node.short(n))
+ ui.status(_('...and %d more, use --verbose to list all\n') %
+ (len(visibletostrip) - maxnodes))
+ if not force:
+ raise error.Abort(_('local changes found'),
+ hint=_('use --force-delete-local-changes to '
+ 'ignore'))
+
+ if revstostrip:
+ tostrip = [unfi.changelog.node(r) for r in revstostrip]
+ if repo['.'].node() in tostrip:
+ # stripping working copy, so move to a different commit first
+ urev = max(repo.revs('(::%n) - %ln + null',
+ repo['.'].node(), visibletostrip))
+ hg.clean(repo, urev)
+ repair.strip(ui, unfi, tostrip, topic='narrow')
+
+ todelete = []
+ for f, f2, size in repo.store.datafiles():
+ if f.startswith('data/'):
+ file = f[5:-2]
+ if not newmatch(file):
+ todelete.append(f)
+ elif f.startswith('meta/'):
+ dir = f[5:-13]
+ dirs = ['.'] + sorted(util.dirs({dir})) + [dir]
+ include = True
+ for d in dirs:
+ visit = newmatch.visitdir(d)
+ if not visit:
+ include = False
+ break
+ if visit == 'all':
+ break
+ if not include:
+ todelete.append(f)
+
+ repo.destroying()
+
+ with repo.transaction("narrowing"):
+ for f in todelete:
+ ui.status(_('deleting %s\n') % f)
+ util.unlinkpath(repo.svfs.join(f))
+ repo.store.markremoved(f)
+
+ for f in repo.dirstate:
+ if not newmatch(f):
+ repo.dirstate.drop(f)
+ repo.wvfs.unlinkpath(f)
+ repo.setnarrowpats(newincludes, newexcludes)
+
+ repo.destroyed()
+
+def _widen(ui, repo, remote, commoninc, newincludes, newexcludes):
+ newmatch = narrowspec.match(repo.root, newincludes, newexcludes)
+
+ # TODO(martinvonz): Get expansion working with widening/narrowing.
+ if narrowspec.needsexpansion(newincludes):
+ raise error.Abort('Expansion not yet supported on pull')
+
+ def pullbundle2extraprepare_widen(orig, pullop, kwargs):
+ orig(pullop, kwargs)
+ # The old{in,ex}cludepats have already been set by orig()
+ kwargs['includepats'] = newincludes
+ kwargs['excludepats'] = newexcludes
+ wrappedextraprepare = extensions.wrappedfunction(exchange,
+ '_pullbundle2extraprepare', pullbundle2extraprepare_widen)
+
+ # define a function that narrowbundle2 can call after creating the
+ # backup bundle, but before applying the bundle from the server
+ def setnewnarrowpats():
+ repo.setnarrowpats(newincludes, newexcludes)
+ repo.setnewnarrowpats = setnewnarrowpats
+
+ ds = repo.dirstate
+ p1, p2 = ds.p1(), ds.p2()
+ with ds.parentchange():
+ ds.setparents(node.nullid, node.nullid)
+ common = commoninc[0]
+ with wrappedextraprepare:
+ exchange.pull(repo, remote, heads=common)
+ with ds.parentchange():
+ ds.setparents(p1, p2)
+
+ actions = {k: [] for k in 'a am f g cd dc r dm dg m e k p pr'.split()}
+ addgaction = actions['g'].append
+
+ mf = repo['.'].manifest().matches(newmatch)
+ for f, fn in mf.iteritems():
+ if f not in repo.dirstate:
+ addgaction((f, (mf.flags(f), False),
+ "add from widened narrow clone"))
+
+ merge.applyupdates(repo, actions, wctx=repo[None],
+ mctx=repo['.'], overwrite=False)
+ merge.recordupdates(repo, actions, branchmerge=False)
+
+# TODO(rdamazio): Make new matcher format and update description
+@command('tracked',
+ [('', 'addinclude', [], _('new paths to include')),
+ ('', 'removeinclude', [], _('old paths to no longer include')),
+ ('', 'addexclude', [], _('new paths to exclude')),
+ ('', 'removeexclude', [], _('old paths to no longer exclude')),
+ ('', 'clear', False, _('whether to replace the existing narrowspec')),
+ ('', 'force-delete-local-changes', False,
+ _('forces deletion of local changes when narrowing')),
+ ] + commands.remoteopts,
+ _('[OPTIONS]... [REMOTE]'),
+ inferrepo=True)
+def trackedcmd(ui, repo, remotepath=None, *pats, **opts):
+ """show or change the current narrowspec
+
+ With no argument, shows the current narrowspec entries, one per line. Each
+ line will be prefixed with 'I' or 'X' for included or excluded patterns,
+ respectively.
+
+ The narrowspec consists of expressions to match remote files and/or
+ directories that should be pulled into your client.
+ The narrowspec has *include* and *exclude* expressions, with excludes always
+ trumping includes: that is, if a file matches an exclude expression, it will
+ be excluded even if it also matches an include expression.
+ Excluding files that were never included has no effect.
+
+ Each included or excluded entry is in the format described by
+ 'hg help patterns'.
+
+ The options allow you to add or remove included and excluded expressions.
+
+ If --clear is specified, then all previous includes and excludes are DROPPED
+ and replaced by the new ones specified to --addinclude and --addexclude.
+ If --clear is specified without any further options, the narrowspec will be
+ empty and will not match any files.
+ """
+ opts = pycompat.byteskwargs(opts)
+ if narrowrepo.REQUIREMENT not in repo.requirements:
+ ui.warn(_('The narrow command is only supported on repositories cloned'
+ ' with --narrow.\n'))
+ return 1
+
+ # Before supporting it, decide whether "hg tracked --clear" should mean
+ # tracking no paths or all paths.
+ if opts['clear']:
+ ui.warn(_('The --clear option is not yet supported.\n'))
+ return 1
+
+ if narrowspec.needsexpansion(opts['addinclude'] + opts['addexclude']):
+ raise error.Abort('Expansion not yet supported on widen/narrow')
+
+ addedincludes = narrowspec.parsepatterns(opts['addinclude'])
+ removedincludes = narrowspec.parsepatterns(opts['removeinclude'])
+ addedexcludes = narrowspec.parsepatterns(opts['addexclude'])
+ removedexcludes = narrowspec.parsepatterns(opts['removeexclude'])
+ widening = addedincludes or removedexcludes
+ narrowing = removedincludes or addedexcludes
+ only_show = not widening and not narrowing
+
+ # Only print the current narrowspec.
+ if only_show:
+ include, exclude = repo.narrowpats
+
+ ui.pager('tracked')
+ fm = ui.formatter('narrow', opts)
+ for i in sorted(include):
+ fm.startitem()
+ fm.write('status', '%s ', 'I', label='narrow.included')
+ fm.write('pat', '%s\n', i, label='narrow.included')
+ for i in sorted(exclude):
+ fm.startitem()
+ fm.write('status', '%s ', 'X', label='narrow.excluded')
+ fm.write('pat', '%s\n', i, label='narrow.excluded')
+ fm.end()
+ return 0
+
+ with repo.wlock(), repo.lock():
+ cmdutil.bailifchanged(repo)
+
+ # Find the revisions we have in common with the remote. These will
+ # be used for finding local-only changes for narrowing. They will
+ # also define the set of revisions to update for widening.
+ remotepath = ui.expandpath(remotepath or 'default')
+ url, branches = hg.parseurl(remotepath)
+ ui.status(_('comparing with %s\n') % util.hidepassword(url))
+ remote = hg.peer(repo, opts, url)
+ commoninc = discovery.findcommonincoming(repo, remote)
+
+ oldincludes, oldexcludes = repo.narrowpats
+ if narrowing:
+ newincludes = oldincludes - removedincludes
+ newexcludes = oldexcludes | addedexcludes
+ _narrow(ui, repo, remote, commoninc, oldincludes, oldexcludes,
+ newincludes, newexcludes,
+ opts['force_delete_local_changes'])
+ # _narrow() updated the narrowspec, and _widen() below needs to
+ # use the updated values as its base (otherwise removed includes
+ # and added excludes will be lost in the resulting narrowspec)
+ oldincludes = newincludes
+ oldexcludes = newexcludes
+
+ if widening:
+ newincludes = oldincludes | addedincludes
+ newexcludes = oldexcludes - removedexcludes
+ _widen(ui, repo, remote, commoninc, newincludes, newexcludes)
+
+ return 0
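
The include/exclude bookkeeping at the end of trackedcmd() is plain set arithmetic, with narrowing applied before widening so that a removed include cannot sneak back in. A standalone illustration (hypothetical pattern strings, not narrowspec's parser):

    oldincludes = {'path:src', 'path:docs'}
    oldexcludes = {'path:src/vendor'}

    addedincludes = {'path:tests'}          # --addinclude
    removedincludes = {'path:docs'}         # --removeinclude
    addedexcludes = {'path:docs/build'}     # --addexclude
    removedexcludes = {'path:src/vendor'}   # --removeexclude

    # narrowing first: drop includes, add excludes
    newincludes = oldincludes - removedincludes
    newexcludes = oldexcludes | addedexcludes

    # then widening on top of the narrowed spec
    newincludes = newincludes | addedincludes
    newexcludes = newexcludes - removedexcludes

    print(sorted(newincludes))   # ['path:src', 'path:tests']
    print(sorted(newexcludes))   # ['path:docs/build']
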
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/hgext/narrow/narrowcopies.py Sat Feb 24 17:49:10 2018 -0600
@@ -0,0 +1,35 @@
+# narrowcopies.py - extensions to mercurial copies module to support narrow
+# clones
+#
+# Copyright 2017 Google, Inc.
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2 or any later version.
+
+from __future__ import absolute_import
+
+from mercurial import (
+ copies,
+ extensions,
+ util,
+)
+
+def setup(repo):
+ def _computeforwardmissing(orig, a, b, match=None):
+ missing = orig(a, b, match)
+ if util.safehasattr(repo, 'narrowmatch'):
+ narrowmatch = repo.narrowmatch()
+ missing = [f for f in missing if narrowmatch(f)]
+ return missing
+
+ def _checkcopies(orig, srcctx, dstctx, f, base, tca, remotebase, limit,
+ data):
+ if util.safehasattr(repo, 'narrowmatch'):
+ narrowmatch = repo.narrowmatch()
+ if not narrowmatch(f):
+ return
+ orig(srcctx, dstctx, f, base, tca, remotebase, limit, data)
+
+ extensions.wrapfunction(copies, '_computeforwardmissing',
+ _computeforwardmissing)
+ extensions.wrapfunction(copies, '_checkcopies', _checkcopies)
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/hgext/narrow/narrowdirstate.py Sat Feb 24 17:49:10 2018 -0600
@@ -0,0 +1,82 @@
+# narrowdirstate.py - extensions to mercurial dirstate to support narrow clones
+#
+# Copyright 2017 Google, Inc.
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2 or any later version.
+
+from __future__ import absolute_import
+
+from mercurial.i18n import _
+from mercurial import (
+ dirstate,
+ error,
+ extensions,
+ match as matchmod,
+ narrowspec,
+ util as hgutil,
+)
+
+def setup(repo):
+ """Add narrow spec dirstate ignore, block changes outside narrow spec."""
+
+ def walk(orig, self, match, subrepos, unknown, ignored, full=True,
+ narrowonly=True):
+ if narrowonly:
+ # hack to not exclude explicitly-specified paths so that a warning
+ # can be issued for them later, e.g. in dirstate.add()
+ em = matchmod.exact(match._root, match._cwd, match.files())
+ nm = matchmod.unionmatcher([repo.narrowmatch(), em])
+ match = matchmod.intersectmatchers(match, nm)
+ return orig(self, match, subrepos, unknown, ignored, full)
+
+ extensions.wrapfunction(dirstate.dirstate, 'walk', walk)
+
+ # Prevent adding files that are outside the narrow clone
+ editfuncs = ['normal', 'add', 'normallookup', 'copy', 'remove', 'merge']
+ for func in editfuncs:
+ def _wrapper(orig, self, *args):
+ dirstate = repo.dirstate
+ narrowmatch = repo.narrowmatch()
+ for f in args:
+ if f is not None and not narrowmatch(f) and f not in dirstate:
+ raise error.Abort(_("cannot track '%s' - it is outside " +
+ "the narrow clone") % f)
+ return orig(self, *args)
+ extensions.wrapfunction(dirstate.dirstate, func, _wrapper)
+
+ def filterrebuild(orig, self, parent, allfiles, changedfiles=None):
+ if changedfiles is None:
+ # Rebuilding entire dirstate, let's filter allfiles to match the
+ # narrowspec.
+ allfiles = [f for f in allfiles if repo.narrowmatch()(f)]
+ orig(self, parent, allfiles, changedfiles)
+
+ extensions.wrapfunction(dirstate.dirstate, 'rebuild', filterrebuild)
+
+ def _narrowbackupname(backupname):
+ assert 'dirstate' in backupname
+ return backupname.replace('dirstate', narrowspec.FILENAME)
+
+ def restorebackup(orig, self, tr, backupname):
+ self._opener.rename(_narrowbackupname(backupname), narrowspec.FILENAME,
+ checkambig=True)
+ orig(self, tr, backupname)
+
+ extensions.wrapfunction(dirstate.dirstate, 'restorebackup', restorebackup)
+
+ def savebackup(orig, self, tr, backupname):
+ orig(self, tr, backupname)
+
+ narrowbackupname = _narrowbackupname(backupname)
+ self._opener.tryunlink(narrowbackupname)
+ hgutil.copyfile(self._opener.join(narrowspec.FILENAME),
+ self._opener.join(narrowbackupname), hardlink=True)
+
+ extensions.wrapfunction(dirstate.dirstate, 'savebackup', savebackup)
+
+ def clearbackup(orig, self, tr, backupname):
+ orig(self, tr, backupname)
+ self._opener.unlink(_narrowbackupname(backupname))
+
+ extensions.wrapfunction(dirstate.dirstate, 'clearbackup', clearbackup)
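
The walk() override above composes three matchers: the caller's matcher, the narrowspec matcher, and an exact matcher for explicitly named files, so that out-of-narrow paths named on the command line still reach the warning in the edit-function wrappers. A toy sketch of that composition using plain predicates (not matchmod):

    def union(*preds):
        return lambda f: any(p(f) for p in preds)

    def intersect(*preds):
        return lambda f: all(p(f) for p in preds)

    narrowmatch = lambda f: f.startswith('src/')
    explicit = {'docs/README'}
    exactmatch = lambda f: f in explicit
    callermatch = lambda f: True            # e.g. a bare 'hg status'

    effective = intersect(callermatch, union(narrowmatch, exactmatch))
    print(effective('src/a.py'))     # True
    print(effective('docs/README'))  # True - explicitly named, so it can be warned about
    print(effective('docs/other'))   # False
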
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/hgext/narrow/narrowmerge.py Sat Feb 24 17:49:10 2018 -0600
@@ -0,0 +1,78 @@
+# narrowmerge.py - extensions to mercurial merge module to support narrow clones
+#
+# Copyright 2017 Google, Inc.
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2 or any later version.
+
+from __future__ import absolute_import
+
+from mercurial.i18n import _
+from mercurial import (
+ copies,
+ error,
+ extensions,
+ merge,
+ util,
+)
+
+def setup():
+ def _manifestmerge(orig, repo, wctx, p2, pa, branchmerge, *args, **kwargs):
+ """Filter updates to only lay out files that match the narrow spec."""
+ actions, diverge, renamedelete = orig(
+ repo, wctx, p2, pa, branchmerge, *args, **kwargs)
+
+ if not util.safehasattr(repo, 'narrowmatch'):
+ return actions, diverge, renamedelete
+
+ nooptypes = set(['k']) # TODO: handle with nonconflicttypes
+ nonconflicttypes = set('a am c cm f g r e'.split())
+ narrowmatch = repo.narrowmatch()
+ # We mutate the items in the dict during iteration, so iterate
+ # over a copy.
+ for f, action in list(actions.items()):
+ if narrowmatch(f):
+ pass
+ elif not branchmerge:
+ del actions[f] # just updating, ignore changes outside clone
+ elif action[0] in nooptypes:
+ del actions[f] # merge does not affect file
+ elif action[0] in nonconflicttypes:
+ raise error.Abort(_('merge affects file \'%s\' outside narrow, '
+ 'which is not yet supported') % f,
+ hint=_('merging in the other direction '
+ 'may work'))
+ else:
+ raise error.Abort(_('conflict in file \'%s\' is outside '
+ 'narrow clone') % f)
+
+ return actions, diverge, renamedelete
+
+ extensions.wrapfunction(merge, 'manifestmerge', _manifestmerge)
+
+ def _checkcollision(orig, repo, wmf, actions):
+ if util.safehasattr(repo, 'narrowmatch'):
+ narrowmatch = repo.narrowmatch()
+ wmf = wmf.matches(narrowmatch)
+ if actions:
+ narrowactions = {}
+ for m, actionsfortype in actions.iteritems():
+ narrowactions[m] = []
+ for (f, args, msg) in actionsfortype:
+ if narrowmatch(f):
+ narrowactions[m].append((f, args, msg))
+ actions = narrowactions
+ return orig(repo, wmf, actions)
+
+ extensions.wrapfunction(merge, '_checkcollision', _checkcollision)
+
+ def _computenonoverlap(orig, repo, *args, **kwargs):
+ u1, u2 = orig(repo, *args, **kwargs)
+ if not util.safehasattr(repo, 'narrowmatch'):
+ return u1, u2
+
+ narrowmatch = repo.narrowmatch()
+ u1 = [f for f in u1 if narrowmatch(f)]
+ u2 = [f for f in u2 if narrowmatch(f)]
+ return u1, u2
+ extensions.wrapfunction(copies, '_computenonoverlap', _computenonoverlap)
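
The action filtering in _manifestmerge() boils down to a per-file triage: keep files inside the narrowspec, silently drop no-op or update-only actions outside it, and refuse anything that would actually merge content outside the clone. A loose standalone sketch of that triage (a hypothetical RuntimeError stands in for error.Abort; the action codes mirror the sets above):

    NOOP_TYPES = {'k'}                       # keep: nothing to do
    NONCONFLICT_TYPES = set('a am c cm f g r e'.split())

    def filteractions(actions, narrowmatch, branchmerge):
        # actions maps filename -> (actiontype, args, msg), the same shape
        # as the dict returned by manifestmerge() above.
        for f, action in list(actions.items()):
            if narrowmatch(f):
                continue
            if not branchmerge or action[0] in NOOP_TYPES:
                del actions[f]               # plain update or no-op: safe to drop
            elif action[0] in NONCONFLICT_TYPES:
                raise RuntimeError('merge affects %r outside narrow' % f)
            else:
                raise RuntimeError('conflict in %r outside narrow' % f)
        return actions

    acts = {'src/a.py': ('m', (), 'merge'), 'vendor/x.c': ('k', (), 'keep')}
    print(filteractions(acts, lambda f: f.startswith('src/'), branchmerge=True))
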
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/hgext/narrow/narrowpatch.py Sat Feb 24 17:49:10 2018 -0600
@@ -0,0 +1,42 @@
+# narrowpatch.py - extensions to mercurial patch module to support narrow clones
+#
+# Copyright 2017 Google, Inc.
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2 or any later version.
+
+from __future__ import absolute_import
+
+from mercurial import (
+ extensions,
+ patch,
+ util,
+)
+
+def setup(repo):
+ def _filepairs(orig, *args):
+ """Only includes files within the narrow spec in the diff."""
+ if util.safehasattr(repo, 'narrowmatch'):
+ narrowmatch = repo.narrowmatch()
+ for x in orig(*args):
+ f1, f2, copyop = x
+ if ((not f1 or narrowmatch(f1)) and
+ (not f2 or narrowmatch(f2))):
+ yield x
+ else:
+ for x in orig(*args):
+ yield x
+
+ def trydiff(orig, repo, revs, ctx1, ctx2, modified, added, removed,
+ copy, getfilectx, *args, **kwargs):
+ if util.safehasattr(repo, 'narrowmatch'):
+ narrowmatch = repo.narrowmatch()
+ modified = [f for f in modified if narrowmatch(f)]
+ added = [f for f in added if narrowmatch(f)]
+ removed = [f for f in removed if narrowmatch(f)]
+ copy = {k: v for k, v in copy.iteritems() if narrowmatch(k)}
+ return orig(repo, revs, ctx1, ctx2, modified, added, removed, copy,
+ getfilectx, *args, **kwargs)
+
+ extensions.wrapfunction(patch, '_filepairs', _filepairs)
+ extensions.wrapfunction(patch, 'trydiff', trydiff)
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/hgext/narrow/narrowrepo.py Sat Feb 24 17:49:10 2018 -0600
@@ -0,0 +1,113 @@
+# narrowrepo.py - repository which supports narrow revlogs, lazy loading
+#
+# Copyright 2017 Google, Inc.
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2 or any later version.
+
+from __future__ import absolute_import
+
+from mercurial import (
+ bundlerepo,
+ hg,
+ localrepo,
+ match as matchmod,
+ narrowspec,
+ scmutil,
+)
+
+from . import (
+ narrowrevlog,
+)
+
+# When narrowing is finalized and no longer subject to format changes,
+# we should move this to just "narrow" or similar.
+REQUIREMENT = 'narrowhg-experimental'
+
+def wrappostshare(orig, sourcerepo, destrepo, **kwargs):
+ orig(sourcerepo, destrepo, **kwargs)
+ if REQUIREMENT in sourcerepo.requirements:
+ with destrepo.wlock():
+ with destrepo.vfs('shared', 'a') as fp:
+ fp.write(narrowspec.FILENAME + '\n')
+
+def unsharenarrowspec(orig, ui, repo, repopath):
+ if (REQUIREMENT in repo.requirements
+ and repo.path == repopath and repo.shared()):
+ srcrepo = hg.sharedreposource(repo)
+ with srcrepo.vfs(narrowspec.FILENAME) as f:
+ spec = f.read()
+ with repo.vfs(narrowspec.FILENAME, 'w') as f:
+ f.write(spec)
+ return orig(ui, repo, repopath)
+
+def wraprepo(repo, opts_narrow):
+ """Enables narrow clone functionality on a single local repository."""
+
+ cacheprop = localrepo.storecache
+ if isinstance(repo, bundlerepo.bundlerepository):
+ # We have to use a different caching property decorator for
+ # bundlerepo because storecache blows up in strange ways on a
+ # bundlerepo. Fortunately, there's no risk of data changing in
+ # a bundlerepo.
+ cacheprop = lambda name: localrepo.unfilteredpropertycache
+
+ class narrowrepository(repo.__class__):
+
+ def _constructmanifest(self):
+ manifest = super(narrowrepository, self)._constructmanifest()
+ narrowrevlog.makenarrowmanifestrevlog(manifest, repo)
+ return manifest
+
+ @cacheprop('00manifest.i')
+ def manifestlog(self):
+ mfl = super(narrowrepository, self).manifestlog
+ narrowrevlog.makenarrowmanifestlog(mfl, self)
+ return mfl
+
+ def file(self, f):
+ fl = super(narrowrepository, self).file(f)
+ narrowrevlog.makenarrowfilelog(fl, self.narrowmatch())
+ return fl
+
+ @localrepo.repofilecache(narrowspec.FILENAME)
+ def narrowpats(self):
+ """matcher patterns for this repository's narrowspec
+
+ A tuple of (includes, excludes).
+ """
+ return narrowspec.load(self)
+
+ @localrepo.repofilecache(narrowspec.FILENAME)
+ def _narrowmatch(self):
+ include, exclude = self.narrowpats
+ if not opts_narrow and not include and not exclude:
+ return matchmod.always(self.root, '')
+ return narrowspec.match(self.root, include=include, exclude=exclude)
+
+ # TODO(martinvonz): make this property-like instead?
+ def narrowmatch(self):
+ return self._narrowmatch
+
+ def setnarrowpats(self, newincludes, newexcludes):
+ narrowspec.save(self, newincludes, newexcludes)
+ self.invalidate(clearfilecache=True)
+
+ # I'm not sure this is the right place to do this filter.
+ # context._manifestmatches() would probably be better, or perhaps
+ # move it to a later place, in case some of the callers do want to know
+ # which directories changed. This seems to work for now, though.
+ def status(self, *args, **kwargs):
+ s = super(narrowrepository, self).status(*args, **kwargs)
+ narrowmatch = self.narrowmatch()
+ modified = list(filter(narrowmatch, s.modified))
+ added = list(filter(narrowmatch, s.added))
+ removed = list(filter(narrowmatch, s.removed))
+ deleted = list(filter(narrowmatch, s.deleted))
+ unknown = list(filter(narrowmatch, s.unknown))
+ ignored = list(filter(narrowmatch, s.ignored))
+ clean = list(filter(narrowmatch, s.clean))
+ return scmutil.status(modified, added, removed, deleted, unknown,
+ ignored, clean)
+
+ repo.__class__ = narrowrepository
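
wraprepo() customizes an existing repository by swapping its class for a dynamically created subclass rather than constructing a new object, which is why the narrow behaviour survives on the same instance. A self-contained sketch of that pattern (toy classes, unrelated to localrepo):

    class baserepo(object):
        def status(self):
            return ['a.txt', 'vendor/b.txt']

    def wraprepo(repo, match):
        class narrowrepository(repo.__class__):
            def status(self):
                # filter the parent class's result through the narrow matcher
                return [f for f in super(narrowrepository, self).status()
                        if match(f)]
        repo.__class__ = narrowrepository

    repo = baserepo()
    wraprepo(repo, lambda f: not f.startswith('vendor/'))
    print(repo.status())   # ['a.txt']
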
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/hgext/narrow/narrowrevlog.py Sat Feb 24 17:49:10 2018 -0600
@@ -0,0 +1,187 @@
+# narrowrevlog.py - revlog storing irrelevant nodes as "ellipsis" nodes
+#
+# Copyright 2017 Google, Inc.
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2 or any later version.
+
+from __future__ import absolute_import
+
+from mercurial import (
+ error,
+ manifest,
+ revlog,
+ util,
+)
+
+def readtransform(self, text):
+ return text, False
+
+def writetransform(self, text):
+ return text, False
+
+def rawtransform(self, text):
+ return False
+
+revlog.addflagprocessor(revlog.REVIDX_ELLIPSIS,
+ (readtransform, writetransform, rawtransform))
+
+def setup():
+ # We just wanted to add the flag processor, which is done at module
+ # load time.
+ pass
+
+class excludeddir(manifest.treemanifest):
+ """Stand-in for a directory that is excluded from the repository.
+
+ With narrowing active on a repository that uses treemanifests,
+ some of the directory revlogs will be excluded from the resulting
+ clone. This is a huge storage win for clients, but means we need
+ some sort of pseudo-manifest to surface to internals so we can
+ detect a merge conflict outside the narrowspec. That's what this
+ class is: it stands in for a directory whose node is known, but
+ whose contents are unknown.
+ """
+ def __init__(self, dir, node):
+ super(excludeddir, self).__init__(dir)
+ self._node = node
+ # Add an empty file, which will be included by iterators and such,
+ # appearing as the directory itself (i.e. something like "dir/")
+ self._files[''] = node
+ self._flags[''] = 't'
+
+ # Manifests outside the narrowspec should never be modified, so avoid
+ # copying. This makes a noticeable difference when there are very many
+ # directories outside the narrowspec. Also, it makes sense for the copy to
+ # be of the same type as the original, which would not happen with the
+ # super type's copy().
+ def copy(self):
+ return self
+
+class excludeddirmanifestctx(manifest.treemanifestctx):
+ """context wrapper for excludeddir - see that docstring for rationale"""
+ def __init__(self, dir, node):
+ self._dir = dir
+ self._node = node
+
+ def read(self):
+ return excludeddir(self._dir, self._node)
+
+ def write(self, *args):
+ raise error.ProgrammingError(
+ 'attempt to write manifest from excluded dir %s' % self._dir)
+
+class excludedmanifestrevlog(manifest.manifestrevlog):
+ """Stand-in for excluded treemanifest revlogs.
+
+ When narrowing is active on a treemanifest repository, we'll have
+ references to directories we can't see due to the revlog being
+ skipped. This class exists to conform to the manifestrevlog
+ interface for those directories and proactively prevent writes
+ outside the narrowspec.
+ """
+
+ def __init__(self, dir):
+ self._dir = dir
+
+ def __len__(self):
+ raise error.ProgrammingError(
+ 'attempt to get length of excluded dir %s' % self._dir)
+
+ def rev(self, node):
+ raise error.ProgrammingError(
+ 'attempt to get rev from excluded dir %s' % self._dir)
+
+ def linkrev(self, node):
+ raise error.ProgrammingError(
+ 'attempt to get linkrev from excluded dir %s' % self._dir)
+
+ def node(self, rev):
+ raise error.ProgrammingError(
+ 'attempt to get node from excluded dir %s' % self._dir)
+
+ def add(self, *args, **kwargs):
+ # We should never write entries in dirlogs outside the narrow clone.
+ # However, the method still gets called from writesubtree() in
+ # _addtree(), so we need to handle it. We should possibly make that
+ # avoid calling add() with a clean manifest (_dirty is always False
+ # in excludeddir instances).
+ pass
+
+def makenarrowmanifestrevlog(mfrevlog, repo):
+ if util.safehasattr(mfrevlog, '_narrowed'):
+ return
+
+ class narrowmanifestrevlog(mfrevlog.__class__):
+ # This function is called via debug{revlog,index,data}, but also during
+ # at least some push operations. This will be used to wrap/exclude the
+ # child directories when using treemanifests.
+ def dirlog(self, d):
+ if d and not d.endswith('/'):
+ d = d + '/'
+ if not repo.narrowmatch().visitdir(d[:-1] or '.'):
+ return excludedmanifestrevlog(d)
+ result = super(narrowmanifestrevlog, self).dirlog(d)
+ makenarrowmanifestrevlog(result, repo)
+ return result
+
+ mfrevlog.__class__ = narrowmanifestrevlog
+ mfrevlog._narrowed = True
+
+def makenarrowmanifestlog(mfl, repo):
+ class narrowmanifestlog(mfl.__class__):
+ def get(self, dir, node, verify=True):
+ if not repo.narrowmatch().visitdir(dir[:-1] or '.'):
+ return excludeddirmanifestctx(dir, node)
+ return super(narrowmanifestlog, self).get(dir, node, verify=verify)
+ mfl.__class__ = narrowmanifestlog
+
+def makenarrowfilelog(fl, narrowmatch):
+ class narrowfilelog(fl.__class__):
+ def renamed(self, node):
+ # Renames that come from outside the narrowspec are
+ # problematic at least for git-diffs, because we lack the
+ # base text for the rename. This logic was introduced in
+ # 3cd72b1 of narrowhg (authored by martinvonz, reviewed by
+ # adgar), but that revision doesn't have any additional
+ # commentary on what problems we can encounter.
+ m = super(narrowfilelog, self).renamed(node)
+ if m and not narrowmatch(m[0]):
+ return None
+ return m
+
+ def size(self, rev):
+ # We take advantage of the fact that remotefilelog
+ # lacks a node() method to just skip the
+ # rename-checking logic when on remotefilelog. This
+ # might be incorrect on other non-revlog-based storage
+ # engines, but for now this seems to be fine.
+ #
+ # TODO: when remotefilelog is in core, improve this to
+ # explicitly look for remotefilelog instead of cheating
+ # with a hasattr check.
+ if util.safehasattr(self, 'node'):
+ node = self.node(rev)
+ # Because renamed() is overridden above to
+ # sometimes return None even if there is metadata
+ # in the revlog, size can be incorrect for
+ # copies/renames, so we need to make sure we call
+ # the super class's implementation of renamed()
+ # for the purpose of size calculation.
+ if super(narrowfilelog, self).renamed(node):
+ return len(self.read(node))
+ return super(narrowfilelog, self).size(rev)
+
+ def cmp(self, node, text):
+ different = super(narrowfilelog, self).cmp(node, text)
+ if different:
+ # Similar to size() above, if the file was copied from
+ # a file outside the narrowspec, the super class's cmp()
+ # would have returned True because we tricked it into
+ # thinking that the file was not renamed.
+ if super(narrowfilelog, self).renamed(node):
+ t2 = self.read(node)
+ return t2 != text
+ return different
+
+ fl.__class__ = narrowfilelog
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/hgext/narrow/narrowtemplates.py Sat Feb 24 17:49:10 2018 -0600
@@ -0,0 +1,48 @@
+# narrowtemplates.py - added template keywords for narrow clones
+#
+# Copyright 2017 Google, Inc.
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2 or any later version.
+
+from __future__ import absolute_import
+
+from mercurial import (
+ registrar,
+ revlog,
+ util,
+)
+
+keywords = {}
+templatekeyword = registrar.templatekeyword(keywords)
+revsetpredicate = registrar.revsetpredicate()
+
+def _isellipsis(repo, rev):
+ if repo.changelog.flags(rev) & revlog.REVIDX_ELLIPSIS:
+ return True
+ return False
+
+@templatekeyword('ellipsis')
+def ellipsis(repo, ctx, templ, **args):
+ """:ellipsis: String. 'ellipsis' if the change is an ellipsis node,
+ else ''."""
+ if _isellipsis(repo, ctx.rev()):
+ return 'ellipsis'
+ return ''
+
+@templatekeyword('outsidenarrow')
+def outsidenarrow(repo, ctx, templ, **args):
+ """:outsidenarrow: String. 'outsidenarrow' if the change affects no
+ tracked files, else ''."""
+ if util.safehasattr(repo, 'narrowmatch'):
+ m = repo.narrowmatch()
+ if not any(m(f) for f in ctx.files()):
+ return 'outsidenarrow'
+ return ''
+
+@revsetpredicate('ellipsis')
+def ellipsisrevset(repo, subset, x):
+ """``ellipsis()``
+ Changesets that are ellipsis nodes.
+ """
+ return subset.filter(lambda r: _isellipsis(repo, r))
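
Both the template keyword and the revset above reduce to a single bit test against the changelog entry's flags. A tiny sketch of that check (the flag value here is a hypothetical stand-in for revlog.REVIDX_ELLIPSIS, not necessarily its real value):

    REVIDX_ELLIPSIS = 1 << 14   # hypothetical stand-in value

    def isellipsis(flags):
        # an ellipsis revision has the ellipsis bit set in its revlog flags
        return bool(flags & REVIDX_ELLIPSIS)

    print(isellipsis(0))                      # False
    print(isellipsis(REVIDX_ELLIPSIS | 0x1))  # True
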
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/hgext/narrow/narrowwirepeer.py Sat Feb 24 17:49:10 2018 -0600
@@ -0,0 +1,52 @@
+# narrowwirepeer.py - passes narrow spec with unbundle command
+#
+# Copyright 2017 Google, Inc.
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2 or any later version.
+
+from __future__ import absolute_import
+
+from mercurial.i18n import _
+from mercurial import (
+ error,
+ extensions,
+ hg,
+ narrowspec,
+ node,
+)
+
+def uisetup():
+ def peersetup(ui, peer):
+ # We must set up the expansion before reposetup below, since it's used
+ # at clone time before we have a repo.
+ class expandingpeer(peer.__class__):
+ def expandnarrow(self, narrow_include, narrow_exclude, nodes):
+ ui.status(_("expanding narrowspec\n"))
+ if not self.capable('exp-expandnarrow'):
+ raise error.Abort(
+ 'peer does not support expanding narrowspecs')
+
+ hex_nodes = (node.hex(n) for n in nodes)
+ new_narrowspec = self._call(
+ 'expandnarrow',
+ includepats=','.join(narrow_include),
+ excludepats=','.join(narrow_exclude),
+ nodes=','.join(hex_nodes))
+
+ return narrowspec.parseserverpatterns(new_narrowspec)
+ peer.__class__ = expandingpeer
+ hg.wirepeersetupfuncs.append(peersetup)
+
+def reposetup(repo):
+ def wirereposetup(ui, peer):
+ def wrapped(orig, cmd, *args, **kwargs):
+ if cmd == 'unbundle':
+ # TODO: don't blindly add include/exclude wireproto
+ # arguments to unbundle.
+ include, exclude = repo.narrowpats
+ kwargs[r"includepats"] = ','.join(include)
+ kwargs[r"excludepats"] = ','.join(exclude)
+ return orig(cmd, *args, **kwargs)
+ extensions.wrapfunction(peer, '_calltwowaystream', wrapped)
+ hg.wirepeersetupfuncs.append(wirereposetup)
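
The wire-protocol arguments added above are just comma-joined pattern lists, both in the expandnarrow call and in the unbundle wrapper. A rough sketch of that round trip (plain string handling, not narrowspec's real parser or validation):

    def serializepats(pats):
        return ','.join(sorted(pats))

    def deserializepats(data):
        return set(data.split(',')) if data else set()

    spec = {'path:src', 'path:tests'}
    wire = serializepats(spec)
    print(wire)                            # path:src,path:tests
    print(deserializepats(wire) == spec)   # True
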
--- a/hgext/notify.py Fri Feb 23 17:57:04 2018 -0800
+++ b/hgext/notify.py Sat Feb 24 17:49:10 2018 -0600
@@ -142,8 +142,8 @@
from mercurial.i18n import _
from mercurial import (
- cmdutil,
error,
+ logcmdutil,
mail,
patch,
registrar,
@@ -257,9 +257,8 @@
mapfile = self.ui.config('notify', 'style')
if not mapfile and not template:
template = deftemplates.get(hooktype) or single_template
- spec = cmdutil.logtemplatespec(template, mapfile)
- self.t = cmdutil.changeset_templater(self.ui, self.repo, spec,
- False, None, False)
+ spec = logcmdutil.templatespec(template, mapfile)
+ self.t = logcmdutil.changesettemplater(self.ui, self.repo, spec)
def strip(self, path):
'''strip leading slashes from local path, turn into web-safe path.'''
--- a/hgext/patchbomb.py Fri Feb 23 17:57:04 2018 -0800
+++ b/hgext/patchbomb.py Sat Feb 24 17:49:10 2018 -0600
@@ -265,11 +265,10 @@
if patchtags:
patchname = patchtags[0]
elif total > 1:
- patchname = cmdutil.makefilename(repo, '%b-%n.patch',
- binnode, seqno=idx,
- total=total)
+ patchname = cmdutil.makefilename(repo[node], '%b-%n.patch',
+ seqno=idx, total=total)
else:
- patchname = cmdutil.makefilename(repo, '%b.patch', binnode)
+ patchname = cmdutil.makefilename(repo[node], '%b.patch')
disposition = 'inline'
if opts.get('attach'):
disposition = 'attachment'
--- a/hgext/rebase.py Fri Feb 23 17:57:04 2018 -0800
+++ b/hgext/rebase.py Sat Feb 24 17:49:10 2018 -0600
@@ -214,7 +214,7 @@
if v >= 0:
newrev = repo[v].hex()
else:
- newrev = v
+ newrev = "%d" % v
destnode = repo[destmap[d]].hex()
f.write("%s:%s:%s\n" % (oldrev, newrev, destnode))
repo.ui.debug('rebase status stored\n')
@@ -289,7 +289,7 @@
skipped.add(old)
seen.add(new)
repo.ui.debug('computed skipped revs: %s\n' %
- (' '.join(str(r) for r in sorted(skipped)) or None))
+ (' '.join('%d' % r for r in sorted(skipped)) or ''))
repo.ui.debug('rebase status resumed\n')
self.originalwd = originalwd
@@ -312,10 +312,13 @@
if not self.ui.configbool('experimental', 'rebaseskipobsolete'):
return
obsoleteset = set(obsoleterevs)
- self.obsoletenotrebased, self.obsoletewithoutsuccessorindestination = \
- _computeobsoletenotrebased(self.repo, obsoleteset, destmap)
+ (self.obsoletenotrebased,
+ self.obsoletewithoutsuccessorindestination,
+ obsoleteextinctsuccessors) = _computeobsoletenotrebased(
+ self.repo, obsoleteset, destmap)
skippedset = set(self.obsoletenotrebased)
skippedset.update(self.obsoletewithoutsuccessorindestination)
+ skippedset.update(obsoleteextinctsuccessors)
_checkobsrebase(self.repo, self.ui, obsoleteset, skippedset)
def _prepareabortorcontinue(self, isabort):
@@ -499,7 +502,8 @@
if not self.collapsef:
merging = p2 != nullrev
editform = cmdutil.mergeeditform(merging, 'rebase')
- editor = cmdutil.getcommiteditor(editform=editform, **opts)
+ editor = cmdutil.getcommiteditor(editform=editform,
+ **pycompat.strkwargs(opts))
if self.wctx.isinmemory():
newnode = concludememorynode(repo, rev, p1, p2,
wctx=self.wctx,
@@ -537,7 +541,7 @@
'to commit\n') % (rev, ctx))
self.skipped.add(rev)
self.state[rev] = p1
- ui.debug('next revision set to %s\n' % p1)
+ ui.debug('next revision set to %d\n' % p1)
else:
ui.status(_('already rebased %s as %s\n') %
(desc, repo[self.state[rev]]))
@@ -585,7 +589,7 @@
date=self.date)
if newnode is not None:
newrev = repo[newnode].rev()
- for oldrev in self.state.iterkeys():
+ for oldrev in self.state:
self.state[oldrev] = newrev
if 'qtip' in repo.tags():
@@ -1220,7 +1224,7 @@
`rebaseobsrevs`: set of obsolete revision in source
`rebaseobsskipped`: set of revisions from source skipped because they have
- successors in destination
+ successors in destination or no non-obsolete successor.
"""
# Obsolete node with successors not in dest leads to divergence
divergenceok = ui.configbool('experimental',
@@ -1436,7 +1440,7 @@
def isagitpatch(repo, patchname):
'Return true if the given patch is in git format'
mqpatch = os.path.join(repo.mq.path, patchname)
- for line in patch.linereader(file(mqpatch, 'rb')):
+ for line in patch.linereader(open(mqpatch, 'rb')):
if line.startswith('diff --git'):
return True
return False
@@ -1646,7 +1650,9 @@
roots = list(repo.set('roots(%ld)', sortedsrc[0]))
if not roots:
raise error.Abort(_('no matching revisions'))
- roots.sort()
+ def revof(r):
+ return r.rev()
+ roots = sorted(roots, key=revof)
state = dict.fromkeys(rebaseset, revtodo)
emptyrebase = (len(sortedsrc) == 1)
for root in roots:
@@ -1785,25 +1791,34 @@
`obsoletewithoutsuccessorindestination` is a set with obsolete revisions
without a successor in destination.
+
+ `obsoleteextinctsuccessors` is a set of obsolete revisions with only
+ obsolete successors.
"""
obsoletenotrebased = {}
obsoletewithoutsuccessorindestination = set([])
+ obsoleteextinctsuccessors = set([])
assert repo.filtername is None
cl = repo.changelog
nodemap = cl.nodemap
+ extinctnodes = set(cl.node(r) for r in repo.revs('extinct()'))
for srcrev in rebaseobsrevs:
srcnode = cl.node(srcrev)
destnode = cl.node(destmap[srcrev])
# XXX: more advanced APIs are required to handle split correctly
- successors = list(obsutil.allsuccessors(repo.obsstore, [srcnode]))
- if len(successors) == 1:
- # obsutil.allsuccessors includes node itself. When the list only
- # contains one element, it means there are no successors.
+ successors = set(obsutil.allsuccessors(repo.obsstore, [srcnode]))
+ # obsutil.allsuccessors includes node itself
+ successors.remove(srcnode)
+ if successors.issubset(extinctnodes):
+ # all successors are extinct
+ obsoleteextinctsuccessors.add(srcrev)
+ if not successors:
+ # no successor
obsoletenotrebased[srcrev] = None
else:
for succnode in successors:
- if succnode == srcnode or succnode not in nodemap:
+ if succnode not in nodemap:
continue
if cl.isancestor(succnode, destnode):
obsoletenotrebased[srcrev] = nodemap[succnode]
@@ -1812,11 +1827,14 @@
# If 'srcrev' has a successor in rebase set but none in
# destination (which would be caught above), we shall skip it
# and its descendants to avoid divergence.
- if any(nodemap[s] in destmap
- for s in successors if s != srcnode):
+ if any(nodemap[s] in destmap for s in successors):
obsoletewithoutsuccessorindestination.add(srcrev)
- return obsoletenotrebased, obsoletewithoutsuccessorindestination
+ return (
+ obsoletenotrebased,
+ obsoletewithoutsuccessorindestination,
+ obsoleteextinctsuccessors,
+ )
def summaryhook(ui, repo):
if not repo.vfs.exists('rebasestate'):
--- a/hgext/relink.py Fri Feb 23 17:57:04 2018 -0800
+++ b/hgext/relink.py Sat Feb 24 17:49:10 2018 -0600
@@ -168,8 +168,8 @@
source = os.path.join(src, f)
tgt = os.path.join(dst, f)
# Binary mode, so that read() works correctly, especially on Windows
- sfp = file(source, 'rb')
- dfp = file(tgt, 'rb')
+ sfp = open(source, 'rb')
+ dfp = open(tgt, 'rb')
sin = sfp.read(CHUNKLEN)
while sin:
din = dfp.read(CHUNKLEN)
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/hgext/remotenames.py Sat Feb 24 17:49:10 2018 -0600
@@ -0,0 +1,307 @@
+# remotenames.py - extension to display remotenames
+#
+# Copyright 2017 Augie Fackler <raf@durin42.com>
+# Copyright 2017 Sean Farley <sean@farley.io>
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2 or any later version.
+
+""" showing remotebookmarks and remotebranches in UI
+
+By default both remotebookmarks and remotebranches are turned on. The config
+knobs to control them individually are as follows.
+
+Config options to tweak the default behaviour:
+
+remotenames.bookmarks
+ Boolean value to enable or disable showing of remotebookmarks
+
+remotenames.branches
+ Boolean value to enable or disable showing of remotebranches
+"""
+
+from __future__ import absolute_import
+
+import collections
+
+from mercurial.i18n import _
+
+from mercurial.node import (
+ bin,
+)
+from mercurial import (
+ logexchange,
+ namespaces,
+ pycompat,
+ registrar,
+ revsetlang,
+ smartset,
+ templatekw,
+)
+
+# Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for
+# extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
+# be specifying the version(s) of Mercurial they are tested with, or
+# leave the attribute unspecified.
+testedwith = 'ships-with-hg-core'
+
+configtable = {}
+configitem = registrar.configitem(configtable)
+templatekeyword = registrar.templatekeyword()
+revsetpredicate = registrar.revsetpredicate()
+
+configitem('remotenames', 'bookmarks',
+ default=True,
+)
+configitem('remotenames', 'branches',
+ default=True,
+)
+
+class lazyremotenamedict(collections.MutableMapping):
+ """
+ Read-only dict-like class to lazily resolve remotename entries
+
+ We do this because remotenames startup was slow.
+ We lazily read the remotenames file once to figure out the potential entries
+ and store them in self.potentialentries. Then, when asked to resolve an
+ entry, if it is not in self.potentialentries, it isn't there; if it is,
+ we resolve it and store the result in self.cache. The one case where we
+ cannot be lazy is when we are asked for all the entries (keys).
+ """
+ def __init__(self, kind, repo):
+ self.cache = {}
+ self.potentialentries = {}
+ self._kind = kind # bookmarks or branches
+ self._repo = repo
+ self.loaded = False
+
+ def _load(self):
+ """ Read the remotenames file, store entries matching selected kind """
+ self.loaded = True
+ repo = self._repo
+ for node, rpath, rname in logexchange.readremotenamefile(repo,
+ self._kind):
+ name = rpath + '/' + rname
+ self.potentialentries[name] = (node, rpath, name)
+
+ def _resolvedata(self, potentialentry):
+ """ Check that the node for potentialentry exists and return it """
+ if potentialentry not in self.potentialentries:
+ return None
+ node, remote, name = self.potentialentries[potentialentry]
+ repo = self._repo
+ binnode = bin(node)
+ # if the node doesn't exist, skip it
+ try:
+ repo.changelog.rev(binnode)
+ except LookupError:
+ return None
+ # Skip closed branches
+ if (self._kind == 'branches' and repo[binnode].closesbranch()):
+ return None
+ return [binnode]
+
+ def __getitem__(self, key):
+ if not self.loaded:
+ self._load()
+ val = self._fetchandcache(key)
+ if val is not None:
+ return val
+ else:
+ raise KeyError()
+
+ def __iter__(self):
+ return iter(self.potentialentries)
+
+ def __len__(self):
+ return len(self.potentialentries)
+
+ def __setitem__(self, key, value):
+ raise NotImplementedError
+
+ def __delitem__(self, key):
+ raise NotImplementedError
+
+ def _fetchandcache(self, key):
+ if key in self.cache:
+ return self.cache[key]
+ val = self._resolvedata(key)
+ if val is not None:
+ self.cache[key] = val
+ return val
+ else:
+ return None
+
+ def keys(self):
+ """ Get a list of bookmark or branch names """
+ if not self.loaded:
+ self._load()
+ return self.potentialentries.keys()
+
+ def iteritems(self):
+ """ Iterate over (name, node) tuples """
+
+ if not self.loaded:
+ self._load()
+
+ for k, vtup in self.potentialentries.iteritems():
+ yield (k, [bin(vtup[0])])
+
+class remotenames(dict):
+ """
+ This class encapsulates all the remotenames state. It also contains
+ methods to access that state in convenient ways. Remotenames are lazily
+ loaded. Whenever client code needs to ensure the freshest copy of
+ remotenames, use the `clearnames` method to force an eventual load.
+ """
+
+ def __init__(self, repo, *args):
+ dict.__init__(self, *args)
+ self._repo = repo
+ self.clearnames()
+
+ def clearnames(self):
+ """ Clear all remote names state """
+ self['bookmarks'] = lazyremotenamedict("bookmarks", self._repo)
+ self['branches'] = lazyremotenamedict("branches", self._repo)
+ self._invalidatecache()
+
+ def _invalidatecache(self):
+ self._nodetobmarks = None
+ self._nodetobranch = None
+
+ def bmarktonodes(self):
+ return self['bookmarks']
+
+ def nodetobmarks(self):
+ if not self._nodetobmarks:
+ bmarktonodes = self.bmarktonodes()
+ self._nodetobmarks = {}
+ for name, node in bmarktonodes.iteritems():
+ self._nodetobmarks.setdefault(node[0], []).append(name)
+ return self._nodetobmarks
+
+ def branchtonodes(self):
+ return self['branches']
+
+ def nodetobranch(self):
+ if not self._nodetobranch:
+ branchtonodes = self.branchtonodes()
+ self._nodetobranch = {}
+ for name, nodes in branchtonodes.iteritems():
+ for node in nodes:
+ self._nodetobranch.setdefault(node, []).append(name)
+ return self._nodetobranch
+
+def reposetup(ui, repo):
+ if not repo.local():
+ return
+
+ repo._remotenames = remotenames(repo)
+ ns = namespaces.namespace
+
+ if ui.configbool('remotenames', 'bookmarks'):
+ remotebookmarkns = ns(
+ 'remotebookmarks',
+ templatename='remotebookmarks',
+ colorname='remotebookmark',
+ logfmt='remote bookmark: %s\n',
+ listnames=lambda repo: repo._remotenames.bmarktonodes().keys(),
+ namemap=lambda repo, name:
+ repo._remotenames.bmarktonodes().get(name, []),
+ nodemap=lambda repo, node:
+ repo._remotenames.nodetobmarks().get(node, []))
+ repo.names.addnamespace(remotebookmarkns)
+
+ if ui.configbool('remotenames', 'branches'):
+ remotebranchns = ns(
+ 'remotebranches',
+ templatename='remotebranches',
+ colorname='remotebranch',
+ logfmt='remote branch: %s\n',
+ listnames=lambda repo: repo._remotenames.branchtonodes().keys(),
+ namemap=lambda repo, name:
+ repo._remotenames.branchtonodes().get(name, []),
+ nodemap=lambda repo, node:
+ repo._remotenames.nodetobranch().get(node, []))
+ repo.names.addnamespace(remotebranchns)
+
+@templatekeyword('remotenames')
+def remotenameskw(**args):
+ """:remotenames: List of strings. List of remote names associated with the
+ changeset.
+ """
+ args = pycompat.byteskwargs(args)
+ repo, ctx = args['repo'], args['ctx']
+
+ remotenames = []
+ if 'remotebookmarks' in repo.names:
+ remotenames = repo.names['remotebookmarks'].names(repo, ctx.node())
+
+ if 'remotebranches' in repo.names:
+ remotenames += repo.names['remotebranches'].names(repo, ctx.node())
+
+ return templatekw.showlist('remotename', remotenames, args,
+ plural='remotenames')
+
+@templatekeyword('remotebookmarks')
+def remotebookmarkskw(**args):
+ """:remotebookmarks: List of strings. List of remote bookmarks associated
+ with the changeset.
+ """
+ args = pycompat.byteskwargs(args)
+ repo, ctx = args['repo'], args['ctx']
+
+ remotebmarks = []
+ if 'remotebookmarks' in repo.names:
+ remotebmarks = repo.names['remotebookmarks'].names(repo, ctx.node())
+
+ return templatekw.showlist('remotebookmark', remotebmarks, args,
+ plural='remotebookmarks')
+
+@templatekeyword('remotebranches')
+def remotebrancheskw(**args):
+ """:remotebranches: List of strings. List of remote branches associated
+ with the changeset.
+ """
+ args = pycompat.byteskwargs(args)
+ repo, ctx = args['repo'], args['ctx']
+
+ remotebranches = []
+ if 'remotebranches' in repo.names:
+ remotebranches = repo.names['remotebranches'].names(repo, ctx.node())
+
+ return templatekw.showlist('remotebranch', remotebranches, args,
+ plural='remotebranches')
+
+def _revsetutil(repo, subset, x, rtypes):
+ """utility function to return a set of revs based on the rtypes"""
+
+ revs = set()
+ cl = repo.changelog
+ for rtype in rtypes:
+ if rtype in repo.names:
+ ns = repo.names[rtype]
+ for name in ns.listnames(repo):
+ revs.update(ns.nodes(repo, name))
+
+ results = (cl.rev(n) for n in revs if cl.hasnode(n))
+ return subset & smartset.baseset(sorted(results))
+
+@revsetpredicate('remotenames()')
+def remotenamesrevset(repo, subset, x):
+ """All changesets which have a remotename on them."""
+ revsetlang.getargs(x, 0, 0, _("remotenames takes no arguments"))
+ return _revsetutil(repo, subset, x, ('remotebookmarks', 'remotebranches'))
+
+@revsetpredicate('remotebranches()')
+def remotebranchesrevset(repo, subset, x):
+ """All changesets which are branch heads on remotes."""
+ revsetlang.getargs(x, 0, 0, _("remotebranches takes no arguments"))
+ return _revsetutil(repo, subset, x, ('remotebranches',))
+
+@revsetpredicate('remotebookmarks()')
+def remotebmarksrevset(repo, subset, x):
+ """All changesets which have bookmarks on remotes."""
+ revsetlang.getargs(x, 0, 0, _("remotebookmarks takes no arguments"))
+ return _revsetutil(repo, subset, x, ('remotebookmarks',))
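
The predicates above all funnel through _revsetutil(), and they only return results because reposetup() registered the 'remotebookmarks' and 'remotebranches' namespaces. A minimal sketch of that registration pattern, hedged: it is not part of this patch, and the 'mynames' namespace and _lookup() helper are invented for illustration; only namespaces.namespace and repo.names.addnamespace come from the code above.

    from mercurial import namespaces

    def _lookup(repo):
        # hypothetical {name: [node, ...]} mapping; a real extension would
        # compute this from repository data, as remotenames does above
        return {}

    def reposetup(ui, repo):
        if not repo.local():
            return
        ns = namespaces.namespace(
            'mynames',
            templatename='mynames',
            logfmt='my name: %s\n',
            listnames=lambda repo: _lookup(repo).keys(),
            namemap=lambda repo, name: _lookup(repo).get(name, []),
            nodemap=lambda repo, node: [name for name, nodes
                                        in _lookup(repo).items()
                                        if node in nodes])
        repo.names.addnamespace(ns)

Once registered, a namespace feeds 'hg log' output and revsets built on it, e.g. hg log -r 'remotebranches()' with the predicates defined above.
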
--- a/hgext/share.py Fri Feb 23 17:57:04 2018 -0800
+++ b/hgext/share.py Sat Feb 24 17:49:10 2018 -0600
@@ -52,9 +52,6 @@
util,
)
-repository = hg.repository
-parseurl = hg.parseurl
-
cmdtable = {}
command = registrar.command(cmdtable)
# Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for
@@ -135,27 +132,9 @@
return False
return hg.sharedbookmarks in shared
-def _getsrcrepo(repo):
- """
- Returns the source repository object for a given shared repository.
- If repo is not a shared repository, return None.
- """
- if repo.sharedpath == repo.path:
- return None
-
- if util.safehasattr(repo, 'srcrepo') and repo.srcrepo:
- return repo.srcrepo
-
- # the sharedpath always ends in the .hg; we want the path to the repo
- source = repo.vfs.split(repo.sharedpath)[0]
- srcurl, branches = parseurl(source)
- srcrepo = repository(repo.ui, srcurl)
- repo.srcrepo = srcrepo
- return srcrepo
-
def getbkfile(orig, repo):
if _hassharedbookmarks(repo):
- srcrepo = _getsrcrepo(repo)
+ srcrepo = hg.sharedreposource(repo)
if srcrepo is not None:
# just orig(srcrepo) doesn't work as expected, because
# HG_PENDING refers repo.root.
@@ -186,7 +165,7 @@
orig(self, tr)
if _hassharedbookmarks(self._repo):
- srcrepo = _getsrcrepo(self._repo)
+ srcrepo = hg.sharedreposource(self._repo)
if srcrepo is not None:
category = 'share-bookmarks'
tr.addpostclose(category, lambda tr: self._writerepo(srcrepo))
@@ -196,6 +175,6 @@
orig(self, repo)
if _hassharedbookmarks(self._repo):
- srcrepo = _getsrcrepo(self._repo)
+ srcrepo = hg.sharedreposource(self._repo)
if srcrepo is not None:
orig(self, srcrepo)
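
These hunks replace the local _getsrcrepo() with hg.sharedreposource(), which is not part of this excerpt. A sketch of what that helper presumably provides, reconstructed from the _getsrcrepo() body deleted above; the in-core implementation may differ in caching or naming.

    from mercurial import hg, util

    def sharedreposource(repo):
        '''Return the source repository of a shared repository, or None.'''
        if repo.sharedpath == repo.path:
            return None                          # not a share at all
        if util.safehasattr(repo, 'srcrepo') and repo.srcrepo:
            return repo.srcrepo                  # cached from an earlier call
        # sharedpath always ends in the .hg directory; we want the repo root
        source = repo.vfs.split(repo.sharedpath)[0]
        srcurl, branches = hg.parseurl(source)
        srcrepo = hg.repository(repo.ui, srcurl)
        repo.srcrepo = srcrepo
        return srcrepo
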
--- a/hgext/shelve.py Fri Feb 23 17:57:04 2018 -0800
+++ b/hgext/shelve.py Sat Feb 24 17:49:10 2018 -0600
@@ -271,7 +271,7 @@
"activebook": activebook or cls._noactivebook
}
scmutil.simplekeyvaluefile(repo.vfs, cls._filename)\
- .write(info, firstline=str(cls._version))
+ .write(info, firstline=("%d" % cls._version))
@classmethod
def clear(cls, repo):
@@ -619,7 +619,7 @@
repo.vfs.rename('unshelverebasestate', 'rebasestate')
try:
rebase.rebase(ui, repo, **{
- 'abort' : True
+ r'abort' : True
})
except Exception:
repo.vfs.rename('rebasestate', 'unshelverebasestate')
@@ -648,7 +648,7 @@
ui.pushbuffer(True)
cmdutil.revert(ui, repo, shelvectx, repo.dirstate.parents(),
*pathtofiles(repo, files),
- **{'no_backup': True})
+ **{r'no_backup': True})
ui.popbuffer()
def restorebranch(ui, repo, branchtorestore):
@@ -681,7 +681,7 @@
repo.vfs.rename('unshelverebasestate', 'rebasestate')
try:
rebase.rebase(ui, repo, **{
- 'continue' : True
+ r'continue' : True
})
except Exception:
repo.vfs.rename('rebasestate', 'unshelverebasestate')
@@ -744,10 +744,10 @@
ui.status(_('rebasing shelved changes\n'))
try:
rebase.rebase(ui, repo, **{
- 'rev': [shelvectx.rev()],
- 'dest': str(tmpwctx.rev()),
- 'keep': True,
- 'tool': opts.get('tool', ''),
+ r'rev': [shelvectx.rev()],
+ r'dest': str(tmpwctx.rev()),
+ r'keep': True,
+ r'tool': opts.get('tool', ''),
})
except error.InterventionRequired:
tr.close()
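
The r'' prefixes added throughout this file are a Python 3 porting detail: Mercurial's Python 3 source transformer rewrites unprefixed string literals to bytes, and bytes are not accepted as **kwargs keys, so any dictionary key that gets **-expanded must be written as a native str. A self-contained illustration (not Mercurial code):

    def f(**kwargs):
        return kwargs

    f(**{r'keep': True})    # works on Python 2 and 3: key is a native str
    f(**{b'keep': True})    # TypeError on Python 3: keywords must be strings
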
--- a/hgext/show.py Fri Feb 23 17:57:04 2018 -0800
+++ b/hgext/show.py Sat Feb 24 17:49:10 2018 -0600
@@ -39,6 +39,7 @@
error,
formatter,
graphmod,
+ logcmdutil,
phases,
pycompat,
registrar,
@@ -125,7 +126,7 @@
ui.write('\n')
for name, func in sorted(views.items()):
- ui.write(('%s\n') % func.__doc__)
+ ui.write(('%s\n') % pycompat.sysbytes(func.__doc__))
ui.write('\n')
raise error.Abort(_('no view requested'),
@@ -148,7 +149,7 @@
elif fn._csettopic:
ref = 'show%s' % fn._csettopic
spec = formatter.lookuptemplate(ui, ref, template)
- displayer = cmdutil.changeset_templater(ui, repo, spec, buffered=True)
+ displayer = logcmdutil.changesettemplater(ui, repo, spec, buffered=True)
return fn(ui, repo, displayer)
else:
return fn(ui, repo)
@@ -409,8 +410,8 @@
revdag = graphmod.dagwalker(repo, revs)
ui.setconfig('experimental', 'graphshorten', True)
- cmdutil.displaygraph(ui, repo, revdag, displayer, graphmod.asciiedges,
- props={'nodelen': nodelen})
+ logcmdutil.displaygraph(ui, repo, revdag, displayer, graphmod.asciiedges,
+ props={'nodelen': nodelen})
def extsetup(ui):
# Alias `hg <prefix><view>` to `hg show <view>`.
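
This file (and transplant.py further down) switches from the display helpers that used to live in cmdutil to the new logcmdutil module; the displayer protocol itself, show() followed by close(), is unchanged. A hedged usage sketch of the relocated entry point, with a throwaway options dict:

    from mercurial import logcmdutil

    displayer = logcmdutil.changesetdisplayer(ui, repo, {})
    displayer.show(repo['tip'])
    displayer.close()

changesettemplater(ui, repo, spec, buffered=True), used above for templated output, follows the same convention but takes a template spec instead of an options dict.
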
--- a/hgext/sparse.py Fri Feb 23 17:57:04 2018 -0800
+++ b/hgext/sparse.py Sat Feb 24 17:49:10 2018 -0600
@@ -75,12 +75,12 @@
from mercurial.i18n import _
from mercurial import (
- cmdutil,
commands,
dirstate,
error,
extensions,
hg,
+ logcmdutil,
match as matchmod,
pycompat,
registrar,
@@ -126,7 +126,7 @@
entry[1].append(('', 'sparse', None,
"limit to changesets affecting the sparse checkout"))
- def _logrevs(orig, repo, opts):
+ def _initialrevs(orig, repo, opts):
revs = orig(repo, opts)
if opts.get('sparse'):
sparsematch = sparse.matcher(repo)
@@ -135,7 +135,7 @@
return any(f for f in ctx.files() if sparsematch(f))
revs = revs.filter(ctxmatch)
return revs
- extensions.wrapfunction(cmdutil, '_logrevs', _logrevs)
+ extensions.wrapfunction(logcmdutil, '_initialrevs', _initialrevs)
def _clonesparsecmd(orig, ui, repo, *args, **opts):
include_pat = opts.get('include')
@@ -194,7 +194,11 @@
"""
def walk(orig, self, match, subrepos, unknown, ignored, full=True):
- match = matchmod.intersectmatchers(match, self._sparsematcher)
+ # hack: do not exclude explicitly-specified paths, so that a warning
+ # can still be issued for them later, e.g. in dirstate.add()
+ em = matchmod.exact(match._root, match._cwd, match.files())
+ sm = matchmod.unionmatcher([self._sparsematcher, em])
+ match = matchmod.intersectmatchers(match, sm)
return orig(self, match, subrepos, unknown, ignored, full)
extensions.wrapfunction(dirstate.dirstate, 'walk', walk)
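
The walk() wrapper above unions an exact matcher over the explicitly named files with the sparse matcher, so paths outside the sparse profile still come back from the walk and can be warned about later. The same composition, pulled out into a standalone helper for clarity; the helper name is invented, the matchmod calls are the ones used in the hunk:

    from mercurial import match as matchmod

    def widenwalkmatch(match, sparsematcher):
        # re-admit explicitly requested files the sparse matcher would drop
        em = matchmod.exact(match._root, match._cwd, match.files())
        # union: sparse profile OR explicitly named; then AND the original
        sm = matchmod.unionmatcher([sparsematcher, em])
        return matchmod.intersectmatchers(match, sm)
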
--- a/hgext/strip.py Fri Feb 23 17:57:04 2018 -0800
+++ b/hgext/strip.py Sat Feb 24 17:49:10 2018 -0600
@@ -181,13 +181,10 @@
strippedrevs = revs.union(descendants)
roots = revs.difference(descendants)
- update = False
# if one of the wdir parent is stripped we'll need
# to update away to an earlier revision
- for p in repo.dirstate.parents():
- if p != nullid and cl.rev(p) in strippedrevs:
- update = True
- break
+ update = any(p != nullid and cl.rev(p) in strippedrevs
+ for p in repo.dirstate.parents())
rootnodes = set(cl.node(r) for r in roots)
@@ -215,7 +212,7 @@
# only reset the dirstate for files that would actually change
# between the working context and uctx
- descendantrevs = repo.revs("%s::." % uctx.rev())
+ descendantrevs = repo.revs(b"%d::.", uctx.rev())
changedfiles = []
for rev in descendantrevs:
# blindly reset the files, regardless of what actually changed
--- a/hgext/transplant.py Fri Feb 23 17:57:04 2018 -0800
+++ b/hgext/transplant.py Sat Feb 24 17:49:10 2018 -0600
@@ -24,6 +24,7 @@
error,
exchange,
hg,
+ logcmdutil,
match,
merge,
node as nodemod,
@@ -119,7 +120,8 @@
opener=self.opener)
def getcommiteditor():
editform = cmdutil.mergeeditform(repo[None], 'transplant')
- return cmdutil.getcommiteditor(editform=editform, **opts)
+ return cmdutil.getcommiteditor(editform=editform,
+ **pycompat.strkwargs(opts))
self.getcommiteditor = getcommiteditor
def applied(self, repo, node, parent):
@@ -160,7 +162,7 @@
tr = repo.transaction('transplant')
for rev in revs:
node = revmap[rev]
- revstr = '%s:%s' % (rev, nodemod.short(node))
+ revstr = '%d:%s' % (rev, nodemod.short(node))
if self.applied(repo, node, p1):
self.ui.warn(_('skipping already applied revision %s\n') %
@@ -194,7 +196,7 @@
skipmerge = False
if parents[1] != revlog.nullid:
if not opts.get('parent'):
- self.ui.note(_('skipping merge changeset %s:%s\n')
+ self.ui.note(_('skipping merge changeset %d:%s\n')
% (rev, nodemod.short(node)))
skipmerge = True
else:
@@ -210,7 +212,7 @@
patchfile = None
else:
fd, patchfile = tempfile.mkstemp(prefix='hg-transplant-')
- fp = os.fdopen(fd, pycompat.sysstr('w'))
+ fp = os.fdopen(fd, pycompat.sysstr('wb'))
gen = patch.diff(source, parent, node, opts=diffopts)
for chunk in gen:
fp.write(chunk)
@@ -258,7 +260,7 @@
self.ui.status(_('filtering %s\n') % patchfile)
user, date, msg = (changelog[1], changelog[2], changelog[4])
fd, headerfile = tempfile.mkstemp(prefix='hg-transplant-')
- fp = os.fdopen(fd, pycompat.sysstr('w'))
+ fp = os.fdopen(fd, pycompat.sysstr('wb'))
fp.write("# HG changeset patch\n")
fp.write("# User %s\n" % user)
fp.write("# Date %d %d\n" % date)
@@ -273,7 +275,7 @@
},
onerr=error.Abort, errprefix=_('filter failed'),
blockedtag='transplant_filter')
- user, date, msg = self.parselog(file(headerfile))[1:4]
+ user, date, msg = self.parselog(open(headerfile, 'rb'))[1:4]
finally:
os.unlink(headerfile)
@@ -309,7 +311,7 @@
p1 = repo.dirstate.p1()
p2 = node
self.log(user, date, message, p1, p2, merge=merge)
- self.ui.write(str(inst) + '\n')
+ self.ui.write(util.forcebytestr(inst) + '\n')
raise TransplantError(_('fix up the working directory and run '
'hg transplant --continue'))
else:
@@ -501,7 +503,7 @@
def browserevs(ui, repo, nodes, opts):
'''interactively transplant changesets'''
- displayer = cmdutil.show_changeset(ui, repo, opts)
+ displayer = logcmdutil.changesetdisplayer(ui, repo, opts)
transplants = []
merges = []
prompt = _('apply changeset? [ynmpcq?]:'
@@ -646,6 +648,7 @@
raise error.Abort(_('--all is incompatible with a '
'revision list'))
+ opts = pycompat.byteskwargs(opts)
checkopts(opts, revs)
if not opts.get('log'):
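
transplant now follows the option-handling convention used across this series: **opts arrive with native-str keys, pycompat.byteskwargs() converts them to bytes keys for internal lookups, and pycompat.strkwargs() converts back at any **-expansion boundary. In outline (the command and editform names below are placeholders, not part of the patch):

    from mercurial import cmdutil, pycompat

    def mycommand(ui, repo, *pats, **opts):
        opts = pycompat.byteskwargs(opts)     # native-str keys -> bytes keys
        if opts.get('log'):                   # internal lookups use bytes
            ui.status('transplant log requested\n')
        # back to native-str keys when re-expanding into another API
        return cmdutil.getcommiteditor(editform='transplant.mycommand',
                                       **pycompat.strkwargs(opts))
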
--- a/mercurial/archival.py Fri Feb 23 17:57:04 2018 -0800
+++ b/mercurial/archival.py Sat Feb 24 17:49:10 2018 -0600
@@ -21,6 +21,7 @@
error,
formatter,
match as matchmod,
+ scmutil,
util,
vfs as vfsmod,
)
@@ -76,29 +77,27 @@
return repo[rev]
return repo['null']
+# {tags} on ctx includes local tags and 'tip', with no current way to limit
+# that to global tags. Therefore, use {latesttag} as a substitute when
+# the distance is 0, since that will be the list of global tags on ctx.
+_defaultmetatemplate = br'''
+repo: {root}
+node: {ifcontains(rev, revset("wdir()"), "{p1node}{dirty}", "{node}")}
+branch: {branch|utf8}
+{ifeq(latesttagdistance, 0, join(latesttag % "tag: {tag}", "\n"),
+ separate("\n",
+ join(latesttag % "latesttag: {tag}", "\n"),
+ "latesttagdistance: {latesttagdistance}",
+ "changessincelatesttag: {changessincelatesttag}"))}
+'''[1:] # drop leading '\n'
+
def buildmetadata(ctx):
'''build content of .hg_archival.txt'''
repo = ctx.repo()
- default = (
- r'repo: {root}\n'
- r'node: {ifcontains(rev, revset("wdir()"),'
- r'"{p1node}{dirty}", "{node}")}\n'
- r'branch: {branch|utf8}\n'
-
- # {tags} on ctx includes local tags and 'tip', with no current way to
- # limit that to global tags. Therefore, use {latesttag} as a substitute
- # when the distance is 0, since that will be the list of global tags on
- # ctx.
- r'{ifeq(latesttagdistance, 0, latesttag % "tag: {tag}\n",'
- r'"{latesttag % "latesttag: {tag}\n"}'
- r'latesttagdistance: {latesttagdistance}\n'
- r'changessincelatesttag: {changessincelatesttag}\n")}'
- )
-
opts = {
'template': repo.ui.config('experimental', 'archivemetatemplate',
- default)
+ _defaultmetatemplate)
}
out = util.stringio()
@@ -219,7 +218,7 @@
dest.tell()
except (AttributeError, IOError):
dest = tellable(dest)
- self.z = zipfile.ZipFile(dest, 'w',
+ self.z = zipfile.ZipFile(dest, r'w',
compress and zipfile.ZIP_DEFLATED or
zipfile.ZIP_STORED)
@@ -339,6 +338,7 @@
total = len(files)
if total:
files.sort()
+ scmutil.fileprefetchhooks(repo, ctx, files)
repo.ui.progress(_('archiving'), 0, unit=_('files'), total=total)
for i, f in enumerate(files):
ff = ctx.flags(f)
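
Besides the template cleanup, the last hunk above adds a prefetch call right before files are archived. Assuming scmutil.fileprefetchhooks is a util.hooks-style hook list, which is what the call pattern suggests, an extension could subscribe roughly as follows; the extension and function names are invented:

    from mercurial import scmutil

    def _prefetch(repo, ctx, files):
        # a remote-store extension would batch-fetch file contents here
        repo.ui.debug('prefetching %d files for archive\n' % len(files))

    def extsetup(ui):
        scmutil.fileprefetchhooks.add('myext', _prefetch)
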
--- a/mercurial/bookmarks.py Fri Feb 23 17:57:04 2018 -0800
+++ b/mercurial/bookmarks.py Sat Feb 24 17:49:10 2018 -0600
@@ -103,30 +103,21 @@
self._aclean = False
def __setitem__(self, *args, **kwargs):
- msg = ("'bookmarks[name] = node' is deprecated, "
- "use 'bookmarks.applychanges'")
- self._repo.ui.deprecwarn(msg, '4.3')
- self._set(*args, **kwargs)
+ raise error.ProgrammingError("use 'bookmarks.applychanges' instead")
def _set(self, key, value):
self._clean = False
return dict.__setitem__(self, key, value)
def __delitem__(self, key):
- msg = ("'del bookmarks[name]' is deprecated, "
- "use 'bookmarks.applychanges'")
- self._repo.ui.deprecwarn(msg, '4.3')
- self._del(key)
+ raise error.ProgrammingError("use 'bookmarks.applychanges' instead")
def _del(self, key):
self._clean = False
return dict.__delitem__(self, key)
def update(self, *others):
- msg = ("bookmarks.update(...)' is deprecated, "
- "use 'bookmarks.applychanges'")
- self._repo.ui.deprecwarn(msg, '4.5')
- return dict.update(self, *others)
+ raise error.ProgrammingError("use 'bookmarks.applychanges' instead")
def applychanges(self, repo, tr, changes):
"""Apply a list of changes to bookmarks
@@ -146,12 +137,6 @@
bmchanges[name] = (old, node)
self._recordchange(tr)
- def recordchange(self, tr):
- msg = ("'bookmarks.recorchange' is deprecated, "
- "use 'bookmarks.applychanges'")
- self._repo.ui.deprecwarn(msg, '4.3')
- return self._recordchange(tr)
-
def _recordchange(self, tr):
"""record that bookmarks have been changed in a transaction
--- a/mercurial/branchmap.py Fri Feb 23 17:57:04 2018 -0800
+++ b/mercurial/branchmap.py Sat Feb 24 17:49:10 2018 -0600
@@ -18,6 +18,7 @@
from . import (
encoding,
error,
+ pycompat,
scmutil,
util,
)
@@ -52,18 +53,19 @@
filteredhash=filteredhash)
if not partial.validfor(repo):
# invalidate the cache
- raise ValueError('tip differs')
+ raise ValueError(r'tip differs')
cl = repo.changelog
for l in lines:
if not l:
continue
node, state, label = l.split(" ", 2)
if state not in 'oc':
- raise ValueError('invalid branch state')
+ raise ValueError(r'invalid branch state')
label = encoding.tolocal(label.strip())
node = bin(node)
if not cl.hasnode(node):
- raise ValueError('node %s does not exist' % hex(node))
+ raise ValueError(
+ r'node %s does not exist' % pycompat.sysstr(hex(node)))
partial.setdefault(label, []).append(node)
if state == 'c':
partial._closednodes.add(node)
@@ -73,7 +75,7 @@
if repo.filtername is not None:
msg += ' (%s)' % repo.filtername
msg += ': %s\n'
- repo.ui.debug(msg % inst)
+ repo.ui.debug(msg % pycompat.bytestr(inst))
partial = None
return partial
@@ -253,7 +255,8 @@
repo.filtername, len(self), nodecount)
except (IOError, OSError, error.Abort) as inst:
# Abort may be raised by read only opener, so log and continue
- repo.ui.debug("couldn't write branch cache: %s\n" % inst)
+ repo.ui.debug("couldn't write branch cache: %s\n" %
+ util.forcebytestr(inst))
def update(self, repo, revgen):
"""Given a branchhead cache, self, that may have extra nodes or be
@@ -375,7 +378,7 @@
self._rbcrevs[:] = data
except (IOError, OSError) as inst:
repo.ui.debug("couldn't read revision branch cache: %s\n" %
- inst)
+ util.forcebytestr(inst))
# remember number of good records on disk
self._rbcrevslen = min(len(self._rbcrevs) // _rbcrecsize,
len(repo.changelog))
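
The util.forcebytestr() and pycompat.bytestr() wrappers above exist because exception messages are native str on Python 3 while ui.debug() expects bytes. Reduced to its essence (the helper name is illustrative):

    from mercurial import util

    def _debugcachefailure(ui, inst):
        # '%s' % inst would mix str into a bytes format string on Python 3;
        # force the exception text to bytes first
        ui.debug("couldn't read revision branch cache: %s\n" %
                 util.forcebytestr(inst))
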
--- a/mercurial/bundle2.py Fri Feb 23 17:57:04 2018 -0800
+++ b/mercurial/bundle2.py Sat Feb 24 17:49:10 2018 -0600
@@ -1729,7 +1729,7 @@
extrakwargs = {}
targetphase = inpart.params.get('targetphase')
if targetphase is not None:
- extrakwargs['targetphase'] = int(targetphase)
+ extrakwargs[r'targetphase'] = int(targetphase)
ret = _processchangegroup(op, cg, tr, 'bundle2', 'bundle2',
expectedtotal=nbchangesets, **extrakwargs)
if op.reply is not None:
@@ -2040,14 +2040,15 @@
allhooks.append(hookargs)
for hookargs in allhooks:
- op.repo.hook('prepushkey', throw=True, **hookargs)
+ op.repo.hook('prepushkey', throw=True,
+ **pycompat.strkwargs(hookargs))
bookstore.applychanges(op.repo, op.gettransaction(), changes)
if pushkeycompat:
def runhook():
for hookargs in allhooks:
- op.repo.hook('pushkey', **hookargs)
+ op.repo.hook('pushkey', **pycompat.strkwargs(hookargs))
op.repo._afterlock(runhook)
elif bookmarksmode == 'records':
--- a/mercurial/bundlerepo.py Fri Feb 23 17:57:04 2018 -0800
+++ b/mercurial/bundlerepo.py Sat Feb 24 17:49:10 2018 -0600
@@ -402,7 +402,7 @@
# manifestlog implementation did not consume the manifests from the
# changegroup (ex: it might be consuming trees from a separate bundle2
# part instead). So we need to manually consume it.
- if 'filestart' not in self.__dict__:
+ if r'filestart' not in self.__dict__:
self._consumemanifest()
return self.filestart
--- a/mercurial/cext/base85.c Fri Feb 23 17:57:04 2018 -0800
+++ b/mercurial/cext/base85.c Sat Feb 24 17:49:10 2018 -0600
@@ -14,8 +14,9 @@
#include "util.h"
-static const char b85chars[] = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ"
- "abcdefghijklmnopqrstuvwxyz!#$%&()*+-;<=>?@^_`{|}~";
+static const char b85chars[] =
+ "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ"
+ "abcdefghijklmnopqrstuvwxyz!#$%&()*+-;<=>?@^_`{|}~";
static char b85dec[256];
static void b85prep(void)
@@ -105,25 +106,25 @@
c = b85dec[(int)*text++] - 1;
if (c < 0)
return PyErr_Format(
- PyExc_ValueError,
- "bad base85 character at position %d",
- (int)i);
+ PyExc_ValueError,
+ "bad base85 character at position %d",
+ (int)i);
acc = acc * 85 + c;
}
if (i++ < len) {
c = b85dec[(int)*text++] - 1;
if (c < 0)
return PyErr_Format(
- PyExc_ValueError,
- "bad base85 character at position %d",
- (int)i);
+ PyExc_ValueError,
+ "bad base85 character at position %d",
+ (int)i);
/* overflow detection: 0xffffffff == "|NsC0",
* "|NsC" == 0x03030303 */
if (acc > 0x03030303 || (acc *= 85) > 0xffffffff - c)
return PyErr_Format(
- PyExc_ValueError,
- "bad base85 sequence at position %d",
- (int)i);
+ PyExc_ValueError,
+ "bad base85 sequence at position %d",
+ (int)i);
acc += c;
}
@@ -145,23 +146,19 @@
static char base85_doc[] = "Base85 Data Encoding";
static PyMethodDef methods[] = {
- {"b85encode", b85encode, METH_VARARGS,
- "Encode text in base85.\n\n"
- "If the second parameter is true, pad the result to a multiple of "
- "five characters.\n"},
- {"b85decode", b85decode, METH_VARARGS, "Decode base85 text.\n"},
- {NULL, NULL}
+ {"b85encode", b85encode, METH_VARARGS,
+ "Encode text in base85.\n\n"
+ "If the second parameter is true, pad the result to a multiple of "
+ "five characters.\n"},
+ {"b85decode", b85decode, METH_VARARGS, "Decode base85 text.\n"},
+ {NULL, NULL},
};
static const int version = 1;
#ifdef IS_PY3K
static struct PyModuleDef base85_module = {
- PyModuleDef_HEAD_INIT,
- "base85",
- base85_doc,
- -1,
- methods
+ PyModuleDef_HEAD_INIT, "base85", base85_doc, -1, methods,
};
PyMODINIT_FUNC PyInit_base85(void)
--- a/mercurial/cext/bdiff.c Fri Feb 23 17:57:04 2018 -0800
+++ b/mercurial/cext/bdiff.c Sat Feb 24 17:49:10 2018 -0600
@@ -19,7 +19,6 @@
#include "bitmanipulation.h"
#include "util.h"
-
static PyObject *blocks(PyObject *self, PyObject *args)
{
PyObject *sa, *sb, *rl = NULL, *m;
@@ -82,9 +81,7 @@
_save = PyEval_SaveThread();
lmax = la > lb ? lb : la;
- for (ia = sa, ib = sb;
- li < lmax && *ia == *ib;
- ++li, ++ia, ++ib)
+ for (ia = sa, ib = sb; li < lmax && *ia == *ib; ++li, ++ia, ++ib)
if (*ia == '\n')
lcommon = li + 1;
/* we can almost add: if (li == lmax) lcommon = li; */
@@ -122,7 +119,8 @@
if (h->a1 != la || h->b1 != lb) {
len = bl[h->b1].l - bl[lb].l;
putbe32((uint32_t)(al[la].l + lcommon - al->l), rb);
- putbe32((uint32_t)(al[h->a1].l + lcommon - al->l), rb + 4);
+ putbe32((uint32_t)(al[h->a1].l + lcommon - al->l),
+ rb + 4);
putbe32((uint32_t)len, rb + 8);
memcpy(rb + 12, bl[lb].l, len);
rb += 12 + len;
@@ -167,8 +165,8 @@
if (c == ' ' || c == '\t' || c == '\r') {
if (!allws && (wlen == 0 || w[wlen - 1] != ' '))
w[wlen++] = ' ';
- } else if (c == '\n' && !allws
- && wlen > 0 && w[wlen - 1] == ' ') {
+ } else if (c == '\n' && !allws && wlen > 0 &&
+ w[wlen - 1] == ' ') {
w[wlen - 1] = '\n';
} else {
w[wlen++] = c;
@@ -182,25 +180,70 @@
return result ? result : PyErr_NoMemory();
}
+static bool sliceintolist(PyObject *list, Py_ssize_t destidx,
+ const char *source, Py_ssize_t len)
+{
+ PyObject *sliced = PyBytes_FromStringAndSize(source, len);
+ if (sliced == NULL)
+ return false;
+ PyList_SET_ITEM(list, destidx, sliced);
+ return true;
+}
+
+static PyObject *splitnewlines(PyObject *self, PyObject *args)
+{
+ const char *text;
+ Py_ssize_t nelts = 0, size, i, start = 0;
+ PyObject *result = NULL;
+
+ if (!PyArg_ParseTuple(args, "s#", &text, &size)) {
+ goto abort;
+ }
+ if (!size) {
+ return PyList_New(0);
+ }
+ /* This loops to size-1 because if the last byte is a newline,
+ * we don't want to perform a split there. */
+ for (i = 0; i < size - 1; ++i) {
+ if (text[i] == '\n') {
+ ++nelts;
+ }
+ }
+ if ((result = PyList_New(nelts + 1)) == NULL)
+ goto abort;
+ nelts = 0;
+ for (i = 0; i < size - 1; ++i) {
+ if (text[i] == '\n') {
+ if (!sliceintolist(result, nelts++, text + start,
+ i - start + 1))
+ goto abort;
+ start = i + 1;
+ }
+ }
+ if (!sliceintolist(result, nelts++, text + start, size - start))
+ goto abort;
+ return result;
+abort:
+ Py_XDECREF(result);
+ return NULL;
+}
static char mdiff_doc[] = "Efficient binary diff.";
static PyMethodDef methods[] = {
- {"bdiff", bdiff, METH_VARARGS, "calculate a binary diff\n"},
- {"blocks", blocks, METH_VARARGS, "find a list of matching lines\n"},
- {"fixws", fixws, METH_VARARGS, "normalize diff whitespaces\n"},
- {NULL, NULL}
+ {"bdiff", bdiff, METH_VARARGS, "calculate a binary diff\n"},
+ {"blocks", blocks, METH_VARARGS, "find a list of matching lines\n"},
+ {"fixws", fixws, METH_VARARGS, "normalize diff whitespaces\n"},
+ {"splitnewlines", splitnewlines, METH_VARARGS,
+ "like str.splitlines, but only split on newlines\n"},
+ {NULL, NULL},
};
-static const int version = 1;
+static const int version = 2;
#ifdef IS_PY3K
static struct PyModuleDef bdiff_module = {
- PyModuleDef_HEAD_INIT,
- "bdiff",
- mdiff_doc,
- -1,
- methods
+ PyModuleDef_HEAD_INIT, "bdiff", mdiff_doc, -1, methods,
};
PyMODINIT_FUNC PyInit_bdiff(void)
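
Alongside the clang-format churn, this file gains a C splitnewlines and bumps the module version to 2 so callers can detect it. Its behaviour matches the pure-Python helper of the same name in mdiff: split only on '\n', keep the terminator, and never emit a trailing empty element. For reference, a Python equivalent of what the C code above implements:

    def splitnewlines(text):
        '''like str.splitlines, but only split on newlines.'''
        lines = [l + '\n' for l in text.split('\n')]
        if lines:
            if lines[-1] == '\n':
                lines.pop()
            else:
                lines[-1] = lines[-1][:-1]
        return lines
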
--- a/mercurial/cext/charencode.c Fri Feb 23 17:57:04 2018 -0800
+++ b/mercurial/cext/charencode.c Sat Feb 24 17:49:10 2018 -0600
@@ -65,7 +65,6 @@
'\x58', '\x59', '\x5a', /* x-z */
'\x7b', '\x7c', '\x7d', '\x7e', '\x7f'
};
-/* clang-format on */
/* 1: no escape, 2: \<c>, 6: \u<x> */
static const uint8_t jsonlentable[256] = {
@@ -102,6 +101,7 @@
'0', '1', '2', '3', '4', '5', '6', '7',
'8', '9', 'a', 'b', 'c', 'd', 'e', 'f',
};
+/* clang-format on */
/*
* Turn a hex-encoded string into binary.
@@ -151,9 +151,8 @@
Py_RETURN_TRUE;
}
-static inline PyObject *_asciitransform(PyObject *str_obj,
- const char table[128],
- PyObject *fallback_fn)
+static inline PyObject *
+_asciitransform(PyObject *str_obj, const char table[128], PyObject *fallback_fn)
{
char *str, *newstr;
Py_ssize_t i, len;
@@ -173,12 +172,12 @@
char c = str[i];
if (c & 0x80) {
if (fallback_fn != NULL) {
- ret = PyObject_CallFunctionObjArgs(fallback_fn,
- str_obj, NULL);
+ ret = PyObject_CallFunctionObjArgs(
+ fallback_fn, str_obj, NULL);
} else {
PyObject *err = PyUnicodeDecodeError_Create(
- "ascii", str, len, i, (i + 1),
- "unexpected code byte");
+ "ascii", str, len, i, (i + 1),
+ "unexpected code byte");
PyErr_SetObject(PyExc_UnicodeDecodeError, err);
Py_XDECREF(err);
}
@@ -220,10 +219,9 @@
Py_ssize_t pos = 0;
const char *table;
- if (!PyArg_ParseTuple(args, "O!O!O!:make_file_foldmap",
- &PyDict_Type, &dmap,
- &PyInt_Type, &spec_obj,
- &PyFunction_Type, &normcase_fallback))
+ if (!PyArg_ParseTuple(args, "O!O!O!:make_file_foldmap", &PyDict_Type,
+ &dmap, &PyInt_Type, &spec_obj, &PyFunction_Type,
+ &normcase_fallback))
goto quit;
spec = (int)PyInt_AS_LONG(spec_obj);
@@ -251,7 +249,7 @@
while (PyDict_Next(dmap, &pos, &k, &v)) {
if (!dirstate_tuple_check(v)) {
PyErr_SetString(PyExc_TypeError,
- "expected a dirstate tuple");
+ "expected a dirstate tuple");
goto quit;
}
@@ -260,10 +258,10 @@
PyObject *normed;
if (table != NULL) {
normed = _asciitransform(k, table,
- normcase_fallback);
+ normcase_fallback);
} else {
normed = PyObject_CallFunctionObjArgs(
- normcase_fallback, k, NULL);
+ normcase_fallback, k, NULL);
}
if (normed == NULL)
@@ -292,13 +290,13 @@
char c = buf[i];
if (c & 0x80) {
PyErr_SetString(PyExc_ValueError,
- "cannot process non-ascii str");
+ "cannot process non-ascii str");
return -1;
}
esclen += jsonparanoidlentable[(unsigned char)c];
if (esclen < 0) {
PyErr_SetString(PyExc_MemoryError,
- "overflow in jsonescapelen");
+ "overflow in jsonescapelen");
return -1;
}
}
@@ -308,7 +306,7 @@
esclen += jsonlentable[(unsigned char)c];
if (esclen < 0) {
PyErr_SetString(PyExc_MemoryError,
- "overflow in jsonescapelen");
+ "overflow in jsonescapelen");
return -1;
}
}
@@ -336,17 +334,17 @@
case '\\':
return '\\';
}
- return '\0'; /* should not happen */
+ return '\0'; /* should not happen */
}
/* convert 'origbuf' to JSON-escaped form 'escbuf'; 'origbuf' should only
include characters mappable by json(paranoid)lentable */
static void encodejsonescape(char *escbuf, Py_ssize_t esclen,
- const char *origbuf, Py_ssize_t origlen,
- bool paranoid)
+ const char *origbuf, Py_ssize_t origlen,
+ bool paranoid)
{
const uint8_t *lentable =
- (paranoid) ? jsonparanoidlentable : jsonlentable;
+ (paranoid) ? jsonparanoidlentable : jsonlentable;
Py_ssize_t i, j;
for (i = 0, j = 0; i < origlen; i++) {
@@ -377,15 +375,15 @@
const char *origbuf;
Py_ssize_t origlen, esclen;
int paranoid;
- if (!PyArg_ParseTuple(args, "O!i:jsonescapeu8fast",
- &PyBytes_Type, &origstr, &paranoid))
+ if (!PyArg_ParseTuple(args, "O!i:jsonescapeu8fast", &PyBytes_Type,
+ &origstr, &paranoid))
return NULL;
origbuf = PyBytes_AS_STRING(origstr);
origlen = PyBytes_GET_SIZE(origstr);
esclen = jsonescapelen(origbuf, origlen, paranoid);
if (esclen < 0)
- return NULL; /* unsupported char found or overflow */
+ return NULL; /* unsupported char found or overflow */
if (origlen == esclen) {
Py_INCREF(origstr);
return origstr;
@@ -395,7 +393,7 @@
if (!escstr)
return NULL;
encodejsonescape(PyBytes_AS_STRING(escstr), esclen, origbuf, origlen,
- paranoid);
+ paranoid);
return escstr;
}
--- a/mercurial/cext/charencode.h Fri Feb 23 17:57:04 2018 -0800
+++ b/mercurial/cext/charencode.h Sat Feb 24 17:49:10 2018 -0600
@@ -25,6 +25,7 @@
PyObject *make_file_foldmap(PyObject *self, PyObject *args);
PyObject *jsonescapeu8fast(PyObject *self, PyObject *args);
+/* clang-format off */
static const int8_t hextable[256] = {
-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
@@ -43,6 +44,7 @@
-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1
};
+/* clang-format on */
static inline int hexdigit(const char *p, Py_ssize_t off)
{
--- a/mercurial/cext/diffhelpers.c Fri Feb 23 17:57:04 2018 -0800
+++ b/mercurial/cext/diffhelpers.c Sat Feb 24 17:49:10 2018 -0600
@@ -16,12 +16,11 @@
static char diffhelpers_doc[] = "Efficient diff parsing";
static PyObject *diffhelpers_Error;
-
/* fixup the last lines of a and b when the patch has no newline at eof */
static void _fix_newline(PyObject *hunk, PyObject *a, PyObject *b)
{
Py_ssize_t hunksz = PyList_Size(hunk);
- PyObject *s = PyList_GET_ITEM(hunk, hunksz-1);
+ PyObject *s = PyList_GET_ITEM(hunk, hunksz - 1);
char *l = PyBytes_AsString(s);
Py_ssize_t alen = PyList_Size(a);
Py_ssize_t blen = PyList_Size(b);
@@ -29,29 +28,28 @@
PyObject *hline;
Py_ssize_t sz = PyBytes_GET_SIZE(s);
- if (sz > 1 && l[sz-2] == '\r')
+ if (sz > 1 && l[sz - 2] == '\r')
/* tolerate CRLF in last line */
sz -= 1;
- hline = PyBytes_FromStringAndSize(l, sz-1);
+ hline = PyBytes_FromStringAndSize(l, sz - 1);
if (!hline) {
return;
}
if (c == ' ' || c == '+') {
PyObject *rline = PyBytes_FromStringAndSize(l + 1, sz - 2);
- PyList_SetItem(b, blen-1, rline);
+ PyList_SetItem(b, blen - 1, rline);
}
if (c == ' ' || c == '-') {
Py_INCREF(hline);
- PyList_SetItem(a, alen-1, hline);
+ PyList_SetItem(a, alen - 1, hline);
}
- PyList_SetItem(hunk, hunksz-1, hline);
+ PyList_SetItem(hunk, hunksz - 1, hline);
}
/* python callable form of _fix_newline */
-static PyObject *
-fix_newline(PyObject *self, PyObject *args)
+static PyObject *fix_newline(PyObject *self, PyObject *args)
{
PyObject *hunk, *a, *b;
if (!PyArg_ParseTuple(args, "OOO", &hunk, &a, &b))
@@ -72,8 +70,7 @@
* The control char from the hunk is saved when inserting into a, but not b
* (for performance while deleting files)
*/
-static PyObject *
-addlines(PyObject *self, PyObject *args)
+static PyObject *addlines(PyObject *self, PyObject *args)
{
PyObject *fp, *hunk, *a, *b, *x;
@@ -83,8 +80,8 @@
Py_ssize_t todoa, todob;
char *s, c;
PyObject *l;
- if (!PyArg_ParseTuple(args, addlines_format,
- &fp, &hunk, &lena, &lenb, &a, &b))
+ if (!PyArg_ParseTuple(args, addlines_format, &fp, &hunk, &lena, &lenb,
+ &a, &b))
return NULL;
while (1) {
@@ -92,7 +89,7 @@
todob = lenb - PyList_Size(b);
num = todoa > todob ? todoa : todob;
if (num == 0)
- break;
+ break;
for (i = 0; i < num; i++) {
x = PyFile_GetLine(fp, 0);
s = PyBytes_AsString(x);
@@ -131,8 +128,7 @@
* a control char at the start of each line, this char is ignored in the
* compare
*/
-static PyObject *
-testhunk(PyObject *self, PyObject *args)
+static PyObject *testhunk(PyObject *self, PyObject *args)
{
PyObject *a, *b;
@@ -158,21 +154,16 @@
}
static PyMethodDef methods[] = {
- {"addlines", addlines, METH_VARARGS, "add lines to a hunk\n"},
- {"fix_newline", fix_newline, METH_VARARGS, "fixup newline counters\n"},
- {"testhunk", testhunk, METH_VARARGS, "test lines in a hunk\n"},
- {NULL, NULL}
-};
+ {"addlines", addlines, METH_VARARGS, "add lines to a hunk\n"},
+ {"fix_newline", fix_newline, METH_VARARGS, "fixup newline counters\n"},
+ {"testhunk", testhunk, METH_VARARGS, "test lines in a hunk\n"},
+ {NULL, NULL}};
static const int version = 1;
#ifdef IS_PY3K
static struct PyModuleDef diffhelpers_module = {
- PyModuleDef_HEAD_INIT,
- "diffhelpers",
- diffhelpers_doc,
- -1,
- methods
+ PyModuleDef_HEAD_INIT, "diffhelpers", diffhelpers_doc, -1, methods,
};
PyMODINIT_FUNC PyInit_diffhelpers(void)
@@ -183,8 +174,8 @@
if (m == NULL)
return NULL;
- diffhelpers_Error = PyErr_NewException("diffhelpers.diffhelpersError",
- NULL, NULL);
+ diffhelpers_Error =
+ PyErr_NewException("diffhelpers.diffhelpersError", NULL, NULL);
Py_INCREF(diffhelpers_Error);
PyModule_AddObject(m, "diffhelpersError", diffhelpers_Error);
PyModule_AddIntConstant(m, "version", version);
@@ -192,13 +183,12 @@
return m;
}
#else
-PyMODINIT_FUNC
-initdiffhelpers(void)
+PyMODINIT_FUNC initdiffhelpers(void)
{
PyObject *m;
m = Py_InitModule3("diffhelpers", methods, diffhelpers_doc);
- diffhelpers_Error = PyErr_NewException("diffhelpers.diffhelpersError",
- NULL, NULL);
+ diffhelpers_Error =
+ PyErr_NewException("diffhelpers.diffhelpersError", NULL, NULL);
PyModule_AddIntConstant(m, "version", version);
}
#endif
--- a/mercurial/cext/mpatch.c Fri Feb 23 17:57:04 2018 -0800
+++ b/mercurial/cext/mpatch.c Sat Feb 24 17:49:10 2018 -0600
@@ -55,10 +55,10 @@
ssize_t blen;
int r;
- PyObject *tmp = PyList_GetItem((PyObject*)bins, pos);
+ PyObject *tmp = PyList_GetItem((PyObject *)bins, pos);
if (!tmp)
return NULL;
- if (PyObject_AsCharBuffer(tmp, &buffer, (Py_ssize_t*)&blen))
+ if (PyObject_AsCharBuffer(tmp, &buffer, (Py_ssize_t *)&blen))
return NULL;
if ((r = mpatch_decode(buffer, blen, &res)) < 0) {
if (!PyErr_Occurred())
@@ -68,8 +68,7 @@
return res;
}
-static PyObject *
-patches(PyObject *self, PyObject *args)
+static PyObject *patches(PyObject *self, PyObject *args)
{
PyObject *text, *bins, *result;
struct mpatch_flist *patch;
@@ -110,7 +109,14 @@
goto cleanup;
}
out = PyBytes_AsString(result);
- if ((r = mpatch_apply(out, in, inlen, patch)) < 0) {
+ /* clang-format off */
+ {
+ Py_BEGIN_ALLOW_THREADS
+ r = mpatch_apply(out, in, inlen, patch);
+ Py_END_ALLOW_THREADS
+ }
+ /* clang-format on */
+ if (r < 0) {
Py_DECREF(result);
result = NULL;
}
@@ -122,8 +128,7 @@
}
/* calculate size of a patched file directly */
-static PyObject *
-patchedsize(PyObject *self, PyObject *args)
+static PyObject *patchedsize(PyObject *self, PyObject *args)
{
long orig, start, end, len, outlen = 0, last = 0, pos = 0;
Py_ssize_t patchlen;
@@ -146,7 +151,8 @@
if (pos != patchlen) {
if (!PyErr_Occurred())
- PyErr_SetString(mpatch_Error, "patch cannot be decoded");
+ PyErr_SetString(mpatch_Error,
+ "patch cannot be decoded");
return NULL;
}
@@ -155,20 +161,16 @@
}
static PyMethodDef methods[] = {
- {"patches", patches, METH_VARARGS, "apply a series of patches\n"},
- {"patchedsize", patchedsize, METH_VARARGS, "calculed patched size\n"},
- {NULL, NULL}
+ {"patches", patches, METH_VARARGS, "apply a series of patches\n"},
+ {"patchedsize", patchedsize, METH_VARARGS, "calculate patched size\n"},
+ {NULL, NULL},
};
static const int version = 1;
#ifdef IS_PY3K
static struct PyModuleDef mpatch_module = {
- PyModuleDef_HEAD_INIT,
- "mpatch",
- mpatch_doc,
- -1,
- methods
+ PyModuleDef_HEAD_INIT, "mpatch", mpatch_doc, -1, methods,
};
PyMODINIT_FUNC PyInit_mpatch(void)
@@ -179,8 +181,8 @@
if (m == NULL)
return NULL;
- mpatch_Error = PyErr_NewException("mercurial.cext.mpatch.mpatchError",
- NULL, NULL);
+ mpatch_Error =
+ PyErr_NewException("mercurial.cext.mpatch.mpatchError", NULL, NULL);
Py_INCREF(mpatch_Error);
PyModule_AddObject(m, "mpatchError", mpatch_Error);
PyModule_AddIntConstant(m, "version", version);
@@ -188,13 +190,12 @@
return m;
}
#else
-PyMODINIT_FUNC
-initmpatch(void)
+PyMODINIT_FUNC initmpatch(void)
{
PyObject *m;
m = Py_InitModule3("mpatch", methods, mpatch_doc);
- mpatch_Error = PyErr_NewException("mercurial.cext.mpatch.mpatchError",
- NULL, NULL);
+ mpatch_Error =
+ PyErr_NewException("mercurial.cext.mpatch.mpatchError", NULL, NULL);
PyModule_AddIntConstant(m, "version", version);
}
#endif
--- a/mercurial/cext/pathencode.c Fri Feb 23 17:57:04 2018 -0800
+++ b/mercurial/cext/pathencode.c Sat Feb 24 17:49:10 2018 -0600
@@ -26,26 +26,26 @@
/* state machine for the fast path */
enum path_state {
- START, /* first byte of a path component */
- A, /* "AUX" */
+ START, /* first byte of a path component */
+ A, /* "AUX" */
AU,
- THIRD, /* third of a 3-byte sequence, e.g. "AUX", "NUL" */
- C, /* "CON" or "COMn" */
+ THIRD, /* third of a 3-byte sequence, e.g. "AUX", "NUL" */
+ C, /* "CON" or "COMn" */
CO,
- COMLPT, /* "COM" or "LPT" */
+ COMLPT, /* "COM" or "LPT" */
COMLPTn,
L,
LP,
N,
NU,
- P, /* "PRN" */
+ P, /* "PRN" */
PR,
- LDOT, /* leading '.' */
- DOT, /* '.' in a non-leading position */
- H, /* ".h" */
- HGDI, /* ".hg", ".d", or ".i" */
+ LDOT, /* leading '.' */
+ DOT, /* '.' in a non-leading position */
+ H, /* ".h" */
+ HGDI, /* ".hg", ".d", or ".i" */
SPACE,
- DEFAULT /* byte of a path component after the first */
+ DEFAULT, /* byte of a path component after the first */
};
/* state machine for dir-encoding */
@@ -53,7 +53,7 @@
DDOT,
DH,
DHGDI,
- DDEFAULT
+ DDEFAULT,
};
static inline int inset(const uint32_t bitset[], char c)
@@ -82,7 +82,7 @@
}
static inline void hexencode(char *dest, Py_ssize_t *destlen, size_t destsize,
- uint8_t c)
+ uint8_t c)
{
static const char hexdigit[] = "0123456789abcdef";
@@ -92,14 +92,14 @@
/* 3-byte escape: tilde followed by two hex digits */
static inline void escape3(char *dest, Py_ssize_t *destlen, size_t destsize,
- char c)
+ char c)
{
charcopy(dest, destlen, destsize, '~');
hexencode(dest, destlen, destsize, c);
}
-static Py_ssize_t _encodedir(char *dest, size_t destsize,
- const char *src, Py_ssize_t len)
+static Py_ssize_t _encodedir(char *dest, size_t destsize, const char *src,
+ Py_ssize_t len)
{
enum dir_state state = DDEFAULT;
Py_ssize_t i = 0, destlen = 0;
@@ -126,8 +126,8 @@
if (src[i] == 'g') {
state = DHGDI;
charcopy(dest, &destlen, destsize, src[i++]);
- }
- else state = DDEFAULT;
+ } else
+ state = DDEFAULT;
break;
case DHGDI:
if (src[i] == '/') {
@@ -173,17 +173,15 @@
if (newobj) {
assert(PyBytes_Check(newobj));
Py_SIZE(newobj)--;
- _encodedir(PyBytes_AS_STRING(newobj), newlen, path,
- len + 1);
+ _encodedir(PyBytes_AS_STRING(newobj), newlen, path, len + 1);
}
return newobj;
}
static Py_ssize_t _encode(const uint32_t twobytes[8], const uint32_t onebyte[8],
- char *dest, Py_ssize_t destlen, size_t destsize,
- const char *src, Py_ssize_t len,
- int encodedir)
+ char *dest, Py_ssize_t destlen, size_t destsize,
+ const char *src, Py_ssize_t len, int encodedir)
{
enum path_state state = START;
Py_ssize_t i = 0;
@@ -237,15 +235,15 @@
if (src[i] == 'u') {
state = AU;
charcopy(dest, &destlen, destsize, src[i++]);
- }
- else state = DEFAULT;
+ } else
+ state = DEFAULT;
break;
case AU:
if (src[i] == 'x') {
state = THIRD;
i++;
- }
- else state = DEFAULT;
+ } else
+ state = DEFAULT;
break;
case THIRD:
state = DEFAULT;
@@ -264,24 +262,30 @@
if (src[i] == 'o') {
state = CO;
charcopy(dest, &destlen, destsize, src[i++]);
- }
- else state = DEFAULT;
+ } else
+ state = DEFAULT;
break;
case CO:
if (src[i] == 'm') {
state = COMLPT;
i++;
- }
- else if (src[i] == 'n') {
+ } else if (src[i] == 'n') {
state = THIRD;
i++;
- }
- else state = DEFAULT;
+ } else
+ state = DEFAULT;
break;
case COMLPT:
switch (src[i]) {
- case '1': case '2': case '3': case '4': case '5':
- case '6': case '7': case '8': case '9':
+ case '1':
+ case '2':
+ case '3':
+ case '4':
+ case '5':
+ case '6':
+ case '7':
+ case '8':
+ case '9':
state = COMLPTn;
i++;
break;
@@ -301,8 +305,8 @@
charcopy(dest, &destlen, destsize, src[i - 1]);
break;
default:
- memcopy(dest, &destlen, destsize,
- &src[i - 2], 2);
+ memcopy(dest, &destlen, destsize, &src[i - 2],
+ 2);
break;
}
break;
@@ -310,43 +314,43 @@
if (src[i] == 'p') {
state = LP;
charcopy(dest, &destlen, destsize, src[i++]);
- }
- else state = DEFAULT;
+ } else
+ state = DEFAULT;
break;
case LP:
if (src[i] == 't') {
state = COMLPT;
i++;
- }
- else state = DEFAULT;
+ } else
+ state = DEFAULT;
break;
case N:
if (src[i] == 'u') {
state = NU;
charcopy(dest, &destlen, destsize, src[i++]);
- }
- else state = DEFAULT;
+ } else
+ state = DEFAULT;
break;
case NU:
if (src[i] == 'l') {
state = THIRD;
i++;
- }
- else state = DEFAULT;
+ } else
+ state = DEFAULT;
break;
case P:
if (src[i] == 'r') {
state = PR;
charcopy(dest, &destlen, destsize, src[i++]);
- }
- else state = DEFAULT;
+ } else
+ state = DEFAULT;
break;
case PR:
if (src[i] == 'n') {
state = THIRD;
i++;
- }
- else state = DEFAULT;
+ } else
+ state = DEFAULT;
break;
case LDOT:
switch (src[i]) {
@@ -393,18 +397,18 @@
if (src[i] == 'g') {
state = HGDI;
charcopy(dest, &destlen, destsize, src[i++]);
- }
- else state = DEFAULT;
+ } else
+ state = DEFAULT;
break;
case HGDI:
if (src[i] == '/') {
state = START;
if (encodedir)
memcopy(dest, &destlen, destsize, ".hg",
- 3);
+ 3);
charcopy(dest, &destlen, destsize, src[i++]);
- }
- else state = DEFAULT;
+ } else
+ state = DEFAULT;
break;
case SPACE:
switch (src[i]) {
@@ -444,19 +448,17 @@
if (inset(onebyte, src[i])) {
do {
charcopy(dest, &destlen,
- destsize, src[i++]);
+ destsize, src[i++]);
} while (i < len &&
- inset(onebyte, src[i]));
- }
- else if (inset(twobytes, src[i])) {
+ inset(onebyte, src[i]));
+ } else if (inset(twobytes, src[i])) {
char c = src[i++];
charcopy(dest, &destlen, destsize, '_');
charcopy(dest, &destlen, destsize,
- c == '_' ? '_' : c + 32);
- }
- else
+ c == '_' ? '_' : c + 32);
+ } else
escape3(dest, &destlen, destsize,
- src[i++]);
+ src[i++]);
break;
}
break;
@@ -466,31 +468,29 @@
return destlen;
}
-static Py_ssize_t basicencode(char *dest, size_t destsize,
- const char *src, Py_ssize_t len)
+static Py_ssize_t basicencode(char *dest, size_t destsize, const char *src,
+ Py_ssize_t len)
{
- static const uint32_t twobytes[8] = { 0, 0, 0x87fffffe };
+ static const uint32_t twobytes[8] = {0, 0, 0x87fffffe};
static const uint32_t onebyte[8] = {
- 1, 0x2bff3bfa, 0x68000001, 0x2fffffff,
+ 1, 0x2bff3bfa, 0x68000001, 0x2fffffff,
};
Py_ssize_t destlen = 0;
- return _encode(twobytes, onebyte, dest, destlen, destsize,
- src, len, 1);
+ return _encode(twobytes, onebyte, dest, destlen, destsize, src, len, 1);
}
static const Py_ssize_t maxstorepathlen = 120;
-static Py_ssize_t _lowerencode(char *dest, size_t destsize,
- const char *src, Py_ssize_t len)
+static Py_ssize_t _lowerencode(char *dest, size_t destsize, const char *src,
+ Py_ssize_t len)
{
- static const uint32_t onebyte[8] = {
- 1, 0x2bfffbfb, 0xe8000001, 0x2fffffff
- };
+ static const uint32_t onebyte[8] = {1, 0x2bfffbfb, 0xe8000001,
+ 0x2fffffff};
- static const uint32_t lower[8] = { 0, 0, 0x7fffffe };
+ static const uint32_t lower[8] = {0, 0, 0x7fffffe};
Py_ssize_t i, destlen = 0;
@@ -524,13 +524,13 @@
}
/* See store.py:_auxencode for a description. */
-static Py_ssize_t auxencode(char *dest, size_t destsize,
- const char *src, Py_ssize_t len)
+static Py_ssize_t auxencode(char *dest, size_t destsize, const char *src,
+ Py_ssize_t len)
{
static const uint32_t twobytes[8];
static const uint32_t onebyte[8] = {
- ~0U, 0xffff3ffe, ~0U, ~0U, ~0U, ~0U, ~0U, ~0U,
+ ~0U, 0xffff3ffe, ~0U, ~0U, ~0U, ~0U, ~0U, ~0U,
};
return _encode(twobytes, onebyte, dest, 0, destsize, src, len, 0);
@@ -590,8 +590,7 @@
break;
charcopy(dest, &destlen, destsize, src[i]);
p = -1;
- }
- else if (p < dirprefixlen)
+ } else if (p < dirprefixlen)
charcopy(dest, &destlen, destsize, src[i]);
}
@@ -622,13 +621,13 @@
slop = maxstorepathlen - used;
if (slop > 0) {
Py_ssize_t basenamelen =
- lastslash >= 0 ? len - lastslash - 2 : len - 1;
+ lastslash >= 0 ? len - lastslash - 2 : len - 1;
if (basenamelen > slop)
basenamelen = slop;
if (basenamelen > 0)
memcopy(dest, &destlen, destsize, &src[lastslash + 1],
- basenamelen);
+ basenamelen);
}
/* Add hash and suffix. */
@@ -637,7 +636,7 @@
if (lastdot >= 0)
memcopy(dest, &destlen, destsize, &src[lastdot],
- len - lastdot - 1);
+ len - lastdot - 1);
assert(PyBytes_Check(ret));
Py_SIZE(ret) = destlen;
@@ -672,8 +671,8 @@
if (shafunc == NULL) {
PyErr_SetString(PyExc_AttributeError,
- "module 'hashlib' has no "
- "attribute 'sha1'");
+ "module 'hashlib' has no "
+ "attribute 'sha1'");
return -1;
}
}
@@ -690,7 +689,7 @@
if (!PyBytes_Check(hashobj) || PyBytes_GET_SIZE(hashobj) != 20) {
PyErr_SetString(PyExc_TypeError,
- "result of digest is not a 20-byte hash");
+ "result of digest is not a 20-byte hash");
Py_DECREF(hashobj);
return -1;
}
@@ -755,10 +754,9 @@
assert(PyBytes_Check(newobj));
Py_SIZE(newobj)--;
basicencode(PyBytes_AS_STRING(newobj), newlen, path,
- len + 1);
+ len + 1);
}
- }
- else
+ } else
newobj = hashencode(path, len + 1);
return newobj;
--- a/mercurial/changelog.py Fri Feb 23 17:57:04 2018 -0800
+++ b/mercurial/changelog.py Sat Feb 24 17:49:10 2018 -0600
@@ -20,6 +20,7 @@
from . import (
encoding,
error,
+ pycompat,
revlog,
util,
)
@@ -90,6 +91,11 @@
return self.offset
def flush(self):
pass
+
+ @property
+ def closed(self):
+ return self.fp.closed
+
def close(self):
self.fp.close()
@@ -127,6 +133,13 @@
self.offset += len(s)
self._end += len(s)
+ def __enter__(self):
+ self.fp.__enter__()
+ return self
+
+ def __exit__(self, *args):
+ return self.fp.__exit__(*args)
+
def _divertopener(opener, target):
"""build an opener that writes in 'target.a' instead of 'target'"""
def _divert(name, mode='r', checkambig=False):
@@ -420,7 +433,7 @@
self._delaybuf = None
self._divert = False
# split when we're done
- self.checkinlinesize(tr)
+ self._enforceinlinesize(tr)
def _writepending(self, tr):
"create a file containing the unfinalized state for pretxnchangegroup"
@@ -446,9 +459,9 @@
return False
- def checkinlinesize(self, tr, fp=None):
+ def _enforceinlinesize(self, tr, fp=None):
if not self._delayed:
- revlog.revlog.checkinlinesize(self, tr, fp)
+ revlog.revlog._enforceinlinesize(self, tr, fp)
def read(self, node):
"""Obtain data from a parsed changelog revision.
@@ -505,8 +518,8 @@
if not user:
raise error.RevlogError(_("empty username"))
if "\n" in user:
- raise error.RevlogError(_("username %s contains a newline")
- % repr(user))
+ raise error.RevlogError(_("username %r contains a newline")
+ % pycompat.bytestr(user))
desc = stripdesc(desc)
--- a/mercurial/cmdutil.py Fri Feb 23 17:57:04 2018 -0800
+++ b/mercurial/cmdutil.py Sat Feb 24 17:49:10 2018 -0600
@@ -8,7 +8,6 @@
from __future__ import absolute_import
import errno
-import itertools
import os
import re
import tempfile
@@ -26,26 +25,23 @@
changelog,
copies,
crecord as crecordmod,
- dagop,
dirstateguard,
encoding,
error,
formatter,
- graphmod,
+ logcmdutil,
match as matchmod,
- mdiff,
+ merge as mergemod,
obsolete,
patch,
pathutil,
pycompat,
registrar,
revlog,
- revset,
- revsetlang,
rewriteutil,
scmutil,
smartset,
- templatekw,
+ subrepoutil,
templater,
util,
vfs as vfsmod,
@@ -225,7 +221,6 @@
def dorecord(ui, repo, commitfunc, cmdsuggest, backupall,
filterfn, *pats, **opts):
- from . import merge as mergemod
opts = pycompat.byteskwargs(opts)
if not ui.interactive():
if cmdsuggest:
@@ -562,8 +557,6 @@
return '\n'.join(commentedlines) + '\n'
def _conflictsmsg(repo):
- # avoid merge cycle
- from . import merge as mergemod
mergestate = mergemod.mergestate.read(repo)
if not mergestate.active():
return
@@ -898,65 +891,45 @@
else:
return commiteditor
-def loglimit(opts):
- """get the log limit according to option -l/--limit"""
- limit = opts.get('limit')
- if limit:
- try:
- limit = int(limit)
- except ValueError:
- raise error.Abort(_('limit must be a positive integer'))
- if limit <= 0:
- raise error.Abort(_('limit must be positive'))
- else:
- limit = None
- return limit
-
-def makefilename(repo, pat, node, desc=None,
+def makefilename(ctx, pat,
total=None, seqno=None, revwidth=None, pathname=None):
- node_expander = {
- 'H': lambda: hex(node),
- 'R': lambda: '%d' % repo.changelog.rev(node),
- 'h': lambda: short(node),
- 'm': lambda: re.sub('[^\w]', '_', desc or '')
- }
expander = {
+ 'H': lambda: ctx.hex(),
+ 'R': lambda: '%d' % ctx.rev(),
+ 'h': lambda: short(ctx.node()),
+ 'm': lambda: re.sub('[^\w]', '_',
+ ctx.description().rstrip().splitlines()[0]),
+ 'r': lambda: ('%d' % ctx.rev()).zfill(revwidth or 0),
'%': lambda: '%',
- 'b': lambda: os.path.basename(repo.root),
+ 'b': lambda: os.path.basename(ctx.repo().root),
}
-
- try:
- if node:
- expander.update(node_expander)
- if node:
- expander['r'] = (lambda:
- ('%d' % repo.changelog.rev(node)).zfill(revwidth or 0))
- if total is not None:
- expander['N'] = lambda: '%d' % total
- if seqno is not None:
- expander['n'] = lambda: '%d' % seqno
- if total is not None and seqno is not None:
- expander['n'] = (lambda: ('%d' % seqno).zfill(len('%d' % total)))
- if pathname is not None:
- expander['s'] = lambda: os.path.basename(pathname)
- expander['d'] = lambda: os.path.dirname(pathname) or '.'
- expander['p'] = lambda: pathname
-
- newname = []
- patlen = len(pat)
- i = 0
- while i < patlen:
+ if total is not None:
+ expander['N'] = lambda: '%d' % total
+ if seqno is not None:
+ expander['n'] = lambda: '%d' % seqno
+ if total is not None and seqno is not None:
+ expander['n'] = (lambda: ('%d' % seqno).zfill(len('%d' % total)))
+ if pathname is not None:
+ expander['s'] = lambda: os.path.basename(pathname)
+ expander['d'] = lambda: os.path.dirname(pathname) or '.'
+ expander['p'] = lambda: pathname
+
+ newname = []
+ patlen = len(pat)
+ i = 0
+ while i < patlen:
+ c = pat[i:i + 1]
+ if c == '%':
+ i += 1
c = pat[i:i + 1]
- if c == '%':
- i += 1
- c = pat[i:i + 1]
+ try:
c = expander[c]()
- newname.append(c)
- i += 1
- return ''.join(newname)
- except KeyError as inst:
- raise error.Abort(_("invalid format spec '%%%s' in output filename") %
- inst.args[0])
+ except KeyError:
+ raise error.Abort(_("invalid format spec '%%%s' in output "
+ "filename") % c)
+ newname.append(c)
+ i += 1
+ return ''.join(newname)
def isstdiofilename(pat):
"""True if the given pat looks like a filename denoting stdin/stdout"""
@@ -981,19 +954,20 @@
def __exit__(self, exc_type, exc_value, exc_tb):
pass
-def makefileobj(repo, pat, node=None, desc=None, total=None,
+def makefileobj(ctx, pat, total=None,
seqno=None, revwidth=None, mode='wb', modemap=None,
pathname=None):
writable = mode not in ('r', 'rb')
if isstdiofilename(pat):
+ repo = ctx.repo()
if writable:
fp = repo.ui.fout
else:
fp = repo.ui.fin
return _unclosablefile(fp)
- fn = makefilename(repo, pat, node, desc, total, seqno, revwidth, pathname)
+ fn = makefilename(ctx, pat, total, seqno, revwidth, pathname)
if modemap is not None:
mode = modemap.get(fn, mode)
if mode == 'wb':
@@ -1568,9 +1542,7 @@
ctx = repo[rev]
fo = None
if not fp and fntemplate:
- desc_lines = ctx.description().rstrip().split('\n')
- desc = desc_lines[0] #Commit always has a first line.
- fo = makefileobj(repo, fntemplate, ctx.node(), desc=desc,
+ fo = makefileobj(ctx, fntemplate,
total=total, seqno=seqno, revwidth=revwidth,
mode='wb', modemap=filemode)
dest = fo.name
@@ -1583,500 +1555,6 @@
if fo is not None:
fo.close()
-def diffordiffstat(ui, repo, diffopts, node1, node2, match,
- changes=None, stat=False, fp=None, prefix='',
- root='', listsubrepos=False, hunksfilterfn=None):
- '''show diff or diffstat.'''
- if fp is None:
- write = ui.write
- else:
- def write(s, **kw):
- fp.write(s)
-
- if root:
- relroot = pathutil.canonpath(repo.root, repo.getcwd(), root)
- else:
- relroot = ''
- if relroot != '':
- # XXX relative roots currently don't work if the root is within a
- # subrepo
- uirelroot = match.uipath(relroot)
- relroot += '/'
- for matchroot in match.files():
- if not matchroot.startswith(relroot):
- ui.warn(_('warning: %s not inside relative root %s\n') % (
- match.uipath(matchroot), uirelroot))
-
- if stat:
- diffopts = diffopts.copy(context=0, noprefix=False)
- width = 80
- if not ui.plain():
- width = ui.termwidth()
- chunks = patch.diff(repo, node1, node2, match, changes, opts=diffopts,
- prefix=prefix, relroot=relroot,
- hunksfilterfn=hunksfilterfn)
- for chunk, label in patch.diffstatui(util.iterlines(chunks),
- width=width):
- write(chunk, label=label)
- else:
- for chunk, label in patch.diffui(repo, node1, node2, match,
- changes, opts=diffopts, prefix=prefix,
- relroot=relroot,
- hunksfilterfn=hunksfilterfn):
- write(chunk, label=label)
-
- if listsubrepos:
- ctx1 = repo[node1]
- ctx2 = repo[node2]
- for subpath, sub in scmutil.itersubrepos(ctx1, ctx2):
- tempnode2 = node2
- try:
- if node2 is not None:
- tempnode2 = ctx2.substate[subpath][1]
- except KeyError:
- # A subrepo that existed in node1 was deleted between node1 and
- # node2 (inclusive). Thus, ctx2's substate won't contain that
- # subpath. The best we can do is to ignore it.
- tempnode2 = None
- submatch = matchmod.subdirmatcher(subpath, match)
- sub.diff(ui, diffopts, tempnode2, submatch, changes=changes,
- stat=stat, fp=fp, prefix=prefix)
-
-def _changesetlabels(ctx):
- labels = ['log.changeset', 'changeset.%s' % ctx.phasestr()]
- if ctx.obsolete():
- labels.append('changeset.obsolete')
- if ctx.isunstable():
- labels.append('changeset.unstable')
- for instability in ctx.instabilities():
- labels.append('instability.%s' % instability)
- return ' '.join(labels)
-
-class changeset_printer(object):
- '''show changeset information when templating not requested.'''
-
- def __init__(self, ui, repo, matchfn, diffopts, buffered):
- self.ui = ui
- self.repo = repo
- self.buffered = buffered
- self.matchfn = matchfn
- self.diffopts = diffopts
- self.header = {}
- self.hunk = {}
- self.lastheader = None
- self.footer = None
- self._columns = templatekw.getlogcolumns()
-
- def flush(self, ctx):
- rev = ctx.rev()
- if rev in self.header:
- h = self.header[rev]
- if h != self.lastheader:
- self.lastheader = h
- self.ui.write(h)
- del self.header[rev]
- if rev in self.hunk:
- self.ui.write(self.hunk[rev])
- del self.hunk[rev]
-
- def close(self):
- if self.footer:
- self.ui.write(self.footer)
-
- def show(self, ctx, copies=None, matchfn=None, hunksfilterfn=None,
- **props):
- props = pycompat.byteskwargs(props)
- if self.buffered:
- self.ui.pushbuffer(labeled=True)
- self._show(ctx, copies, matchfn, hunksfilterfn, props)
- self.hunk[ctx.rev()] = self.ui.popbuffer()
- else:
- self._show(ctx, copies, matchfn, hunksfilterfn, props)
-
- def _show(self, ctx, copies, matchfn, hunksfilterfn, props):
- '''show a single changeset or file revision'''
- changenode = ctx.node()
- rev = ctx.rev()
-
- if self.ui.quiet:
- self.ui.write("%s\n" % scmutil.formatchangeid(ctx),
- label='log.node')
- return
-
- columns = self._columns
- self.ui.write(columns['changeset'] % scmutil.formatchangeid(ctx),
- label=_changesetlabels(ctx))
-
- # branches are shown first before any other names due to backwards
- # compatibility
- branch = ctx.branch()
- # don't show the default branch name
- if branch != 'default':
- self.ui.write(columns['branch'] % branch, label='log.branch')
-
- for nsname, ns in self.repo.names.iteritems():
- # branches has special logic already handled above, so here we just
- # skip it
- if nsname == 'branches':
- continue
- # we will use the templatename as the color name since those two
- # should be the same
- for name in ns.names(self.repo, changenode):
- self.ui.write(ns.logfmt % name,
- label='log.%s' % ns.colorname)
- if self.ui.debugflag:
- self.ui.write(columns['phase'] % ctx.phasestr(), label='log.phase')
- for pctx in scmutil.meaningfulparents(self.repo, ctx):
- label = 'log.parent changeset.%s' % pctx.phasestr()
- self.ui.write(columns['parent'] % scmutil.formatchangeid(pctx),
- label=label)
-
- if self.ui.debugflag and rev is not None:
- mnode = ctx.manifestnode()
- mrev = self.repo.manifestlog._revlog.rev(mnode)
- self.ui.write(columns['manifest']
- % scmutil.formatrevnode(self.ui, mrev, mnode),
- label='ui.debug log.manifest')
- self.ui.write(columns['user'] % ctx.user(), label='log.user')
- self.ui.write(columns['date'] % util.datestr(ctx.date()),
- label='log.date')
-
- if ctx.isunstable():
- instabilities = ctx.instabilities()
- self.ui.write(columns['instability'] % ', '.join(instabilities),
- label='log.instability')
-
- elif ctx.obsolete():
- self._showobsfate(ctx)
-
- self._exthook(ctx)
-
- if self.ui.debugflag:
- files = ctx.p1().status(ctx)[:3]
- for key, value in zip(['files', 'files+', 'files-'], files):
- if value:
- self.ui.write(columns[key] % " ".join(value),
- label='ui.debug log.files')
- elif ctx.files() and self.ui.verbose:
- self.ui.write(columns['files'] % " ".join(ctx.files()),
- label='ui.note log.files')
- if copies and self.ui.verbose:
- copies = ['%s (%s)' % c for c in copies]
- self.ui.write(columns['copies'] % ' '.join(copies),
- label='ui.note log.copies')
-
- extra = ctx.extra()
- if extra and self.ui.debugflag:
- for key, value in sorted(extra.items()):
- self.ui.write(columns['extra'] % (key, util.escapestr(value)),
- label='ui.debug log.extra')
-
- description = ctx.description().strip()
- if description:
- if self.ui.verbose:
- self.ui.write(_("description:\n"),
- label='ui.note log.description')
- self.ui.write(description,
- label='ui.note log.description')
- self.ui.write("\n\n")
- else:
- self.ui.write(columns['summary'] % description.splitlines()[0],
- label='log.summary')
- self.ui.write("\n")
-
- self.showpatch(ctx, matchfn, hunksfilterfn=hunksfilterfn)
-
- def _showobsfate(self, ctx):
- obsfate = templatekw.showobsfate(repo=self.repo, ctx=ctx, ui=self.ui)
-
- if obsfate:
- for obsfateline in obsfate:
- self.ui.write(self._columns['obsolete'] % obsfateline,
- label='log.obsfate')
-
- def _exthook(self, ctx):
- '''empty method used by extension as a hook point
- '''
-
- def showpatch(self, ctx, matchfn, hunksfilterfn=None):
- if not matchfn:
- matchfn = self.matchfn
- if matchfn:
- stat = self.diffopts.get('stat')
- diff = self.diffopts.get('patch')
- diffopts = patch.diffallopts(self.ui, self.diffopts)
- node = ctx.node()
- prev = ctx.p1().node()
- if stat:
- diffordiffstat(self.ui, self.repo, diffopts, prev, node,
- match=matchfn, stat=True,
- hunksfilterfn=hunksfilterfn)
- if diff:
- if stat:
- self.ui.write("\n")
- diffordiffstat(self.ui, self.repo, diffopts, prev, node,
- match=matchfn, stat=False,
- hunksfilterfn=hunksfilterfn)
- if stat or diff:
- self.ui.write("\n")
-
-class jsonchangeset(changeset_printer):
- '''format changeset information.'''
-
- def __init__(self, ui, repo, matchfn, diffopts, buffered):
- changeset_printer.__init__(self, ui, repo, matchfn, diffopts, buffered)
- self.cache = {}
- self._first = True
-
- def close(self):
- if not self._first:
- self.ui.write("\n]\n")
- else:
- self.ui.write("[]\n")
-
- def _show(self, ctx, copies, matchfn, hunksfilterfn, props):
- '''show a single changeset or file revision'''
- rev = ctx.rev()
- if rev is None:
- jrev = jnode = 'null'
- else:
- jrev = '%d' % rev
- jnode = '"%s"' % hex(ctx.node())
- j = encoding.jsonescape
-
- if self._first:
- self.ui.write("[\n {")
- self._first = False
- else:
- self.ui.write(",\n {")
-
- if self.ui.quiet:
- self.ui.write(('\n "rev": %s') % jrev)
- self.ui.write((',\n "node": %s') % jnode)
- self.ui.write('\n }')
- return
-
- self.ui.write(('\n "rev": %s') % jrev)
- self.ui.write((',\n "node": %s') % jnode)
- self.ui.write((',\n "branch": "%s"') % j(ctx.branch()))
- self.ui.write((',\n "phase": "%s"') % ctx.phasestr())
- self.ui.write((',\n "user": "%s"') % j(ctx.user()))
- self.ui.write((',\n "date": [%d, %d]') % ctx.date())
- self.ui.write((',\n "desc": "%s"') % j(ctx.description()))
-
- self.ui.write((',\n "bookmarks": [%s]') %
- ", ".join('"%s"' % j(b) for b in ctx.bookmarks()))
- self.ui.write((',\n "tags": [%s]') %
- ", ".join('"%s"' % j(t) for t in ctx.tags()))
- self.ui.write((',\n "parents": [%s]') %
- ", ".join('"%s"' % c.hex() for c in ctx.parents()))
-
- if self.ui.debugflag:
- if rev is None:
- jmanifestnode = 'null'
- else:
- jmanifestnode = '"%s"' % hex(ctx.manifestnode())
- self.ui.write((',\n "manifest": %s') % jmanifestnode)
-
- self.ui.write((',\n "extra": {%s}') %
- ", ".join('"%s": "%s"' % (j(k), j(v))
- for k, v in ctx.extra().items()))
-
- files = ctx.p1().status(ctx)
- self.ui.write((',\n "modified": [%s]') %
- ", ".join('"%s"' % j(f) for f in files[0]))
- self.ui.write((',\n "added": [%s]') %
- ", ".join('"%s"' % j(f) for f in files[1]))
- self.ui.write((',\n "removed": [%s]') %
- ", ".join('"%s"' % j(f) for f in files[2]))
-
- elif self.ui.verbose:
- self.ui.write((',\n "files": [%s]') %
- ", ".join('"%s"' % j(f) for f in ctx.files()))
-
- if copies:
- self.ui.write((',\n "copies": {%s}') %
- ", ".join('"%s": "%s"' % (j(k), j(v))
- for k, v in copies))
-
- matchfn = self.matchfn
- if matchfn:
- stat = self.diffopts.get('stat')
- diff = self.diffopts.get('patch')
- diffopts = patch.difffeatureopts(self.ui, self.diffopts, git=True)
- node, prev = ctx.node(), ctx.p1().node()
- if stat:
- self.ui.pushbuffer()
- diffordiffstat(self.ui, self.repo, diffopts, prev, node,
- match=matchfn, stat=True)
- self.ui.write((',\n "diffstat": "%s"')
- % j(self.ui.popbuffer()))
- if diff:
- self.ui.pushbuffer()
- diffordiffstat(self.ui, self.repo, diffopts, prev, node,
- match=matchfn, stat=False)
- self.ui.write((',\n "diff": "%s"') % j(self.ui.popbuffer()))
-
- self.ui.write("\n }")
-
-class changeset_templater(changeset_printer):
- '''format changeset information.
-
- Note: there are a variety of convenience functions to build a
- changeset_templater for common cases. See functions such as:
- makelogtemplater, show_changeset, buildcommittemplate, or other
- functions that use changesest_templater.
- '''
-
- # Arguments before "buffered" used to be positional. Consider not
- # adding/removing arguments before "buffered" to not break callers.
- def __init__(self, ui, repo, tmplspec, matchfn=None, diffopts=None,
- buffered=False):
- diffopts = diffopts or {}
-
- changeset_printer.__init__(self, ui, repo, matchfn, diffopts, buffered)
- tres = formatter.templateresources(ui, repo)
- self.t = formatter.loadtemplater(ui, tmplspec,
- defaults=templatekw.keywords,
- resources=tres,
- cache=templatekw.defaulttempl)
- self._counter = itertools.count()
- self.cache = tres['cache'] # shared with _graphnodeformatter()
-
- self._tref = tmplspec.ref
- self._parts = {'header': '', 'footer': '',
- tmplspec.ref: tmplspec.ref,
- 'docheader': '', 'docfooter': '',
- 'separator': ''}
- if tmplspec.mapfile:
- # find correct templates for current mode, for backward
- # compatibility with 'log -v/-q/--debug' using a mapfile
- tmplmodes = [
- (True, ''),
- (self.ui.verbose, '_verbose'),
- (self.ui.quiet, '_quiet'),
- (self.ui.debugflag, '_debug'),
- ]
- for mode, postfix in tmplmodes:
- for t in self._parts:
- cur = t + postfix
- if mode and cur in self.t:
- self._parts[t] = cur
- else:
- partnames = [p for p in self._parts.keys() if p != tmplspec.ref]
- m = formatter.templatepartsmap(tmplspec, self.t, partnames)
- self._parts.update(m)
-
- if self._parts['docheader']:
- self.ui.write(templater.stringify(self.t(self._parts['docheader'])))
-
- def close(self):
- if self._parts['docfooter']:
- if not self.footer:
- self.footer = ""
- self.footer += templater.stringify(self.t(self._parts['docfooter']))
- return super(changeset_templater, self).close()
-
- def _show(self, ctx, copies, matchfn, hunksfilterfn, props):
- '''show a single changeset or file revision'''
- props = props.copy()
- props['ctx'] = ctx
- props['index'] = index = next(self._counter)
- props['revcache'] = {'copies': copies}
- props = pycompat.strkwargs(props)
-
- # write separator, which wouldn't work well with the header part below
- # since there's inherently a conflict between header (across items) and
- # separator (per item)
- if self._parts['separator'] and index > 0:
- self.ui.write(templater.stringify(self.t(self._parts['separator'])))
-
- # write header
- if self._parts['header']:
- h = templater.stringify(self.t(self._parts['header'], **props))
- if self.buffered:
- self.header[ctx.rev()] = h
- else:
- if self.lastheader != h:
- self.lastheader = h
- self.ui.write(h)
-
- # write changeset metadata, then patch if requested
- key = self._parts[self._tref]
- self.ui.write(templater.stringify(self.t(key, **props)))
- self.showpatch(ctx, matchfn, hunksfilterfn=hunksfilterfn)
-
- if self._parts['footer']:
- if not self.footer:
- self.footer = templater.stringify(
- self.t(self._parts['footer'], **props))
-
-def logtemplatespec(tmpl, mapfile):
- if mapfile:
- return formatter.templatespec('changeset', tmpl, mapfile)
- else:
- return formatter.templatespec('', tmpl, None)
-
-def _lookuplogtemplate(ui, tmpl, style):
- """Find the template matching the given template spec or style
-
- See formatter.lookuptemplate() for details.
- """
-
- # ui settings
- if not tmpl and not style: # template are stronger than style
- tmpl = ui.config('ui', 'logtemplate')
- if tmpl:
- return logtemplatespec(templater.unquotestring(tmpl), None)
- else:
- style = util.expandpath(ui.config('ui', 'style'))
-
- if not tmpl and style:
- mapfile = style
- if not os.path.split(mapfile)[0]:
- mapname = (templater.templatepath('map-cmdline.' + mapfile)
- or templater.templatepath(mapfile))
- if mapname:
- mapfile = mapname
- return logtemplatespec(None, mapfile)
-
- if not tmpl:
- return logtemplatespec(None, None)
-
- return formatter.lookuptemplate(ui, 'changeset', tmpl)
-
-def makelogtemplater(ui, repo, tmpl, buffered=False):
- """Create a changeset_templater from a literal template 'tmpl'
- byte-string."""
- spec = logtemplatespec(tmpl, None)
- return changeset_templater(ui, repo, spec, buffered=buffered)
-
-def show_changeset(ui, repo, opts, buffered=False):
- """show one changeset using template or regular display.
-
- Display format will be the first non-empty hit of:
- 1. option 'template'
- 2. option 'style'
- 3. [ui] setting 'logtemplate'
- 4. [ui] setting 'style'
- If all of these values are either the unset or the empty string,
- regular display via changeset_printer() is done.
- """
- # options
- match = None
- if opts.get('patch') or opts.get('stat'):
- match = scmutil.matchall(repo)
-
- if opts.get('template') == 'json':
- return jsonchangeset(ui, repo, match, opts, buffered)
-
- spec = _lookuplogtemplate(ui, opts.get('template'), opts.get('style'))
-
- if not spec.ref and not spec.tmpl and not spec.mapfile:
- return changeset_printer(ui, repo, match, opts, buffered)
-
- return changeset_templater(ui, repo, spec, match, opts, buffered)
-
def showmarker(fm, marker, index=None):
"""utility function to display obsolescence marker in a readable way
@@ -2095,7 +1573,8 @@
fm.write('date', '(%s) ', fm.formatdate(marker.date()))
meta = marker.metadata().copy()
meta.pop('date', None)
- fm.write('metadata', '{%s}', fm.formatdict(meta, fmt='%r: %r', sep=', '))
+ smeta = util.rapply(pycompat.maybebytestr, meta)
+ fm.write('metadata', '{%s}', fm.formatdict(smeta, fmt='%r: %r', sep=', '))
fm.plain('\n')
def finddate(ui, repo, date):
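A minimal sketch of the rewrapping above, assuming util.rapply() maps a function over nested containers and pycompat.maybebytestr() wraps bytes so their repr() carries no b'' prefix on Python 3, as their use in this hunk implies:

    from mercurial import pycompat, util

    meta = {'user': 'alice <alice@example.com>', 'operation': 'amend'}
    smeta = util.rapply(pycompat.maybebytestr, meta)
    # '%r: %r' formatting of smeta items now renders identically on
    # Python 2 and Python 3, which is why showmarker rewraps the dict.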
@@ -2352,7 +1831,7 @@
else:
self.revs.discard(value)
ctx = change(value)
- matches = filter(match, ctx.files())
+ matches = [f for f in ctx.files() if match(f)]
if matches:
fncache[value] = matches
self.set.add(value)
@@ -2415,394 +1894,6 @@
return iterate()
-def _makelogmatcher(repo, revs, pats, opts):
- """Build matcher and expanded patterns from log options
-
- If --follow, revs are the revisions to follow from.
-
- Returns (match, pats, slowpath) where
- - match: a matcher built from the given pats and -I/-X opts
- - pats: patterns used (globs are expanded on Windows)
- - slowpath: True if patterns aren't as simple as scanning filelogs
- """
- # pats/include/exclude are passed to match.match() directly in
- # _matchfiles() revset but walkchangerevs() builds its matcher with
- # scmutil.match(). The difference is input pats are globbed on
- # platforms without shell expansion (windows).
- wctx = repo[None]
- match, pats = scmutil.matchandpats(wctx, pats, opts)
- slowpath = match.anypats() or (not match.always() and opts.get('removed'))
- if not slowpath:
- follow = opts.get('follow') or opts.get('follow_first')
- startctxs = []
- if follow and opts.get('rev'):
- startctxs = [repo[r] for r in revs]
- for f in match.files():
- if follow and startctxs:
- # No idea if the path was a directory at that revision, so
- # take the slow path.
- if any(f not in c for c in startctxs):
- slowpath = True
- continue
- elif follow and f not in wctx:
- # If the file exists, it may be a directory, so let it
- # take the slow path.
- if os.path.exists(repo.wjoin(f)):
- slowpath = True
- continue
- else:
- raise error.Abort(_('cannot follow file not in parent '
- 'revision: "%s"') % f)
- filelog = repo.file(f)
- if not filelog:
- # A zero count may be a directory or deleted file, so
- # try to find matching entries on the slow path.
- if follow:
- raise error.Abort(
- _('cannot follow nonexistent file: "%s"') % f)
- slowpath = True
-
- # We decided to fall back to the slowpath because at least one
- # of the paths was not a file. Check to see if at least one of them
- # existed in history - in that case, we'll continue down the
- # slowpath; otherwise, we can turn off the slowpath
- if slowpath:
- for path in match.files():
- if path == '.' or path in repo.store:
- break
- else:
- slowpath = False
-
- return match, pats, slowpath
-
-def _fileancestors(repo, revs, match, followfirst):
- fctxs = []
- for r in revs:
- ctx = repo[r]
- fctxs.extend(ctx[f].introfilectx() for f in ctx.walk(match))
-
- # When displaying a revision with --patch --follow FILE, we have
- # to know which file of the revision must be diffed. With
- # --follow, we want the names of the ancestors of FILE in the
- # revision, stored in "fcache". "fcache" is populated as a side effect
- # of the graph traversal.
- fcache = {}
- def filematcher(rev):
- return scmutil.matchfiles(repo, fcache.get(rev, []))
-
- def revgen():
- for rev, cs in dagop.filectxancestors(fctxs, followfirst=followfirst):
- fcache[rev] = [c.path() for c in cs]
- yield rev
- return smartset.generatorset(revgen(), iterasc=False), filematcher
-
-def _makenofollowlogfilematcher(repo, pats, opts):
- '''hook for extensions to override the filematcher for non-follow cases'''
- return None
-
-_opt2logrevset = {
- 'no_merges': ('not merge()', None),
- 'only_merges': ('merge()', None),
- '_matchfiles': (None, '_matchfiles(%ps)'),
- 'date': ('date(%s)', None),
- 'branch': ('branch(%s)', '%lr'),
- '_patslog': ('filelog(%s)', '%lr'),
- 'keyword': ('keyword(%s)', '%lr'),
- 'prune': ('ancestors(%s)', 'not %lr'),
- 'user': ('user(%s)', '%lr'),
-}
-
-def _makelogrevset(repo, match, pats, slowpath, opts):
- """Return a revset string built from log options and file patterns"""
- opts = dict(opts)
- # follow or not follow?
- follow = opts.get('follow') or opts.get('follow_first')
-
- # branch and only_branch are really aliases and must be handled at
- # the same time
- opts['branch'] = opts.get('branch', []) + opts.get('only_branch', [])
- opts['branch'] = [repo.lookupbranch(b) for b in opts['branch']]
-
- if slowpath:
- # See walkchangerevs() slow path.
- #
- # pats/include/exclude cannot be represented as separate
- # revset expressions as their filtering logic applies at file
- # level. For instance "-I a -X b" matches a revision touching
- # "a" and "b" while "file(a) and not file(b)" does
- # not. Besides, filesets are evaluated against the working
- # directory.
- matchargs = ['r:', 'd:relpath']
- for p in pats:
- matchargs.append('p:' + p)
- for p in opts.get('include', []):
- matchargs.append('i:' + p)
- for p in opts.get('exclude', []):
- matchargs.append('x:' + p)
- opts['_matchfiles'] = matchargs
- elif not follow:
- opts['_patslog'] = list(pats)
-
- expr = []
- for op, val in sorted(opts.iteritems()):
- if not val:
- continue
- if op not in _opt2logrevset:
- continue
- revop, listop = _opt2logrevset[op]
- if revop and '%' not in revop:
- expr.append(revop)
- elif not listop:
- expr.append(revsetlang.formatspec(revop, val))
- else:
- if revop:
- val = [revsetlang.formatspec(revop, v) for v in val]
- expr.append(revsetlang.formatspec(listop, val))
-
- if expr:
- expr = '(' + ' and '.join(expr) + ')'
- else:
- expr = None
- return expr
-
-def _logrevs(repo, opts):
- """Return the initial set of revisions to be filtered or followed"""
- follow = opts.get('follow') or opts.get('follow_first')
- if opts.get('rev'):
- revs = scmutil.revrange(repo, opts['rev'])
- elif follow and repo.dirstate.p1() == nullid:
- revs = smartset.baseset()
- elif follow:
- revs = repo.revs('.')
- else:
- revs = smartset.spanset(repo)
- revs.reverse()
- return revs
-
-def getlogrevs(repo, pats, opts):
- """Return (revs, filematcher) where revs is a smartset
-
- filematcher is a callable taking a revision number and returning a match
- objects filtering the files to be detailed when displaying the revision.
- """
- follow = opts.get('follow') or opts.get('follow_first')
- followfirst = opts.get('follow_first')
- limit = loglimit(opts)
- revs = _logrevs(repo, opts)
- if not revs:
- return smartset.baseset(), None
- match, pats, slowpath = _makelogmatcher(repo, revs, pats, opts)
- filematcher = None
- if follow:
- if slowpath or match.always():
- revs = dagop.revancestors(repo, revs, followfirst=followfirst)
- else:
- revs, filematcher = _fileancestors(repo, revs, match, followfirst)
- revs.reverse()
- if filematcher is None:
- filematcher = _makenofollowlogfilematcher(repo, pats, opts)
- if filematcher is None:
- def filematcher(rev):
- return match
-
- expr = _makelogrevset(repo, match, pats, slowpath, opts)
- if opts.get('graph') and opts.get('rev'):
- # User-specified revs might be unsorted, but don't sort before
- # _makelogrevset because it might depend on the order of revs
- if not (revs.isdescending() or revs.istopo()):
- revs.sort(reverse=True)
- if expr:
- matcher = revset.match(None, expr)
- revs = matcher(repo, revs)
- if limit is not None:
- revs = revs.slice(0, limit)
- return revs, filematcher
-
-def _parselinerangelogopt(repo, opts):
- """Parse --line-range log option and return a list of tuples (filename,
- (fromline, toline)).
- """
- linerangebyfname = []
- for pat in opts.get('line_range', []):
- try:
- pat, linerange = pat.rsplit(',', 1)
- except ValueError:
- raise error.Abort(_('malformatted line-range pattern %s') % pat)
- try:
- fromline, toline = map(int, linerange.split(':'))
- except ValueError:
- raise error.Abort(_("invalid line range for %s") % pat)
- msg = _("line range pattern '%s' must match exactly one file") % pat
- fname = scmutil.parsefollowlinespattern(repo, None, pat, msg)
- linerangebyfname.append(
- (fname, util.processlinerange(fromline, toline)))
- return linerangebyfname
-
-def getloglinerangerevs(repo, userrevs, opts):
- """Return (revs, filematcher, hunksfilter).
-
- "revs" are revisions obtained by processing "line-range" log options and
- walking block ancestors of each specified file/line-range.
-
- "filematcher(rev) -> match" is a factory function returning a match object
- for a given revision for file patterns specified in --line-range option.
- If neither --stat nor --patch options are passed, "filematcher" is None.
-
- "hunksfilter(rev) -> filterfn(fctx, hunks)" is a factory function
- returning a hunks filtering function.
- If neither --stat nor --patch options are passed, "filterhunks" is None.
- """
- wctx = repo[None]
-
- # Two-levels map of "rev -> file ctx -> [line range]".
- linerangesbyrev = {}
- for fname, (fromline, toline) in _parselinerangelogopt(repo, opts):
- if fname not in wctx:
- raise error.Abort(_('cannot follow file not in parent '
- 'revision: "%s"') % fname)
- fctx = wctx.filectx(fname)
- for fctx, linerange in dagop.blockancestors(fctx, fromline, toline):
- rev = fctx.introrev()
- if rev not in userrevs:
- continue
- linerangesbyrev.setdefault(
- rev, {}).setdefault(
- fctx.path(), []).append(linerange)
-
- filematcher = None
- hunksfilter = None
- if opts.get('patch') or opts.get('stat'):
-
- def nofilterhunksfn(fctx, hunks):
- return hunks
-
- def hunksfilter(rev):
- fctxlineranges = linerangesbyrev.get(rev)
- if fctxlineranges is None:
- return nofilterhunksfn
-
- def filterfn(fctx, hunks):
- lineranges = fctxlineranges.get(fctx.path())
- if lineranges is not None:
- for hr, lines in hunks:
- if hr is None: # binary
- yield hr, lines
- continue
- if any(mdiff.hunkinrange(hr[2:], lr)
- for lr in lineranges):
- yield hr, lines
- else:
- for hunk in hunks:
- yield hunk
-
- return filterfn
-
- def filematcher(rev):
- files = list(linerangesbyrev.get(rev, []))
- return scmutil.matchfiles(repo, files)
-
- revs = sorted(linerangesbyrev, reverse=True)
-
- return revs, filematcher, hunksfilter
-
-def _graphnodeformatter(ui, displayer):
- spec = ui.config('ui', 'graphnodetemplate')
- if not spec:
- return templatekw.showgraphnode # fast path for "{graphnode}"
-
- spec = templater.unquotestring(spec)
- tres = formatter.templateresources(ui)
- if isinstance(displayer, changeset_templater):
- tres['cache'] = displayer.cache # reuse cache of slow templates
- templ = formatter.maketemplater(ui, spec, defaults=templatekw.keywords,
- resources=tres)
- def formatnode(repo, ctx):
- props = {'ctx': ctx, 'repo': repo, 'revcache': {}}
- return templ.render(props)
- return formatnode
-
-def displaygraph(ui, repo, dag, displayer, edgefn, getrenamed=None,
- filematcher=None, props=None):
- props = props or {}
- formatnode = _graphnodeformatter(ui, displayer)
- state = graphmod.asciistate()
- styles = state['styles']
-
- # only set graph styling if HGPLAIN is not set.
- if ui.plain('graph'):
- # set all edge styles to |, the default pre-3.8 behaviour
- styles.update(dict.fromkeys(styles, '|'))
- else:
- edgetypes = {
- 'parent': graphmod.PARENT,
- 'grandparent': graphmod.GRANDPARENT,
- 'missing': graphmod.MISSINGPARENT
- }
- for name, key in edgetypes.items():
- # experimental config: experimental.graphstyle.*
- styles[key] = ui.config('experimental', 'graphstyle.%s' % name,
- styles[key])
- if not styles[key]:
- styles[key] = None
-
- # experimental config: experimental.graphshorten
- state['graphshorten'] = ui.configbool('experimental', 'graphshorten')
-
- for rev, type, ctx, parents in dag:
- char = formatnode(repo, ctx)
- copies = None
- if getrenamed and ctx.rev():
- copies = []
- for fn in ctx.files():
- rename = getrenamed(fn, ctx.rev())
- if rename:
- copies.append((fn, rename[0]))
- revmatchfn = None
- if filematcher is not None:
- revmatchfn = filematcher(ctx.rev())
- edges = edgefn(type, char, state, rev, parents)
- firstedge = next(edges)
- width = firstedge[2]
- displayer.show(ctx, copies=copies, matchfn=revmatchfn,
- _graphwidth=width, **pycompat.strkwargs(props))
- lines = displayer.hunk.pop(rev).split('\n')
- if not lines[-1]:
- del lines[-1]
- displayer.flush(ctx)
- for type, char, width, coldata in itertools.chain([firstedge], edges):
- graphmod.ascii(ui, state, type, char, lines, coldata)
- lines = []
- displayer.close()
-
-def graphlog(ui, repo, revs, filematcher, opts):
- # Parameters are identical to log command ones
- revdag = graphmod.dagwalker(repo, revs)
-
- getrenamed = None
- if opts.get('copies'):
- endrev = None
- if opts.get('rev'):
- endrev = scmutil.revrange(repo, opts.get('rev')).max() + 1
- getrenamed = templatekw.getrenamedfn(repo, endrev=endrev)
-
- ui.pager('log')
- displayer = show_changeset(ui, repo, opts, buffered=True)
- displaygraph(ui, repo, revdag, displayer, graphmod.asciiedges, getrenamed,
- filematcher)
-
-def checkunsupportedgraphflags(pats, opts):
- for op in ["newest_first"]:
- if op in opts and opts[op]:
- raise error.Abort(_("-G/--graph option is incompatible with --%s")
- % op.replace("_", "-"))
-
-def graphrevs(repo, nodes, opts):
- limit = loglimit(opts)
- nodes.reverse()
- if limit is not None:
- nodes = nodes[:limit]
- return graphmod.nodes(repo, nodes)
-
def add(ui, repo, match, prefix, explicitonly, **opts):
join = lambda f: os.path.join(prefix, f)
bad = []
@@ -3071,7 +2162,7 @@
def write(path):
filename = None
if fntemplate:
- filename = makefilename(repo, fntemplate, ctx.node(),
+ filename = makefilename(ctx, fntemplate,
pathname=os.path.join(prefix, path))
# attempt to create the directory if it does not already exist
try:
@@ -3089,12 +2180,16 @@
mfnode = ctx.manifestnode()
try:
if mfnode and mfl[mfnode].find(file)[0]:
+ scmutil.fileprefetchhooks(repo, ctx, [file])
write(file)
return 0
except KeyError:
pass
- for abs in ctx.walk(matcher):
+ files = [f for f in ctx.walk(matcher)]
+ scmutil.fileprefetchhooks(repo, ctx, files)
+
+ for abs in files:
write(abs)
err = 0
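The prefetch calls above are the consumer side of the new hook point; a hedged sketch of how a storage extension might register one, assuming scmutil.fileprefetchhooks is a util.hooks registry invoked with (repo, ctx, files) as these call sites suggest:

    from mercurial import scmutil

    def _prefetch(repo, ctx, files):
        # fetch the listed files at 'ctx' before cat/revert read them
        repo.ui.debug('prefetching %d files\n' % len(files))

    def extsetup(ui):
        scmutil.fileprefetchhooks.add('myext', _prefetch)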
@@ -3204,13 +2299,12 @@
# subrepo.precommit(). To minimize the risk of this hack, we do
# nothing if .hgsub does not exist.
if '.hgsub' in wctx or '.hgsub' in old:
- from . import subrepo # avoid cycle: cmdutil -> subrepo -> cmdutil
- subs, commitsubs, newsubstate = subrepo.precommit(
+ subs, commitsubs, newsubstate = subrepoutil.precommit(
ui, wctx, wctx._status, matcher)
# amend should abort if commitsubrepos is enabled
assert not commitsubs
if subs:
- subrepo.writestate(repo, newsubstate)
+ subrepoutil.writestate(repo, newsubstate)
filestoamend = set(f for f in wctx.files() if matcher(f))
@@ -3398,7 +2492,7 @@
def buildcommittemplate(repo, ctx, subs, extramsg, ref):
ui = repo.ui
spec = formatter.templatespec(ref, None, None)
- t = changeset_templater(ui, repo, spec, None, {}, False)
+ t = logcmdutil.changesettemplater(ui, repo, spec)
t.t.cache.update((k, templater.unquotestring(v))
for k, v in repo.ui.configitems('committemplate'))
@@ -3763,7 +2857,15 @@
if not opts.get('dry_run'):
needdata = ('revert', 'add', 'undelete')
- _revertprefetch(repo, ctx, *[actions[name][0] for name in needdata])
+ if _revertprefetch is not _revertprefetchstub:
+ ui.deprecwarn("'cmdutil._revertprefetch' is deprecated, "
+ "add a callback to 'scmutil.fileprefetchhooks'",
+ '4.6', stacklevel=1)
+ _revertprefetch(repo, ctx,
+ *[actions[name][0] for name in needdata])
+ oplist = [actions[name][0] for name in needdata]
+ prefetch = scmutil.fileprefetchhooks
+ prefetch(repo, ctx, [f for sublist in oplist for f in sublist])
_performrevert(repo, parents, ctx, actions, interactive, tobackup)
if targetsubs:
@@ -3776,8 +2878,11 @@
raise error.Abort("subrepository '%s' does not exist in %s!"
% (sub, short(ctx.node())))
-def _revertprefetch(repo, ctx, *files):
- """Let extension changing the storage layer prefetch content"""
+def _revertprefetchstub(repo, ctx, *files):
+ """Stub method for detecting extension wrapping of _revertprefetch(), to
+ issue a deprecation warning."""
+
+_revertprefetch = _revertprefetchstub
def _performrevert(repo, parents, ctx, actions, interactive=False,
tobackup=None):
@@ -3791,7 +2896,6 @@
parent, p2 = parents
node = ctx.node()
excluded_files = []
- matcher_opts = {"exclude": excluded_files}
def checkout(f):
fc = ctx[f]
@@ -3812,7 +2916,7 @@
if choice == 0:
repo.dirstate.drop(f)
else:
- excluded_files.append(repo.wjoin(f))
+ excluded_files.append(f)
else:
repo.dirstate.drop(f)
for f in actions['remove'][0]:
@@ -3823,7 +2927,7 @@
if choice == 0:
doremove(f)
else:
- excluded_files.append(repo.wjoin(f))
+ excluded_files.append(f)
else:
doremove(f)
for f in actions['drop'][0]:
@@ -3843,8 +2947,8 @@
newlyaddedandmodifiedfiles = set()
if interactive:
# Prompt the user for changes to revert
- torevert = [repo.wjoin(f) for f in actions['revert'][0]]
- m = scmutil.match(ctx, torevert, matcher_opts)
+ torevert = [f for f in actions['revert'][0] if f not in excluded_files]
+ m = scmutil.matchfiles(repo, torevert)
diffopts = patch.difffeatureopts(repo.ui, whitespace=True)
diffopts.nodates = True
diffopts.git = True
@@ -4025,3 +3129,23 @@
if after[1]:
hint = after[0]
raise error.Abort(_('no %s in progress') % task, hint=hint)
+
+class changeset_printer(logcmdutil.changesetprinter):
+
+ def __init__(self, ui, *args, **kwargs):
+ msg = ("'cmdutil.changeset_printer' is deprecated, "
+ "use 'logcmdutil.logcmdutil'")
+ ui.deprecwarn(msg, "4.6")
+ super(changeset_printer, self).__init__(ui, *args, **kwargs)
+
+def displaygraph(ui, *args, **kwargs):
+ msg = ("'cmdutil.displaygraph' is deprecated, "
+ "use 'logcmdutil.displaygraph'")
+ ui.deprecwarn(msg, "4.6")
+ return logcmdutil.displaygraph(ui, *args, **kwargs)
+
+def show_changeset(ui, *args, **kwargs):
+ msg = ("'cmdutil.show_changeset' is deprecated, "
+ "use 'logcmdutil.changesetdisplayer'")
+ ui.deprecwarn(msg, "4.6")
+ return logcmdutil.changesetdisplayer(ui, *args, **kwargs)
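For extension authors, the shims above keep the old entry points working but warn; a minimal migration sketch using only the logcmdutil names introduced by this patch:

    from mercurial import logcmdutil

    def showtip(ui, repo):
        # previously: displayer = cmdutil.show_changeset(ui, repo, {})
        displayer = logcmdutil.changesetdisplayer(ui, repo, {})
        displayer.show(repo['tip'])
        displayer.close()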
--- a/mercurial/commands.py Fri Feb 23 17:57:04 2018 -0800
+++ b/mercurial/commands.py Sat Feb 24 17:49:10 2018 -0600
@@ -41,6 +41,7 @@
help,
hg,
lock as lockmod,
+ logcmdutil,
merge as mergemod,
obsolete,
obsutil,
@@ -53,12 +54,12 @@
rewriteutil,
scmutil,
server,
- sshserver,
streamclone,
tags as tagsmod,
templatekw,
ui as uimod,
util,
+ wireprotoserver,
)
release = lockmod.release
@@ -336,8 +337,8 @@
('number', ' ', lambda x: x.fctx.rev(), formatrev),
('changeset', ' ', lambda x: hexfn(x.fctx.node()), formathex),
('date', ' ', lambda x: x.fctx.date(), util.cachefunc(datefunc)),
- ('file', ' ', lambda x: x.fctx.path(), str),
- ('line_number', ':', lambda x: x.lineno, str),
+ ('file', ' ', lambda x: x.fctx.path(), pycompat.bytestr),
+ ('line_number', ':', lambda x: x.lineno, pycompat.bytestr),
]
fieldnamemap = {'number': 'rev', 'changeset': 'node'}
@@ -475,7 +476,7 @@
if not ctx:
raise error.Abort(_('no working directory: please specify a revision'))
node = ctx.node()
- dest = cmdutil.makefilename(repo, dest, node)
+ dest = cmdutil.makefilename(ctx, dest)
if os.path.realpath(dest) == repo.root:
raise error.Abort(_('repository root cannot be destination'))
@@ -485,11 +486,11 @@
if dest == '-':
if kind == 'files':
raise error.Abort(_('cannot archive plain files to stdout'))
- dest = cmdutil.makefileobj(repo, dest)
+ dest = cmdutil.makefileobj(ctx, dest)
if not prefix:
prefix = os.path.basename(repo.root) + '-%h'
- prefix = cmdutil.makefilename(repo, prefix, node)
+ prefix = cmdutil.makefilename(ctx, prefix)
match = scmutil.match(ctx, [], opts)
archival.archive(repo, dest, node, kind, not opts.get('no_decode'),
match, prefix, subrepos=opts.get('subrepos'))
@@ -823,7 +824,7 @@
cmdutil.bailifchanged(repo)
return hg.clean(repo, node, show_stats=show_stats)
- displayer = cmdutil.show_changeset(ui, repo, {})
+ displayer = logcmdutil.changesetdisplayer(ui, repo, {})
if command:
changesets = 1
@@ -1156,13 +1157,15 @@
def bundle(ui, repo, fname, dest=None, **opts):
"""create a bundle file
- Generate a bundle file containing data to be added to a repository.
+ Generate a bundle file containing data to be transferred to another
+ repository.
To create a bundle containing all changesets, use -a/--all
(or --base null). Otherwise, hg assumes the destination will have
all the nodes you specify with --base parameters. Otherwise, hg
will assume the repository has all the nodes in destination, or
- default-push/default if no destination is specified.
+ default-push/default if no destination is specified, where destination
+ is the repository you provide through the DEST option.
You can change bundle format with the -t/--type option. See
:hg:`help bundlespec` for documentation on this format. By default,
@@ -1219,7 +1222,7 @@
raise error.Abort(_("--base is incompatible with specifying "
"a destination"))
common = [repo.lookup(rev) for rev in base]
- heads = revs and map(repo.lookup, revs) or None
+ heads = [repo.lookup(r) for r in revs] if revs else None
outgoing = discovery.outgoing(repo, common, heads)
else:
dest = ui.expandpath(dest or 'default-push', dest or 'default')
@@ -1550,7 +1553,7 @@
extra = {}
if opts.get('close_branch'):
- extra['close'] = 1
+ extra['close'] = '1'
if not bheads:
raise error.Abort(_('can only close branch heads'))
@@ -1873,9 +1876,9 @@
diffopts = patch.diffallopts(ui, opts)
m = scmutil.match(repo[node2], pats, opts)
ui.pager('diff')
- cmdutil.diffordiffstat(ui, repo, diffopts, node1, node2, m, stat=stat,
- listsubrepos=opts.get('subrepos'),
- root=opts.get('root'))
+ logcmdutil.diffordiffstat(ui, repo, diffopts, node1, node2, m, stat=stat,
+ listsubrepos=opts.get('subrepos'),
+ root=opts.get('root'))
@command('^export',
[('o', 'output', '',
@@ -2647,7 +2650,7 @@
ui.pager('heads')
heads = sorted(heads, key=lambda x: -x.rev())
- displayer = cmdutil.show_changeset(ui, repo, opts)
+ displayer = logcmdutil.changesetdisplayer(ui, repo, opts)
for ctx in heads:
displayer.show(ctx)
displayer.close()
@@ -3155,11 +3158,11 @@
"""
opts = pycompat.byteskwargs(opts)
if opts.get('graph'):
- cmdutil.checkunsupportedgraphflags([], opts)
+ logcmdutil.checkunsupportedgraphflags([], opts)
def display(other, chlist, displayer):
- revdag = cmdutil.graphrevs(other, chlist, opts)
- cmdutil.displaygraph(ui, repo, revdag, displayer,
- graphmod.asciiedges)
+ revdag = logcmdutil.graphrevs(other, chlist, opts)
+ logcmdutil.displaygraph(ui, repo, revdag, displayer,
+ graphmod.asciiedges)
hg._incoming(display, lambda: 1, ui, repo, source, opts, buffered=True)
return 0
@@ -3414,33 +3417,17 @@
raise error.Abort(_('--line-range requires --follow'))
if linerange and pats:
+ # TODO: take pats as patterns with no line-range filter
raise error.Abort(
_('FILE arguments are not compatible with --line-range option')
)
repo = scmutil.unhidehashlikerevs(repo, opts.get('rev'), 'nowarn')
- revs, filematcher = cmdutil.getlogrevs(repo, pats, opts)
- hunksfilter = None
-
- if opts.get('graph'):
- if linerange:
- raise error.Abort(_('graph not supported with line range patterns'))
- return cmdutil.graphlog(ui, repo, revs, filematcher, opts)
-
+ revs, differ = logcmdutil.getrevs(repo, pats, opts)
if linerange:
- revs, lrfilematcher, hunksfilter = cmdutil.getloglinerangerevs(
- repo, revs, opts)
-
- if filematcher is not None and lrfilematcher is not None:
- basefilematcher = filematcher
-
- def filematcher(rev):
- files = (basefilematcher(rev).files()
- + lrfilematcher(rev).files())
- return scmutil.matchfiles(repo, files)
-
- elif filematcher is None:
- filematcher = lrfilematcher
+ # TODO: should follow file history from logcmdutil._initialrevs(),
+ # then filter the result by logcmdutil._makerevset() and --limit
+ revs, differ = logcmdutil.getlinerangerevs(repo, revs, opts)
getrenamed = None
if opts.get('copies'):
@@ -3450,29 +3437,13 @@
getrenamed = templatekw.getrenamedfn(repo, endrev=endrev)
ui.pager('log')
- displayer = cmdutil.show_changeset(ui, repo, opts, buffered=True)
- for rev in revs:
- ctx = repo[rev]
- copies = None
- if getrenamed is not None and rev:
- copies = []
- for fn in ctx.files():
- rename = getrenamed(fn, rev)
- if rename:
- copies.append((fn, rename[0]))
- if filematcher:
- revmatchfn = filematcher(ctx.rev())
- else:
- revmatchfn = None
- if hunksfilter:
- revhunksfilter = hunksfilter(rev)
- else:
- revhunksfilter = None
- displayer.show(ctx, copies=copies, matchfn=revmatchfn,
- hunksfilterfn=revhunksfilter)
- displayer.flush(ctx)
-
- displayer.close()
+ displayer = logcmdutil.changesetdisplayer(ui, repo, opts, differ,
+ buffered=True)
+ if opts.get('graph'):
+ displayfn = logcmdutil.displaygraphrevs
+ else:
+ displayfn = logcmdutil.displayrevs
+ displayfn(ui, repo, revs, displayer, getrenamed)
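A condensed sketch of the pipeline the rewritten log body now delegates to, with signatures assumed from the call sites above (getrevs() returning a revision set plus a "differ", and displayrevs() taking a getrenamed callable that may be None):

    from mercurial import logcmdutil

    def printlog(ui, repo, pats, opts):
        revs, differ = logcmdutil.getrevs(repo, pats, opts)
        displayer = logcmdutil.changesetdisplayer(ui, repo, opts, differ,
                                                  buffered=True)
        logcmdutil.displayrevs(ui, repo, revs, displayer, None)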
@command('manifest',
[('r', 'rev', '', _('revision to display'), _('REV')),
@@ -3523,8 +3494,8 @@
if not node:
node = rev
- char = {'l': '@', 'x': '*', '': ''}
- mode = {'l': '644', 'x': '755', '': '644'}
+ char = {'l': '@', 'x': '*', '': '', 't': 'd'}
+ mode = {'l': '644', 'x': '755', '': '644', 't': '755'}
if node:
repo = scmutil.unhidehashlikerevs(repo, [node], 'nowarn')
ctx = scmutil.revsingle(repo, node)
@@ -3604,7 +3575,7 @@
p2 = repo.lookup(node)
nodes = repo.changelog.findmissing(common=[p1], heads=[p2])
- displayer = cmdutil.show_changeset(ui, repo, opts)
+ displayer = logcmdutil.changesetdisplayer(ui, repo, opts)
for node in nodes:
displayer.show(repo[node])
displayer.close()
@@ -3668,16 +3639,17 @@
"""
opts = pycompat.byteskwargs(opts)
if opts.get('graph'):
- cmdutil.checkunsupportedgraphflags([], opts)
+ logcmdutil.checkunsupportedgraphflags([], opts)
o, other = hg._outgoing(ui, repo, dest, opts)
if not o:
cmdutil.outgoinghooks(ui, repo, other, opts, o)
return
- revdag = cmdutil.graphrevs(repo, o, opts)
+ revdag = logcmdutil.graphrevs(repo, o, opts)
ui.pager('outgoing')
- displayer = cmdutil.show_changeset(ui, repo, opts, buffered=True)
- cmdutil.displaygraph(ui, repo, revdag, displayer, graphmod.asciiedges)
+ displayer = logcmdutil.changesetdisplayer(ui, repo, opts, buffered=True)
+ logcmdutil.displaygraph(ui, repo, revdag, displayer,
+ graphmod.asciiedges)
cmdutil.outgoinghooks(ui, repo, other, opts, o)
return 0
@@ -3752,7 +3724,7 @@
else:
p = [cp.node() for cp in ctx.parents()]
- displayer = cmdutil.show_changeset(ui, repo, opts)
+ displayer = logcmdutil.changesetdisplayer(ui, repo, opts)
for n in p:
if n != nullid:
displayer.show(repo[n])
@@ -4043,7 +4015,7 @@
brev = None
if checkout:
- checkout = str(repo.changelog.rev(checkout))
+ checkout = "%d" % repo.changelog.rev(checkout)
# order below depends on implementation of
# hg.addbranchrevs(). opts['bookmark'] is ignored,
@@ -4757,7 +4729,7 @@
if repo is None:
raise error.RepoError(_("there is no Mercurial repository here"
" (.hg not found)"))
- s = sshserver.sshserver(ui, repo)
+ s = wireprotoserver.sshserver(ui, repo)
s.serve_forever()
service = server.createservice(ui, repo, opts)
@@ -4984,7 +4956,7 @@
# shows a working directory parent *changeset*:
# i18n: column positioning for "hg summary"
ui.write(_('parent: %d:%s ') % (p.rev(), p),
- label=cmdutil._changesetlabels(p))
+ label=logcmdutil.changesetlabels(p))
ui.write(' '.join(p.tags()), label='log.tag')
if p.bookmarks():
marks.extend(p.bookmarks())
@@ -5406,7 +5378,7 @@
Returns 0 on success.
"""
opts = pycompat.byteskwargs(opts)
- displayer = cmdutil.show_changeset(ui, repo, opts)
+ displayer = logcmdutil.changesetdisplayer(ui, repo, opts)
displayer.show(repo['tip'])
displayer.close()
--- a/mercurial/config.py Fri Feb 23 17:57:04 2018 -0800
+++ b/mercurial/config.py Sat Feb 24 17:49:10 2018 -0600
@@ -154,7 +154,7 @@
if inst.errno != errno.ENOENT:
raise error.ParseError(_("cannot include %s (%s)")
% (inc, inst.strerror),
- "%s:%s" % (src, line))
+ "%s:%d" % (src, line))
continue
if emptyre.match(l):
continue
@@ -185,7 +185,7 @@
self._unset.append((section, name))
continue
- raise error.ParseError(l.rstrip(), ("%s:%s" % (src, line)))
+ raise error.ParseError(l.rstrip(), ("%s:%d" % (src, line)))
def read(self, path, fp=None, sections=None, remap=None):
if not fp:
--- a/mercurial/configitems.py Fri Feb 23 17:57:04 2018 -0800
+++ b/mercurial/configitems.py Sat Feb 24 17:49:10 2018 -0600
@@ -538,9 +538,6 @@
coreconfigitem('experimental', 'httppostargs',
default=False,
)
-coreconfigitem('experimental', 'manifestv2',
- default=False,
-)
coreconfigitem('experimental', 'mergedriver',
default=None,
)
@@ -556,6 +553,9 @@
coreconfigitem('experimental', 'single-head-per-branch',
default=False,
)
+coreconfigitem('experimental', 'sshserver.support-v2',
+ default=False,
+)
coreconfigitem('experimental', 'spacemovesdown',
default=False,
)
@@ -574,6 +574,9 @@
coreconfigitem('experimental', 'update.atomic-file',
default=False,
)
+coreconfigitem('experimental', 'sshpeer.advertise-v2',
+ default=False,
+)
coreconfigitem('extensions', '.*',
default=None,
generic=True,
@@ -743,6 +746,16 @@
generic=True,
priority=-1,
)
+coreconfigitem('merge-tools', br'.*\.mergemarkers$',
+ default='basic',
+ generic=True,
+ priority=-1,
+)
+coreconfigitem('merge-tools', br'.*\.mergemarkertemplate$',
+ default=dynamicdefault, # take from ui.mergemarkertemplate
+ generic=True,
+ priority=-1,
+)
coreconfigitem('merge-tools', br'.*\.priority$',
default=0,
generic=True,
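With these generic items registered, per-tool lookups have a declared default; a hedged sketch of the lookup that now resolves to 'basic' when a tool configures nothing, assuming ui.config() falls back to the registered default for generic items:

    def mergemarkers(ui, tool):
        # e.g. mergemarkers(ui, 'kdiff3') -> 'basic' unless overridden
        return ui.config('merge-tools', '%s.mergemarkers' % tool)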
--- a/mercurial/context.py Fri Feb 23 17:57:04 2018 -0800
+++ b/mercurial/context.py Sat Feb 24 17:49:10 2018 -0600
@@ -46,6 +46,7 @@
scmutil,
sparse,
subrepo,
+ subrepoutil,
util,
)
@@ -173,7 +174,7 @@
@propertycache
def substate(self):
- return subrepo.state(self, self._repo.ui)
+ return subrepoutil.state(self, self._repo.ui)
def subrev(self, subpath):
return self.substate[subpath][1]
@@ -206,22 +207,10 @@
"""True if the changeset is extinct"""
return self.rev() in obsmod.getrevs(self._repo, 'extinct')
- def unstable(self):
- msg = ("'context.unstable' is deprecated, "
- "use 'context.orphan'")
- self._repo.ui.deprecwarn(msg, '4.4')
- return self.orphan()
-
def orphan(self):
"""True if the changeset is not obsolete but it's ancestor are"""
return self.rev() in obsmod.getrevs(self._repo, 'orphan')
- def bumped(self):
- msg = ("'context.bumped' is deprecated, "
- "use 'context.phasedivergent'")
- self._repo.ui.deprecwarn(msg, '4.4')
- return self.phasedivergent()
-
def phasedivergent(self):
"""True if the changeset try to be a successor of a public changeset
@@ -229,12 +218,6 @@
"""
return self.rev() in obsmod.getrevs(self._repo, 'phasedivergent')
- def divergent(self):
- msg = ("'context.divergent' is deprecated, "
- "use 'context.contentdivergent'")
- self._repo.ui.deprecwarn(msg, '4.4')
- return self.contentdivergent()
-
def contentdivergent(self):
"""Is a successors of a changeset with multiple possible successors set
@@ -242,33 +225,10 @@
"""
return self.rev() in obsmod.getrevs(self._repo, 'contentdivergent')
- def troubled(self):
- msg = ("'context.troubled' is deprecated, "
- "use 'context.isunstable'")
- self._repo.ui.deprecwarn(msg, '4.4')
- return self.isunstable()
-
def isunstable(self):
"""True if the changeset is either unstable, bumped or divergent"""
return self.orphan() or self.phasedivergent() or self.contentdivergent()
- def troubles(self):
- """Keep the old version around in order to avoid breaking extensions
- about different return values.
- """
- msg = ("'context.troubles' is deprecated, "
- "use 'context.instabilities'")
- self._repo.ui.deprecwarn(msg, '4.4')
-
- troubles = []
- if self.orphan():
- troubles.append('orphan')
- if self.phasedivergent():
- troubles.append('bumped')
- if self.contentdivergent():
- troubles.append('divergent')
- return troubles
-
def instabilities(self):
"""return the list of instabilities affecting this changeset.
@@ -1051,7 +1011,7 @@
# renamed filectx won't have a filelog yet, so set it
# from the cache to save time
for p in pl:
- if not '_filelog' in p.__dict__:
+ if not r'_filelog' in p.__dict__:
p._filelog = getlog(p.path())
return pl
--- a/mercurial/copies.py Fri Feb 23 17:57:04 2018 -0800
+++ b/mercurial/copies.py Sat Feb 24 17:49:10 2018 -0600
@@ -123,7 +123,7 @@
t[k] = v
# remove criss-crossed copies
- for k, v in t.items():
+ for k, v in list(t.items()):
if k in src and v in dst:
del t[k]
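The list() copy is the usual Python 3 fix for deleting keys while iterating; a generic illustration (not hg-specific):

    t = {'a': 'b', 'b': 'a'}
    src = dst = {'a': None, 'b': None}
    # iterating t.items() directly while deleting raises RuntimeError on
    # Python 3; snapshotting with list() keeps the loop valid on both majors
    for k, v in list(t.items()):
        if k in src and v in dst:
            del t[k]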
@@ -685,8 +685,8 @@
# the base and present in the source.
# Presence in the base is important to exclude added files, presence in the
# source is important to exclude removed files.
- missingfiles = filter(lambda f: f not in m1 and f in base and f in c2,
- changedfiles)
+ filt = lambda f: f not in m1 and f in base and f in c2
+ missingfiles = [f for f in changedfiles if filt(f)]
if missingfiles:
basenametofilename = collections.defaultdict(list)
--- a/mercurial/crecord.py Fri Feb 23 17:57:04 2018 -0800
+++ b/mercurial/crecord.py Sat Feb 24 17:49:10 2018 -0600
@@ -950,7 +950,7 @@
# preprocess the text, converting tabs to spaces
text = text.expandtabs(4)
# strip \n, and convert control characters to ^[char] representation
- text = re.sub(r'[\x00-\x08\x0a-\x1f]',
+ text = re.sub(br'[\x00-\x08\x0a-\x1f]',
lambda m:'^' + chr(ord(m.group()) + 64), text.strip('\n'))
if pair is not None:
@@ -1335,7 +1335,7 @@
# temporarily disable printing to windows by printstring
patchdisplaystring = self.printitem(item, ignorefolding,
recursechildren, towin=False)
- numlines = len(patchdisplaystring) / self.xscreensize
+ numlines = len(patchdisplaystring) // self.xscreensize
return numlines
def sigwinchhandler(self, n, frame):
--- a/mercurial/debugcommands.py Fri Feb 23 17:57:04 2018 -0800
+++ b/mercurial/debugcommands.py Sat Feb 24 17:49:10 2018 -0600
@@ -48,6 +48,7 @@
hg,
localrepo,
lock as lockmod,
+ logcmdutil,
merge as mergemod,
obsolete,
obsutil,
@@ -162,7 +163,7 @@
if mergeable_file:
linesperrev = 2
# make a file with k lines per rev
- initialmergedlines = [str(i) for i in xrange(0, total * linesperrev)]
+ initialmergedlines = ['%d' % i for i in xrange(0, total * linesperrev)]
initialmergedlines.append("")
tags = []
@@ -1239,16 +1240,19 @@
# editor
editor = ui.geteditor()
editor = util.expandpath(editor)
- fm.write('editor', _("checking commit editor... (%s)\n"), editor)
- cmdpath = util.findexe(pycompat.shlexsplit(editor)[0])
+ editorbin = pycompat.shlexsplit(editor, posix=not pycompat.iswindows)[0]
+ if pycompat.iswindows and editorbin[0] == '"' and editorbin[-1] == '"':
+ editorbin = editorbin[1:-1]
+ fm.write('editor', _("checking commit editor... (%s)\n"), editorbin)
+ cmdpath = util.findexe(editorbin)
fm.condwrite(not cmdpath and editor == 'vi', 'vinotfound',
_(" No commit editor set and can't find %s in PATH\n"
" (specify a commit editor in your configuration"
- " file)\n"), not cmdpath and editor == 'vi' and editor)
+ " file)\n"), not cmdpath and editor == 'vi' and editorbin)
fm.condwrite(not cmdpath and editor != 'vi', 'editornotfound',
_(" Can't find editor '%s' in PATH\n"
" (specify a commit editor in your configuration"
- " file)\n"), not cmdpath and editor)
+ " file)\n"), not cmdpath and editorbin)
if not cmdpath and editor != 'vi':
problems += 1
@@ -1405,7 +1409,7 @@
return h
def printrecords(version):
- ui.write(('* version %s records\n') % version)
+ ui.write(('* version %d records\n') % version)
if version == 1:
records = v1records
else:
@@ -1692,6 +1696,25 @@
ui.write('\n'.join(repo.pathto(p, cwd) for p in sorted(files)))
ui.write('\n')
+@command('debugpeer', [], _('PATH'), norepo=True)
+def debugpeer(ui, path):
+ """establish a connection to a peer repository"""
+ # Always enable peer request logging. Requires --debug to display
+ # though.
+ overrides = {
+ ('devel', 'debug.peer-request'): True,
+ }
+
+ with ui.configoverride(overrides):
+ peer = hg.peer(ui, {}, path)
+
+ local = peer.local() is not None
+ canpush = peer.canpush()
+
+ ui.write(_('url: %s\n') % peer.url())
+ ui.write(_('local: %s\n') % (_('yes') if local else _('no')))
+ ui.write(_('pushable: %s\n') % (_('yes') if canpush else _('no')))
+
@command('debugpickmergetool',
[('r', 'rev', '', _('check for files in this revision'), _('REV')),
('', 'changedelete', None, _('emulate merging change and delete')),
@@ -2206,7 +2229,7 @@
if not opts['show_revs']:
return
for c in revs:
- ui.write("%s\n" % c)
+ ui.write("%d\n" % c)
@command('debugsetparents', [], _('REV1 [REV2]'))
def debugsetparents(ui, repo, rev1, rev2=None):
@@ -2336,7 +2359,7 @@
"""
# passed to successorssets caching computation from one call to another
cache = {}
- ctx2str = str
+ ctx2str = bytes
node2str = short
for rev in scmutil.revrange(repo, revs):
ctx = repo[rev]
@@ -2396,7 +2419,7 @@
t = formatter.maketemplater(ui, tmpl, resources=tres)
ui.write(t.render(props))
else:
- displayer = cmdutil.makelogtemplater(ui, repo, tmpl)
+ displayer = logcmdutil.maketemplater(ui, repo, tmpl)
for r in revs:
displayer.show(repo[r], **pycompat.strkwargs(props))
displayer.close()
--- a/mercurial/default.d/mergetools.rc Fri Feb 23 17:57:04 2018 -0800
+++ b/mercurial/default.d/mergetools.rc Sat Feb 24 17:49:10 2018 -0600
@@ -1,7 +1,7 @@
# Some default global settings for common merge tools
[merge-tools]
-kdiff3.args=--auto --L1 base --L2 local --L3 other $base $local $other -o $output
+kdiff3.args=--auto --L1 $labelbase --L2 $labellocal --L3 $labelother $base $local $other -o $output
kdiff3.regkey=Software\KDiff3
kdiff3.regkeyalt=Software\Wow6432Node\KDiff3
kdiff3.regappend=\kdiff3.exe
@@ -26,7 +26,7 @@
gpyfm.gui=True
meld.gui=True
-meld.args=--label='local' $local --label='merged' $base --label='other' $other -o $output
+meld.args=--label=$labellocal $local --label='merged' $base --label=$labelother $other -o $output
meld.check=changed
meld.diffargs=-a --label=$plabel1 $parent --label=$clabel $child
@@ -35,7 +35,7 @@
tkdiff.priority=-8
tkdiff.diffargs=-L $plabel1 $parent -L $clabel $child
-xxdiff.args=--show-merged-pane --exit-with-merge-status --title1 local --title2 base --title3 other --merged-filename $output --merge $local $base $other
+xxdiff.args=--show-merged-pane --exit-with-merge-status --title1 $labellocal --title2 $labelbase --title3 $labelother --merged-filename $output --merge $local $base $other
xxdiff.gui=True
xxdiff.priority=-8
xxdiff.diffargs=--title1 $plabel1 $parent --title2 $clabel $child
@@ -44,7 +44,7 @@
diffmerge.regkeyalt=Software\Wow6432Node\SourceGear\SourceGear DiffMerge\
diffmerge.regname=Location
diffmerge.priority=-7
-diffmerge.args=-nosplash -merge -title1=local -title2=merged -title3=other $local $base $other -result=$output
+diffmerge.args=-nosplash -merge -title1=$labellocal -title2=merged -title3=$labelother $local $base $other -result=$output
diffmerge.check=changed
diffmerge.gui=True
diffmerge.diffargs=--nosplash --title1=$plabel1 --title2=$clabel $parent $child
@@ -72,7 +72,7 @@
tortoisemerge.priority=-8
tortoisemerge.diffargs=/base:$parent /mine:$child /basename:$plabel1 /minename:$clabel
-ecmerge.args=$base $local $other --mode=merge3 --title0=base --title1=local --title2=other --to=$output
+ecmerge.args=$base $local $other --mode=merge3 --title0=$labelbase --title1=$labellocal --title2=$labelother --to=$output
ecmerge.regkey=Software\Elli\xc3\xa9 Computing\Merge
ecmerge.regkeyalt=Software\Wow6432Node\Elli\xc3\xa9 Computing\Merge
ecmerge.gui=True
@@ -93,7 +93,7 @@
filemergexcode.gui=True
; Windows version of Beyond Compare
-beyondcompare3.args=$local $other $base $output /ro /lefttitle=local /centertitle=base /righttitle=other /automerge /reviewconflicts /solo
+beyondcompare3.args=$local $other $base $output /ro /lefttitle=$labellocal /centertitle=$labelbase /righttitle=$labelother /automerge /reviewconflicts /solo
beyondcompare3.regkey=Software\Scooter Software\Beyond Compare 3
beyondcompare3.regname=ExePath
beyondcompare3.gui=True
@@ -113,7 +113,7 @@
bcomposx.priority=-1
bcomposx.diffargs=-lro -lefttitle=$plabel1 -righttitle=$clabel -solo -expandall $parent $child
-winmerge.args=/e /x /wl /ub /dl other /dr local $other $local $output
+winmerge.args=/e /x /wl /ub /dl $labelother /dr $labellocal $other $local $output
winmerge.regkey=Software\Thingamahoochie\WinMerge
winmerge.regkeyalt=Software\Wow6432Node\Thingamahoochie\WinMerge\
winmerge.regname=Executable
--- a/mercurial/dirstate.py Fri Feb 23 17:57:04 2018 -0800
+++ b/mercurial/dirstate.py Sat Feb 24 17:49:10 2018 -0600
@@ -99,27 +99,6 @@
# normally, so we don't have a try/finally here on purpose.
self._parentwriters -= 1
- def beginparentchange(self):
- '''Marks the beginning of a set of changes that involve changing
- the dirstate parents. If there is an exception during this time,
- the dirstate will not be written when the wlock is released. This
- prevents writing an incoherent dirstate where the parent doesn't
- match the contents.
- '''
- self._ui.deprecwarn('beginparentchange is obsoleted by the '
- 'parentchange context manager.', '4.3')
- self._parentwriters += 1
-
- def endparentchange(self):
- '''Marks the end of a set of changes that involve changing the
- dirstate parents. Once all parent changes have been marked done,
- the wlock will be free to write the dirstate on release.
- '''
- self._ui.deprecwarn('endparentchange is obsoleted by the '
- 'parentchange context manager.', '4.3')
- if self._parentwriters > 0:
- self._parentwriters -= 1
-
def pendingparentchange(self):
'''Returns true if the dirstate is in the middle of a set of changes
that modify the dirstate parent.
@@ -360,7 +339,7 @@
rereads the dirstate. Use localrepo.invalidatedirstate() if you want to
check whether the dirstate has changed before rereading it.'''
- for a in ("_map", "_branch", "_ignore"):
+ for a in (r"_map", r"_branch", r"_ignore"):
if a in self.__dict__:
delattr(self, a)
self._lastnormaltime = 0
@@ -808,6 +787,17 @@
else:
badfn(ff, encoding.strtolocal(inst.strerror))
+ # match.files() may contain explicitly-specified paths that shouldn't
+ # be taken; drop them from the list of files found. dirsfound/notfound
+ # aren't filtered here because they will be tested later.
+ if match.anypats():
+ for f in list(results):
+ if f == '.hg' or f in subrepos:
+ # keep sentinel to disable further out-of-repo walks
+ continue
+ if not match(f):
+ del results[f]
+
# Case insensitive filesystems cannot rely on lstat() failing to detect
# a case-only rename. Prune the stat object for any file that does not
# match the case in the filesystem, if there are multiple files that
@@ -1237,9 +1227,12 @@
util.clearcachedproperty(self, "nonnormalset")
util.clearcachedproperty(self, "otherparentset")
- def iteritems(self):
+ def items(self):
return self._map.iteritems()
+ # forward for python2,3 compat
+ iteritems = items
+
def __len__(self):
return len(self._map)
@@ -1264,9 +1257,9 @@
def addfile(self, f, oldstate, state, mode, size, mtime):
"""Add a tracked file to the dirstate."""
- if oldstate in "?r" and "_dirs" in self.__dict__:
+ if oldstate in "?r" and r"_dirs" in self.__dict__:
self._dirs.addpath(f)
- if oldstate == "?" and "_alldirs" in self.__dict__:
+ if oldstate == "?" and r"_alldirs" in self.__dict__:
self._alldirs.addpath(f)
self._map[f] = dirstatetuple(state, mode, size, mtime)
if state != 'n' or mtime == -1:
@@ -1282,11 +1275,11 @@
the file's previous state. In the future, we should refactor this
to be more explicit about what that state is.
"""
- if oldstate not in "?r" and "_dirs" in self.__dict__:
+ if oldstate not in "?r" and r"_dirs" in self.__dict__:
self._dirs.delpath(f)
- if oldstate == "?" and "_alldirs" in self.__dict__:
+ if oldstate == "?" and r"_alldirs" in self.__dict__:
self._alldirs.addpath(f)
- if "filefoldmap" in self.__dict__:
+ if r"filefoldmap" in self.__dict__:
normed = util.normcase(f)
self.filefoldmap.pop(normed, None)
self._map[f] = dirstatetuple('r', 0, size, 0)
@@ -1299,11 +1292,11 @@
"""
exists = self._map.pop(f, None) is not None
if exists:
- if oldstate != "r" and "_dirs" in self.__dict__:
+ if oldstate != "r" and r"_dirs" in self.__dict__:
self._dirs.delpath(f)
- if "_alldirs" in self.__dict__:
+ if r"_alldirs" in self.__dict__:
self._alldirs.delpath(f)
- if "filefoldmap" in self.__dict__:
+ if r"filefoldmap" in self.__dict__:
normed = util.normcase(f)
self.filefoldmap.pop(normed, None)
self.nonnormalset.discard(f)
--- a/mercurial/discovery.py Fri Feb 23 17:57:04 2018 -0800
+++ b/mercurial/discovery.py Sat Feb 24 17:49:10 2018 -0600
@@ -53,16 +53,11 @@
return treediscovery.findcommonincoming(repo, remote, heads, force)
if heads:
- allknown = True
knownnode = repo.changelog.hasnode # no nodemap until it is filtered
- for h in heads:
- if not knownnode(h):
- allknown = False
- break
- if allknown:
+ if all(knownnode(h) for h in heads):
return (heads, False, heads)
- res = setdiscovery.findcommonheads(repo.ui, repo, remote,
+ res = setdiscovery.findcommonheads(repo.ui, repo, remote, heads,
abortwhenunrelated=not force,
ancestorsof=ancestorsof)
common, anyinc, srvheads = res
--- a/mercurial/dispatch.py Fri Feb 23 17:57:04 2018 -0800
+++ b/mercurial/dispatch.py Sat Feb 24 17:49:10 2018 -0600
@@ -477,7 +477,8 @@
if earlyopts:
self.badalias = (_("error in definition for alias '%s': %s may "
"only be given on the command line")
- % (self.name, '/'.join(zip(*earlyopts)[0])))
+ % (self.name, '/'.join(pycompat.ziplist(*earlyopts)
+ [0])))
return
self.cmdname = cmd = args.pop(0)
self.givenargs = args
@@ -821,9 +822,7 @@
if options['verbose'] or options['debug'] or options['quiet']:
for opt in ('verbose', 'debug', 'quiet'):
- val = str(bool(options[opt]))
- if pycompat.ispy3:
- val = val.encode('ascii')
+ val = pycompat.bytestr(bool(options[opt]))
for ui_ in uis:
ui_.setconfig('ui', opt, val, '--' + opt)
@@ -941,9 +940,9 @@
worst = None, ct, ''
if ui.config('ui', 'supportcontact') is None:
for name, mod in extensions.extensions():
- testedwith = getattr(mod, 'testedwith', '')
- if pycompat.ispy3 and isinstance(testedwith, str):
- testedwith = testedwith.encode(u'utf-8')
+ # 'testedwith' should be bytes, but not all extensions are ported
+ # to py3 and we don't want UnicodeException because of that.
+ testedwith = util.forcebytestr(getattr(mod, 'testedwith', ''))
report = getattr(mod, 'buglink', _('the extension author.'))
if not testedwith.strip():
# We found an untested extension. It's likely the culprit.
@@ -978,11 +977,7 @@
bugtracker = _("https://mercurial-scm.org/wiki/BugTracker")
warning = (_("** unknown exception encountered, "
"please report by visiting\n** ") + bugtracker + '\n')
- if pycompat.ispy3:
- sysversion = sys.version.encode(u'utf-8')
- else:
- sysversion = sys.version
- sysversion = sysversion.replace('\n', '')
+ sysversion = pycompat.sysbytes(sys.version).replace('\n', '')
warning += ((_("** Python %s\n") % sysversion) +
(_("** Mercurial Distributed SCM (version %s)\n") %
util.version()) +
@@ -997,6 +992,7 @@
this function returns False, ignored otherwise.
"""
warning = _exceptionwarning(ui)
- ui.log("commandexception", "%s\n%s\n", warning, traceback.format_exc())
+ ui.log("commandexception", "%s\n%s\n", warning,
+ pycompat.sysbytes(traceback.format_exc()))
ui.warn(warning)
return False # re-raise the exception
--- a/mercurial/extensions.py Fri Feb 23 17:57:04 2018 -0800
+++ b/mercurial/extensions.py Sat Feb 24 17:49:10 2018 -0600
@@ -122,6 +122,18 @@
if ui.debugflag:
ui.traceback()
+def _rejectunicode(name, xs):
+ if isinstance(xs, (list, set, tuple)):
+ for x in xs:
+ _rejectunicode(name, x)
+ elif isinstance(xs, dict):
+ for k, v in xs.items():
+ _rejectunicode(name, k)
+ _rejectunicode(b'%s.%s' % (name, util.forcebytestr(k)), v)
+ elif isinstance(xs, type(u'')):
+ raise error.ProgrammingError(b"unicode %r found in %s" % (xs, name),
+ hint="use b'' to make it byte string")
+
# attributes set by registrar.command
_cmdfuncattrs = ('norepo', 'optionalrepo', 'inferrepo')
@@ -134,19 +146,22 @@
"registrar.command to register '%s'" % c, '4.6')
missing = [a for a in _cmdfuncattrs if not util.safehasattr(f, a)]
if not missing:
- for option in e[1]:
- default = option[2]
- if isinstance(default, type(u'')):
- raise error.ProgrammingError(
- "option '%s.%s' has a unicode default value"
- % (c, option[1]),
- hint=("change the %s.%s default value to a "
- "non-unicode string" % (c, option[1])))
continue
raise error.ProgrammingError(
'missing attributes: %s' % ', '.join(missing),
hint="use @command decorator to register '%s'" % c)
+def _validatetables(ui, mod):
+ """Sanity check for loadable tables provided by extension module"""
+ for t in ['cmdtable', 'colortable', 'configtable']:
+ _rejectunicode(t, getattr(mod, t, {}))
+ for t in ['filesetpredicate', 'internalmerge', 'revsetpredicate',
+ 'templatefilter', 'templatefunc', 'templatekeyword']:
+ o = getattr(mod, t, None)
+ if o:
+ _rejectunicode(t, o._table)
+ _validatecmdtable(ui, getattr(mod, 'cmdtable', {}))
+
def load(ui, name, path):
if name.startswith('hgext.') or name.startswith('hgext/'):
shortname = name[6:]
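A small sketch of what the new table check trips on, calling the private helper directly purely for illustration (the table contents here are hypothetical):

    from mercurial import error, extensions

    cmdtable = {b'mycmd': ([(b'', b'flag', u'oops', b'help')], b'hg mycmd')}
    try:
        extensions._rejectunicode(b'cmdtable', cmdtable)
    except error.ProgrammingError:
        # raised because the option default u'oops' is unicode; the hint
        # tells the extension author to use a b'' literal instead
        pass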
@@ -168,7 +183,7 @@
ui.warn(_('(third party extension %s requires version %s or newer '
'of Mercurial; disabling)\n') % (shortname, minver))
return
- _validatecmdtable(ui, getattr(mod, 'cmdtable', {}))
+ _validatetables(ui, mod)
_extensions[shortname] = mod
_order.append(shortname)
@@ -195,11 +210,7 @@
try:
extsetup(ui)
except TypeError:
- # Try to use getfullargspec (Python 3) first, and fall
- # back to getargspec only if it doesn't exist so as to
- # avoid warnings.
- if getattr(inspect, 'getfullargspec',
- getattr(inspect, 'getargspec'))(extsetup).args:
+ if pycompat.getargspec(extsetup).args:
raise
extsetup() # old extsetup with no ui argument
except Exception as inst:
--- a/mercurial/fancyopts.py Fri Feb 23 17:57:04 2018 -0800
+++ b/mercurial/fancyopts.py Sat Feb 24 17:49:10 2018 -0600
@@ -7,6 +7,7 @@
from __future__ import absolute_import
+import abc
import functools
from .i18n import _
@@ -201,6 +202,64 @@
parsedargs.extend(args[pos:])
return parsedopts, parsedargs
+class customopt(object):
+ """Manage defaults and mutations for any type of opt."""
+
+ __metaclass__ = abc.ABCMeta
+
+ def __init__(self, defaultvalue):
+ self.defaultvalue = defaultvalue
+
+ def _isboolopt(self):
+ return False
+
+ @abc.abstractmethod
+ def newstate(self, oldstate, newparam, abort):
+ """Adds newparam to oldstate and returns the new state.
+
+ On failure, abort can be called with a string error message."""
+
+class _simpleopt(customopt):
+ def _isboolopt(self):
+ return isinstance(self.defaultvalue, (bool, type(None)))
+
+ def newstate(self, oldstate, newparam, abort):
+ return newparam
+
+class _callableopt(customopt):
+ def __init__(self, callablefn):
+ self.callablefn = callablefn
+ super(_callableopt, self).__init__(None)
+
+ def newstate(self, oldstate, newparam, abort):
+ return self.callablefn(newparam)
+
+class _listopt(customopt):
+ def newstate(self, oldstate, newparam, abort):
+ oldstate.append(newparam)
+ return oldstate
+
+class _intopt(customopt):
+ def newstate(self, oldstate, newparam, abort):
+ try:
+ return int(newparam)
+ except ValueError:
+ abort(_('expected int'))
+
+def _defaultopt(default):
+ """Returns a default opt implementation, given a default value."""
+
+ if isinstance(default, customopt):
+ return default
+ elif callable(default):
+ return _callableopt(default)
+ elif isinstance(default, list):
+ return _listopt(default[:])
+ elif type(default) is type(1):
+ return _intopt(default)
+ else:
+ return _simpleopt(default)
+
def fancyopts(args, options, state, gnu=False, early=False, optaliases=None):
"""
read args, parse options, and store options in state
@@ -220,6 +279,7 @@
list - parameter string is added to a list
integer - parameter strings is stored as int
function - call function with parameter
+ customopt - subclass of 'customopt'
optaliases is a mapping from a canonical option name to a list of
additional long options. This exists for preserving backward compatibility
@@ -250,18 +310,13 @@
argmap['-' + short] = name
for n in onames:
argmap['--' + n] = name
- defmap[name] = default
+ defmap[name] = _defaultopt(default)
# copy defaults to state
- if isinstance(default, list):
- state[name] = default[:]
- elif callable(default):
- state[name] = None
- else:
- state[name] = default
+ state[name] = defmap[name].defaultvalue
# does it take a parameter?
- if not (default is None or default is True or default is False):
+ if not defmap[name]._isboolopt():
if short:
short += ':'
onames = [n + '=' for n in onames]
@@ -301,21 +356,13 @@
boolval = False
name = argmap[opt]
obj = defmap[name]
- t = type(obj)
- if callable(obj):
- state[name] = defmap[name](val)
- elif t is type(1):
- try:
- state[name] = int(val)
- except ValueError:
- raise error.Abort(_('invalid value %r for option %s, '
- 'expected int') % (val, opt))
- elif t is type(''):
- state[name] = val
- elif t is type([]):
- state[name].append(val)
- elif t is type(None) or t is type(False):
+ if obj._isboolopt():
state[name] = boolval
+ else:
+ def abort(s):
+ raise error.Abort(
+ _('invalid value %r for option %s, %s') % (val, opt, s))
+ state[name] = defmap[name].newstate(state[name], val, abort)
# return unparsed args
return args
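
Editor's note: the new ``customopt`` hierarchy replaces the old type dispatch in
``fancyopts()``; each option default is normalized into an object whose
``newstate()`` folds every occurrence of the flag into the accumulated state.
Below is a hand-rolled illustration of that contract, exercised directly rather
than through ``fancyopts()``. The base class is repeated so the sketch is
self-contained, and the comma-splitting subclass plus sample values are
hypothetical::

    class customopt(object):
        def __init__(self, defaultvalue):
            self.defaultvalue = defaultvalue
        def _isboolopt(self):
            return False
        def newstate(self, oldstate, newparam, abort):
            raise NotImplementedError

    class commalistopt(customopt):
        """Accumulate comma-separated values across repeated flags."""
        def newstate(self, oldstate, newparam, abort):
            return oldstate + newparam.split(',')

    opt = commalistopt([])
    state = opt.defaultvalue          # copied into the parser state up front
    for param in ('a,b', 'c'):        # as if --tag a,b --tag c were parsed
        state = opt.newstate(state, param, abort=None)
    print(state)                      # ['a', 'b', 'c']
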
--- a/mercurial/filemerge.py Fri Feb 23 17:57:04 2018 -0800
+++ b/mercurial/filemerge.py Sat Feb 24 17:49:10 2018 -0600
@@ -513,6 +513,11 @@
b, c = _maketempfiles(repo, fco, fca)
try:
out = ""
+ mylabel, otherlabel = labels[:2]
+ if len(labels) >= 3:
+ baselabel = labels[2]
+ else:
+ baselabel = 'base'
env = {'HG_FILE': fcd.path(),
'HG_MY_NODE': short(mynode),
'HG_OTHER_NODE': str(fco.changectx()),
@@ -520,6 +525,9 @@
'HG_MY_ISLINK': 'l' in fcd.flags(),
'HG_OTHER_ISLINK': 'l' in fco.flags(),
'HG_BASE_ISLINK': 'l' in fca.flags(),
+ 'HG_MY_LABEL': mylabel,
+ 'HG_OTHER_LABEL': otherlabel,
+ 'HG_BASE_LABEL': baselabel,
}
ui = repo.ui
@@ -528,8 +536,10 @@
# read input from backup, write to original
out = a
a = repo.wvfs.join(back.path())
- replace = {'local': a, 'base': b, 'other': c, 'output': out}
- args = util.interpolate(r'\$', replace, args,
+ replace = {'local': a, 'base': b, 'other': c, 'output': out,
+ 'labellocal': mylabel, 'labelother': otherlabel,
+ 'labelbase': baselabel}
+ args = util.interpolate(br'\$', replace, args,
lambda s: util.shellquote(util.localpath(s)))
cmd = toolpath + ' ' + args
if _toolbool(ui, tool, "gui"):
@@ -566,7 +576,7 @@
_defaultconflictlabels = ['local', 'other']
-def _formatlabels(repo, fcd, fco, fca, labels):
+def _formatlabels(repo, fcd, fco, fca, labels, tool=None):
"""Formats the given labels using the conflict marker template.
Returns a list of formatted labels.
@@ -577,6 +587,8 @@
ui = repo.ui
template = ui.config('ui', 'mergemarkertemplate')
+ if tool is not None:
+ template = _toolstr(ui, tool, 'mergemarkertemplate', template)
template = templater.unquotestring(template)
tres = formatter.templateresources(ui, repo)
tmpl = formatter.maketemplater(ui, template, defaults=templatekw.keywords,
@@ -706,6 +718,7 @@
mergetype = func.mergetype
onfailure = func.onfailure
precheck = func.precheck
+ isexternal = False
else:
if wctx.isinmemory():
func = _xmergeimm
@@ -714,6 +727,7 @@
mergetype = fullmerge
onfailure = _("merging %s failed!\n")
precheck = None
+ isexternal = True
toolconf = tool, toolpath, binary, symlink
@@ -743,19 +757,42 @@
files = (None, None, None, back)
r = 1
try:
- markerstyle = ui.config('ui', 'mergemarkers')
+ internalmarkerstyle = ui.config('ui', 'mergemarkers')
+ if isexternal:
+ markerstyle = _toolstr(ui, tool, 'mergemarkers')
+ else:
+ markerstyle = internalmarkerstyle
+
if not labels:
labels = _defaultconflictlabels
+ formattedlabels = labels
if markerstyle != 'basic':
- labels = _formatlabels(repo, fcd, fco, fca, labels)
+ formattedlabels = _formatlabels(repo, fcd, fco, fca, labels,
+ tool=tool)
if premerge and mergetype == fullmerge:
- r = _premerge(repo, fcd, fco, fca, toolconf, files, labels=labels)
+ # conflict markers generated by premerge will use 'detailed'
+ # settings if either ui.mergemarkers or the tool's mergemarkers
+ # setting is 'detailed'. This way tools can have basic labels in
+ # space-constrained areas of the UI, but still get full information
+ # in conflict markers if premerge is 'keep' or 'keep-merge3'.
+ premergelabels = labels
+ labeltool = None
+ if markerstyle != 'basic':
+ # respect 'tool's mergemarkertemplate (which defaults to
+ # ui.mergemarkertemplate)
+ labeltool = tool
+ if internalmarkerstyle != 'basic' or markerstyle != 'basic':
+ premergelabels = _formatlabels(repo, fcd, fco, fca,
+ premergelabels, tool=labeltool)
+
+ r = _premerge(repo, fcd, fco, fca, toolconf, files,
+ labels=premergelabels)
# complete if premerge successful (r is 0)
return not r, r, False
needcheck, r, deleted = func(repo, mynode, orig, fcd, fco, fca,
- toolconf, files, labels=labels)
+ toolconf, files, labels=formattedlabels)
if needcheck:
r = _check(repo, r, ui, tool, fcd, files)
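
Editor's note: ``_xmerge`` now exports the conflict labels both as environment
variables (``HG_MY_LABEL`` and friends) and as ``$labellocal``/``$labelother``/
``$labelbase`` substitutions in the tool's ``args``, using the same ``$name``
replacement as the existing ``$local``/``$base``/``$other`` variables. A rough
standalone sketch of that substitution step follows; the regex helper and the
sample command line are illustrative, not the real ``util.interpolate``::

    import re

    def interpolate(prefix, mapping, s, fn=lambda v: v):
        patterns = '|'.join(re.escape(k) for k in mapping)
        r = re.compile(r'%s(%s)' % (prefix, patterns))
        return r.sub(lambda m: fn(mapping[m.group(1)]), s)

    replace = {'local': '/tmp/a', 'base': '/tmp/b', 'other': '/tmp/c',
               'output': '/tmp/out', 'labellocal': 'working copy',
               'labelother': 'merge rev', 'labelbase': 'base'}
    args = '$local $base $other -o $output --label "$labellocal"'
    print(interpolate(r'\$', replace, args))
    # /tmp/a /tmp/b /tmp/c -o /tmp/out --label "working copy"
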
--- a/mercurial/formatter.py Fri Feb 23 17:57:04 2018 -0800
+++ b/mercurial/formatter.py Sat Feb 24 17:49:10 2018 -0600
@@ -291,7 +291,7 @@
self._out = out
self._out.write("%s = [\n" % self._topic)
def _showitem(self):
- self._out.write(" " + repr(self._item) + ",\n")
+ self._out.write(' %s,\n' % pycompat.byterepr(self._item))
def end(self):
baseformatter.end(self)
self._out.write("]\n")
--- a/mercurial/graphmod.py Fri Feb 23 17:57:04 2018 -0800
+++ b/mercurial/graphmod.py Sat Feb 24 17:49:10 2018 -0600
@@ -454,7 +454,7 @@
if any(len(char) > 1 for char in edgemap.values()):
# limit drawing an edge to the first or last N lines of the current
# section the rest of the edge is drawn like a parent line.
- parent = state['styles'][PARENT][-1]
+ parent = state['styles'][PARENT][-1:]
def _drawgp(char, i):
# should a grandparent character be drawn for this line?
if len(char) < 2:
@@ -463,7 +463,7 @@
# either skip first num lines or take last num lines, based on sign
return -num <= i if num < 0 else (len(lines) - i) <= num
for i, line in enumerate(lines):
- line[:] = [c[-1] if _drawgp(c, i) else parent for c in line]
+ line[:] = [c[-1:] if _drawgp(c, i) else parent for c in line]
edgemap.update(
(e, (c if len(c) < 2 else parent)) for e, c in edgemap.items())
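
Editor's note: the ``[-1]`` to ``[-1:]`` changes above are Python 3 porting
fixes. Indexing a bytes object returns an integer on Python 3, while slicing
returns a one-byte bytes object, which is what the edge-drawing code expects::

    # Why graphmod now uses a slice instead of an index:
    style = b'|'
    print(style[-1])    # Python 3: 124 (an int); Python 2: '|'
    print(style[-1:])   # b'|' on both versions, still usable as a character
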
--- a/mercurial/help.py Fri Feb 23 17:57:04 2018 -0800
+++ b/mercurial/help.py Sat Feb 24 17:49:10 2018 -0600
@@ -62,7 +62,8 @@
rst = loaddoc('extensions')(ui).splitlines(True)
rst.extend(listexts(
_('enabled extensions:'), extensions.enabled(), showdeprecated=True))
- rst.extend(listexts(_('disabled extensions:'), extensions.disabled()))
+ rst.extend(listexts(_('disabled extensions:'), extensions.disabled(),
+ showdeprecated=ui.verbose))
doc = ''.join(rst)
return doc
@@ -149,7 +150,7 @@
doclines = docs.splitlines()
if doclines:
summary = doclines[0]
- cmdname = cmd.partition('|')[0].lstrip('^')
+ cmdname = cmdutil.parsealiases(cmd)[0]
if filtercmd(ui, cmdname, kw, docs):
continue
results['commands'].append((cmdname, summary))
@@ -169,7 +170,7 @@
continue
for cmd, entry in getattr(mod, 'cmdtable', {}).iteritems():
if kw in cmd or (len(entry) > 2 and lowercontains(entry[2])):
- cmdname = cmd.partition('|')[0].lstrip('^')
+ cmdname = cmdutil.parsealiases(cmd)[0]
cmddoc = pycompat.getdoc(entry[0])
if cmddoc:
cmddoc = gettext(cmddoc).splitlines()[0]
@@ -327,7 +328,7 @@
# py3k fix: except vars can't be used outside the scope of the
# except block, nor can be used inside a lambda. python issue4617
prefix = inst.args[0]
- select = lambda c: c.lstrip('^').startswith(prefix)
+ select = lambda c: cmdutil.parsealiases(c)[0].startswith(prefix)
rst = helplist(select)
return rst
@@ -418,15 +419,18 @@
h = {}
cmds = {}
for c, e in commands.table.iteritems():
- f = c.partition("|")[0]
- if select and not select(f):
+ fs = cmdutil.parsealiases(c)
+ f = fs[0]
+ p = ''
+ if c.startswith("^"):
+ p = '^'
+ if select and not select(p + f):
continue
if (not select and name != 'shortlist' and
e[0].__module__ != commands.__name__):
continue
- if name == "shortlist" and not f.startswith("^"):
+ if name == "shortlist" and not p:
continue
- f = f.lstrip("^")
doc = pycompat.getdoc(e[0])
if filtercmd(ui, f, name, doc):
continue
@@ -434,7 +438,7 @@
if not doc:
doc = _("(no help text available)")
h[f] = doc.splitlines()[0].rstrip()
- cmds[f] = c.lstrip("^")
+ cmds[f] = '|'.join(fs)
rst = []
if not h:
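
Editor's note: the help code stops parsing command-table keys by hand
(``c.partition('|')[0].lstrip('^')``) and delegates to
``cmdutil.parsealiases()``, which splits a key such as ``"commit|ci"`` into its
aliases; the shortlist marker ``^`` is now inspected separately. A toy
illustration of the key format, using a simplified stand-in for the real
helper::

    def parsealiases(cmd):
        # simplified stand-in for cmdutil.parsealiases()
        return cmd.lstrip('^').split('|')

    key = '^commit|ci'
    aliases = parsealiases(key)
    print(aliases[0])           # 'commit'    -- canonical name
    print('|'.join(aliases))    # 'commit|ci' -- shown in the command list
    print(key.startswith('^'))  # True        -- belongs on the shortlist
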
--- a/mercurial/help/config.txt Fri Feb 23 17:57:04 2018 -0800
+++ b/mercurial/help/config.txt Sat Feb 24 17:49:10 2018 -0600
@@ -1363,13 +1363,18 @@
``args``
The arguments to pass to the tool executable. You can refer to the
files being merged as well as the output file through these
- variables: ``$base``, ``$local``, ``$other``, ``$output``. The meaning
- of ``$local`` and ``$other`` can vary depending on which action is being
- performed. During and update or merge, ``$local`` represents the original
- state of the file, while ``$other`` represents the commit you are updating
- to or the commit you are merging with. During a rebase ``$local``
- represents the destination of the rebase, and ``$other`` represents the
- commit being rebased.
+ variables: ``$base``, ``$local``, ``$other``, ``$output``.
+
+ The meaning of ``$local`` and ``$other`` can vary depending on which action is
+ being performed. During an update or merge, ``$local`` represents the original
+ state of the file, while ``$other`` represents the commit you are updating to or
+ the commit you are merging with. During a rebase, ``$local`` represents the
+ destination of the rebase, and ``$other`` represents the commit being rebased.
+
+ Some operations define custom labels to assist with identifying the revisions,
+ accessible via ``$labellocal``, ``$labelother``, and ``$labelbase``. If custom
+ labels are not available, these will be ``local``, ``other``, and ``base``,
+ respectively.
(default: ``$local $base $other``)
``premerge``
@@ -1405,6 +1410,21 @@
``gui``
This tool requires a graphical interface to run. (default: False)
+``mergemarkers``
+ Controls whether the labels passed via ``$labellocal``, ``$labelother``, and
+ ``$labelbase`` are ``detailed`` (respecting ``mergemarkertemplate``) or
+ ``basic``. If ``premerge`` is ``keep`` or ``keep-merge3``, the conflict
+ markers generated during premerge will be ``detailed`` if either this option or
+ the corresponding option in the ``[ui]`` section is ``detailed``.
+ (default: ``basic``)
+
+``mergemarkertemplate``
+ This setting can be used to override ``mergemarkertemplate`` from the ``[ui]``
+ section on a per-tool basis; this applies to the ``$label``-prefixed variables
+ and to the conflict markers that are generated if ``premerge`` is ``keep`` or
+ ``keep-merge3``. See the corresponding variable in ``[ui]`` for more
+ information.
+
.. container:: windows
``regkey``
@@ -2120,6 +2140,8 @@
markers is different from the encoding of the merged files,
serious problems may occur.
+ Can be overridden per-merge-tool, see the ``[merge-tools]`` section.
+
``origbackuppath``
The path to a directory used to store generated .orig files. If the path is
not a directory, one will be created. If set, files stored in this
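
Editor's note: the per-tool ``mergemarkers``/``mergemarkertemplate`` settings
documented above layer on top of the ``[ui]`` values: ``_formatlabels`` starts
from ``ui.mergemarkertemplate`` and lets
``merge-tools.<tool>.mergemarkertemplate`` override it. A toy sketch of that
precedence, with a dict-backed config and made-up values::

    # Toy precedence check mirroring _formatlabels: the tool-specific
    # template, if configured, wins over [ui] mergemarkertemplate.
    config = {
        ('ui', 'mergemarkertemplate'): '{node|short} {desc|firstline}',
        ('merge-tools', 'mymergetool.mergemarkertemplate'): '{node|short}',
    }

    def markertemplate(tool=None):
        template = config[('ui', 'mergemarkertemplate')]
        if tool is not None:
            key = ('merge-tools', '%s.mergemarkertemplate' % tool)
            template = config.get(key, template)
        return template

    print(markertemplate())               # '{node|short} {desc|firstline}'
    print(markertemplate('mymergetool'))  # '{node|short}'
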
--- a/mercurial/help/internals/requirements.txt Fri Feb 23 17:57:04 2018 -0800
+++ b/mercurial/help/internals/requirements.txt Sat Feb 24 17:49:10 2018 -0600
@@ -1,4 +1,3 @@
-
Repositories contain a file (``.hg/requires``) containing a list of
features/capabilities that are *required* for clients to interface
with the repository. This file has been present in Mercurial since
@@ -105,8 +104,10 @@
Denotes that version 2 of manifests are being used.
Support for this requirement was added in Mercurial 3.4 (released
-May 2015). The requirement is currently experimental and is disabled
-by default.
+May 2015). The new format failed to meet expectations, and support for
+the format and its requirement was removed in Mercurial 4.6
+(released May 2018) since the feature never graduated from experimental
+status.
treemanifest
============
--- a/mercurial/help/internals/wireprotocol.txt Fri Feb 23 17:57:04 2018 -0800
+++ b/mercurial/help/internals/wireprotocol.txt Sat Feb 24 17:49:10 2018 -0600
@@ -10,11 +10,43 @@
The protocol is synchronous and does not support multiplexing (concurrent
commands).
-Transport Protocols
-===================
+Handshake
+=========
+
+It is required or common for clients to perform a *handshake* when connecting
+to a server. The handshake serves the following purposes:
+
+* Negotiating protocol/transport level options
+* Allows the client to learn about server capabilities to influence
+ future requests
+* Ensures the underlying transport channel is in a *clean* state
-HTTP Transport
---------------
+An important goal of the handshake is to allow clients to use more modern
+wire protocol features. By default, clients must assume they are talking
+to an old version of Mercurial server (possibly even the very first
+implementation). So, clients should not attempt to call or utilize modern
+wire protocol features until they have confirmation that the server
+supports them. The handshake implementation is designed to allow both
+ends to utilize the latest set of features and capabilities with as
+few round trips as possible.
+
+The handshake mechanism varies by transport and protocol and is documented
+in the sections below.
+
+HTTP Protocol
+=============
+
+Handshake
+---------
+
+The client sends a ``capabilities`` command request (``?cmd=capabilities``)
+as soon as HTTP requests may be issued.
+
+The server responds with a capabilities string, which the client parses to
+learn about the server's abilities.
+
+HTTP Version 1 Transport
+------------------------
Commands are issued as HTTP/1.0 or HTTP/1.1 requests. Commands are
sent to the base URL of the repository with the command name sent in
@@ -112,11 +144,175 @@
``application/mercurial-0.*`` media type and the HTTP response is typically
using *chunked transfer* (``Transfer-Encoding: chunked``).
-SSH Transport
-=============
+SSH Protocol
+============
+
+Handshake
+---------
+
+For all clients, the handshake consists of the client sending 1 or more
+commands to the server using version 1 of the transport. Servers respond
+to commands they know how to respond to and send an empty response (``0\n``)
+for unknown commands (per standard behavior of version 1 of the transport).
+Clients then typically look for a response to the newest sent command to
+determine which transport version to use and what the available features for
+the connection and server are.
+
+Preceding any response from client-issued commands, the server may print
+non-protocol output. It is common for SSH servers to print banners, message
+of the day announcements, etc when clients connect. It is assumed that any
+such *banner* output will precede any Mercurial server output. So clients
+must be prepared to handle server output on initial connect that isn't
+in response to any client-issued command and doesn't conform to Mercurial's
+wire protocol. This *banner* output should only be on stdout. However,
+some servers may send output on stderr.
+
+Pre 0.9.1 clients issue a ``between`` command with the ``pairs`` argument
+having the value
+``0000000000000000000000000000000000000000-0000000000000000000000000000000000000000``.
+
+The ``between`` command has been supported since the original Mercurial
+SSH server. Requesting the empty range will return a ``\n`` string response,
+which will be encoded as ``1\n\n`` (value length of ``1`` followed by a newline
+followed by the value, which happens to be a newline).
+
+For pre 0.9.1 clients and all servers, the exchange looks like::
+
+ c: between\n
+ c: pairs 81\n
+ c: 0000000000000000000000000000000000000000-0000000000000000000000000000000000000000
+ s: 1\n
+ s: \n
+
+0.9.1+ clients send a ``hello`` command (with no arguments) before the
+``between`` command. The response to this command allows clients to
+discover server capabilities and settings.
+
+An example exchange between 0.9.1+ clients and a ``hello`` aware server looks
+like::
+
+ c: hello\n
+ c: between\n
+ c: pairs 81\n
+ c: 0000000000000000000000000000000000000000-0000000000000000000000000000000000000000
+ s: 324\n
+ s: capabilities: lookup changegroupsubset branchmap pushkey known getbundle ...\n
+ s: 1\n
+ s: \n
+
+And a similar scenario but with servers sending a banner on connect::
+
+ c: hello\n
+ c: between\n
+ c: pairs 81\n
+ c: 0000000000000000000000000000000000000000-0000000000000000000000000000000000000000
+ s: welcome to the server\n
+ s: if you find any issues, email someone@somewhere.com\n
+ s: 324\n
+ s: capabilities: lookup changegroupsubset branchmap pushkey known getbundle ...\n
+ s: 1\n
+ s: \n
+
+Note that output from the ``hello`` command is terminated by a ``\n``. This is
+part of the response payload, not a newline added by the wire protocol
+after the response. In other words, the length of the response includes the
+trailing ``\n``.
+
+Clients supporting version 2 of the SSH transport send a line beginning
+with ``upgrade`` before the ``hello`` and ``between`` commands. The line
+(which isn't a well-formed command line because it doesn't consist of a
+single command name) serves to both communicate the client's intent to
+switch to transport version 2 (transports are version 1 by default) as
+well as to advertise the client's transport-level capabilities so the
+server may satisfy that request immediately.
+
+The upgrade line has the form:
-The SSH transport is a custom text-based protocol suitable for use over any
-bi-directional stream transport. It is most commonly used with SSH.
+ upgrade <token> <transport capabilities>
+
+That is the literal string ``upgrade`` followed by a space, followed by
+a randomly generated string, followed by a space, followed by a string
+denoting the client's transport capabilities.
+
+The token can be anything. However, a random UUID is recommended. (Use
+of version 4 UUIDs is recommended because version 1 UUIDs can leak the
+client's MAC address.)
+
+The transport capabilities string is a URL/percent encoded string
+containing key-value pairs defining the client's transport-level
+capabilities. The following capabilities are defined:
+
+proto
+ A comma-delimited list of transport protocol versions the client
+ supports. e.g. ``ssh-v2``.
+
+If the server does not recognize the ``upgrade`` line, it should issue
+an empty response and continue processing the ``hello`` and ``between``
+commands. Here is an example handshake between a version 2 aware client
+and a non version 2 aware server:
+
+ c: upgrade 2e82ab3f-9ce3-4b4e-8f8c-6fd1c0e9e23a proto=ssh-v2
+ c: hello\n
+ c: between\n
+ c: pairs 81\n
+ c: 0000000000000000000000000000000000000000-0000000000000000000000000000000000000000
+ s: 0\n
+ s: 324\n
+ s: capabilities: lookup changegroupsubset branchmap pushkey known getbundle ...\n
+ s: 1\n
+ s: \n
+
+(The initial ``0\n`` line from the server indicates an empty response to
+the unknown ``upgrade ..`` command/line.)
+
+If the server recognizes the ``upgrade`` line and is willing to satisfy that
+upgrade request, it replies with a payload of the following form:
+
+ upgraded <token> <transport name>\n
+
+This line is the literal string ``upgraded``, a space, the token that was
+specified by the client in its ``upgrade ...`` request line, a space, and the
+name of the transport protocol that was chosen by the server. The transport
+name MUST match one of the names the client specified in the ``proto`` field
+of its ``upgrade ...`` request line.
+
+If a server issues an ``upgraded`` response, it MUST also read and ignore
+the lines associated with the ``hello`` and ``between`` command requests
+that were issued by the client. It is assumed that the negotiated transport
+will respond with equivalent requested information following the transport
+handshake.
+
+All data following the ``\n`` terminating the ``upgraded`` line is the
+domain of the negotiated transport. It is common for the data immediately
+following to contain additional metadata about the state of the transport and
+the server. However, this isn't strictly speaking part of the transport
+handshake and isn't covered by this section.
+
+Here is an example handshake between a version 2 aware client and a version
+2 aware server:
+
+ c: upgrade 2e82ab3f-9ce3-4b4e-8f8c-6fd1c0e9e23a proto=ssh-v2
+ c: hello\n
+ c: between\n
+ c: pairs 81\n
+ c: 0000000000000000000000000000000000000000-0000000000000000000000000000000000000000
+ s: upgraded 2e82ab3f-9ce3-4b4e-8f8c-6fd1c0e9e23a ssh-v2\n
+ s: <additional transport specific data>
+
+The client-issued token that is echoed in the response provides a more
+resilient mechanism for differentiating *banner* output from Mercurial
+output. In version 1, properly formatted banner output could get confused
+for Mercurial server output. By submitting a randomly generated token
+that is then present in the response, the client can look for that token
+in response lines and have reasonable certainty that the line did not
+originate from a *banner* message.
+
+SSH Version 1 Transport
+-----------------------
+
+The SSH transport (version 1) is a custom text-based protocol suitable for
+use over any bi-directional stream transport. It is most commonly used with
+SSH.
A SSH transport server can be started with ``hg serve --stdio``. The stdin,
stderr, and stdout file descriptors of the started process are used to exchange
@@ -174,6 +370,31 @@
The server terminates if it receives an empty command (a ``\n`` character).
+SSH Version 2 Transport
+-----------------------
+
+**Experimental**
+
+Version 2 of the SSH transport behaves identically to version 1 of the SSH
+transport with the exception of handshake semantics. See above for how
+version 2 of the SSH transport is negotiated.
+
+Immediately following the ``upgraded`` line signaling a switch to version
+2 of the SSH protocol, the server automatically sends additional details
+about the capabilities of the remote server. This has the form:
+
+ <integer length of value>\n
+ capabilities: ...\n
+
+e.g.
+
+ s: upgraded 2e82ab3f-9ce3-4b4e-8f8c-6fd1c0e9e23a ssh-v2\n
+ s: 240\n
+ s: capabilities: known getbundle batch ...\n
+
+Following capabilities advertisement, the peers communicate using version
+1 of the SSH transport.
+
Capabilities
============
@@ -463,53 +684,6 @@
reflects the priority/preference of that type, where the first value is the
most preferred type.
-Handshake Protocol
-==================
-
-While not explicitly required, it is common for clients to perform a
-*handshake* when connecting to a server. The handshake accomplishes 2 things:
-
-* Obtaining capabilities and other server features
-* Flushing extra server output (e.g. SSH servers may print extra text
- when connecting that may confuse the wire protocol)
-
-This isn't a traditional *handshake* as far as network protocols go because
-there is no persistent state as a result of the handshake: the handshake is
-simply the issuing of commands and commands are stateless.
-
-The canonical clients perform a capabilities lookup at connection establishment
-time. This is because clients must assume a server only supports the features
-of the original Mercurial server implementation until proven otherwise (from
-advertised capabilities). Nearly every server running today supports features
-that weren't present in the original Mercurial server implementation. Rather
-than wait for a client to perform functionality that needs to consult
-capabilities, it issues the lookup at connection start to avoid any delay later.
-
-For HTTP servers, the client sends a ``capabilities`` command request as
-soon as the connection is established. The server responds with a capabilities
-string, which the client parses.
-
-For SSH servers, the client sends the ``hello`` command (no arguments)
-and a ``between`` command with the ``pairs`` argument having the value
-``0000000000000000000000000000000000000000-0000000000000000000000000000000000000000``.
-
-The ``between`` command has been supported since the original Mercurial
-server. Requesting the empty range will return a ``\n`` string response,
-which will be encoded as ``1\n\n`` (value length of ``1`` followed by a newline
-followed by the value, which happens to be a newline).
-
-The ``hello`` command was later introduced. Servers supporting it will issue
-a response to that command before sending the ``1\n\n`` response to the
-``between`` command. Servers not supporting ``hello`` will send an empty
-response (``0\n``).
-
-In addition to the expected output from the ``hello`` and ``between`` commands,
-servers may also send other output, such as *message of the day (MOTD)*
-announcements. Clients assume servers will send this output before the
-Mercurial server replies to the client-issued commands. So any server output
-not conforming to the expected command responses is assumed to be not related
-to Mercurial and can be ignored.
-
Content Negotiation
===================
@@ -519,8 +693,8 @@
well-defined response type and only certain commands needed to support
functionality like compression.
-Currently, only the HTTP transport supports content negotiation at the protocol
-layer.
+Currently, only the HTTP version 1 transport supports content negotiation
+at the protocol layer.
HTTP requests advertise supported response formats via the ``X-HgProto-<N>``
request header, where ``<N>`` is an integer starting at 1 allowing the logical
@@ -662,6 +836,8 @@
This command does not accept any arguments. Return type is a ``string``.
+This command was introduced in Mercurial 0.9.1 (released July 2006).
+
changegroup
-----------
@@ -737,7 +913,7 @@
Boolean indicating whether phases data is requested.
The return type on success is a ``stream`` where the value is bundle.
-On the HTTP transport, the response is zlib compressed.
+On the HTTP version 1 transport, the response is zlib compressed.
If an error occurs, a generic error response can be sent.
@@ -779,6 +955,8 @@
This command does not accept any arguments. The return type is a ``string``.
+This command was introduced in Mercurial 0.9.1 (released July 2006).
+
listkeys
--------
@@ -838,13 +1016,14 @@
The return type is a ``string``. The value depends on the transport protocol.
-The SSH transport sends a string encoded integer followed by a newline
-(``\n``) which indicates operation result. The server may send additional
-output on the ``stderr`` stream that should be displayed to the user.
+The SSH version 1 transport sends a string encoded integer followed by a
+newline (``\n``) which indicates operation result. The server may send
+additional output on the ``stderr`` stream that should be displayed to the
+user.
-The HTTP transport sends a string encoded integer followed by a newline
-followed by additional server output that should be displayed to the user.
-This may include output from hooks, etc.
+The HTTP version 1 transport sends a string encoded integer followed by a
+newline followed by additional server output that should be displayed to
+the user. This may include output from hooks, etc.
The integer result varies by namespace. ``0`` means an error has occurred
and there should be additional output to display to the user.
@@ -908,18 +1087,18 @@
The encoding of the ``push response`` type varies by transport.
-For the SSH transport, this type is composed of 2 ``string`` responses: an
-empty response (``0\n``) followed by the integer result value. e.g.
-``1\n2``. So the full response might be ``0\n1\n2``.
+For the SSH version 1 transport, this type is composed of 2 ``string``
+responses: an empty response (``0\n``) followed by the integer result value.
+e.g. ``1\n2``. So the full response might be ``0\n1\n2``.
-For the HTTP transport, the response is a ``string`` type composed of an
-integer result value followed by a newline (``\n``) followed by string
+For the HTTP version 1 transport, the response is a ``string`` type composed
+of an integer result value followed by a newline (``\n``) followed by string
content holding server output that should be displayed on the client (output
hooks, etc).
In some cases, the server may respond with a ``bundle2`` bundle. In this
-case, the response type is ``stream``. For the HTTP transport, the response
-is zlib compressed.
+case, the response type is ``stream``. For the HTTP version 1 transport, the
+response is zlib compressed.
The server may also respond with a generic error type, which contains a string
indicating the failure.
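
Editor's note: the SSH handshake documented above can be exercised directly
against ``hg serve --stdio``. The following is a rough client-side sketch of
the version 2 upgrade negotiation; the repository path and the lack of error
handling are assumptions for illustration, and a real client would run this
over ssh rather than a local pipe::

    import subprocess
    import uuid

    proc = subprocess.Popen(['hg', '-R', '/path/to/repo', 'serve', '--stdio'],
                            stdin=subprocess.PIPE, stdout=subprocess.PIPE)
    token = str(uuid.uuid4()).encode('ascii')
    zeros = b'0' * 40
    proc.stdin.write(b'upgrade %s proto=ssh-v2\n' % token)
    proc.stdin.write(b'hello\n')
    proc.stdin.write(b'between\npairs 81\n%s-%s' % (zeros, zeros))
    proc.stdin.flush()

    line = proc.stdout.readline()
    if line == b'upgraded %s ssh-v2\n' % token:
        # Version 2: a capabilities frame (length line + value) follows.
        length = int(proc.stdout.readline())
        caps = proc.stdout.read(length)
    else:
        # Old server: `line` is the empty response (b'0\n') to the unknown
        # upgrade line; the hello/between replies follow in version 1 framing.
        caps = None
    print(caps)
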
--- a/mercurial/hg.py Fri Feb 23 17:57:04 2018 -0800
+++ b/mercurial/hg.py Sat Feb 24 17:49:10 2018 -0600
@@ -31,6 +31,7 @@
httppeer,
localrepo,
lock,
+ logcmdutil,
logexchange,
merge as mergemod,
node,
@@ -201,6 +202,24 @@
return ''
return os.path.basename(os.path.normpath(path))
+def sharedreposource(repo):
+ """Returns repository object for source repository of a shared repo.
+
+ If repo is not a shared repository, returns None.
+ """
+ if repo.sharedpath == repo.path:
+ return None
+
+ if util.safehasattr(repo, 'srcrepo') and repo.srcrepo:
+ return repo.srcrepo
+
+ # the sharedpath always ends in the .hg; we want the path to the repo
+ source = repo.vfs.split(repo.sharedpath)[0]
+ srcurl, branches = parseurl(source)
+ srcrepo = repository(repo.ui, srcurl)
+ repo.srcrepo = srcrepo
+ return srcrepo
+
def share(ui, source, dest=None, update=True, bookmarks=True, defaultpath=None,
relative=False):
'''create a shared repository'''
@@ -213,7 +232,7 @@
else:
dest = ui.expandpath(dest)
- if isinstance(source, str):
+ if isinstance(source, bytes):
origsource = ui.expandpath(source)
source, branches = parseurl(origsource)
srcrepo = repository(ui, source)
@@ -885,7 +904,8 @@
ui.status(_("no changes found\n"))
return subreporecurse()
ui.pager('incoming')
- displayer = cmdutil.show_changeset(ui, other, opts, buffered)
+ displayer = logcmdutil.changesetdisplayer(ui, other, opts,
+ buffered=buffered)
displaychlist(other, chlist, displayer)
displayer.close()
finally:
@@ -904,7 +924,7 @@
return ret
def display(other, chlist, displayer):
- limit = cmdutil.loglimit(opts)
+ limit = logcmdutil.getlimit(opts)
if opts.get('newest_first'):
chlist.reverse()
count = 0
@@ -949,7 +969,7 @@
ret = min(ret, sub.outgoing(ui, dest, opts))
return ret
- limit = cmdutil.loglimit(opts)
+ limit = logcmdutil.getlimit(opts)
o, other = _outgoing(ui, repo, dest, opts)
if not o:
cmdutil.outgoinghooks(ui, repo, other, opts, o)
@@ -958,7 +978,7 @@
if opts.get('newest_first'):
o.reverse()
ui.pager('outgoing')
- displayer = cmdutil.show_changeset(ui, repo, opts)
+ displayer = logcmdutil.changesetdisplayer(ui, repo, opts)
count = 0
for n in o:
if limit is not None and count >= limit:
--- a/mercurial/hgweb/common.py Fri Feb 23 17:57:04 2018 -0800
+++ b/mercurial/hgweb/common.py Sat Feb 24 17:49:10 2018 -0600
@@ -45,7 +45,7 @@
authentication info). Return if op allowed, else raise an ErrorResponse
exception.'''
- user = req.env.get('REMOTE_USER')
+ user = req.env.get(r'REMOTE_USER')
deny_read = hgweb.configlist('web', 'deny_read')
if deny_read and (not user or ismember(hgweb.repo.ui, user, deny_read)):
@@ -61,7 +61,7 @@
return
# enforce that you can only push using POST requests
- if req.env['REQUEST_METHOD'] != 'POST':
+ if req.env[r'REQUEST_METHOD'] != r'POST':
msg = 'push requires POST request'
raise ErrorResponse(HTTP_METHOD_NOT_ALLOWED, msg)
@@ -185,7 +185,7 @@
if stripecount and offset:
# account for offset, e.g. due to building the list in reverse
count = (stripecount + offset) % stripecount
- parity = (stripecount + offset) / stripecount & 1
+ parity = (stripecount + offset) // stripecount & 1
else:
count = 0
parity = 0
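
Editor's note: the ``/`` to ``//`` change is another Python 3 porting fix.
``/`` performs true division on Python 3, so the parity computation would
produce a float and the subsequent ``& 1`` bit test would fail; ``//`` keeps
floor (integer) division on both versions::

    stripecount, offset = 5, 7
    print((stripecount + offset) / stripecount)       # Python 3: 2.4 (float)
    print((stripecount + offset) // stripecount & 1)  # 2 & 1 == 0, both versions
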
--- a/mercurial/hgweb/hgweb_mod.py Fri Feb 23 17:57:04 2018 -0800
+++ b/mercurial/hgweb/hgweb_mod.py Sat Feb 24 17:49:10 2018 -0600
@@ -36,10 +36,10 @@
templater,
ui as uimod,
util,
+ wireprotoserver,
)
from . import (
- protocol,
webcommands,
webutil,
wsgicgi,
@@ -63,8 +63,6 @@
def getstyle(req, configfn, templatepath):
fromreq = req.form.get('style', [None])[0]
- if fromreq is not None:
- fromreq = pycompat.sysbytes(fromreq)
styles = (
fromreq,
configfn('web', 'style'),
@@ -357,31 +355,21 @@
query = req.env[r'QUERY_STRING'].partition(r'&')[0]
query = query.partition(r';')[0]
- # process this if it's a protocol request
- # protocol bits don't need to create any URLs
- # and the clients always use the old URL structure
+ # Route it to a wire protocol handler if it looks like a wire protocol
+ # request.
+ protohandler = wireprotoserver.parsehttprequest(rctx.repo, req, query)
- cmd = pycompat.sysbytes(req.form.get(r'cmd', [r''])[0])
- if protocol.iscmd(cmd):
+ if protohandler:
+ cmd = protohandler['cmd']
try:
if query:
raise ErrorResponse(HTTP_NOT_FOUND)
if cmd in perms:
self.check_perm(rctx, req, perms[cmd])
- return protocol.call(rctx.repo, req, cmd)
except ErrorResponse as inst:
- # A client that sends unbundle without 100-continue will
- # break if we respond early.
- if (cmd == 'unbundle' and
- (req.env.get('HTTP_EXPECT',
- '').lower() != '100-continue') or
- req.env.get('X-HgHttp2', '')):
- req.drain()
- else:
- req.headers.append((r'Connection', r'Close'))
- req.respond(inst, protocol.HGTYPE,
- body='0\n%s\n' % inst)
- return ''
+ return protohandler['handleerror'](inst)
+
+ return protohandler['dispatch']()
# translate user-visible url structure to internal structure
@@ -417,6 +405,8 @@
if fn.endswith(ext):
req.form['node'] = [fn[:-len(ext)]]
req.form['type'] = [type_]
+ else:
+ cmd = pycompat.sysbytes(req.form.get(r'cmd', [r''])[0])
# process the web interface request
@@ -451,20 +441,20 @@
except (error.LookupError, error.RepoLookupError) as err:
req.respond(HTTP_NOT_FOUND, ctype)
- msg = str(err)
+ msg = pycompat.bytestr(err)
if (util.safehasattr(err, 'name') and
not isinstance(err, error.ManifestLookupError)):
msg = 'revision not found: %s' % err.name
return tmpl('error', error=msg)
except (error.RepoError, error.RevlogError) as inst:
req.respond(HTTP_SERVER_ERROR, ctype)
- return tmpl('error', error=str(inst))
+ return tmpl('error', error=pycompat.bytestr(inst))
except ErrorResponse as inst:
req.respond(inst, ctype)
if inst.code == HTTP_NOT_MODIFIED:
# Not allowed to return a body on a 304
return ['']
- return tmpl('error', error=str(inst))
+ return tmpl('error', error=pycompat.bytestr(inst))
def check_perm(self, rctx, req, op):
for permhook in permhooks:
--- a/mercurial/hgweb/protocol.py Fri Feb 23 17:57:04 2018 -0800
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,201 +0,0 @@
-#
-# Copyright 21 May 2005 - (c) 2005 Jake Edge <jake@edge2.net>
-# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
-#
-# This software may be used and distributed according to the terms of the
-# GNU General Public License version 2 or any later version.
-
-from __future__ import absolute_import
-
-import cgi
-import struct
-
-from .common import (
- HTTP_OK,
-)
-
-from .. import (
- error,
- pycompat,
- util,
- wireproto,
-)
-stringio = util.stringio
-
-urlerr = util.urlerr
-urlreq = util.urlreq
-
-HGTYPE = 'application/mercurial-0.1'
-HGTYPE2 = 'application/mercurial-0.2'
-HGERRTYPE = 'application/hg-error'
-
-def decodevaluefromheaders(req, headerprefix):
- """Decode a long value from multiple HTTP request headers.
-
- Returns the value as a bytes, not a str.
- """
- chunks = []
- i = 1
- prefix = headerprefix.upper().replace(r'-', r'_')
- while True:
- v = req.env.get(r'HTTP_%s_%d' % (prefix, i))
- if v is None:
- break
- chunks.append(pycompat.bytesurl(v))
- i += 1
-
- return ''.join(chunks)
-
-class webproto(wireproto.abstractserverproto):
- def __init__(self, req, ui):
- self.req = req
- self.response = ''
- self.ui = ui
- self.name = 'http'
-
- def getargs(self, args):
- knownargs = self._args()
- data = {}
- keys = args.split()
- for k in keys:
- if k == '*':
- star = {}
- for key in knownargs.keys():
- if key != 'cmd' and key not in keys:
- star[key] = knownargs[key][0]
- data['*'] = star
- else:
- data[k] = knownargs[k][0]
- return [data[k] for k in keys]
- def _args(self):
- args = self.req.form.copy()
- if pycompat.ispy3:
- args = {k.encode('ascii'): [v.encode('ascii') for v in vs]
- for k, vs in args.items()}
- postlen = int(self.req.env.get(r'HTTP_X_HGARGS_POST', 0))
- if postlen:
- args.update(cgi.parse_qs(
- self.req.read(postlen), keep_blank_values=True))
- return args
-
- argvalue = decodevaluefromheaders(self.req, r'X-HgArg')
- args.update(cgi.parse_qs(argvalue, keep_blank_values=True))
- return args
- def getfile(self, fp):
- length = int(self.req.env[r'CONTENT_LENGTH'])
- # If httppostargs is used, we need to read Content-Length
- # minus the amount that was consumed by args.
- length -= int(self.req.env.get(r'HTTP_X_HGARGS_POST', 0))
- for s in util.filechunkiter(self.req, limit=length):
- fp.write(s)
- def redirect(self):
- self.oldio = self.ui.fout, self.ui.ferr
- self.ui.ferr = self.ui.fout = stringio()
- def restore(self):
- val = self.ui.fout.getvalue()
- self.ui.ferr, self.ui.fout = self.oldio
- return val
-
- def _client(self):
- return 'remote:%s:%s:%s' % (
- self.req.env.get('wsgi.url_scheme') or 'http',
- urlreq.quote(self.req.env.get('REMOTE_HOST', '')),
- urlreq.quote(self.req.env.get('REMOTE_USER', '')))
-
- def responsetype(self, prefer_uncompressed):
- """Determine the appropriate response type and compression settings.
-
- Returns a tuple of (mediatype, compengine, engineopts).
- """
- # Determine the response media type and compression engine based
- # on the request parameters.
- protocaps = decodevaluefromheaders(self.req, r'X-HgProto').split(' ')
-
- if '0.2' in protocaps:
- # All clients are expected to support uncompressed data.
- if prefer_uncompressed:
- return HGTYPE2, util._noopengine(), {}
-
- # Default as defined by wire protocol spec.
- compformats = ['zlib', 'none']
- for cap in protocaps:
- if cap.startswith('comp='):
- compformats = cap[5:].split(',')
- break
-
- # Now find an agreed upon compression format.
- for engine in wireproto.supportedcompengines(self.ui, self,
- util.SERVERROLE):
- if engine.wireprotosupport().name in compformats:
- opts = {}
- level = self.ui.configint('server',
- '%slevel' % engine.name())
- if level is not None:
- opts['level'] = level
-
- return HGTYPE2, engine, opts
-
- # No mutually supported compression format. Fall back to the
- # legacy protocol.
-
- # Don't allow untrusted settings because disabling compression or
- # setting a very high compression level could lead to flooding
- # the server's network or CPU.
- opts = {'level': self.ui.configint('server', 'zliblevel')}
- return HGTYPE, util.compengines['zlib'], opts
-
-def iscmd(cmd):
- return cmd in wireproto.commands
-
-def call(repo, req, cmd):
- p = webproto(req, repo.ui)
-
- def genversion2(gen, engine, engineopts):
- # application/mercurial-0.2 always sends a payload header
- # identifying the compression engine.
- name = engine.wireprotosupport().name
- assert 0 < len(name) < 256
- yield struct.pack('B', len(name))
- yield name
-
- for chunk in gen:
- yield chunk
-
- rsp = wireproto.dispatch(repo, p, cmd)
- if isinstance(rsp, bytes):
- req.respond(HTTP_OK, HGTYPE, body=rsp)
- return []
- elif isinstance(rsp, wireproto.streamres_legacy):
- gen = rsp.gen
- req.respond(HTTP_OK, HGTYPE)
- return gen
- elif isinstance(rsp, wireproto.streamres):
- gen = rsp.gen
-
- # This code for compression should not be streamres specific. It
- # is here because we only compress streamres at the moment.
- mediatype, engine, engineopts = p.responsetype(rsp.prefer_uncompressed)
- gen = engine.compressstream(gen, engineopts)
-
- if mediatype == HGTYPE2:
- gen = genversion2(gen, engine, engineopts)
-
- req.respond(HTTP_OK, mediatype)
- return gen
- elif isinstance(rsp, wireproto.pushres):
- val = p.restore()
- rsp = '%d\n%s' % (rsp.res, val)
- req.respond(HTTP_OK, HGTYPE, body=rsp)
- return []
- elif isinstance(rsp, wireproto.pusherr):
- # drain the incoming bundle
- req.drain()
- p.restore()
- rsp = '0\n%s\n' % rsp.res
- req.respond(HTTP_OK, HGTYPE, body=rsp)
- return []
- elif isinstance(rsp, wireproto.ooberror):
- rsp = rsp.message
- req.respond(HTTP_OK, HGERRTYPE, body=rsp)
- return []
- raise error.ProgrammingError('hgweb.protocol internal failure', rsp)
--- a/mercurial/hgweb/request.py Fri Feb 23 17:57:04 2018 -0800
+++ b/mercurial/hgweb/request.py Sat Feb 24 17:49:10 2018 -0600
@@ -115,13 +115,14 @@
self.headers = [(k, v) for (k, v) in self.headers if
k in ('Date', 'ETag', 'Expires',
'Cache-Control', 'Vary')]
- status = statusmessage(status.code, str(status))
+ status = statusmessage(status.code, pycompat.bytestr(status))
elif status == 200:
status = '200 Script output follows'
elif isinstance(status, int):
status = statusmessage(status)
- self.server_write = self._start_response(status, self.headers)
+ self.server_write = self._start_response(
+ pycompat.sysstr(status), self.headers)
self._start_response = None
self.headers = []
if body is not None:
--- a/mercurial/hgweb/server.py Fri Feb 23 17:57:04 2018 -0800
+++ b/mercurial/hgweb/server.py Sat Feb 24 17:49:10 2018 -0600
@@ -273,7 +273,7 @@
def openlog(opt, default):
if opt and opt != '-':
- return open(opt, 'a')
+ return open(opt, 'ab')
return default
class MercurialHTTPServer(_mixin, httpservermod.httpserver, object):
--- a/mercurial/hgweb/webcommands.py Fri Feb 23 17:57:04 2018 -0800
+++ b/mercurial/hgweb/webcommands.py Sat Feb 24 17:49:10 2018 -0600
@@ -542,7 +542,7 @@
emptydirs = []
h = dirs[d]
while isinstance(h, dict) and len(h) == 1:
- k, v = h.items()[0]
+ k, v = next(iter(h.items()))
if v:
emptydirs.append(k)
h = v
@@ -561,7 +561,7 @@
fentries=filelist,
dentries=dirlist,
archives=web.archivelist(hex(node)),
- **webutil.commonentry(web.repo, ctx))
+ **pycompat.strkwargs(webutil.commonentry(web.repo, ctx)))
@webcommand('tags')
def tags(web, req, tmpl):
@@ -1116,7 +1116,7 @@
msg = 'Archive type not allowed: %s' % type_
raise ErrorResponse(HTTP_FORBIDDEN, msg)
- reponame = re.sub(r"\W+", "-", os.path.basename(web.reponame))
+ reponame = re.sub(br"\W+", "-", os.path.basename(web.reponame))
cnode = web.repo.lookup(key)
arch_version = key
if cnode == key or key == 'tip':
@@ -1403,7 +1403,7 @@
try:
doc = helpmod.help_(u, commands, topic, subtopic=subtopic)
- except error.UnknownCommand:
+ except error.Abort:
raise ErrorResponse(HTTP_NOT_FOUND)
return tmpl('help', topic=topicname, doc=doc)
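
Editor's note: ``h.items()[0]`` fails on Python 3 because ``dict.items()``
returns a view rather than a list; ``next(iter(...))`` retrieves a single item
on both versions::

    h = {'subdir': {}}
    # h.items()[0]              # Python 3: TypeError; Python 2: ('subdir', {})
    k, v = next(iter(h.items()))  # works on both: ('subdir', {})
    print(k, v)
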
--- a/mercurial/hgweb/webutil.py Fri Feb 23 17:57:04 2018 -0800
+++ b/mercurial/hgweb/webutil.py Sat Feb 24 17:49:10 2018 -0600
@@ -347,7 +347,7 @@
try:
return util.processlinerange(fromline, toline)
except error.ParseError as exc:
- raise ErrorResponse(HTTP_BAD_REQUEST, str(exc))
+ raise ErrorResponse(HTTP_BAD_REQUEST, pycompat.bytestr(exc))
def formatlinerange(fromline, toline):
return '%d:%d' % (fromline + 1, toline)
@@ -619,14 +619,14 @@
websubdefs += repo.ui.configitems('interhg')
for key, pattern in websubdefs:
# grab the delimiter from the character after the "s"
- unesc = pattern[1]
+ unesc = pattern[1:2]
delim = re.escape(unesc)
# identify portions of the pattern, taking care to avoid escaped
# delimiters. the replace format and flags are optional, but
# delimiters are required.
match = re.match(
- r'^s%s(.+)(?:(?<=\\\\)|(?<!\\))%s(.*)%s([ilmsux])*$'
+ br'^s%s(.+)(?:(?<=\\\\)|(?<!\\))%s(.*)%s([ilmsux])*$'
% (delim, delim, delim), pattern)
if not match:
repo.ui.warn(_("websub: invalid pattern for %s: %s\n")
@@ -634,7 +634,7 @@
continue
# we need to unescape the delimiter for regexp and format
- delim_re = re.compile(r'(?<!\\)\\%s' % delim)
+ delim_re = re.compile(br'(?<!\\)\\%s' % delim)
regexp = delim_re.sub(unesc, match.group(1))
format = delim_re.sub(unesc, match.group(2))
--- a/mercurial/hook.py Fri Feb 23 17:57:04 2018 -0800
+++ b/mercurial/hook.py Sat Feb 24 17:49:10 2018 -0600
@@ -49,12 +49,12 @@
modname = modfile
with demandimport.deactivated():
try:
- obj = __import__(modname)
+ obj = __import__(pycompat.sysstr(modname))
except (ImportError, SyntaxError):
e1 = sys.exc_info()
try:
# extensions are loaded with hgext_ prefix
- obj = __import__("hgext_%s" % modname)
+ obj = __import__(r"hgext_%s" % pycompat.sysstr(modname))
except (ImportError, SyntaxError):
e2 = sys.exc_info()
if ui.tracebackflag:
--- a/mercurial/httppeer.py Fri Feb 23 17:57:04 2018 -0800
+++ b/mercurial/httppeer.py Sat Feb 24 17:49:10 2018 -0600
@@ -16,7 +16,6 @@
import tempfile
from .i18n import _
-from .node import nullid
from . import (
bundle2,
error,
@@ -222,13 +221,9 @@
# Begin of _basewirepeer interface.
def capabilities(self):
- if self._caps is None:
- try:
- self._fetchcaps()
- except error.RepoError:
- self._caps = set()
- self.ui.debug('capabilities: %s\n' %
- (' '.join(self._caps or ['none'])))
+ # self._fetchcaps() should have been called as part of peer
+ # handshake. So self._caps should always be set.
+ assert self._caps is not None
return self._caps
# End of _basewirepeer interface.
@@ -253,6 +248,8 @@
# with infinite recursion when trying to look up capabilities
# for the first time.
postargsok = self._caps is not None and 'httppostargs' in self._caps
+
+ # Send arguments via POST.
if postargsok and args:
strargs = urlreq.urlencode(sorted(args.items()))
if not data:
@@ -266,11 +263,16 @@
argsio.length = len(strargs)
data = _multifile(argsio, data)
headers[r'X-HgArgs-Post'] = len(strargs)
- else:
- if len(args) > 0:
- httpheader = self.capable('httpheader')
- if httpheader:
- headersize = int(httpheader.split(',', 1)[0])
+ elif args:
+ # Calling self.capable() can infinite loop if we are calling
+ # "capabilities". But that command should never accept wire
+ # protocol arguments. So this should never happen.
+ assert cmd != 'capabilities'
+ httpheader = self.capable('httpheader')
+ if httpheader:
+ headersize = int(httpheader.split(',', 1)[0])
+
+ # Send arguments via HTTP headers.
if headersize > 0:
# The headers can typically carry more data than the URL.
encargs = urlreq.urlencode(sorted(args.items()))
@@ -278,8 +280,10 @@
headersize):
headers[header] = value
varyheaders.append(header)
+ # Send arguments via query string (Mercurial <1.9).
else:
q += sorted(args.items())
+
qs = '?%s' % urlreq.urlencode(q)
cu = "%s%s" % (self._url, qs)
size = 0
@@ -330,8 +334,8 @@
req = self._requestbuilder(pycompat.strurl(cu), data, headers)
if data is not None:
- self.ui.debug("sending %s bytes\n" % size)
- req.add_unredirected_header('Content-Length', '%d' % size)
+ self.ui.debug("sending %d bytes\n" % size)
+ req.add_unredirected_header(r'Content-Length', r'%d' % size)
try:
resp = self._openurl(req)
except urlerr.httperror as inst:
@@ -430,7 +434,7 @@
tempname = bundle2.writebundle(self.ui, cg, None, type)
fp = httpconnection.httpsendfile(self.ui, tempname, "rb")
- headers = {'Content-Type': 'application/mercurial-0.1'}
+ headers = {r'Content-Type': r'application/mercurial-0.1'}
try:
r = self._call(cmd, data=fp, headers=headers, **args)
@@ -461,7 +465,7 @@
fh.close()
# start http push
fp_ = httpconnection.httpsendfile(self.ui, filename, "rb")
- headers = {'Content-Type': 'application/mercurial-0.1'}
+ headers = {r'Content-Type': r'application/mercurial-0.1'}
return self._callstream(cmd, data=fp_, headers=headers, **args)
finally:
if fp_ is not None:
@@ -476,28 +480,17 @@
def _abort(self, exception):
raise exception
-class httpspeer(httppeer):
- def __init__(self, ui, path):
- if not url.has_https:
- raise error.Abort(_('Python support for SSL and HTTPS '
- 'is not installed'))
- httppeer.__init__(self, ui, path)
-
def instance(ui, path, create):
if create:
raise error.Abort(_('cannot create new http repository'))
try:
- if path.startswith('https:'):
- inst = httpspeer(ui, path)
- else:
- inst = httppeer(ui, path)
- try:
- # Try to do useful work when checking compatibility.
- # Usually saves a roundtrip since we want the caps anyway.
- inst._fetchcaps()
- except error.RepoError:
- # No luck, try older compatibility check.
- inst.between([(nullid, nullid)])
+ if path.startswith('https:') and not url.has_https:
+ raise error.Abort(_('Python support for SSL and HTTPS '
+ 'is not installed'))
+
+ inst = httppeer(ui, path)
+ inst._fetchcaps()
+
return inst
except error.RepoError as httpexception:
try:
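
Editor's note: as the new comments above spell out, wire protocol arguments are
sent one of three ways: in the POST body (``httppostargs``), spread across
numbered ``X-HgArg-<N>`` headers sized to the server's ``httpheader``
capability, or in the query string for very old servers. A simplified
illustration of the header-splitting step follows; the helper name and the
sizes are illustrative, not the exact upstream implementation::

    def encodevalueinheaders(value, header, limit):
        # Leave room for the "<header>-<N>: " overhead on each header line.
        fmt = header + '-%s'
        valuelen = limit - len(fmt % '000') - len(': ')
        result = []
        n = 1
        for i in range(0, len(value), valuelen):
            result.append((fmt % str(n), value[i:i + valuelen]))
            n += 1
        return result

    encoded = 'cmd=listkeys&namespace=bookmarks&extra=' + 'x' * 100
    for name, chunk in encodevalueinheaders(encoded, 'X-HgArg', 64):
        print(name, chunk)
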
--- a/mercurial/keepalive.py Fri Feb 23 17:57:04 2018 -0800
+++ b/mercurial/keepalive.py Sat Feb 24 17:49:10 2018 -0600
@@ -324,11 +324,11 @@
h.putrequest(
req.get_method(), urllibcompat.getselector(req),
**pycompat.strkwargs(skipheaders))
- if 'content-type' not in headers:
- h.putheader('Content-type',
- 'application/x-www-form-urlencoded')
- if 'content-length' not in headers:
- h.putheader('Content-length', '%d' % len(data))
+ if r'content-type' not in headers:
+ h.putheader(r'Content-type',
+ r'application/x-www-form-urlencoded')
+ if r'content-length' not in headers:
+ h.putheader(r'Content-length', r'%d' % len(data))
else:
h.putrequest(
req.get_method(), urllibcompat.getselector(req),
--- a/mercurial/localrepo.py Fri Feb 23 17:57:04 2018 -0800
+++ b/mercurial/localrepo.py Sat Feb 24 17:49:10 2018 -0600
@@ -9,7 +9,6 @@
import errno
import hashlib
-import inspect
import os
import random
import time
@@ -57,7 +56,7 @@
scmutil,
sparse,
store,
- subrepo,
+ subrepoutil,
tags as tagsmod,
transaction,
txnutil,
@@ -304,11 +303,15 @@
class localrepository(object):
+ # obsolete experimental requirements:
+ # - manifestv2: An experimental new manifest format that allowed
+ # for stem compression of long paths. Experiment ended up not
+ # being successful (repository sizes went up due to worse delta
+ # chains), and the code was deleted in 4.6.
supportedformats = {
'revlogv1',
'generaldelta',
'treemanifest',
- 'manifestv2',
REVLOGV2_REQUIREMENT,
}
_basesupported = supportedformats | {
@@ -323,7 +326,6 @@
'revlogv1',
'generaldelta',
'treemanifest',
- 'manifestv2',
}
# a list of (ui, featureset) functions.
@@ -1068,7 +1070,7 @@
if not fn:
fn = lambda s, c, **kwargs: util.filter(s, c)
# Wrap old filters not supporting keyword arguments
- if not inspect.getargspec(fn)[2]:
+ if not pycompat.getargspec(fn)[2]:
oldfn = fn
fn = lambda s, c, **kwargs: oldfn(s, c)
l.append((mf, fn, params))
@@ -1332,7 +1334,7 @@
"""To be run if transaction is aborted
"""
reporef().hook('txnabort', throw=False, txnname=desc,
- **tr2.hookargs)
+ **pycompat.strkwargs(tr2.hookargs))
tr.addabort('txnabort-hook', txnaborthook)
# avoid eager cache invalidation. in-memory data should be identical
# to stored data if transaction has no error.
@@ -1574,7 +1576,8 @@
def _refreshfilecachestats(self, tr):
"""Reload stats of cached files so that they are flagged as valid"""
for k, ce in self._filecache.items():
- if k == 'dirstate' or k not in self.__dict__:
+ k = pycompat.sysstr(k)
+ if k == r'dirstate' or k not in self.__dict__:
continue
ce.refresh()
@@ -1832,7 +1835,7 @@
status.modified.extend(status.clean) # mq may commit clean files
# check subrepos
- subs, commitsubs, newstate = subrepo.precommit(
+ subs, commitsubs, newstate = subrepoutil.precommit(
self.ui, wctx, status, match, force=force)
# make sure all explicit patterns are matched
@@ -1869,10 +1872,10 @@
for s in sorted(commitsubs):
sub = wctx.sub(s)
self.ui.status(_('committing subrepository %s\n') %
- subrepo.subrelpath(sub))
+ subrepoutil.subrelpath(sub))
sr = sub.commit(cctx._text, user, date)
newstate[s] = (newstate[s][0], sr)
- subrepo.writestate(self, newstate)
+ subrepoutil.writestate(self, newstate)
p1, p2 = self.dirstate.parents()
hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
@@ -1982,7 +1985,7 @@
self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
parent2=xp2)
# set the new commit is proper phase
- targetphase = subrepo.newcommitphase(self.ui, ctx)
+ targetphase = subrepoutil.newcommitphase(self.ui, ctx)
if targetphase:
# retract boundary do not alter parent changeset.
# if a parent have higher the resulting phase will
@@ -2047,15 +2050,6 @@
# tag cache retrieval" case to work.
self.invalidate()
- def walk(self, match, node=None):
- '''
- walk recursively through the directory tree or a given
- changeset, finding all files matched by the match
- function
- '''
- self.ui.deprecwarn('use repo[node].walk instead of repo.walk', '4.3')
- return self[node].walk(match)
-
def status(self, node1='.', node2=None, match=None,
ignored=False, clean=False, unknown=False,
listsubrepos=False):
@@ -2270,8 +2264,6 @@
requirements.add('generaldelta')
if ui.configbool('experimental', 'treemanifest'):
requirements.add('treemanifest')
- if ui.configbool('experimental', 'manifestv2'):
- requirements.add('manifestv2')
revlogv2 = ui.config('experimental', 'revlogv2')
if revlogv2 == 'enable-unstable-format-and-corrupt-my-data':
--- a/mercurial/lock.py Fri Feb 23 17:57:04 2018 -0800
+++ b/mercurial/lock.py Sat Feb 24 17:49:10 2018 -0600
@@ -30,9 +30,7 @@
confidence. Typically it's just hostname. On modern linux, we include an
extra Linux-specific pid namespace identifier.
"""
- result = socket.gethostname()
- if pycompat.ispy3:
- result = result.encode(pycompat.sysstr(encoding.encoding), 'replace')
+ result = encoding.strtolocal(socket.gethostname())
if pycompat.sysplatform.startswith('linux'):
try:
result += '/%x' % os.stat('/proc/self/ns/pid').st_ino
@@ -86,9 +84,9 @@
l.delay = delay
if l.delay:
if 0 <= warningidx <= l.delay:
- ui.warn(_("got lock after %s seconds\n") % l.delay)
+ ui.warn(_("got lock after %d seconds\n") % l.delay)
else:
- ui.debug("got lock after %s seconds\n" % l.delay)
+ ui.debug("got lock after %d seconds\n" % l.delay)
if l.acquirefn:
l.acquirefn()
return l
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/mercurial/logcmdutil.py Sat Feb 24 17:49:10 2018 -0600
@@ -0,0 +1,931 @@
+# logcmdutil.py - utility for log-like commands
+#
+# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2 or any later version.
+
+from __future__ import absolute_import
+
+import itertools
+import os
+
+from .i18n import _
+from .node import (
+ hex,
+ nullid,
+)
+
+from . import (
+ dagop,
+ encoding,
+ error,
+ formatter,
+ graphmod,
+ match as matchmod,
+ mdiff,
+ patch,
+ pathutil,
+ pycompat,
+ revset,
+ revsetlang,
+ scmutil,
+ smartset,
+ templatekw,
+ templater,
+ util,
+)
+
+def getlimit(opts):
+ """get the log limit according to option -l/--limit"""
+ limit = opts.get('limit')
+ if limit:
+ try:
+ limit = int(limit)
+ except ValueError:
+ raise error.Abort(_('limit must be a positive integer'))
+ if limit <= 0:
+ raise error.Abort(_('limit must be positive'))
+ else:
+ limit = None
+ return limit
+
+def diffordiffstat(ui, repo, diffopts, node1, node2, match,
+ changes=None, stat=False, fp=None, prefix='',
+ root='', listsubrepos=False, hunksfilterfn=None):
+ '''show diff or diffstat.'''
+ if root:
+ relroot = pathutil.canonpath(repo.root, repo.getcwd(), root)
+ else:
+ relroot = ''
+ if relroot != '':
+ # XXX relative roots currently don't work if the root is within a
+ # subrepo
+ uirelroot = match.uipath(relroot)
+ relroot += '/'
+ for matchroot in match.files():
+ if not matchroot.startswith(relroot):
+ ui.warn(_('warning: %s not inside relative root %s\n') % (
+ match.uipath(matchroot), uirelroot))
+
+ if stat:
+ diffopts = diffopts.copy(context=0, noprefix=False)
+ width = 80
+ if not ui.plain():
+ width = ui.termwidth()
+
+ chunks = patch.diff(repo, node1, node2, match, changes, opts=diffopts,
+ prefix=prefix, relroot=relroot,
+ hunksfilterfn=hunksfilterfn)
+
+ if fp is not None or ui.canwritewithoutlabels():
+ out = fp or ui
+ if stat:
+ chunks = [patch.diffstat(util.iterlines(chunks), width=width)]
+ for chunk in util.filechunkiter(util.chunkbuffer(chunks)):
+ out.write(chunk)
+ else:
+ if stat:
+ chunks = patch.diffstatui(util.iterlines(chunks), width=width)
+ else:
+ chunks = patch.difflabel(lambda chunks, **kwargs: chunks, chunks,
+ opts=diffopts)
+ if ui.canbatchlabeledwrites():
+ def gen():
+ for chunk, label in chunks:
+ yield ui.label(chunk, label=label)
+ for chunk in util.filechunkiter(util.chunkbuffer(gen())):
+ ui.write(chunk)
+ else:
+ for chunk, label in chunks:
+ ui.write(chunk, label=label)
+
+ if listsubrepos:
+ ctx1 = repo[node1]
+ ctx2 = repo[node2]
+ for subpath, sub in scmutil.itersubrepos(ctx1, ctx2):
+ tempnode2 = node2
+ try:
+ if node2 is not None:
+ tempnode2 = ctx2.substate[subpath][1]
+ except KeyError:
+ # A subrepo that existed in node1 was deleted between node1 and
+ # node2 (inclusive). Thus, ctx2's substate won't contain that
+ # subpath. The best we can do is to ignore it.
+ tempnode2 = None
+ submatch = matchmod.subdirmatcher(subpath, match)
+ sub.diff(ui, diffopts, tempnode2, submatch, changes=changes,
+ stat=stat, fp=fp, prefix=prefix)
+
+class changesetdiffer(object):
+ """Generate diff of changeset with pre-configured filtering functions"""
+
+ def _makefilematcher(self, ctx):
+ return scmutil.matchall(ctx.repo())
+
+ def _makehunksfilter(self, ctx):
+ return None
+
+ def showdiff(self, ui, ctx, diffopts, stat=False):
+ repo = ctx.repo()
+ node = ctx.node()
+ prev = ctx.p1().node()
+ diffordiffstat(ui, repo, diffopts, prev, node,
+ match=self._makefilematcher(ctx), stat=stat,
+ hunksfilterfn=self._makehunksfilter(ctx))
+
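The two hook methods above are not so much meant to be subclassed as replaced per call site: later in this file, getrevs() and getlinerangerevs() assign plain functions onto a changesetdiffer instance. A minimal standalone sketch of that pattern, with invented names and no Mercurial imports:

class differ(object):
    # stand-in for changesetdiffer: default hooks that callers may swap out
    def _makefilematcher(self, ctx):
        return lambda path: True          # default: match everything

    def _makehunksfilter(self, ctx):
        return None                       # default: no hunk filtering

    def showdiff(self, ctx):
        matcher = self._makefilematcher(ctx)
        return [f for f in ctx['files'] if matcher(f)]

d = differ()
# customize by assigning a plain function; it receives only ctx, no self
d._makefilematcher = lambda ctx: (lambda path: path.endswith('.py'))
print(d.showdiff({'files': ['a.py', 'b.txt']}))   # ['a.py']
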
+def changesetlabels(ctx):
+ labels = ['log.changeset', 'changeset.%s' % ctx.phasestr()]
+ if ctx.obsolete():
+ labels.append('changeset.obsolete')
+ if ctx.isunstable():
+ labels.append('changeset.unstable')
+ for instability in ctx.instabilities():
+ labels.append('instability.%s' % instability)
+ return ' '.join(labels)
+
+class changesetprinter(object):
+ '''show changeset information when templating is not requested.'''
+
+ def __init__(self, ui, repo, differ=None, diffopts=None, buffered=False):
+ self.ui = ui
+ self.repo = repo
+ self.buffered = buffered
+ self._differ = differ or changesetdiffer()
+ self.diffopts = diffopts or {}
+ self.header = {}
+ self.hunk = {}
+ self.lastheader = None
+ self.footer = None
+ self._columns = templatekw.getlogcolumns()
+
+ def flush(self, ctx):
+ rev = ctx.rev()
+ if rev in self.header:
+ h = self.header[rev]
+ if h != self.lastheader:
+ self.lastheader = h
+ self.ui.write(h)
+ del self.header[rev]
+ if rev in self.hunk:
+ self.ui.write(self.hunk[rev])
+ del self.hunk[rev]
+
+ def close(self):
+ if self.footer:
+ self.ui.write(self.footer)
+
+ def show(self, ctx, copies=None, **props):
+ props = pycompat.byteskwargs(props)
+ if self.buffered:
+ self.ui.pushbuffer(labeled=True)
+ self._show(ctx, copies, props)
+ self.hunk[ctx.rev()] = self.ui.popbuffer()
+ else:
+ self._show(ctx, copies, props)
+
+ def _show(self, ctx, copies, props):
+ '''show a single changeset or file revision'''
+ changenode = ctx.node()
+ rev = ctx.rev()
+
+ if self.ui.quiet:
+ self.ui.write("%s\n" % scmutil.formatchangeid(ctx),
+ label='log.node')
+ return
+
+ columns = self._columns
+ self.ui.write(columns['changeset'] % scmutil.formatchangeid(ctx),
+ label=changesetlabels(ctx))
+
+ # branches are shown first before any other names due to backwards
+ # compatibility
+ branch = ctx.branch()
+ # don't show the default branch name
+ if branch != 'default':
+ self.ui.write(columns['branch'] % branch, label='log.branch')
+
+ for nsname, ns in self.repo.names.iteritems():
+ # branches has special logic already handled above, so here we just
+ # skip it
+ if nsname == 'branches':
+ continue
+ # we will use the templatename as the color name since those two
+ # should be the same
+ for name in ns.names(self.repo, changenode):
+ self.ui.write(ns.logfmt % name,
+ label='log.%s' % ns.colorname)
+ if self.ui.debugflag:
+ self.ui.write(columns['phase'] % ctx.phasestr(), label='log.phase')
+ for pctx in scmutil.meaningfulparents(self.repo, ctx):
+ label = 'log.parent changeset.%s' % pctx.phasestr()
+ self.ui.write(columns['parent'] % scmutil.formatchangeid(pctx),
+ label=label)
+
+ if self.ui.debugflag and rev is not None:
+ mnode = ctx.manifestnode()
+ mrev = self.repo.manifestlog._revlog.rev(mnode)
+ self.ui.write(columns['manifest']
+ % scmutil.formatrevnode(self.ui, mrev, mnode),
+ label='ui.debug log.manifest')
+ self.ui.write(columns['user'] % ctx.user(), label='log.user')
+ self.ui.write(columns['date'] % util.datestr(ctx.date()),
+ label='log.date')
+
+ if ctx.isunstable():
+ instabilities = ctx.instabilities()
+ self.ui.write(columns['instability'] % ', '.join(instabilities),
+ label='log.instability')
+
+ elif ctx.obsolete():
+ self._showobsfate(ctx)
+
+ self._exthook(ctx)
+
+ if self.ui.debugflag:
+ files = ctx.p1().status(ctx)[:3]
+ for key, value in zip(['files', 'files+', 'files-'], files):
+ if value:
+ self.ui.write(columns[key] % " ".join(value),
+ label='ui.debug log.files')
+ elif ctx.files() and self.ui.verbose:
+ self.ui.write(columns['files'] % " ".join(ctx.files()),
+ label='ui.note log.files')
+ if copies and self.ui.verbose:
+ copies = ['%s (%s)' % c for c in copies]
+ self.ui.write(columns['copies'] % ' '.join(copies),
+ label='ui.note log.copies')
+
+ extra = ctx.extra()
+ if extra and self.ui.debugflag:
+ for key, value in sorted(extra.items()):
+ self.ui.write(columns['extra'] % (key, util.escapestr(value)),
+ label='ui.debug log.extra')
+
+ description = ctx.description().strip()
+ if description:
+ if self.ui.verbose:
+ self.ui.write(_("description:\n"),
+ label='ui.note log.description')
+ self.ui.write(description,
+ label='ui.note log.description')
+ self.ui.write("\n\n")
+ else:
+ self.ui.write(columns['summary'] % description.splitlines()[0],
+ label='log.summary')
+ self.ui.write("\n")
+
+ self._showpatch(ctx)
+
+ def _showobsfate(self, ctx):
+ obsfate = templatekw.showobsfate(repo=self.repo, ctx=ctx, ui=self.ui)
+
+ if obsfate:
+ for obsfateline in obsfate:
+ self.ui.write(self._columns['obsolete'] % obsfateline,
+ label='log.obsfate')
+
+ def _exthook(self, ctx):
+ '''empty method used by extensions as a hook point
+ '''
+
+ def _showpatch(self, ctx):
+ stat = self.diffopts.get('stat')
+ diff = self.diffopts.get('patch')
+ diffopts = patch.diffallopts(self.ui, self.diffopts)
+ if stat:
+ self._differ.showdiff(self.ui, ctx, diffopts, stat=True)
+ if stat and diff:
+ self.ui.write("\n")
+ if diff:
+ self._differ.showdiff(self.ui, ctx, diffopts, stat=False)
+ if stat or diff:
+ self.ui.write("\n")
+
+class jsonchangeset(changesetprinter):
+ '''format changeset information.'''
+
+ def __init__(self, ui, repo, differ=None, diffopts=None, buffered=False):
+ changesetprinter.__init__(self, ui, repo, differ, diffopts, buffered)
+ self.cache = {}
+ self._first = True
+
+ def close(self):
+ if not self._first:
+ self.ui.write("\n]\n")
+ else:
+ self.ui.write("[]\n")
+
+ def _show(self, ctx, copies, props):
+ '''show a single changeset or file revision'''
+ rev = ctx.rev()
+ if rev is None:
+ jrev = jnode = 'null'
+ else:
+ jrev = '%d' % rev
+ jnode = '"%s"' % hex(ctx.node())
+ j = encoding.jsonescape
+
+ if self._first:
+ self.ui.write("[\n {")
+ self._first = False
+ else:
+ self.ui.write(",\n {")
+
+ if self.ui.quiet:
+ self.ui.write(('\n "rev": %s') % jrev)
+ self.ui.write((',\n "node": %s') % jnode)
+ self.ui.write('\n }')
+ return
+
+ self.ui.write(('\n "rev": %s') % jrev)
+ self.ui.write((',\n "node": %s') % jnode)
+ self.ui.write((',\n "branch": "%s"') % j(ctx.branch()))
+ self.ui.write((',\n "phase": "%s"') % ctx.phasestr())
+ self.ui.write((',\n "user": "%s"') % j(ctx.user()))
+ self.ui.write((',\n "date": [%d, %d]') % ctx.date())
+ self.ui.write((',\n "desc": "%s"') % j(ctx.description()))
+
+ self.ui.write((',\n "bookmarks": [%s]') %
+ ", ".join('"%s"' % j(b) for b in ctx.bookmarks()))
+ self.ui.write((',\n "tags": [%s]') %
+ ", ".join('"%s"' % j(t) for t in ctx.tags()))
+ self.ui.write((',\n "parents": [%s]') %
+ ", ".join('"%s"' % c.hex() for c in ctx.parents()))
+
+ if self.ui.debugflag:
+ if rev is None:
+ jmanifestnode = 'null'
+ else:
+ jmanifestnode = '"%s"' % hex(ctx.manifestnode())
+ self.ui.write((',\n "manifest": %s') % jmanifestnode)
+
+ self.ui.write((',\n "extra": {%s}') %
+ ", ".join('"%s": "%s"' % (j(k), j(v))
+ for k, v in ctx.extra().items()))
+
+ files = ctx.p1().status(ctx)
+ self.ui.write((',\n "modified": [%s]') %
+ ", ".join('"%s"' % j(f) for f in files[0]))
+ self.ui.write((',\n "added": [%s]') %
+ ", ".join('"%s"' % j(f) for f in files[1]))
+ self.ui.write((',\n "removed": [%s]') %
+ ", ".join('"%s"' % j(f) for f in files[2]))
+
+ elif self.ui.verbose:
+ self.ui.write((',\n "files": [%s]') %
+ ", ".join('"%s"' % j(f) for f in ctx.files()))
+
+ if copies:
+ self.ui.write((',\n "copies": {%s}') %
+ ", ".join('"%s": "%s"' % (j(k), j(v))
+ for k, v in copies))
+
+ stat = self.diffopts.get('stat')
+ diff = self.diffopts.get('patch')
+ diffopts = patch.difffeatureopts(self.ui, self.diffopts, git=True)
+ if stat:
+ self.ui.pushbuffer()
+ self._differ.showdiff(self.ui, ctx, diffopts, stat=True)
+ self.ui.write((',\n "diffstat": "%s"')
+ % j(self.ui.popbuffer()))
+ if diff:
+ self.ui.pushbuffer()
+ self._differ.showdiff(self.ui, ctx, diffopts, stat=False)
+ self.ui.write((',\n "diff": "%s"') % j(self.ui.popbuffer()))
+
+ self.ui.write("\n }")
+
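The _show()/close() pair above streams a JSON array without building it in memory: the first record opens the list, later records are comma-prefixed, and close() finishes with "]" or emits "[]" when nothing was shown. A rough standalone sketch of that bookkeeping, writing to stdout instead of a ui object (class and method names here are illustrative only):

import sys

class jsonlistwriter(object):
    def __init__(self, out=sys.stdout):
        self.out = out
        self._first = True

    def show(self, rev, node):
        # first element opens the array; later elements are comma-separated
        self.out.write("[\n {" if self._first else ",\n {")
        self._first = False
        self.out.write('\n  "rev": %d' % rev)
        self.out.write(',\n  "node": "%s"' % node)
        self.out.write('\n }')

    def close(self):
        # with no elements this still emits valid JSON: "[]"
        self.out.write("\n]\n" if not self._first else "[]\n")

w = jsonlistwriter()
w.show(0, '1ea73414a91b')
w.show(1, '66f7d451a68b')
w.close()
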
+class changesettemplater(changesetprinter):
+ '''format changeset information.
+
+ Note: there are a variety of convenience functions to build a
+ changesettemplater for common cases. See functions such as:
+ maketemplater, changesetdisplayer, buildcommittemplate, or other
+ functions that use changesettemplater.
+ '''
+
+ # Arguments before "buffered" used to be positional. Consider not
+ # adding/removing arguments before "buffered" to not break callers.
+ def __init__(self, ui, repo, tmplspec, differ=None, diffopts=None,
+ buffered=False):
+ changesetprinter.__init__(self, ui, repo, differ, diffopts, buffered)
+ tres = formatter.templateresources(ui, repo)
+ self.t = formatter.loadtemplater(ui, tmplspec,
+ defaults=templatekw.keywords,
+ resources=tres,
+ cache=templatekw.defaulttempl)
+ self._counter = itertools.count()
+ self.cache = tres['cache'] # shared with _graphnodeformatter()
+
+ self._tref = tmplspec.ref
+ self._parts = {'header': '', 'footer': '',
+ tmplspec.ref: tmplspec.ref,
+ 'docheader': '', 'docfooter': '',
+ 'separator': ''}
+ if tmplspec.mapfile:
+ # find correct templates for current mode, for backward
+ # compatibility with 'log -v/-q/--debug' using a mapfile
+ tmplmodes = [
+ (True, ''),
+ (self.ui.verbose, '_verbose'),
+ (self.ui.quiet, '_quiet'),
+ (self.ui.debugflag, '_debug'),
+ ]
+ for mode, postfix in tmplmodes:
+ for t in self._parts:
+ cur = t + postfix
+ if mode and cur in self.t:
+ self._parts[t] = cur
+ else:
+ partnames = [p for p in self._parts.keys() if p != tmplspec.ref]
+ m = formatter.templatepartsmap(tmplspec, self.t, partnames)
+ self._parts.update(m)
+
+ if self._parts['docheader']:
+ self.ui.write(templater.stringify(self.t(self._parts['docheader'])))
+
+ def close(self):
+ if self._parts['docfooter']:
+ if not self.footer:
+ self.footer = ""
+ self.footer += templater.stringify(self.t(self._parts['docfooter']))
+ return super(changesettemplater, self).close()
+
+ def _show(self, ctx, copies, props):
+ '''show a single changeset or file revision'''
+ props = props.copy()
+ props['ctx'] = ctx
+ props['index'] = index = next(self._counter)
+ props['revcache'] = {'copies': copies}
+ props = pycompat.strkwargs(props)
+
+ # write separator, which wouldn't work well with the header part below
+ # since there's inherently a conflict between header (across items) and
+ # separator (per item)
+ if self._parts['separator'] and index > 0:
+ self.ui.write(templater.stringify(self.t(self._parts['separator'])))
+
+ # write header
+ if self._parts['header']:
+ h = templater.stringify(self.t(self._parts['header'], **props))
+ if self.buffered:
+ self.header[ctx.rev()] = h
+ else:
+ if self.lastheader != h:
+ self.lastheader = h
+ self.ui.write(h)
+
+ # write changeset metadata, then patch if requested
+ key = self._parts[self._tref]
+ self.ui.write(templater.stringify(self.t(key, **props)))
+ self._showpatch(ctx)
+
+ if self._parts['footer']:
+ if not self.footer:
+ self.footer = templater.stringify(
+ self.t(self._parts['footer'], **props))
+
+def templatespec(tmpl, mapfile):
+ if mapfile:
+ return formatter.templatespec('changeset', tmpl, mapfile)
+ else:
+ return formatter.templatespec('', tmpl, None)
+
+def _lookuptemplate(ui, tmpl, style):
+ """Find the template matching the given template spec or style
+
+ See formatter.lookuptemplate() for details.
+ """
+
+ # ui settings
+ if not tmpl and not style: # template are stronger than style
+ tmpl = ui.config('ui', 'logtemplate')
+ if tmpl:
+ return templatespec(templater.unquotestring(tmpl), None)
+ else:
+ style = util.expandpath(ui.config('ui', 'style'))
+
+ if not tmpl and style:
+ mapfile = style
+ if not os.path.split(mapfile)[0]:
+ mapname = (templater.templatepath('map-cmdline.' + mapfile)
+ or templater.templatepath(mapfile))
+ if mapname:
+ mapfile = mapname
+ return templatespec(None, mapfile)
+
+ if not tmpl:
+ return templatespec(None, None)
+
+ return formatter.lookuptemplate(ui, 'changeset', tmpl)
+
+def maketemplater(ui, repo, tmpl, buffered=False):
+ """Create a changesettemplater from a literal template 'tmpl'
+ byte-string."""
+ spec = templatespec(tmpl, None)
+ return changesettemplater(ui, repo, spec, buffered=buffered)
+
+def changesetdisplayer(ui, repo, opts, differ=None, buffered=False):
+ """show one changeset using template or regular display.
+
+ Display format will be the first non-empty hit of:
+ 1. option 'template'
+ 2. option 'style'
+ 3. [ui] setting 'logtemplate'
+ 4. [ui] setting 'style'
+ If all of these values are either unset or the empty string,
+ regular display via changesetprinter() is done.
+ """
+ postargs = (differ, opts, buffered)
+ if opts.get('template') == 'json':
+ return jsonchangeset(ui, repo, *postargs)
+
+ spec = _lookuptemplate(ui, opts.get('template'), opts.get('style'))
+
+ if not spec.ref and not spec.tmpl and not spec.mapfile:
+ return changesetprinter(ui, repo, *postargs)
+
+ return changesettemplater(ui, repo, spec, *postargs)
+
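changesetdisplayer() and _lookuptemplate() together implement the precedence listed in the docstring above. A condensed sketch of just that resolution order, using plain dicts in place of opts and ui config (purely illustrative, not the patch's API):

def resolvedisplay(opts, uiconfig):
    # 1. -T/--template wins (with 'json' as a special case)
    if opts.get('template') == 'json':
        return 'json changeset printer'
    if opts.get('template'):
        return 'template: %s' % opts['template']
    # 2. --style
    if opts.get('style'):
        return 'style: %s' % opts['style']
    # 3. [ui] logtemplate, then 4. [ui] style
    if uiconfig.get('logtemplate'):
        return 'template: %s' % uiconfig['logtemplate']
    if uiconfig.get('style'):
        return 'style: %s' % uiconfig['style']
    # otherwise: plain changesetprinter
    return 'default'

print(resolvedisplay({'template': '{rev}'}, {'style': 'compact'}))  # template: {rev}
print(resolvedisplay({}, {'style': 'compact'}))                     # style: compact
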
+def _makematcher(repo, revs, pats, opts):
+ """Build matcher and expanded patterns from log options
+
+ If --follow, revs are the revisions to follow from.
+
+ Returns (match, pats, slowpath) where
+ - match: a matcher built from the given pats and -I/-X opts
+ - pats: patterns used (globs are expanded on Windows)
+ - slowpath: True if patterns aren't as simple as scanning filelogs
+ """
+ # pats/include/exclude are passed to match.match() directly in
+ # _matchfiles() revset but walkchangerevs() builds its matcher with
+ # scmutil.match(). The difference is input pats are globbed on
+ # platforms without shell expansion (windows).
+ wctx = repo[None]
+ match, pats = scmutil.matchandpats(wctx, pats, opts)
+ slowpath = match.anypats() or (not match.always() and opts.get('removed'))
+ if not slowpath:
+ follow = opts.get('follow') or opts.get('follow_first')
+ startctxs = []
+ if follow and opts.get('rev'):
+ startctxs = [repo[r] for r in revs]
+ for f in match.files():
+ if follow and startctxs:
+ # No idea if the path was a directory at that revision, so
+ # take the slow path.
+ if any(f not in c for c in startctxs):
+ slowpath = True
+ continue
+ elif follow and f not in wctx:
+ # If the file exists, it may be a directory, so let it
+ # take the slow path.
+ if os.path.exists(repo.wjoin(f)):
+ slowpath = True
+ continue
+ else:
+ raise error.Abort(_('cannot follow file not in parent '
+ 'revision: "%s"') % f)
+ filelog = repo.file(f)
+ if not filelog:
+ # A zero count may be a directory or deleted file, so
+ # try to find matching entries on the slow path.
+ if follow:
+ raise error.Abort(
+ _('cannot follow nonexistent file: "%s"') % f)
+ slowpath = True
+
+ # We decided to fall back to the slowpath because at least one
+ # of the paths was not a file. Check to see if at least one of them
+ # existed in history - in that case, we'll continue down the
+ # slowpath; otherwise, we can turn off the slowpath
+ if slowpath:
+ for path in match.files():
+ if path == '.' or path in repo.store:
+ break
+ else:
+ slowpath = False
+
+ return match, pats, slowpath
+
+def _fileancestors(repo, revs, match, followfirst):
+ fctxs = []
+ for r in revs:
+ ctx = repo[r]
+ fctxs.extend(ctx[f].introfilectx() for f in ctx.walk(match))
+
+ # When displaying a revision with --patch --follow FILE, we have
+ # to know which file of the revision must be diffed. With
+ # --follow, we want the names of the ancestors of FILE in the
+ # revision, stored in "fcache". "fcache" is populated as a side effect
+ # of the graph traversal.
+ fcache = {}
+ def filematcher(ctx):
+ return scmutil.matchfiles(repo, fcache.get(ctx.rev(), []))
+
+ def revgen():
+ for rev, cs in dagop.filectxancestors(fctxs, followfirst=followfirst):
+ fcache[rev] = [c.path() for c in cs]
+ yield rev
+ return smartset.generatorset(revgen(), iterasc=False), filematcher
+
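The fcache comment above is the key to how --patch --follow works: revgen() records each revision's touched file names as it walks the ancestry, and filematcher() simply reads that record back, so it is only valid for revisions the iteration has already produced. A toy standalone version of that coupling (sample data invented for illustration):

def makerevsandmatcher(history):
    # history: list of (rev, [paths]) pairs, newest first
    fcache = {}

    def revgen():
        for rev, paths in history:
            fcache[rev] = paths          # populated as a side effect
            yield rev

    def filematcher(rev):
        # only meaningful for revs revgen() has already yielded
        return set(fcache.get(rev, []))

    return revgen(), filematcher

revs, matcher = makerevsandmatcher([(3, ['a.c']), (1, ['a.c', 'b.c'])])
for r in revs:
    print('%d %s' % (r, sorted(matcher(r))))
# 3 ['a.c']
# 1 ['a.c', 'b.c']
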
+def _makenofollowfilematcher(repo, pats, opts):
+ '''hook for extensions to override the filematcher for non-follow cases'''
+ return None
+
+_opt2logrevset = {
+ 'no_merges': ('not merge()', None),
+ 'only_merges': ('merge()', None),
+ '_matchfiles': (None, '_matchfiles(%ps)'),
+ 'date': ('date(%s)', None),
+ 'branch': ('branch(%s)', '%lr'),
+ '_patslog': ('filelog(%s)', '%lr'),
+ 'keyword': ('keyword(%s)', '%lr'),
+ 'prune': ('ancestors(%s)', 'not %lr'),
+ 'user': ('user(%s)', '%lr'),
+}
+
+def _makerevset(repo, match, pats, slowpath, opts):
+ """Return a revset string built from log options and file patterns"""
+ opts = dict(opts)
+ # follow or not follow?
+ follow = opts.get('follow') or opts.get('follow_first')
+
+ # branch and only_branch are really aliases and must be handled at
+ # the same time
+ opts['branch'] = opts.get('branch', []) + opts.get('only_branch', [])
+ opts['branch'] = [repo.lookupbranch(b) for b in opts['branch']]
+
+ if slowpath:
+ # See walkchangerevs() slow path.
+ #
+ # pats/include/exclude cannot be represented as separate
+ # revset expressions as their filtering logic applies at file
+ # level. For instance "-I a -X b" matches a revision touching
+ # "a" and "b" while "file(a) and not file(b)" does
+ # not. Besides, filesets are evaluated against the working
+ # directory.
+ matchargs = ['r:', 'd:relpath']
+ for p in pats:
+ matchargs.append('p:' + p)
+ for p in opts.get('include', []):
+ matchargs.append('i:' + p)
+ for p in opts.get('exclude', []):
+ matchargs.append('x:' + p)
+ opts['_matchfiles'] = matchargs
+ elif not follow:
+ opts['_patslog'] = list(pats)
+
+ expr = []
+ for op, val in sorted(opts.iteritems()):
+ if not val:
+ continue
+ if op not in _opt2logrevset:
+ continue
+ revop, listop = _opt2logrevset[op]
+ if revop and '%' not in revop:
+ expr.append(revop)
+ elif not listop:
+ expr.append(revsetlang.formatspec(revop, val))
+ else:
+ if revop:
+ val = [revsetlang.formatspec(revop, v) for v in val]
+ expr.append(revsetlang.formatspec(listop, val))
+
+ if expr:
+ expr = '(' + ' and '.join(expr) + ')'
+ else:
+ expr = None
+ return expr
+
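To make the _opt2logrevset table and the loop in _makerevset() concrete, here is a trimmed standalone re-implementation. It inlines a much simplified stand-in for revsetlang.formatspec and only handles the '%s' and '%lr' cases, so treat it as a sketch rather than the real quoting and escaping rules:

_table = {
    'no_merges': ('not merge()', None),
    'only_merges': ('merge()', None),
    'date': ('date(%s)', None),
    'keyword': ('keyword(%s)', '%lr'),
    'user': ('user(%s)', '%lr'),
}

def _quote(v):
    return "'%s'" % v                        # simplified; no escaping

def makerevset(opts):
    expr = []
    for op, val in sorted(opts.items()):
        if not val or op not in _table:
            continue
        revop, listop = _table[op]
        if revop and '%' not in revop:       # fixed expression, e.g. merge()
            expr.append(revop)
        elif not listop:                     # single value
            expr.append(revop % _quote(val))
        else:                                # list of values, unioned together
            expr.append('(%s)' % ' or '.join(revop % _quote(v) for v in val))
    return '(' + ' and '.join(expr) + ')' if expr else None

print(makerevset({'user': ['alice', 'bob'], 'no_merges': True}))
# (not merge() and (user('alice') or user('bob')))
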
+def _initialrevs(repo, opts):
+ """Return the initial set of revisions to be filtered or followed"""
+ follow = opts.get('follow') or opts.get('follow_first')
+ if opts.get('rev'):
+ revs = scmutil.revrange(repo, opts['rev'])
+ elif follow and repo.dirstate.p1() == nullid:
+ revs = smartset.baseset()
+ elif follow:
+ revs = repo.revs('.')
+ else:
+ revs = smartset.spanset(repo)
+ revs.reverse()
+ return revs
+
+def getrevs(repo, pats, opts):
+ """Return (revs, differ) where revs is a smartset
+
+ differ is a changesetdiffer with pre-configured file matcher.
+ """
+ follow = opts.get('follow') or opts.get('follow_first')
+ followfirst = opts.get('follow_first')
+ limit = getlimit(opts)
+ revs = _initialrevs(repo, opts)
+ if not revs:
+ return smartset.baseset(), None
+ match, pats, slowpath = _makematcher(repo, revs, pats, opts)
+ filematcher = None
+ if follow:
+ if slowpath or match.always():
+ revs = dagop.revancestors(repo, revs, followfirst=followfirst)
+ else:
+ revs, filematcher = _fileancestors(repo, revs, match, followfirst)
+ revs.reverse()
+ if filematcher is None:
+ filematcher = _makenofollowfilematcher(repo, pats, opts)
+ if filematcher is None:
+ def filematcher(ctx):
+ return match
+
+ expr = _makerevset(repo, match, pats, slowpath, opts)
+ if opts.get('graph') and opts.get('rev'):
+ # User-specified revs might be unsorted, but don't sort before
+ # _makerevset because it might depend on the order of revs
+ if not (revs.isdescending() or revs.istopo()):
+ revs.sort(reverse=True)
+ if expr:
+ matcher = revset.match(None, expr)
+ revs = matcher(repo, revs)
+ if limit is not None:
+ revs = revs.slice(0, limit)
+
+ differ = changesetdiffer()
+ differ._makefilematcher = filematcher
+ return revs, differ
+
+def _parselinerangeopt(repo, opts):
+ """Parse --line-range log option and return a list of tuples (filename,
+ (fromline, toline)).
+ """
+ linerangebyfname = []
+ for pat in opts.get('line_range', []):
+ try:
+ pat, linerange = pat.rsplit(',', 1)
+ except ValueError:
+ raise error.Abort(_('malformatted line-range pattern %s') % pat)
+ try:
+ fromline, toline = map(int, linerange.split(':'))
+ except ValueError:
+ raise error.Abort(_("invalid line range for %s") % pat)
+ msg = _("line range pattern '%s' must match exactly one file") % pat
+ fname = scmutil.parsefollowlinespattern(repo, None, pat, msg)
+ linerangebyfname.append(
+ (fname, util.processlinerange(fromline, toline)))
+ return linerangebyfname
+
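_parselinerangeopt() expects each --line-range value to look like FILE,FROM:TO and splits on the last comma, so file names containing commas still parse. A minimal standalone sketch of just that string handling; the pattern-to-filename resolution and the exact range normalization done by scmutil/util are left out:

def parselinerange(value):
    # split 'FILE,FROM:TO' into (file, (fromline, toline))
    try:
        pat, linerange = value.rsplit(',', 1)     # last comma separates the range
    except ValueError:
        raise ValueError('malformed line-range pattern %s' % value)
    try:
        fromline, toline = map(int, linerange.split(':'))
    except ValueError:
        raise ValueError('invalid line range for %s' % pat)
    if fromline < 1 or toline < fromline:         # simplified validation
        raise ValueError('invalid line range for %s' % pat)
    return pat, (fromline, toline)

print(parselinerange('mercurial/mdiff.py,10:25'))
# ('mercurial/mdiff.py', (10, 25))
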
+def getlinerangerevs(repo, userrevs, opts):
+ """Return (revs, differ).
+
+ "revs" are revisions obtained by processing "line-range" log options and
+ walking block ancestors of each specified file/line-range.
+
+ "differ" is a changesetdiffer with pre-configured file matcher and hunks
+ filter.
+ """
+ wctx = repo[None]
+
+ # Two-levels map of "rev -> file ctx -> [line range]".
+ linerangesbyrev = {}
+ for fname, (fromline, toline) in _parselinerangeopt(repo, opts):
+ if fname not in wctx:
+ raise error.Abort(_('cannot follow file not in parent '
+ 'revision: "%s"') % fname)
+ fctx = wctx.filectx(fname)
+ for fctx, linerange in dagop.blockancestors(fctx, fromline, toline):
+ rev = fctx.introrev()
+ if rev not in userrevs:
+ continue
+ linerangesbyrev.setdefault(
+ rev, {}).setdefault(
+ fctx.path(), []).append(linerange)
+
+ def nofilterhunksfn(fctx, hunks):
+ return hunks
+
+ def hunksfilter(ctx):
+ fctxlineranges = linerangesbyrev.get(ctx.rev())
+ if fctxlineranges is None:
+ return nofilterhunksfn
+
+ def filterfn(fctx, hunks):
+ lineranges = fctxlineranges.get(fctx.path())
+ if lineranges is not None:
+ for hr, lines in hunks:
+ if hr is None: # binary
+ yield hr, lines
+ continue
+ if any(mdiff.hunkinrange(hr[2:], lr)
+ for lr in lineranges):
+ yield hr, lines
+ else:
+ for hunk in hunks:
+ yield hunk
+
+ return filterfn
+
+ def filematcher(ctx):
+ files = list(linerangesbyrev.get(ctx.rev(), []))
+ return scmutil.matchfiles(repo, files)
+
+ revs = sorted(linerangesbyrev, reverse=True)
+
+ differ = changesetdiffer()
+ differ._makefilematcher = filematcher
+ differ._makehunksfilter = hunksfilter
+ return revs, differ
+
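getlinerangerevs() wires its per-revision filtering through closures over linerangesbyrev: filematcher() narrows the file list and hunksfilter() drops hunks that do not overlap a requested range (the real overlap test is mdiff.hunkinrange). A compact standalone sketch of that overlap filtering, with made-up hunk data:

def overlaps(hunk, linerange):
    # hunk is (start, length) in the new file; linerange is (lower, upper)
    start, length = hunk
    lower, upper = linerange
    return start < upper and start + length > lower

def filterhunks(hunks, lineranges):
    # keep a hunk if it intersects any requested line range
    for hunkrange, lines in hunks:
        if any(overlaps(hunkrange, lr) for lr in lineranges):
            yield hunkrange, lines

hunks = [((3, 4), ['-old', '+new']), ((40, 2), ['+tail'])]
for hr, lines in filterhunks(hunks, [(1, 10)]):
    print('%s %s' % (hr, lines))
# (3, 4) ['-old', '+new']
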
+def _graphnodeformatter(ui, displayer):
+ spec = ui.config('ui', 'graphnodetemplate')
+ if not spec:
+ return templatekw.showgraphnode # fast path for "{graphnode}"
+
+ spec = templater.unquotestring(spec)
+ tres = formatter.templateresources(ui)
+ if isinstance(displayer, changesettemplater):
+ tres['cache'] = displayer.cache # reuse cache of slow templates
+ templ = formatter.maketemplater(ui, spec, defaults=templatekw.keywords,
+ resources=tres)
+ def formatnode(repo, ctx):
+ props = {'ctx': ctx, 'repo': repo, 'revcache': {}}
+ return templ.render(props)
+ return formatnode
+
+def displaygraph(ui, repo, dag, displayer, edgefn, getrenamed=None, props=None):
+ props = props or {}
+ formatnode = _graphnodeformatter(ui, displayer)
+ state = graphmod.asciistate()
+ styles = state['styles']
+
+ # only set graph styling if HGPLAIN is not set.
+ if ui.plain('graph'):
+ # set all edge styles to |, the default pre-3.8 behaviour
+ styles.update(dict.fromkeys(styles, '|'))
+ else:
+ edgetypes = {
+ 'parent': graphmod.PARENT,
+ 'grandparent': graphmod.GRANDPARENT,
+ 'missing': graphmod.MISSINGPARENT
+ }
+ for name, key in edgetypes.items():
+ # experimental config: experimental.graphstyle.*
+ styles[key] = ui.config('experimental', 'graphstyle.%s' % name,
+ styles[key])
+ if not styles[key]:
+ styles[key] = None
+
+ # experimental config: experimental.graphshorten
+ state['graphshorten'] = ui.configbool('experimental', 'graphshorten')
+
+ for rev, type, ctx, parents in dag:
+ char = formatnode(repo, ctx)
+ copies = None
+ if getrenamed and ctx.rev():
+ copies = []
+ for fn in ctx.files():
+ rename = getrenamed(fn, ctx.rev())
+ if rename:
+ copies.append((fn, rename[0]))
+ edges = edgefn(type, char, state, rev, parents)
+ firstedge = next(edges)
+ width = firstedge[2]
+ displayer.show(ctx, copies=copies,
+ _graphwidth=width, **pycompat.strkwargs(props))
+ lines = displayer.hunk.pop(rev).split('\n')
+ if not lines[-1]:
+ del lines[-1]
+ displayer.flush(ctx)
+ for type, char, width, coldata in itertools.chain([firstedge], edges):
+ graphmod.ascii(ui, state, type, char, lines, coldata)
+ lines = []
+ displayer.close()
+
+def displaygraphrevs(ui, repo, revs, displayer, getrenamed):
+ revdag = graphmod.dagwalker(repo, revs)
+ displaygraph(ui, repo, revdag, displayer, graphmod.asciiedges, getrenamed)
+
+def displayrevs(ui, repo, revs, displayer, getrenamed):
+ for rev in revs:
+ ctx = repo[rev]
+ copies = None
+ if getrenamed is not None and rev:
+ copies = []
+ for fn in ctx.files():
+ rename = getrenamed(fn, rev)
+ if rename:
+ copies.append((fn, rename[0]))
+ displayer.show(ctx, copies=copies)
+ displayer.flush(ctx)
+ displayer.close()
+
+def checkunsupportedgraphflags(pats, opts):
+ for op in ["newest_first"]:
+ if op in opts and opts[op]:
+ raise error.Abort(_("-G/--graph option is incompatible with --%s")
+ % op.replace("_", "-"))
+
+def graphrevs(repo, nodes, opts):
+ limit = getlimit(opts)
+ nodes.reverse()
+ if limit is not None:
+ nodes = nodes[:limit]
+ return graphmod.nodes(repo, nodes)
--- a/mercurial/logexchange.py Fri Feb 23 17:57:04 2018 -0800
+++ b/mercurial/logexchange.py Sat Feb 24 17:49:10 2018 -0600
@@ -11,6 +11,7 @@
from .node import hex
from . import (
+ util,
vfs as vfsmod,
)
@@ -94,6 +95,30 @@
finally:
wlock.release()
+def activepath(repo, remote):
+ """returns remote path"""
+ local = None
+ # is the remote a local peer
+ local = remote.local()
+
+ # determine the remote path from the repo, if possible; else just
+ # use the string given to us
+ rpath = remote
+ if local:
+ rpath = remote._repo.root
+ elif not isinstance(remote, str):
+ rpath = remote._url
+
+ # represent the remote path with a user-defined path name if one exists
+ for path, url in repo.ui.configitems('paths'):
+ # remove auth info from user defined url
+ url = util.removeauth(url)
+ if url == rpath:
+ rpath = path
+ break
+
+ return rpath
+
def pullremotenames(localrepo, remoterepo):
"""
pulls bookmarks and branches information of the remote repo during a
@@ -101,7 +126,7 @@
localrepo is our local repository
remoterepo is the peer instance
"""
- remotepath = remoterepo.url()
+ remotepath = activepath(localrepo, remoterepo)
bookmarks = remoterepo.listkeys('bookmarks')
# on a push, we don't want to keep obsolete heads since
# they won't show up as heads on the next pull, so we
--- a/mercurial/lsprof.py Fri Feb 23 17:57:04 2018 -0800
+++ b/mercurial/lsprof.py Sat Feb 24 17:49:10 2018 -0600
@@ -27,7 +27,7 @@
def __init__(self, data):
self.data = data
- def sort(self, crit="inlinetime"):
+ def sort(self, crit=r"inlinetime"):
"""XXX docstring"""
# profiler_entries isn't defined when running under PyPy.
if profiler_entry:
--- a/mercurial/mail.py Fri Feb 23 17:57:04 2018 -0800
+++ b/mercurial/mail.py Sat Feb 24 17:49:10 2018 -0600
@@ -20,6 +20,7 @@
from . import (
encoding,
error,
+ pycompat,
sslutil,
util,
)
@@ -186,7 +187,7 @@
def codec2iana(cs):
''''''
- cs = email.charset.Charset(cs).input_charset.lower()
+ cs = pycompat.sysbytes(email.charset.Charset(cs).input_charset.lower())
# "latin1" normalizes to "iso8859-1", standard calls for "iso-8859-1"
if cs.startswith("iso") and not cs.startswith("iso-"):
@@ -205,7 +206,7 @@
return mimetextqp(s, subtype, 'us-ascii')
for charset in cs:
try:
- s.decode(charset)
+ s.decode(pycompat.sysstr(charset))
return mimetextqp(s, subtype, codec2iana(charset))
except UnicodeDecodeError:
pass
@@ -218,7 +219,7 @@
'''
cs = email.charset.Charset(charset)
msg = email.message.Message()
- msg.set_type('text/' + subtype)
+ msg.set_type(pycompat.sysstr('text/' + subtype))
for line in body.splitlines():
if len(line) > 950:
@@ -287,13 +288,13 @@
addr = addr.encode('ascii')
except UnicodeDecodeError:
raise error.Abort(_('invalid local address: %s') % addr)
- return email.Utils.formataddr((name, addr))
+ return email.utils.formataddr((name, addr))
def addressencode(ui, address, charsets=None, display=False):
'''Turns address into RFC-2047 compliant header.'''
if display or not address:
return address or ''
- name, addr = email.Utils.parseaddr(address)
+ name, addr = email.utils.parseaddr(address)
return _addressencode(ui, name, addr, charsets)
def addrlistencode(ui, addrs, charsets=None, display=False):
@@ -304,7 +305,7 @@
return [a.strip() for a in addrs if a.strip()]
result = []
- for name, addr in email.Utils.getaddresses(addrs):
+ for name, addr in email.utils.getaddresses(addrs):
if name or addr:
result.append(_addressencode(ui, name, addr, charsets))
return result
--- a/mercurial/manifest.py Fri Feb 23 17:57:04 2018 -0800
+++ b/mercurial/manifest.py Sat Feb 24 17:49:10 2018 -0600
@@ -9,7 +9,6 @@
import heapq
import itertools
-import os
import struct
from .i18n import _
@@ -28,7 +27,7 @@
parsers = policy.importmod(r'parsers')
propertycache = util.propertycache
-def _parsev1(data):
+def _parse(data):
# This method does a little bit of excessive-looking
# precondition checking. This is so that the behavior of this
# class exactly matches its C counterpart to try and help
@@ -47,43 +46,7 @@
else:
yield f, bin(n), ''
-def _parsev2(data):
- metadataend = data.find('\n')
- # Just ignore metadata for now
- pos = metadataend + 1
- prevf = ''
- while pos < len(data):
- end = data.find('\n', pos + 1) # +1 to skip stem length byte
- if end == -1:
- raise ValueError('Manifest ended with incomplete file entry.')
- stemlen = ord(data[pos:pos + 1])
- items = data[pos + 1:end].split('\0')
- f = prevf[:stemlen] + items[0]
- if prevf > f:
- raise ValueError('Manifest entries not in sorted order.')
- fl = items[1]
- # Just ignore metadata (items[2:] for now)
- n = data[end + 1:end + 21]
- yield f, n, fl
- pos = end + 22
- prevf = f
-
-def _parse(data):
- """Generates (path, node, flags) tuples from a manifest text"""
- if data.startswith('\0'):
- return iter(_parsev2(data))
- else:
- return iter(_parsev1(data))
-
-def _text(it, usemanifestv2):
- """Given an iterator over (path, node, flags) tuples, returns a manifest
- text"""
- if usemanifestv2:
- return _textv2(it)
- else:
- return _textv1(it)
-
-def _textv1(it):
+def _text(it):
files = []
lines = []
_hex = revlog.hex
@@ -96,19 +59,6 @@
_checkforbidden(files)
return ''.join(lines)
-def _textv2(it):
- files = []
- lines = ['\0\n']
- prevf = ''
- for f, n, fl in it:
- files.append(f)
- stem = os.path.commonprefix([prevf, f])
- stemlen = min(len(stem), 255)
- lines.append("%c%s\0%s\n%s\n" % (stemlen, f[stemlen:], fl, n))
- prevf = f
- _checkforbidden(files)
- return ''.join(lines)
-
class lazymanifestiter(object):
def __init__(self, lm):
self.pos = 0
@@ -414,13 +364,7 @@
class manifestdict(object):
def __init__(self, data=''):
- if data.startswith('\0'):
- #_lazymanifest can not parse v2
- self._lm = _lazymanifest('')
- for f, n, fl in _parsev2(data):
- self._lm[f] = n, fl
- else:
- self._lm = _lazymanifest(data)
+ self._lm = _lazymanifest(data)
def __getitem__(self, key):
return self._lm[key][0]
@@ -589,12 +533,9 @@
def iterentries(self):
return self._lm.iterentries()
- def text(self, usemanifestv2=False):
- if usemanifestv2:
- return _textv2(self._lm.iterentries())
- else:
- # use (probably) native version for v1
- return self._lm.text()
+ def text(self):
+ # most likely uses native version
+ return self._lm.text()
def fastdelta(self, base, changes):
"""Given a base manifest text as a bytearray and a list of changes
@@ -755,6 +696,12 @@
size += m.__len__()
return size
+ def __nonzero__(self):
+ # Faster than "__len() != 0" since it avoids loading sub-manifests
+ return not self._isempty()
+
+ __bool__ = __nonzero__
+
def _isempty(self):
self._load() # for consistency; already loaded by all callers
return (not self._files and (not self._dirs or
@@ -954,7 +901,7 @@
else:
files.update(m1.iterkeys())
- for fn in t1._files.iterkeys():
+ for fn in t1._files:
if fn not in t2._files:
files.add(t1._subpath(fn))
@@ -1013,7 +960,7 @@
# yield this dir's files and walk its submanifests
self._load()
- for p in sorted(self._dirs.keys() + self._files.keys()):
+ for p in sorted(list(self._dirs) + list(self._files)):
if p in self._files:
fullp = self._subpath(p)
if match(fullp):
@@ -1132,12 +1079,12 @@
if fl:
self._flags[f] = fl
- def text(self, usemanifestv2=False):
+ def text(self):
"""Get the full data of this manifest as a bytestring."""
self._load()
- return _text(self.iterentries(), usemanifestv2)
+ return _text(self.iterentries())
- def dirtext(self, usemanifestv2=False):
+ def dirtext(self):
"""Get the full data of this directory as a bytestring. Make sure that
any submanifests have been written first, so their nodeids are correct.
"""
@@ -1145,7 +1092,7 @@
flags = self.flags
dirs = [(d[:-1], self._dirs[d]._node, 't') for d in self._dirs]
files = [(f, self._files[f], flags(f)) for f in self._files]
- return _text(sorted(dirs + files), usemanifestv2)
+ return _text(sorted(dirs + files))
def read(self, gettext, readsubtree):
def _load_for_read(s):
@@ -1202,15 +1149,12 @@
# stacks of commits, the number can go up, hence the config knob below.
cachesize = 4
optiontreemanifest = False
- usemanifestv2 = False
opts = getattr(opener, 'options', None)
if opts is not None:
cachesize = opts.get('manifestcachesize', cachesize)
optiontreemanifest = opts.get('treemanifest', False)
- usemanifestv2 = opts.get('manifestv2', usemanifestv2)
self._treeondisk = optiontreemanifest or treemanifest
- self._usemanifestv2 = usemanifestv2
self._fulltextcache = util.lrucachedict(cachesize)
@@ -1245,19 +1189,18 @@
self._fulltextcache.clear()
self._dirlogcache = {'': self}
- def dirlog(self, dir):
- if dir:
+ def dirlog(self, d):
+ if d:
assert self._treeondisk
- if dir not in self._dirlogcache:
- mfrevlog = manifestrevlog(self.opener, dir,
+ if d not in self._dirlogcache:
+ mfrevlog = manifestrevlog(self.opener, d,
self._dirlogcache,
treemanifest=self._treeondisk)
- self._dirlogcache[dir] = mfrevlog
- return self._dirlogcache[dir]
+ self._dirlogcache[d] = mfrevlog
+ return self._dirlogcache[d]
def add(self, m, transaction, link, p1, p2, added, removed, readtree=None):
- if (p1 in self.fulltextcache and util.safehasattr(m, 'fastdelta')
- and not self._usemanifestv2):
+ if p1 in self.fulltextcache and util.safehasattr(m, 'fastdelta'):
# If our first parent is in the manifest cache, we can
# compute a delta here using properties we know about the
# manifest up-front, which may save time later for the
@@ -1284,7 +1227,7 @@
n = self._addtree(m, transaction, link, m1, m2, readtree)
arraytext = None
else:
- text = m.text(self._usemanifestv2)
+ text = m.text()
n = self.addrevision(text, transaction, link, p1, p2)
arraytext = bytearray(text)
@@ -1303,13 +1246,13 @@
sublog.add(subm, transaction, link, subp1, subp2, None, None,
readtree=readtree)
m.writesubtrees(m1, m2, writesubtree)
- text = m.dirtext(self._usemanifestv2)
+ text = m.dirtext()
n = None
if self._dir != '':
# Double-check whether contents are unchanged to one parent
- if text == m1.dirtext(self._usemanifestv2):
+ if text == m1.dirtext():
n = m1.node()
- elif text == m2.dirtext(self._usemanifestv2):
+ elif text == m2.dirtext():
n = m2.node()
if not n:
@@ -1487,19 +1430,6 @@
Changing the value of `shallow` has no effect on flat manifests.
'''
revlog = self._revlog()
- if revlog._usemanifestv2:
- # Need to perform a slow delta
- r0 = revlog.deltaparent(revlog.rev(self._node))
- m0 = self._manifestlog[revlog.node(r0)].read()
- m1 = self.read()
- md = manifestdict()
- for f, ((n0, fl0), (n1, fl1)) in m0.diff(m1).iteritems():
- if n1:
- md[f] = n1
- if fl1:
- md.setflag(f, fl1)
- return md
-
r = revlog.rev(self._node)
d = mdiff.patchtext(revlog.revdiff(revlog.deltaparent(r), r))
return manifestdict(d)
@@ -1602,7 +1532,7 @@
its 't' flag.
'''
revlog = self._revlog()
- if shallow and not revlog._usemanifestv2:
+ if shallow:
r = revlog.rev(self._node)
d = mdiff.patchtext(revlog.revdiff(revlog.deltaparent(r), r))
return manifestdict(d)
--- a/mercurial/match.py Fri Feb 23 17:57:04 2018 -0800
+++ b/mercurial/match.py Sat Feb 24 17:49:10 2018 -0600
@@ -13,6 +13,7 @@
from .i18n import _
from . import (
+ encoding,
error,
pathutil,
util,
@@ -345,7 +346,7 @@
return 'all'
def __repr__(self):
- return '<alwaysmatcher>'
+ return r'<alwaysmatcher>'
class nevermatcher(basematcher):
'''Matches nothing.'''
@@ -368,7 +369,7 @@
return False
def __repr__(self):
- return '<nevermatcher>'
+ return r'<nevermatcher>'
class patternmatcher(basematcher):
@@ -397,6 +398,7 @@
def prefix(self):
return self._prefix
+ @encoding.strmethod
def __repr__(self):
return ('<patternmatcher patterns=%r>' % self._pats)
@@ -424,6 +426,7 @@
any(parentdir in self._roots
for parentdir in util.finddirs(dir)))
+ @encoding.strmethod
def __repr__(self):
return ('<includematcher includes=%r>' % self._pats)
@@ -452,6 +455,7 @@
def isexact(self):
return True
+ @encoding.strmethod
def __repr__(self):
return ('<exactmatcher files=%r>' % self._files)
@@ -492,6 +496,7 @@
def isexact(self):
return self._m1.isexact()
+ @encoding.strmethod
def __repr__(self):
return ('<differencematcher m1=%r, m2=%r>' % (self._m1, self._m2))
@@ -558,6 +563,7 @@
def isexact(self):
return self._m1.isexact() or self._m2.isexact()
+ @encoding.strmethod
def __repr__(self):
return ('<intersectionmatcher m1=%r, m2=%r>' % (self._m1, self._m2))
@@ -638,6 +644,7 @@
def prefix(self):
return self._matcher.prefix() and not self._always
+ @encoding.strmethod
def __repr__(self):
return ('<subdirmatcher path=%r, matcher=%r>' %
(self._path, self._matcher))
@@ -671,6 +678,7 @@
r |= v
return r
+ @encoding.strmethod
def __repr__(self):
return ('<unionmatcher matchers=%r>' % self._matchers)
--- a/mercurial/mdiff.py Fri Feb 23 17:57:04 2018 -0800
+++ b/mercurial/mdiff.py Sat Feb 24 17:49:10 2018 -0600
@@ -19,6 +19,8 @@
util,
)
+_missing_newline_marker = "\\ No newline at end of file\n"
+
bdiff = policy.importmod(r'bdiff')
mpatch = policy.importmod(r'mpatch')
@@ -27,16 +29,7 @@
patches = mpatch.patches
patchedsize = mpatch.patchedsize
textdiff = bdiff.bdiff
-
-def splitnewlines(text):
- '''like str.splitlines, but only split on newlines.'''
- lines = [l + '\n' for l in text.split('\n')]
- if lines:
- if lines[-1] == '\n':
- lines.pop()
- else:
- lines[-1] = lines[-1][:-1]
- return lines
+splitnewlines = bdiff.splitnewlines
class diffopts(object):
'''context is the number of context lines
@@ -234,13 +227,15 @@
yield s, type
yield s1, '='
-def unidiff(a, ad, b, bd, fn1, fn2, opts=defaultopts):
+def unidiff(a, ad, b, bd, fn1, fn2, binary, opts=defaultopts):
"""Return a unified diff as a (headers, hunks) tuple.
If the diff is not null, `headers` is a list with unified diff header
lines "--- <original>" and "+++ <new>" and `hunks` is a generator yielding
(hunkrange, hunklines) coming from _unidiff().
Otherwise, `headers` and `hunks` are empty.
+
+ Set binary=True if either a or b should be taken as a binary file.
"""
def datetag(date, fn=None):
if not opts.git and not opts.nodates:
@@ -264,18 +259,13 @@
fn1 = util.pconvert(fn1)
fn2 = util.pconvert(fn2)
- def checknonewline(lines):
- for text in lines:
- if text[-1:] != '\n':
- text += "\n\ No newline at end of file\n"
- yield text
-
- if not opts.text and (util.binary(a) or util.binary(b)):
+ if binary:
if a and b and len(a) == len(b) and a == b:
return sentinel
headerlines = []
hunks = (None, ['Binary file %s has changed\n' % fn1]),
elif not a:
+ without_newline = not b.endswith('\n')
b = splitnewlines(b)
if a is None:
l1 = '--- /dev/null%s' % datetag(epoch)
@@ -286,8 +276,12 @@
size = len(b)
hunkrange = (0, 0, 1, size)
hunklines = ["@@ -0,0 +1,%d @@\n" % size] + ["+" + e for e in b]
- hunks = (hunkrange, checknonewline(hunklines)),
+ if without_newline:
+ hunklines[-1] += '\n'
+ hunklines.append(_missing_newline_marker)
+ hunks = (hunkrange, hunklines),
elif not b:
+ without_newline = not a.endswith('\n')
a = splitnewlines(a)
l1 = "--- %s%s%s" % (aprefix, fn1, datetag(ad, fn1))
if b is None:
@@ -298,24 +292,19 @@
size = len(a)
hunkrange = (1, size, 0, 0)
hunklines = ["@@ -1,%d +0,0 @@\n" % size] + ["-" + e for e in a]
- hunks = (hunkrange, checknonewline(hunklines)),
+ if without_newline:
+ hunklines[-1] += '\n'
+ hunklines.append(_missing_newline_marker)
+ hunks = (hunkrange, hunklines),
else:
- diffhunks = _unidiff(a, b, opts=opts)
- try:
- hunkrange, hunklines = next(diffhunks)
- except StopIteration:
+ hunks = _unidiff(a, b, opts=opts)
+ if not next(hunks):
return sentinel
headerlines = [
"--- %s%s%s" % (aprefix, fn1, datetag(ad, fn1)),
"+++ %s%s%s" % (bprefix, fn2, datetag(bd, fn2)),
]
- def rewindhunks():
- yield hunkrange, checknonewline(hunklines)
- for hr, hl in diffhunks:
- yield hr, checknonewline(hl)
-
- hunks = rewindhunks()
return headerlines, hunks
@@ -327,6 +316,8 @@
form the '@@ -s1,l1 +s2,l2 @@' header and `hunklines` is a list of lines
of the hunk combining said header followed by line additions and
deletions.
+
+ Before any hunks, the generator yields a single bool: True if there is
+ at least one hunk to come, False otherwise.
"""
l1 = splitnewlines(t1)
l2 = splitnewlines(t2)
@@ -377,6 +368,26 @@
+ delta
+ [' ' + l1[x] for x in xrange(a2, aend)]
)
+ # If either file ends without a newline and the last line of
+ # that file is part of a hunk, a marker is printed. If the
+ # last line of both files is identical and neither ends in
+ # a newline, print only one marker. That's the only case in
+ # which the hunk can end in a shared line without a newline.
+ skip = False
+ if not t1.endswith('\n') and astart + alen == len(l1) + 1:
+ for i in xrange(len(hunklines) - 1, -1, -1):
+ if hunklines[i].startswith(('-', ' ')):
+ if hunklines[i].startswith(' '):
+ skip = True
+ hunklines[i] += '\n'
+ hunklines.insert(i + 1, _missing_newline_marker)
+ break
+ if not skip and not t2.endswith('\n') and bstart + blen == len(l2) + 1:
+ for i in xrange(len(hunklines) - 1, -1, -1):
+ if hunklines[i].startswith('+'):
+ hunklines[i] += '\n'
+ hunklines.insert(i + 1, _missing_newline_marker)
+ break
yield hunkrange, hunklines
# bdiff.blocks gives us the matching sequences in the files. The loop
@@ -385,6 +396,7 @@
#
hunk = None
ignoredlines = 0
+ has_hunks = False
for s, stype in allblocks(t1, t2, opts, l1, l2):
a1, a2, b1, b2 = s
if stype != '!':
@@ -411,6 +423,9 @@
astart = hunk[1]
bstart = hunk[3]
else:
+ if not has_hunks:
+ has_hunks = True
+ yield True
for x in yieldhunk(hunk):
yield x
if prev:
@@ -427,17 +442,22 @@
delta[len(delta):] = ['+' + x for x in new]
if hunk:
+ if not has_hunks:
+ has_hunks = True
+ yield True
for x in yieldhunk(hunk):
yield x
+ elif not has_hunks:
+ yield False
def b85diff(to, tn):
'''print base85-encoded binary diff'''
def fmtline(line):
l = len(line)
if l <= 26:
- l = chr(ord('A') + l - 1)
+ l = pycompat.bytechr(ord('A') + l - 1)
else:
- l = chr(l - 26 + ord('a') - 1)
+ l = pycompat.bytechr(l - 26 + ord('a') - 1)
return '%c%s\n' % (l, util.b85encode(line, True))
def chunk(text, csize=52):
--- a/mercurial/merge.py Fri Feb 23 17:57:04 2018 -0800
+++ b/mercurial/merge.py Sat Feb 24 17:49:10 2018 -0600
@@ -25,13 +25,12 @@
from . import (
copies,
error,
- extensions,
filemerge,
match as matchmod,
obsutil,
pycompat,
scmutil,
- subrepo,
+ subrepoutil,
util,
worker,
)
@@ -974,14 +973,14 @@
# Rename all local conflicting files that have not been deleted.
for p in localconflicts:
if p not in deletedfiles:
- ctxname = str(wctx).rstrip('+')
+ ctxname = bytes(wctx).rstrip('+')
pnew = util.safename(p, ctxname, wctx, set(actions.keys()))
actions[pnew] = ('pr', (p,), "local path conflict")
actions[p] = ('p', (pnew, 'l'), "path conflict")
if remoteconflicts:
# Check if all files in the conflicting directories have been removed.
- ctxname = str(mctx).rstrip('+')
+ ctxname = bytes(mctx).rstrip('+')
for f, p in _filesindirs(repo, mf, remoteconflicts):
if f not in deletedfiles:
m, args, msg = actions[p]
@@ -1186,8 +1185,9 @@
def _resolvetrivial(repo, wctx, mctx, ancestor, actions):
"""Resolves false conflicts where the nodeid changed but the content
remained the same."""
-
- for f, (m, args, msg) in actions.items():
+ # We force a copy of actions.items() because we're going to mutate
+ # actions as we resolve trivial conflicts.
+ for f, (m, args, msg) in list(actions.items()):
if m == 'cd' and f in ancestor and not wctx[f].cmp(ancestor[f]):
# local did change but ended up with same content
actions[f] = 'r', None, "prompt same"
@@ -1386,6 +1386,16 @@
if i > 0:
yield i, f
+def _prefetchfiles(repo, ctx, actions):
+ """Invoke ``scmutil.fileprefetchhooks()`` for the files relevant to the dict
+ of merge actions. ``ctx`` is the context being merged in."""
+
+ # Skipping 'a', 'am', 'f', 'r', 'dm', 'e', 'k', 'p' and 'pr', because they
+ # don't touch the context to be merged in. 'cd' is skipped, because
+ # changed/deleted never resolves to something from the remote side.
+ oplist = [actions[a] for a in 'g dc dg m'.split()]
+ prefetch = scmutil.fileprefetchhooks
+ prefetch(repo, ctx, [f for sublist in oplist for f, args, msg in sublist])
def applyupdates(repo, actions, wctx, mctx, overwrite, labels=None):
"""apply the merge action list to the working directory
@@ -1397,6 +1407,8 @@
describes how many files were affected by the update.
"""
+ _prefetchfiles(repo, mctx, actions)
+
updated, merged, removed = 0, 0, 0
ms = mergestate.clean(repo, wctx.p1().node(), mctx.node(), labels)
moves = []
@@ -1445,7 +1457,7 @@
z = 0
if [a for a in actions['r'] if a[0] == '.hgsubstate']:
- subrepo.submerge(repo, wctx, mctx, wctx, overwrite, labels)
+ subrepoutil.submerge(repo, wctx, mctx, wctx, overwrite, labels)
# record path conflicts
for f, args, msg in actions['p']:
@@ -1495,7 +1507,7 @@
updated = len(actions['g'])
if [a for a in actions['g'] if a[0] == '.hgsubstate']:
- subrepo.submerge(repo, wctx, mctx, wctx, overwrite, labels)
+ subrepoutil.submerge(repo, wctx, mctx, wctx, overwrite, labels)
# forget (manifest only, just log it) (must come first)
for f, args, msg in actions['f']:
@@ -1583,8 +1595,8 @@
z += 1
progress(_updating, z, item=f, total=numupdates, unit=_files)
if f == '.hgsubstate': # subrepo states need updating
- subrepo.submerge(repo, wctx, mctx, wctx.ancestor(mctx),
- overwrite, labels)
+ subrepoutil.submerge(repo, wctx, mctx, wctx.ancestor(mctx),
+ overwrite, labels)
continue
wctx[f].audit()
complete, r = ms.preresolve(f, wctx)
@@ -1835,7 +1847,7 @@
else:
pas = [p1.ancestor(p2, warn=branchmerge)]
- fp1, fp2, xp1, xp2 = p1.node(), p2.node(), str(p1), str(p2)
+ fp1, fp2, xp1, xp2 = p1.node(), p2.node(), bytes(p1), bytes(p2)
### check phase
if not overwrite:
@@ -1913,7 +1925,7 @@
# Prompt and create actions. Most of this is in the resolve phase
# already, but we can't handle .hgsubstate in filemerge or
- # subrepo.submerge yet so we have to keep prompting for it.
+ # subrepoutil.submerge yet so we have to keep prompting for it.
if '.hgsubstate' in actionbyfile:
f = '.hgsubstate'
m, args, msg = actionbyfile[f]
@@ -1992,6 +2004,8 @@
fsmonitorthreshold = repo.ui.configint('fsmonitor',
'warn_update_file_count')
try:
+ # avoid cycle: extensions -> cmdutil -> merge
+ from . import extensions
extensions.find('fsmonitor')
fsmonitorenabled = repo.ui.config('fsmonitor', 'mode') != 'off'
# We intentionally don't look at whether fsmonitor has disabled
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/mercurial/narrowspec.py Sat Feb 24 17:49:10 2018 -0600
@@ -0,0 +1,204 @@
+# narrowspec.py - methods for working with a narrow view of a repository
+#
+# Copyright 2017 Google, Inc.
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2 or any later version.
+
+from __future__ import absolute_import
+
+import errno
+
+from .i18n import _
+from . import (
+ error,
+ hg,
+ match as matchmod,
+ util,
+)
+
+FILENAME = 'narrowspec'
+
+def _parsestoredpatterns(text):
+ """Parses the narrowspec format that's stored on disk."""
+ patlist = None
+ includepats = []
+ excludepats = []
+ for l in text.splitlines():
+ if l == '[includes]':
+ if patlist is None:
+ patlist = includepats
+ else:
+ raise error.Abort(_('narrowspec includes section must appear '
+ 'at most once, before excludes'))
+ elif l == '[excludes]':
+ if patlist is not excludepats:
+ patlist = excludepats
+ else:
+ raise error.Abort(_('narrowspec excludes section must appear '
+ 'at most once'))
+ else:
+ patlist.append(l)
+
+ return set(includepats), set(excludepats)
+
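For reference, the stored narrowspec is just two headed sections with one pattern per line. A small self-contained sketch of parsing such text (the sample patterns are invented, and the real _parsestoredpatterns() above additionally enforces section order and uniqueness):

SAMPLE = (
    '[includes]\n'
    'path:src\n'
    'path:docs\n'
    '[excludes]\n'
    'path:src/vendored\n'
)

def parsestored(text):
    includes, excludes, current = set(), set(), None
    for line in text.splitlines():
        if line == '[includes]':
            current = includes
        elif line == '[excludes]':
            current = excludes
        elif line:
            if current is None:
                raise ValueError('pattern before any section: %r' % line)
            current.add(line)
    return includes, excludes

inc, exc = parsestored(SAMPLE)
print(sorted(inc))   # ['path:docs', 'path:src']
print(sorted(exc))   # ['path:src/vendored']
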
+def parseserverpatterns(text):
+ """Parses the narrowspec format that's returned by the server."""
+ includepats = set()
+ excludepats = set()
+
+ # We get one entry per line, in the format "<key> <value>".
+ # It's OK for value to contain other spaces.
+ for kp in (l.split(' ', 1) for l in text.splitlines()):
+ if len(kp) != 2:
+ raise error.Abort(_('Invalid narrowspec pattern line: "%s"') % kp)
+ key = kp[0]
+ pat = kp[1]
+ if key == 'include':
+ includepats.add(pat)
+ elif key == 'exclude':
+ excludepats.add(pat)
+ else:
+ raise error.Abort(_('Invalid key "%s" in server response') % key)
+
+ return includepats, excludepats
+
+def normalizesplitpattern(kind, pat):
+ """Returns the normalized version of a pattern and kind.
+
+ Returns a tuple with the normalized kind and normalized pattern.
+ """
+ pat = pat.rstrip('/')
+ _validatepattern(pat)
+ return kind, pat
+
+def _numlines(s):
+ """Returns the number of lines in s, including ending empty lines."""
+ # We use splitlines because it is Unicode-friendly and thus Python 3
+ # compatible. However, it does not count empty lines at the end, so trick
+ # it by adding a character at the end.
+ return len((s + 'x').splitlines())
+
+def _validatepattern(pat):
+ """Validates the pattern and aborts if it is invalid.
+
+ Patterns are stored in the narrowspec as newline-separated
+ POSIX-style bytestring paths. There's no escaping.
+ """
+
+ # We use newlines as separators in the narrowspec file, so don't allow them
+ # in patterns.
+ if _numlines(pat) > 1:
+ raise error.Abort(_('newlines are not allowed in narrowspec paths'))
+
+ components = pat.split('/')
+ if '.' in components or '..' in components:
+ raise error.Abort(_('"." and ".." are not allowed in narrowspec paths'))
+
+def normalizepattern(pattern, defaultkind='path'):
+ """Returns the normalized version of a text-format pattern.
+
+ If the pattern has no kind, the default will be added.
+ """
+ kind, pat = matchmod._patsplit(pattern, defaultkind)
+ return '%s:%s' % normalizesplitpattern(kind, pat)
+
+def parsepatterns(pats):
+ """Parses a list of patterns into a typed pattern set."""
+ return set(normalizepattern(p) for p in pats)
+
+def format(includes, excludes):
+ output = '[includes]\n'
+ for i in sorted(includes - excludes):
+ output += i + '\n'
+ output += '[excludes]\n'
+ for e in sorted(excludes):
+ output += e + '\n'
+ return output
+
+def match(root, include=None, exclude=None):
+ if not include:
+ # Passing empty include and empty exclude to matchmod.match()
+ # gives a matcher that matches everything, so explicitly use
+ # the nevermatcher.
+ return matchmod.never(root, '')
+ return matchmod.match(root, '', [], include=include or [],
+ exclude=exclude or [])
+
+def needsexpansion(includes):
+ return [i for i in includes if i.startswith('include:')]
+
+def load(repo):
+ if repo.shared():
+ repo = hg.sharedreposource(repo)
+ try:
+ spec = repo.vfs.read(FILENAME)
+ except IOError as e:
+ # Treat "narrowspec does not exist" the same as "narrowspec file exists
+ # and is empty".
+ if e.errno == errno.ENOENT:
+ # Without this the next call to load will use the cached
+ # non-existence of the file, which can cause some odd issues.
+ repo.invalidate(clearfilecache=True)
+ return set(), set()
+ raise
+ return _parsestoredpatterns(spec)
+
+def save(repo, includepats, excludepats):
+ spec = format(includepats, excludepats)
+ if repo.shared():
+ repo = hg.sharedreposource(repo)
+ repo.vfs.write(FILENAME, spec)
+
+def restrictpatterns(req_includes, req_excludes, repo_includes, repo_excludes):
+ r""" Restricts the patterns according to repo settings,
+ results in a logical AND operation
+
+ :param req_includes: requested includes
+ :param req_excludes: requested excludes
+ :param repo_includes: repo includes
+ :param repo_excludes: repo excludes
+ :return: include patterns, exclude patterns, and invalid include patterns.
+
+ >>> restrictpatterns({'f1','f2'}, {}, ['f1'], [])
+ (set(['f1']), {}, [])
+ >>> restrictpatterns({'f1'}, {}, ['f1','f2'], [])
+ (set(['f1']), {}, [])
+ >>> restrictpatterns({'f1/fc1', 'f3/fc3'}, {}, ['f1','f2'], [])
+ (set(['f1/fc1']), {}, [])
+ >>> restrictpatterns({'f1_fc1'}, {}, ['f1','f2'], [])
+ ([], set(['path:.']), [])
+ >>> restrictpatterns({'f1/../f2/fc2'}, {}, ['f1','f2'], [])
+ (set(['f2/fc2']), {}, [])
+ >>> restrictpatterns({'f1/../f3/fc3'}, {}, ['f1','f2'], [])
+ ([], set(['path:.']), [])
+ >>> restrictpatterns({'f1/$non_existent_var'}, {}, ['f1','f2'], [])
+ (set(['f1/$non_existent_var']), {}, [])
+ """
+ res_excludes = set(req_excludes)
+ res_excludes.update(repo_excludes)
+ invalid_includes = []
+ if not req_includes:
+ res_includes = set(repo_includes)
+ elif 'path:.' not in repo_includes:
+ res_includes = []
+ for req_include in req_includes:
+ req_include = util.expandpath(util.normpath(req_include))
+ if req_include in repo_includes:
+ res_includes.append(req_include)
+ continue
+ valid = False
+ for repo_include in repo_includes:
+ if req_include.startswith(repo_include + '/'):
+ valid = True
+ res_includes.append(req_include)
+ break
+ if not valid:
+ invalid_includes.append(req_include)
+ if len(res_includes) == 0:
+ res_excludes = {'path:.'}
+ else:
+ res_includes = set(res_includes)
+ else:
+ res_includes = set(req_includes)
+ return res_includes, res_excludes, invalid_includes
--- a/mercurial/node.py Fri Feb 23 17:57:04 2018 -0800
+++ b/mercurial/node.py Sat Feb 24 17:49:10 2018 -0600
@@ -11,7 +11,14 @@
# This ugly style has a noticeable effect in manifest parsing
hex = binascii.hexlify
-bin = binascii.unhexlify
+# Adapt to Python 3 API changes. If this ends up showing up in
+# profiles, we can use this version only on Python 3, and forward
+# binascii.unhexlify like we used to on Python 2.
+def bin(s):
+ try:
+ return binascii.unhexlify(s)
+ except binascii.Error as e:
+ raise TypeError(e)
nullrev = -1
nullid = b"\0" * 20
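The bin() change above exists to normalize exception types across Python versions: on Python 3, binascii.unhexlify raises binascii.Error for bad input, whereas callers of bin() historically expect TypeError. A quick standalone illustration of the wrapper's behavior:

import binascii

def bin(s):
    # re-raise Python 3's binascii.Error as the TypeError callers expect
    try:
        return binascii.unhexlify(s)
    except binascii.Error as e:
        raise TypeError(e)

print(repr(bin(b'0a0b')))        # b'\n\x0b'
try:
    bin(b'not-hex!')
except TypeError as e:
    print('rejected: %s' % e)
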
--- a/mercurial/obsolete.py Fri Feb 23 17:57:04 2018 -0800
+++ b/mercurial/obsolete.py Sat Feb 24 17:49:10 2018 -0600
@@ -506,13 +506,6 @@
for mark in markers:
successors.setdefault(mark[0], set()).add(mark)
-def _addprecursors(*args, **kwargs):
- msg = ("'obsolete._addprecursors' is deprecated, "
- "use 'obsolete._addpredecessors'")
- util.nouideprecwarn(msg, '4.4')
-
- return _addpredecessors(*args, **kwargs)
-
@util.nogc
def _addpredecessors(predecessors, markers):
for mark in markers:
@@ -570,7 +563,7 @@
return len(self._all)
def __nonzero__(self):
- if not self._cached('_all'):
+ if not self._cached(r'_all'):
try:
return self.svfs.stat('obsstore').st_size > 1
except OSError as inst:
@@ -700,14 +693,6 @@
_addsuccessors(successors, self._all)
return successors
- @property
- def precursors(self):
- msg = ("'obsstore.precursors' is deprecated, "
- "use 'obsstore.predecessors'")
- util.nouideprecwarn(msg, '4.4')
-
- return self.predecessors
-
@propertycache
def predecessors(self):
predecessors = {}
@@ -727,11 +712,11 @@
markers = list(markers) # to allow repeated iteration
self._data = self._data + rawdata
self._all.extend(markers)
- if self._cached('successors'):
+ if self._cached(r'successors'):
_addsuccessors(self.successors, markers)
- if self._cached('predecessors'):
+ if self._cached(r'predecessors'):
_addpredecessors(self.predecessors, markers)
- if self._cached('children'):
+ if self._cached(r'children'):
_addchildren(self.children, markers)
_checkinvalidmarkers(markers)
@@ -843,42 +828,6 @@
repo.invalidatevolatilesets()
return True
-# keep compatibility for the 4.3 cycle
-def allprecursors(obsstore, nodes, ignoreflags=0):
- movemsg = 'obsolete.allprecursors moved to obsutil.allprecursors'
- util.nouideprecwarn(movemsg, '4.3')
- return obsutil.allprecursors(obsstore, nodes, ignoreflags)
-
-def allsuccessors(obsstore, nodes, ignoreflags=0):
- movemsg = 'obsolete.allsuccessors moved to obsutil.allsuccessors'
- util.nouideprecwarn(movemsg, '4.3')
- return obsutil.allsuccessors(obsstore, nodes, ignoreflags)
-
-def marker(repo, data):
- movemsg = 'obsolete.marker moved to obsutil.marker'
- repo.ui.deprecwarn(movemsg, '4.3')
- return obsutil.marker(repo, data)
-
-def getmarkers(repo, nodes=None, exclusive=False):
- movemsg = 'obsolete.getmarkers moved to obsutil.getmarkers'
- repo.ui.deprecwarn(movemsg, '4.3')
- return obsutil.getmarkers(repo, nodes=nodes, exclusive=exclusive)
-
-def exclusivemarkers(repo, nodes):
- movemsg = 'obsolete.exclusivemarkers moved to obsutil.exclusivemarkers'
- repo.ui.deprecwarn(movemsg, '4.3')
- return obsutil.exclusivemarkers(repo, nodes)
-
-def foreground(repo, nodes):
- movemsg = 'obsolete.foreground moved to obsutil.foreground'
- repo.ui.deprecwarn(movemsg, '4.3')
- return obsutil.foreground(repo, nodes)
-
-def successorssets(repo, initialnode, cache=None):
- movemsg = 'obsolete.successorssets moved to obsutil.successorssets'
- repo.ui.deprecwarn(movemsg, '4.3')
- return obsutil.successorssets(repo, initialnode, cache=cache)
-
# mapping of 'set-name' -> <function to compute this set>
cachefuncs = {}
def cachefor(name):
@@ -933,14 +882,6 @@
obs = set(r for r in notpublic if isobs(getnode(r)))
return obs
-@cachefor('unstable')
-def _computeunstableset(repo):
- msg = ("'unstable' volatile set is deprecated, "
- "use 'orphan'")
- repo.ui.deprecwarn(msg, '4.4')
-
- return _computeorphanset(repo)
-
@cachefor('orphan')
def _computeorphanset(repo):
"""the set of non obsolete revisions with obsolete parents"""
@@ -969,14 +910,6 @@
"""the set of obsolete parents without non obsolete descendants"""
return getrevs(repo, 'obsolete') - getrevs(repo, 'suspended')
-@cachefor('bumped')
-def _computebumpedset(repo):
- msg = ("'bumped' volatile set is deprecated, "
- "use 'phasedivergent'")
- repo.ui.deprecwarn(msg, '4.4')
-
- return _computephasedivergentset(repo)
-
@cachefor('phasedivergent')
def _computephasedivergentset(repo):
"""the set of revs trying to obsolete public revisions"""
@@ -1000,14 +933,6 @@
break # Next draft!
return bumped
-@cachefor('divergent')
-def _computedivergentset(repo):
- msg = ("'divergent' volatile set is deprecated, "
- "use 'contentdivergent'")
- repo.ui.deprecwarn(msg, '4.4')
-
- return _computecontentdivergentset(repo)
-
@cachefor('contentdivergent')
def _computecontentdivergentset(repo):
"""the set of rev that compete to be the final successors of some revision.
--- a/mercurial/obsutil.py Fri Feb 23 17:57:04 2018 -0800
+++ b/mercurial/obsutil.py Sat Feb 24 17:49:10 2018 -0600
@@ -33,12 +33,6 @@
return False
return self._data == other._data
- def precnode(self):
- msg = ("'marker.precnode' is deprecated, "
- "use 'marker.prednode'")
- util.nouideprecwarn(msg, '4.4')
- return self.prednode()
-
def prednode(self):
"""Predecessor changeset node identifier"""
return self._data[0]
@@ -106,15 +100,6 @@
else:
stack.append(precnodeid)
-def allprecursors(*args, **kwargs):
- """ (DEPRECATED)
- """
- msg = ("'obsutil.allprecursors' is deprecated, "
- "use 'obsutil.allpredecessors'")
- util.nouideprecwarn(msg, '4.4')
-
- return allpredecessors(*args, **kwargs)
-
def allpredecessors(obsstore, nodes, ignoreflags=0):
"""Yield node for every precursors of <nodes>.
@@ -421,10 +406,10 @@
# Check if other meta has changed
changeextra = changectx.extra().items()
- ctxmeta = filter(metanotblacklisted, changeextra)
+ ctxmeta = list(filter(metanotblacklisted, changeextra))
sourceextra = source.extra().items()
- srcmeta = filter(metanotblacklisted, sourceextra)
+ srcmeta = list(filter(metanotblacklisted, sourceextra))
if ctxmeta != srcmeta:
effects |= METACHANGED
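The list(filter(...)) changes above matter because filter() returns a lazy
iterator on Python 3, so the subsequent "ctxmeta != srcmeta" comparison would
always be true when comparing two bare filter objects. A quick illustration
(Python 3):

    a = filter(None, [1, 2, 3])
    b = filter(None, [1, 2, 3])
    print(a == b)                  # False: two distinct iterator objects
    print(list(a) == list(b))      # True: compare the materialized contents
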
--- a/mercurial/patch.py Fri Feb 23 17:57:04 2018 -0800
+++ b/mercurial/patch.py Sat Feb 24 17:49:10 2018 -0600
@@ -12,7 +12,6 @@
import copy
import difflib
import email
-import email.parser as emailparser
import errno
import hashlib
import os
@@ -109,7 +108,7 @@
cur.append(line)
c = chunk(cur)
- m = emailparser.Parser().parse(c)
+ m = pycompat.emailparser().parse(c)
if not m.is_multipart():
yield msgfp(m)
else:
@@ -216,9 +215,9 @@
data = {}
fd, tmpname = tempfile.mkstemp(prefix='hg-patch-')
- tmpfp = os.fdopen(fd, pycompat.sysstr('w'))
+ tmpfp = os.fdopen(fd, pycompat.sysstr('wb'))
try:
- msg = emailparser.Parser().parse(fileobj)
+ msg = pycompat.emailparser().parse(fileobj)
subject = msg['Subject'] and mail.headdecode(msg['Subject'])
data['user'] = msg['From'] and mail.headdecode(msg['From'])
@@ -242,7 +241,7 @@
ok_types = ('text/plain', 'text/x-diff', 'text/x-patch')
message = ''
for part in msg.walk():
- content_type = part.get_content_type()
+ content_type = pycompat.bytestr(part.get_content_type())
ui.debug('Content-Type: %s\n' % content_type)
if content_type not in ok_types:
continue
@@ -1451,7 +1450,7 @@
dec = []
line = getline(lr, self.hunk)
while len(line) > 1:
- l = line[0]
+ l = line[0:1]
if l <= 'Z' and l >= 'A':
l = ord(l) - ord('A') + 1
else:
@@ -1460,7 +1459,7 @@
dec.append(util.b85decode(line[1:])[:l])
except ValueError as e:
raise PatchError(_('could not decode "%s" binary patch: %s')
- % (self._fname, str(e)))
+ % (self._fname, util.forcebytestr(e)))
line = getline(lr, self.hunk)
text = zlib.decompress(''.join(dec))
if len(text) != size:
@@ -2342,7 +2341,7 @@
if hunksfilterfn is not None:
# If the file has been removed, fctx2 is None; but this should
# not occur here since we catch removed files early in
- # cmdutil.getloglinerangerevs() for 'hg log -L'.
+ # logcmdutil.getlinerangerevs() for 'hg log -L'.
assert fctx2 is not None, \
'fctx2 unexpectly None in diff hunks filtering'
hunks = hunksfilterfn(fctx2, hunks)
@@ -2698,8 +2697,10 @@
if opts.git or losedatafn:
flag2 = ctx2.flags(f2)
# if binary is True, output "summary" or "base85", but not "text diff"
- binary = not opts.text and any(f.isbinary()
- for f in [fctx1, fctx2] if f is not None)
+ if opts.text:
+ binary = False
+ else:
+ binary = any(f.isbinary() for f in [fctx1, fctx2] if f is not None)
if losedatafn and not opts.git:
if (binary or
@@ -2789,7 +2790,8 @@
uheaders, hunks = mdiff.unidiff(content1, date1,
content2, date2,
- path1, path2, opts=opts)
+ path1, path2,
+ binary=binary, opts=opts)
header.extend(uheaders)
yield fctx1, fctx2, header, hunks
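Several hunks above replace byte-string indexing with slicing because indexing
bytes on Python 3 yields an int rather than a one-byte string. The
"l = line[0:1]" change in the git binary-hunk decoder is the clearest case; a
short, standalone illustration of why the slice is needed (the payload text is
made up):

    line = b'M<base85 payload>'
    print(line[0])      # 77 -- an int on Python 3
    print(line[0:1])    # b'M' -- one byte on both Python 2 and 3

    l = line[0:1]
    if b'A' <= l <= b'Z':
        length = ord(l) - ord('A') + 1      # 'A'..'Z' encode lengths 1..26
    else:
        length = ord(l) - ord('a') + 27     # 'a'..'z' encode lengths 27..52
    print(length)                           # 13 decoded bytes on this line
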
--- a/mercurial/policy.py Fri Feb 23 17:57:04 2018 -0800
+++ b/mercurial/policy.py Sat Feb 24 17:49:10 2018 -0600
@@ -71,7 +71,7 @@
# keep in sync with "version" in C modules
_cextversions = {
(r'cext', r'base85'): 1,
- (r'cext', r'bdiff'): 1,
+ (r'cext', r'bdiff'): 2,
(r'cext', r'diffhelpers'): 1,
(r'cext', r'mpatch'): 1,
(r'cext', r'osutil'): 3,
--- a/mercurial/posix.py Fri Feb 23 17:57:04 2018 -0800
+++ b/mercurial/posix.py Sat Feb 24 17:49:10 2018 -0600
@@ -113,7 +113,7 @@
if l:
if not stat.S_ISLNK(s):
# switch file to link
- fp = open(f)
+ fp = open(f, 'rb')
data = fp.read()
fp.close()
unlink(f)
@@ -121,7 +121,7 @@
os.symlink(data, f)
except OSError:
# failed to make a link, rewrite file
- fp = open(f, "w")
+ fp = open(f, "wb")
fp.write(data)
fp.close()
# no chmod needed at this point
@@ -130,7 +130,7 @@
# switch link to file
data = os.readlink(f)
unlink(f)
- fp = open(f, "w")
+ fp = open(f, "wb")
fp.write(data)
fp.close()
s = 0o666 & ~umask # avoid restatting for chmod
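The posix.py hunks switch the symlink/regular-file conversion to binary mode
because text-mode file objects on Python 3 decode to str (and may translate
newlines), which corrupts arbitrary file contents that are written back
verbatim. A small round-trip check showing the binary-mode behavior relied on
here (the temporary file is only for the demo):

    import os
    import tempfile

    data = b'payload \x00 with\r\nraw bytes'
    fd, path = tempfile.mkstemp()
    with os.fdopen(fd, 'wb') as fp:
        fp.write(data)
    with open(path, 'rb') as fp:
        assert fp.read() == data    # binary mode round-trips exactly
    os.unlink(path)
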
--- a/mercurial/progress.py Fri Feb 23 17:57:04 2018 -0800
+++ b/mercurial/progress.py Sat Feb 24 17:49:10 2018 -0600
@@ -119,8 +119,7 @@
add = topic
elif indicator == 'number':
if total:
- add = ('% ' + str(len(str(total))) +
- 's/%s') % (pos, total)
+ add = b'%*d/%d' % (len(str(total)), pos, total)
else:
add = str(pos)
elif indicator.startswith('item') and item:
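The progress.py change collapses the old string building into a single
printf-style format: '%*d' takes the field width as an argument, so the
position is right-aligned to the width of the total. For example:

    pos, total = 7, 1234
    width = len(str(total))
    print('%*d/%d' % (width, pos, total))    # '   7/1234'
    # bytes formatting (PEP 461, Python 3.5+) accepts the same conversions:
    print(b'%*d/%d' % (width, pos, total))   # b'   7/1234'
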
--- a/mercurial/pure/base85.py Fri Feb 23 17:57:04 2018 -0800
+++ b/mercurial/pure/base85.py Sat Feb 24 17:49:10 2018 -0600
@@ -9,8 +9,10 @@
import struct
-_b85chars = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ" \
- "abcdefghijklmnopqrstuvwxyz!#$%&()*+-;<=>?@^_`{|}~"
+from .. import pycompat
+
+_b85chars = pycompat.bytestr("0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdef"
+ "ghijklmnopqrstuvwxyz!#$%&()*+-;<=>?@^_`{|}~")
_b85chars2 = [(a + b) for a in _b85chars for b in _b85chars]
_b85dec = {}
@@ -51,6 +53,7 @@
out = []
for i in range(0, len(text), 5):
chunk = text[i:i + 5]
+ chunk = pycompat.bytestr(chunk)
acc = 0
for j, c in enumerate(chunk):
try:
--- a/mercurial/pure/bdiff.py Fri Feb 23 17:57:04 2018 -0800
+++ b/mercurial/pure/bdiff.py Sat Feb 24 17:49:10 2018 -0600
@@ -90,3 +90,13 @@
text = re.sub('[ \t\r]+', ' ', text)
text = text.replace(' \n', '\n')
return text
+
+def splitnewlines(text):
+ '''like str.splitlines, but only split on newlines.'''
+ lines = [l + '\n' for l in text.split('\n')]
+ if lines:
+ if lines[-1] == '\n':
+ lines.pop()
+ else:
+ lines[-1] = lines[-1][:-1]
+ return lines
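splitnewlines() differs from str.splitlines in two ways: it splits only on
'\n' (splitlines also splits on '\r' and other line boundaries) and it keeps
the newline on every returned line without producing an empty trailing
element. Reproducing the helper standalone to show the contrast:

    def splitnewlines(text):
        '''like str.splitlines, but only split on newlines.'''
        lines = [l + '\n' for l in text.split('\n')]
        if lines:
            if lines[-1] == '\n':
                lines.pop()
            else:
                lines[-1] = lines[-1][:-1]
        return lines

    print(splitnewlines('a\rb\nc'))      # ['a\rb\n', 'c']
    print('a\rb\nc'.splitlines(True))    # ['a\r', 'b\n', 'c']
    print(splitnewlines('a\nb\n'))       # ['a\n', 'b\n']
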
--- a/mercurial/pycompat.py Fri Feb 23 17:57:04 2018 -0800
+++ b/mercurial/pycompat.py Sat Feb 24 17:49:10 2018 -0600
@@ -11,6 +11,7 @@
from __future__ import absolute_import
import getopt
+import inspect
import os
import shlex
import sys
@@ -65,6 +66,7 @@
maplist = lambda *args: list(map(*args))
ziplist = lambda *args: list(zip(*args))
rawinput = input
+ getargspec = inspect.getfullargspec
# TODO: .buffer might not exist if std streams were replaced; we'll need
# a silly wrapper to make a bytes stream backed by a unicode one.
@@ -83,12 +85,13 @@
sysargv = list(map(os.fsencode, sys.argv))
bytechr = struct.Struct('>B').pack
+ byterepr = b'%r'.__mod__
class bytestr(bytes):
"""A bytes which mostly acts as a Python 2 str
>>> bytestr(), bytestr(bytearray(b'foo')), bytestr(u'ascii'), bytestr(1)
- (b'', b'foo', b'ascii', b'1')
+ ('', 'foo', 'ascii', '1')
>>> s = bytestr(b'foo')
>>> assert s is bytestr(s)
@@ -98,7 +101,7 @@
... def __bytes__(self):
... return b'bytes'
>>> bytestr(bytesable())
- b'bytes'
+ 'bytes'
There's no implicit conversion from non-ascii str as its encoding is
unknown:
@@ -154,10 +157,19 @@
def __iter__(self):
return iterbytestr(bytes.__iter__(self))
+ def __repr__(self):
+ return bytes.__repr__(self)[1:] # drop b''
+
def iterbytestr(s):
"""Iterate bytes as if it were a str object of Python 2"""
return map(bytechr, s)
+ def maybebytestr(s):
+ """Promote bytes to bytestr"""
+ if isinstance(s, bytes):
+ return bytestr(s)
+ return s
+
def sysbytes(s):
"""Convert an internal str (e.g. keyword, __doc__) back to bytes
@@ -249,21 +261,27 @@
return dic
# TODO: handle shlex.shlex().
- def shlexsplit(s):
+ def shlexsplit(s, comments=False, posix=True):
"""
Takes bytes argument, convert it to str i.e. unicodes, pass that into
shlex.split(), convert the returned value to bytes and return that for
Python 3 compatibility as shelx.split() don't accept bytes on Python 3.
"""
- ret = shlex.split(s.decode('latin-1'))
+ ret = shlex.split(s.decode('latin-1'), comments, posix)
return [a.encode('latin-1') for a in ret]
+ def emailparser(*args, **kwargs):
+ import email.parser
+ return email.parser.BytesParser(*args, **kwargs)
+
else:
import cStringIO
bytechr = chr
+ byterepr = repr
bytestr = str
iterbytestr = iter
+ maybebytestr = identity
sysbytes = identity
sysstr = identity
strurl = identity
@@ -316,6 +334,11 @@
maplist = map
ziplist = zip
rawinput = raw_input
+ getargspec = inspect.getargspec
+
+ def emailparser(*args, **kwargs):
+ import email.parser
+ return email.parser.Parser(*args, **kwargs)
isjython = sysplatform.startswith('java')
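Among the pycompat.py additions, the bytestr.__repr__ override is what keeps
doctests written against Python 2 output passing on Python 3: it drops the
leading 'b' from the bytes repr. A reduced sketch showing only that override
(the real bytestr class adds much more, as the hunk above shows):

    class bytestr(bytes):
        def __repr__(self):
            return bytes.__repr__(self)[1:]    # drop the leading 'b'

    print(repr(b'foo'))             # b'foo'
    print(repr(bytestr(b'foo')))    # 'foo'
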
--- a/mercurial/revlog.py Fri Feb 23 17:57:04 2018 -0800
+++ b/mercurial/revlog.py Sat Feb 24 17:49:10 2018 -0600
@@ -13,8 +13,8 @@
from __future__ import absolute_import
-import binascii
import collections
+import contextlib
import errno
import hashlib
import heapq
@@ -621,13 +621,12 @@
indexdata = ''
self._initempty = True
try:
- f = self.opener(self.indexfile)
- if (mmapindexthreshold is not None and
- self.opener.fstat(f).st_size >= mmapindexthreshold):
- indexdata = util.buffer(util.mmapread(f))
- else:
- indexdata = f.read()
- f.close()
+ with self._indexfp() as f:
+ if (mmapindexthreshold is not None and
+ self.opener.fstat(f).st_size >= mmapindexthreshold):
+ indexdata = util.buffer(util.mmapread(f))
+ else:
+ indexdata = f.read()
if len(indexdata) > 0:
v = versionformat_unpack(indexdata[:4])[0]
self._initempty = False
@@ -682,6 +681,32 @@
def _compressor(self):
return util.compengines[self._compengine].revlogcompressor()
+ def _indexfp(self, mode='r'):
+ """file object for the revlog's index file"""
+ args = {r'mode': mode}
+ if mode != 'r':
+ args[r'checkambig'] = self._checkambig
+ if mode == 'w':
+ args[r'atomictemp'] = True
+ return self.opener(self.indexfile, **args)
+
+ def _datafp(self, mode='r'):
+ """file object for the revlog's data file"""
+ return self.opener(self.datafile, mode=mode)
+
+ @contextlib.contextmanager
+ def _datareadfp(self, existingfp=None):
+ """file object suitable to read data"""
+ if existingfp is not None:
+ yield existingfp
+ else:
+ if self._inline:
+ func = self._indexfp
+ else:
+ func = self._datafp
+ with func() as fp:
+ yield fp
+
def tip(self):
return self.node(len(self.index) - 2)
def __contains__(self, rev):
@@ -1404,7 +1429,7 @@
if maybewdir:
raise error.WdirUnsupported
return None
- except (TypeError, binascii.Error):
+ except TypeError:
pass
def lookup(self, id):
@@ -1490,15 +1515,6 @@
Returns a str or buffer of raw byte data.
"""
- if df is not None:
- closehandle = False
- else:
- if self._inline:
- df = self.opener(self.indexfile)
- else:
- df = self.opener(self.datafile)
- closehandle = True
-
# Cache data both forward and backward around the requested
# data, in a fixed size window. This helps speed up operations
# involving reading the revlog backwards.
@@ -1506,10 +1522,9 @@
realoffset = offset & ~(cachesize - 1)
reallength = (((offset + length + cachesize) & ~(cachesize - 1))
- realoffset)
- df.seek(realoffset)
- d = df.read(reallength)
- if closehandle:
- df.close()
+ with self._datareadfp(df) as df:
+ df.seek(realoffset)
+ d = df.read(reallength)
self._cachesegment(realoffset, d)
if offset != realoffset or reallength != length:
return util.buffer(d, offset - realoffset, length)
@@ -1818,7 +1833,7 @@
raise RevlogError(_("integrity check failed on %s:%s")
% (self.indexfile, pycompat.bytestr(revornode)))
- def checkinlinesize(self, tr, fp=None):
+ def _enforceinlinesize(self, tr, fp=None):
"""Check if the revlog is too big for inline and convert if so.
This should be called after revisions are added to the revlog. If the
@@ -1847,24 +1862,20 @@
fp.flush()
fp.close()
- df = self.opener(self.datafile, 'w')
- try:
+ with self._datafp('w') as df:
for r in self:
df.write(self._getsegmentforrevs(r, r)[1])
- finally:
- df.close()
- fp = self.opener(self.indexfile, 'w', atomictemp=True,
- checkambig=self._checkambig)
- self.version &= ~FLAG_INLINE_DATA
- self._inline = False
- for i in self:
- e = self._io.packentry(self.index[i], self.node, self.version, i)
- fp.write(e)
+ with self._indexfp('w') as fp:
+ self.version &= ~FLAG_INLINE_DATA
+ self._inline = False
+ io = self._io
+ for i in self:
+ e = io.packentry(self.index[i], self.node, self.version, i)
+ fp.write(e)
- # if we don't call close, the temp file will never replace the
- # real index
- fp.close()
+ # the temp file replaces the real index when we exit the context
+ # manager
tr.replace(self.indexfile, trindex * self._io.size)
self._chunkclear()
@@ -1923,8 +1934,8 @@
"""
dfh = None
if not self._inline:
- dfh = self.opener(self.datafile, "a+")
- ifh = self.opener(self.indexfile, "a+", checkambig=self._checkambig)
+ dfh = self._datafp("a+")
+ ifh = self._indexfp("a+")
try:
return self._addrevision(node, rawtext, transaction, link, p1, p2,
flags, cachedelta, ifh, dfh,
@@ -2099,7 +2110,7 @@
if alwayscache and rawtext is None:
rawtext = deltacomputer._buildtext(revinfo, fh)
- if type(rawtext) == str: # only accept immutable objects
+ if type(rawtext) == bytes: # only accept immutable objects
self._cache = (node, curr, rawtext)
self._chainbasecache[curr] = chainbase
return node
@@ -2133,7 +2144,7 @@
ifh.write(entry)
ifh.write(data[0])
ifh.write(data[1])
- self.checkinlinesize(transaction, ifh)
+ self._enforceinlinesize(transaction, ifh)
def addgroup(self, deltas, linkmapper, transaction, addrevisioncb=None):
"""
@@ -2153,7 +2164,7 @@
end = 0
if r:
end = self.end(r - 1)
- ifh = self.opener(self.indexfile, "a+", checkambig=self._checkambig)
+ ifh = self._indexfp("a+")
isize = r * self._io.size
if self._inline:
transaction.add(self.indexfile, end + isize, r)
@@ -2161,7 +2172,7 @@
else:
transaction.add(self.indexfile, isize, r)
transaction.add(self.datafile, end)
- dfh = self.opener(self.datafile, "a+")
+ dfh = self._datafp("a+")
def flush():
if dfh:
dfh.flush()
@@ -2224,9 +2235,8 @@
# addrevision switched from inline to conventional
# reopen the index
ifh.close()
- dfh = self.opener(self.datafile, "a+")
- ifh = self.opener(self.indexfile, "a+",
- checkambig=self._checkambig)
+ dfh = self._datafp("a+")
+ ifh = self._indexfp("a+")
finally:
if dfh:
dfh.close()
@@ -2328,10 +2338,9 @@
expected = max(0, self.end(len(self) - 1))
try:
- f = self.opener(self.datafile)
- f.seek(0, 2)
- actual = f.tell()
- f.close()
+ with self._datafp() as f:
+ f.seek(0, 2)
+ actual = f.tell()
dd = actual - expected
except IOError as inst:
if inst.errno != errno.ENOENT:
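The revlog.py refactor funnels all file access through _indexfp/_datafp and
the _datareadfp context manager, so a caller-provided file object is reused
while an internally opened one is always closed. A standalone sketch of that
pattern (plain open() on this script's own file; not the revlog API itself):

    import contextlib

    @contextlib.contextmanager
    def datareadfp(path, existingfp=None):
        if existingfp is not None:
            yield existingfp            # caller keeps ownership; do not close
        else:
            with open(path, 'rb') as fp:
                yield fp                # closed when the with-block exits

    with datareadfp(__file__) as fp:
        print(len(fp.read()), 'bytes read from a freshly opened file')

    with open(__file__, 'rb') as existing:
        with datareadfp(__file__, existingfp=existing) as fp:
            assert fp is existing       # the provided handle is reused as-is
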
--- a/mercurial/revset.py Fri Feb 23 17:57:04 2018 -0800
+++ b/mercurial/revset.py Sat Feb 24 17:49:10 2018 -0600
@@ -105,6 +105,9 @@
pass
return None
+def _sortedb(xs):
+ return sorted(util.rapply(pycompat.maybebytestr, xs))
+
# operator methods
def stringset(repo, subset, x, order):
@@ -507,15 +510,7 @@
b.add(getbranch(r))
c = s.__contains__
return subset.filter(lambda r: c(r) or getbranch(r) in b,
- condrepr=lambda: '<branch %r>' % sorted(b))
-
-@predicate('bumped()', safe=True)
-def bumped(repo, subset, x):
- msg = ("'bumped()' is deprecated, "
- "use 'phasedivergent()'")
- repo.ui.deprecwarn(msg, '4.4')
-
- return phasedivergent(repo, subset, x)
+ condrepr=lambda: '<branch %r>' % _sortedb(b))
@predicate('phasedivergent()', safe=True)
def phasedivergent(repo, subset, x):
@@ -768,15 +763,7 @@
src = _getrevsource(repo, r)
return subset.filter(dests.__contains__,
- condrepr=lambda: '<destination %r>' % sorted(dests))
-
-@predicate('divergent()', safe=True)
-def divergent(repo, subset, x):
- msg = ("'divergent()' is deprecated, "
- "use 'contentdivergent()'")
- repo.ui.deprecwarn(msg, '4.4')
-
- return contentdivergent(repo, subset, x)
+ condrepr=lambda: '<destination %r>' % _sortedb(dests))
@predicate('contentdivergent()', safe=True)
def contentdivergent(repo, subset, x):
@@ -1854,7 +1841,7 @@
keyflags = []
for k in keys.split():
fk = k
- reverse = (k[0] == '-')
+ reverse = (k.startswith('-'))
if reverse:
k = k[1:]
if k not in _sortkeyfuncs and k != 'topo':
@@ -2031,14 +2018,6 @@
def tagged(repo, subset, x):
return tag(repo, subset, x)
-@predicate('unstable()', safe=True)
-def unstable(repo, subset, x):
- msg = ("'unstable()' is deprecated, "
- "use 'orphan()'")
- repo.ui.deprecwarn(msg, '4.4')
-
- return orphan(repo, subset, x)
-
@predicate('orphan()', safe=True)
def orphan(repo, subset, x):
"""Non-obsolete changesets with obsolete ancestors. (EXPERIMENTAL)
--- a/mercurial/revsetlang.py Fri Feb 23 17:57:04 2018 -0800
+++ b/mercurial/revsetlang.py Sat Feb 24 17:49:10 2018 -0600
@@ -635,7 +635,7 @@
"root(_list('a\\\\x00b\\\\x00c\\\\x00d'))"
>>> formatspec(b'sort(%r, %ps)', b':', [b'desc', b'user'])
"sort((:), 'desc', 'user')"
- >>> formatspec('%ls', ['a', "'"])
+ >>> formatspec(b'%ls', [b'a', b"'"])
"_list('a\\\\x00\\\\'')"
'''
expr = pycompat.bytestr(expr)
@@ -717,13 +717,13 @@
def gethashlikesymbols(tree):
"""returns the list of symbols of the tree that look like hashes
- >>> gethashlikesymbols(('dagrange', ('symbol', '3'), ('symbol', 'abe3ff')))
+ >>> gethashlikesymbols(parse(b'3::abe3ff'))
['3', 'abe3ff']
- >>> gethashlikesymbols(('func', ('symbol', 'precursors'), ('symbol', '.')))
+ >>> gethashlikesymbols(parse(b'precursors(.)'))
[]
- >>> gethashlikesymbols(('func', ('symbol', 'precursors'), ('symbol', '34')))
+ >>> gethashlikesymbols(parse(b'precursors(34)'))
['34']
- >>> gethashlikesymbols(('symbol', 'abe3ffZ'))
+ >>> gethashlikesymbols(parse(b'abe3ffZ'))
[]
"""
if not tree:
--- a/mercurial/scmutil.py Fri Feb 23 17:57:04 2018 -0800
+++ b/mercurial/scmutil.py Sat Feb 24 17:49:10 2018 -0600
@@ -215,7 +215,7 @@
ui.warn(_("(is your Python install correct?)\n"))
except IOError as inst:
if util.safehasattr(inst, "code"):
- ui.warn(_("abort: %s\n") % inst)
+ ui.warn(_("abort: %s\n") % util.forcebytestr(inst))
elif util.safehasattr(inst, "reason"):
try: # usually it is in the form (errno, strerror)
reason = inst.reason.args[1]
@@ -267,6 +267,8 @@
raise error.Abort(_("cannot use an integer as a name"))
except ValueError:
pass
+ if lbl.strip() != lbl:
+ raise error.Abort(_("leading or trailing whitespace in name %r") % lbl)
def checkfilename(f):
'''Check that the filename f is an acceptable filename for a tracked file'''
@@ -355,12 +357,8 @@
samestat = getattr(os.path, 'samestat', None)
if followsym and samestat is not None:
def adddir(dirlst, dirname):
- match = False
dirstat = os.stat(dirname)
- for lstdirstat in dirlst:
- if samestat(dirstat, lstdirstat):
- match = True
- break
+ match = any(samestat(dirstat, lstdirstat) for lstdirstat in dirlst)
if not match:
dirlst.append(dirstat)
return not match
@@ -411,7 +409,7 @@
def formatchangeid(ctx):
"""Format changectx as '{rev}:{node|formatnode}', which is the default
- template provided by cmdutil.changeset_templater"""
+ template provided by logcmdutil.changesettemplater"""
repo = ctx.repo()
return formatrevnode(repo.ui, intrev(ctx), binnode(ctx))
@@ -885,7 +883,7 @@
missings = []
for r in requirements:
if r not in supported:
- if not r or not r[0].isalnum():
+ if not r or not r[0:1].isalnum():
raise error.RequirementError(_(".hg/requires file is corrupt"))
missings.append(r)
missings.sort()
@@ -1196,7 +1194,7 @@
if k == self.firstlinekey:
e = "key name '%s' is reserved" % self.firstlinekey
raise error.ProgrammingError(e)
- if not k[0].isalpha():
+ if not k[0:1].isalpha():
e = "keys must start with a letter in a key-value file"
raise error.ProgrammingError(e)
if not k.isalnum():
@@ -1222,6 +1220,11 @@
'unbundle',
]
+# a list of (repo, ctx, files) functions called by various commands to allow
+# extensions to ensure the corresponding files are available locally, before the
+# command uses them.
+fileprefetchhooks = util.hooks()
+
# A marker that tells the evolve extension to suppress its own reporting
_reportstroubledchangesets = True
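fileprefetchhooks gives extensions a place to register (repo, ctx, files)
callbacks that run before a command touches the listed files. The sketch below
only illustrates that call pattern -- the real object is util.hooks(), whose
exact behavior is defined in util.py, not here:

    class hooks(object):
        def __init__(self):
            self._hooks = []

        def add(self, source, hook):
            self._hooks.append((source, hook))

        def __call__(self, *args):
            for source, hook in self._hooks:
                hook(*args)

    fileprefetchhooks = hooks()
    fileprefetchhooks.add('demo-extension',
                          lambda repo, ctx, files: print('prefetch', files))
    fileprefetchhooks('repo', 'ctx', ['a.txt', 'b.txt'])
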
--- a/mercurial/setdiscovery.py Fri Feb 23 17:57:04 2018 -0800
+++ b/mercurial/setdiscovery.py Sat Feb 24 17:49:10 2018 -0600
@@ -130,7 +130,7 @@
sample = set(random.sample(sample, desiredlen))
return sample
-def findcommonheads(ui, local, remote,
+def findcommonheads(ui, local, remote, heads=None,
initialsamplesize=100,
fullsamplesize=200,
abortwhenunrelated=True,
@@ -155,11 +155,15 @@
sample = _limitsample(ownheads, initialsamplesize)
# indices between sample and externalized version must match
sample = list(sample)
- batch = remote.iterbatch()
- batch.heads()
- batch.known(dag.externalizeall(sample))
- batch.submit()
- srvheadhashes, yesno = batch.results()
+ if heads:
+ srvheadhashes = heads
+ yesno = remote.known(dag.externalizeall(sample))
+ else:
+ batch = remote.iterbatch()
+ batch.heads()
+ batch.known(dag.externalizeall(sample))
+ batch.submit()
+ srvheadhashes, yesno = batch.results()
if cl.tip() == nullid:
if srvheadhashes != [nullid]:
--- a/mercurial/smartset.py Fri Feb 23 17:57:04 2018 -0800
+++ b/mercurial/smartset.py Sat Feb 24 17:49:10 2018 -0600
@@ -8,7 +8,9 @@
from __future__ import absolute_import
from . import (
+ encoding,
error,
+ pycompat,
util,
)
@@ -19,7 +21,7 @@
type(r) example
======== =================================
tuple ('<not %r>', other)
- str '<branch closed>'
+ bytes '<branch closed>'
callable lambda: '<branch %r>' % sorted(b)
object other
======== =================================
@@ -27,13 +29,16 @@
if r is None:
return ''
elif isinstance(r, tuple):
- return r[0] % r[1:]
- elif isinstance(r, str):
+ return r[0] % util.rapply(pycompat.maybebytestr, r[1:])
+ elif isinstance(r, bytes):
return r
elif callable(r):
return r()
else:
- return repr(r)
+ return pycompat.byterepr(r)
+
+def _typename(o):
+ return pycompat.sysbytes(type(o).__name__).lstrip('_')
class abstractsmartset(object):
@@ -306,7 +311,7 @@
self._istopo = False
def __len__(self):
- if '_list' in self.__dict__:
+ if r'_list' in self.__dict__:
return len(self._list)
else:
return len(self._set)
@@ -384,6 +389,7 @@
s._ascending = self._ascending
return s
+ @encoding.strmethod
def __repr__(self):
d = {None: '', False: '-', True: '+'}[self._ascending]
s = _formatsetrepr(self._datarepr)
@@ -394,8 +400,8 @@
# We fallback to the sorted version for a stable output.
if self._ascending is not None:
l = self._asclist
- s = repr(l)
- return '<%s%s %s>' % (type(self).__name__, d, s)
+ s = pycompat.byterepr(l)
+ return '<%s%s %s>' % (_typename(self), d, s)
class filteredset(abstractsmartset):
"""Duck type for baseset class which iterates lazily over the revisions in
@@ -505,12 +511,13 @@
pass
return x
+ @encoding.strmethod
def __repr__(self):
- xs = [repr(self._subset)]
+ xs = [pycompat.byterepr(self._subset)]
s = _formatsetrepr(self._condrepr)
if s:
xs.append(s)
- return '<%s %s>' % (type(self).__name__, ', '.join(xs))
+ return '<%s %s>' % (_typename(self), ', '.join(xs))
def _iterordered(ascending, iter1, iter2):
"""produce an ordered iteration from two iterators with the same order
@@ -755,9 +762,10 @@
self.reverse()
return val
+ @encoding.strmethod
def __repr__(self):
d = {None: '', False: '-', True: '+'}[self._ascending]
- return '<%s%s %r, %r>' % (type(self).__name__, d, self._r1, self._r2)
+ return '<%s%s %r, %r>' % (_typename(self), d, self._r1, self._r2)
class generatorset(abstractsmartset):
"""Wrap a generator for lazy iteration
@@ -918,9 +926,10 @@
return self.last()
return next(it(), None)
+ @encoding.strmethod
def __repr__(self):
d = {False: '-', True: '+'}[self._ascending]
- return '<%s%s>' % (type(self).__name__.lstrip('_'), d)
+ return '<%s%s>' % (_typename(self), d)
class _generatorsetasc(generatorset):
"""Special case of generatorset optimized for ascending generators."""
@@ -1087,10 +1096,10 @@
y = max(self._end - start, self._start)
return _spanset(x, y, self._ascending, self._hiddenrevs)
+ @encoding.strmethod
def __repr__(self):
d = {False: '-', True: '+'}[self._ascending]
- return '<%s%s %d:%d>' % (type(self).__name__.lstrip('_'), d,
- self._start, self._end)
+ return '<%s%s %d:%d>' % (_typename(self), d, self._start, self._end)
class fullreposet(_spanset):
"""a set containing all revisions in the repo
@@ -1123,7 +1132,7 @@
def prettyformat(revs):
lines = []
- rs = repr(revs)
+ rs = pycompat.byterepr(revs)
p = 0
while p < len(rs):
q = rs.find('<', p + 1)
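_formatsetrepr (shown in context above) accepts several shapes for the cached
representation. A simplified str-based sketch of that dispatch, mirroring the
table in the docstring (the real code operates on bytes and uses pycompat
helpers):

    def formatsetrepr(r):
        if r is None:
            return ''
        elif isinstance(r, tuple):
            return r[0] % r[1:]       # ('<not %r>', other)
        elif isinstance(r, str):
            return r                  # '<branch closed>'
        elif callable(r):
            return r()                # lambda: '<branch %r>' % sorted(b)
        else:
            return repr(r)            # any other object

    print(formatsetrepr(('<not %r>', 5)))       # <not 5>
    print(formatsetrepr('<branch closed>'))     # <branch closed>
    print(formatsetrepr(lambda: '<lazy>'))      # <lazy>
    print(formatsetrepr(range(3)))              # range(0, 3)
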
--- a/mercurial/sshpeer.py Fri Feb 23 17:57:04 2018 -0800
+++ b/mercurial/sshpeer.py Sat Feb 24 17:49:10 2018 -0600
@@ -8,6 +8,7 @@
from __future__ import absolute_import
import re
+import uuid
from .i18n import _
from . import (
@@ -15,6 +16,7 @@
pycompat,
util,
wireproto,
+ wireprotoserver,
)
def _serverquote(s):
@@ -63,8 +65,11 @@
(This will only wait for data if the setup is supported by `util.poll`)
"""
- if getattr(self._main, 'hasbuffer', False): # getattr for classic pipe
- return (True, True) # main has data, assume side is worth poking at.
+ if (isinstance(self._main, util.bufferedinputpipe) and
+ self._main.hasbuffer):
+ # Main has data. Assume side is worth poking at.
+ return True, True
+
fds = [self._main.fileno(), self._side.fileno()]
try:
act = util.poll(fds)
@@ -114,43 +119,253 @@
def flush(self):
return self._main.flush()
-class sshpeer(wireproto.wirepeer):
- def __init__(self, ui, path, create=False):
- self._url = path
- self._ui = ui
- self._pipeo = self._pipei = self._pipee = None
+def _cleanuppipes(ui, pipei, pipeo, pipee):
+ """Clean up pipes used by an SSH connection."""
+ if pipeo:
+ pipeo.close()
+ if pipei:
+ pipei.close()
+
+ if pipee:
+ # Try to read from the err descriptor until EOF.
+ try:
+ for l in pipee:
+ ui.status(_('remote: '), l)
+ except (IOError, ValueError):
+ pass
+
+ pipee.close()
+
+def _makeconnection(ui, sshcmd, args, remotecmd, path, sshenv=None):
+ """Create an SSH connection to a server.
+
+ Returns a tuple of (process, stdin, stdout, stderr) for the
+ spawned process.
+ """
+ cmd = '%s %s %s' % (
+ sshcmd,
+ args,
+ util.shellquote('%s -R %s serve --stdio' % (
+ _serverquote(remotecmd), _serverquote(path))))
+
+ ui.debug('running %s\n' % cmd)
+ cmd = util.quotecommand(cmd)
+
+ # no buffer allow the use of 'select'
+ # feel free to remove buffering and select usage when we ultimately
+ # move to threading.
+ stdin, stdout, stderr, proc = util.popen4(cmd, bufsize=0, env=sshenv)
+
+ return proc, stdin, stdout, stderr
+
+def _performhandshake(ui, stdin, stdout, stderr):
+ def badresponse():
+ # Flush any output on stderr.
+ _forwardoutput(ui, stderr)
+
+ msg = _('no suitable response from remote hg')
+ hint = ui.config('ui', 'ssherrorhint')
+ raise error.RepoError(msg, hint=hint)
- u = util.url(path, parsequery=False, parsefragment=False)
- if u.scheme != 'ssh' or not u.host or u.path is None:
- self._abort(error.RepoError(_("couldn't parse location %s") % path))
+ # The handshake consists of sending wire protocol commands in reverse
+ # order of protocol implementation and then sniffing for a response
+ # to one of them.
+ #
+ # Those commands (from oldest to newest) are:
+ #
+ # ``between``
+ # Asks for the set of revisions between a pair of revisions. Command
+ # present in all Mercurial server implementations.
+ #
+ # ``hello``
+ # Instructs the server to advertise its capabilities. Introduced in
+ # Mercurial 0.9.1.
+ #
+ # ``upgrade``
+ # Requests upgrade from default transport protocol version 1 to
+ # a newer version. Introduced in Mercurial 4.6 as an experimental
+ # feature.
+ #
+ # The ``between`` command is issued with a request for the null
+ # range. If the remote is a Mercurial server, this request will
+ # generate a specific response: ``1\n\n``. This represents the
+ # wire protocol encoded value for ``\n``. We look for ``1\n\n``
+ # in the output stream and know this is the response to ``between``
+ # and we're at the end of our handshake reply.
+ #
+ # The response to the ``hello`` command will be a line with the
+ # length of the value returned by that command followed by that
+ # value. If the server doesn't support ``hello`` (which should be
+ # rare), that line will be ``0\n``. Otherwise, the value will contain
+ # RFC 822 like lines. Of these, the ``capabilities:`` line contains
+ # the capabilities of the server.
+ #
+ # The ``upgrade`` command isn't really a command in the traditional
+ # sense of version 1 of the transport because it isn't using the
+ # proper mechanism for formatting arguments: instead, it just encodes
+ # arguments on the line, delimited by spaces.
+ #
+ # The ``upgrade`` line looks like ``upgrade <token> <capabilities>``.
+ # If the server doesn't support protocol upgrades, it will reply to
+ # this line with ``0\n``. Otherwise, it emits an
+ # ``upgraded <token> <protocol>`` line to both stdout and stderr.
+ # Content immediately following this line describes additional
+ # protocol and server state.
+ #
+ # In addition to the responses to our command requests, the server
+ # may emit "banner" output on stdout. SSH servers are allowed to
+ # print messages to stdout on login. Issuing commands on connection
+ # allows us to flush this banner output from the server by scanning
+ # for output to our well-known ``between`` command. Of course, if
+ # the banner contains ``1\n\n``, this will throw off our detection.
- util.checksafessh(path)
+ requestlog = ui.configbool('devel', 'debug.peer-request')
+
+ # Generate a random token to help identify responses to version 2
+ # upgrade request.
+ token = pycompat.sysbytes(str(uuid.uuid4()))
+ upgradecaps = [
+ ('proto', wireprotoserver.SSHV2),
+ ]
+ upgradecaps = util.urlreq.urlencode(upgradecaps)
- if u.passwd is not None:
- self._abort(error.RepoError(_("password in URL not supported")))
+ try:
+ pairsarg = '%s-%s' % ('0' * 40, '0' * 40)
+ handshake = [
+ 'hello\n',
+ 'between\n',
+ 'pairs %d\n' % len(pairsarg),
+ pairsarg,
+ ]
+
+ # Request upgrade to version 2 if configured.
+ if ui.configbool('experimental', 'sshpeer.advertise-v2'):
+ ui.debug('sending upgrade request: %s %s\n' % (token, upgradecaps))
+ handshake.insert(0, 'upgrade %s %s\n' % (token, upgradecaps))
- self._user = u.user
- self._host = u.host
- self._port = u.port
- self._path = u.path or '.'
+ if requestlog:
+ ui.debug('devel-peer-request: hello\n')
+ ui.debug('sending hello command\n')
+ if requestlog:
+ ui.debug('devel-peer-request: between\n')
+ ui.debug('devel-peer-request: pairs: %d bytes\n' % len(pairsarg))
+ ui.debug('sending between command\n')
+
+ stdin.write(''.join(handshake))
+ stdin.flush()
+ except IOError:
+ badresponse()
+
+ # Assume version 1 of wire protocol by default.
+ protoname = wireprotoserver.SSHV1
+ reupgraded = re.compile(b'^upgraded %s (.*)$' % re.escape(token))
+
+ lines = ['', 'dummy']
+ max_noise = 500
+ while lines[-1] and max_noise:
+ try:
+ l = stdout.readline()
+ _forwardoutput(ui, stderr)
- sshcmd = self.ui.config("ui", "ssh")
- remotecmd = self.ui.config("ui", "remotecmd")
- sshaddenv = dict(self.ui.configitems("sshenv"))
- sshenv = util.shellenviron(sshaddenv)
+ # Look for reply to protocol upgrade request. It has a token
+ # in it, so there should be no false positives.
+ m = reupgraded.match(l)
+ if m:
+ protoname = m.group(1)
+ ui.debug('protocol upgraded to %s\n' % protoname)
+ # If an upgrade was handled, the ``hello`` and ``between``
+ # requests are ignored. The next output belongs to the
+ # protocol, so stop scanning lines.
+ break
+
+ # Otherwise it could be a banner, or a ``0\n`` response if the
+ # server doesn't support upgrade.
+
+ if lines[-1] == '1\n' and l == '\n':
+ break
+ if l:
+ ui.debug('remote: ', l)
+ lines.append(l)
+ max_noise -= 1
+ except IOError:
+ badresponse()
+ else:
+ badresponse()
+
+ caps = set()
- args = util.sshargs(sshcmd, self._host, self._user, self._port)
+ # For version 1, we should see a ``capabilities`` line in response to the
+ # ``hello`` command.
+ if protoname == wireprotoserver.SSHV1:
+ for l in reversed(lines):
+ # Look for response to ``hello`` command. Scan from the back so
+ # we don't misinterpret banner output as the command reply.
+ if l.startswith('capabilities:'):
+ caps.update(l[:-1].split(':')[1].split())
+ break
+ elif protoname == wireprotoserver.SSHV2:
+ # We see a line with number of bytes to follow and then a value
+ # looking like ``capabilities: *``.
+ line = stdout.readline()
+ try:
+ valuelen = int(line)
+ except ValueError:
+ badresponse()
+
+ capsline = stdout.read(valuelen)
+ if not capsline.startswith('capabilities: '):
+ badresponse()
+
+ ui.debug('remote: %s\n' % capsline)
+
+ caps.update(capsline.split(':')[1].split())
+ # Trailing newline.
+ stdout.read(1)
- if create:
- cmd = '%s %s %s' % (sshcmd, args,
- util.shellquote("%s init %s" %
- (_serverquote(remotecmd), _serverquote(self._path))))
- ui.debug('running %s\n' % cmd)
- res = ui.system(cmd, blockedtag='sshpeer', environ=sshenv)
- if res != 0:
- self._abort(error.RepoError(_("could not create remote repo")))
+ # Error if we couldn't find capabilities, this means:
+ #
+ # 1. Remote isn't a Mercurial server
+ # 2. Remote is a <0.9.1 Mercurial server
+ # 3. Remote is a future Mercurial server that dropped ``hello``
+ # and other attempted handshake mechanisms.
+ if not caps:
+ badresponse()
+
+ # Flush any output on stderr before proceeding.
+ _forwardoutput(ui, stderr)
+
+ return protoname, caps
+
+class sshv1peer(wireproto.wirepeer):
+ def __init__(self, ui, url, proc, stdin, stdout, stderr, caps):
+ """Create a peer from an existing SSH connection.
- self._validaterepo(sshcmd, args, remotecmd, sshenv)
+ ``proc`` is a handle on the underlying SSH process.
+ ``stdin``, ``stdout``, and ``stderr`` are handles on the stdio
+ pipes for that process.
+ ``caps`` is a set of capabilities supported by the remote.
+ """
+ self._url = url
+ self._ui = ui
+ # self._subprocess is unused. Keeping a handle on the process
+ # holds a reference and prevents it from being garbage collected.
+ self._subprocess = proc
+
+ # And we hook up our "doublepipe" wrapper to allow querying
+ # stderr any time we perform I/O.
+ stdout = doublepipe(ui, util.bufferedinputpipe(stdout), stderr)
+ stdin = doublepipe(ui, stdin, stderr)
+
+ self._pipeo = stdin
+ self._pipei = stdout
+ self._pipee = stderr
+ self._caps = caps
+
+ # Commands that have a "framed" response where the first line of the
+ # response contains the length of that response.
+ _FRAMED_COMMANDS = {
+ 'batch',
+ }
# Begin of _basepeer interface.
@@ -182,64 +397,6 @@
# End of _basewirecommands interface.
- def _validaterepo(self, sshcmd, args, remotecmd, sshenv=None):
- # cleanup up previous run
- self._cleanup()
-
- cmd = '%s %s %s' % (sshcmd, args,
- util.shellquote("%s -R %s serve --stdio" %
- (_serverquote(remotecmd), _serverquote(self._path))))
- self.ui.debug('running %s\n' % cmd)
- cmd = util.quotecommand(cmd)
-
- # while self._subprocess isn't used, having it allows the subprocess to
- # to clean up correctly later
- #
- # no buffer allow the use of 'select'
- # feel free to remove buffering and select usage when we ultimately
- # move to threading.
- sub = util.popen4(cmd, bufsize=0, env=sshenv)
- self._pipeo, self._pipei, self._pipee, self._subprocess = sub
-
- self._pipei = util.bufferedinputpipe(self._pipei)
- self._pipei = doublepipe(self.ui, self._pipei, self._pipee)
- self._pipeo = doublepipe(self.ui, self._pipeo, self._pipee)
-
- def badresponse():
- msg = _("no suitable response from remote hg")
- hint = self.ui.config("ui", "ssherrorhint")
- self._abort(error.RepoError(msg, hint=hint))
-
- try:
- # skip any noise generated by remote shell
- self._callstream("hello")
- r = self._callstream("between", pairs=("%s-%s" % ("0"*40, "0"*40)))
- except IOError:
- badresponse()
-
- lines = ["", "dummy"]
- max_noise = 500
- while lines[-1] and max_noise:
- try:
- l = r.readline()
- self._readerr()
- if lines[-1] == "1\n" and l == "\n":
- break
- if l:
- self.ui.debug("remote: ", l)
- lines.append(l)
- max_noise -= 1
- except IOError:
- badresponse()
- else:
- badresponse()
-
- self._caps = set()
- for l in reversed(lines):
- if l.startswith("capabilities:"):
- self._caps.update(l[:-1].split(":")[1].split())
- break
-
def _readerr(self):
_forwardoutput(self.ui, self._pipee)
@@ -248,41 +405,11 @@
raise exception
def _cleanup(self):
- if self._pipeo is None:
- return
- self._pipeo.close()
- self._pipei.close()
- try:
- # read the error descriptor until EOF
- for l in self._pipee:
- self.ui.status(_("remote: "), l)
- except (IOError, ValueError):
- pass
- self._pipee.close()
+ _cleanuppipes(self.ui, self._pipei, self._pipeo, self._pipee)
__del__ = _cleanup
- def _submitbatch(self, req):
- rsp = self._callstream("batch", cmds=wireproto.encodebatchcmds(req))
- available = self._getamount()
- # TODO this response parsing is probably suboptimal for large
- # batches with large responses.
- toread = min(available, 1024)
- work = rsp.read(toread)
- available -= toread
- chunk = work
- while chunk:
- while ';' in work:
- one, work = work.split(';', 1)
- yield wireproto.unescapearg(one)
- toread = min(available, 1024)
- chunk = rsp.read(toread)
- available -= toread
- work += chunk
- yield wireproto.unescapearg(work)
-
- def _callstream(self, cmd, **args):
- args = pycompat.byteskwargs(args)
+ def _sendrequest(self, cmd, args, framed=False):
if (self.ui.debugflag
and self.ui.configbool('devel', 'debug.peer-request')):
dbg = self.ui.debug
@@ -316,35 +443,63 @@
self._pipeo.write(v)
self._pipeo.flush()
+ # We know exactly how many bytes are in the response. So return a proxy
+ # around the raw output stream that allows reading exactly this many
+ # bytes. Callers then can read() without fear of overrunning the
+ # response.
+ if framed:
+ amount = self._getamount()
+ return util.cappedreader(self._pipei, amount)
+
return self._pipei
+ def _callstream(self, cmd, **args):
+ args = pycompat.byteskwargs(args)
+ return self._sendrequest(cmd, args, framed=cmd in self._FRAMED_COMMANDS)
+
def _callcompressable(self, cmd, **args):
- return self._callstream(cmd, **args)
+ args = pycompat.byteskwargs(args)
+ return self._sendrequest(cmd, args, framed=cmd in self._FRAMED_COMMANDS)
def _call(self, cmd, **args):
- self._callstream(cmd, **args)
- return self._recv()
+ args = pycompat.byteskwargs(args)
+ return self._sendrequest(cmd, args, framed=True).read()
def _callpush(self, cmd, fp, **args):
+ # The server responds with an empty frame if the client should
+ # continue submitting the payload.
r = self._call(cmd, **args)
if r:
return '', r
+
+ # The payload consists of frames with content followed by an empty
+ # frame.
for d in iter(lambda: fp.read(4096), ''):
- self._send(d)
- self._send("", flush=True)
- r = self._recv()
+ self._writeframed(d)
+ self._writeframed("", flush=True)
+
+ # In case of success, there is an empty frame and a frame containing
+ # the integer result (as a string).
+ # In case of error, there is a non-empty frame containing the error.
+ r = self._readframed()
if r:
return '', r
- return self._recv(), ''
+ return self._readframed(), ''
def _calltwowaystream(self, cmd, fp, **args):
+ # The server responds with an empty frame if the client should
+ # continue submitting the payload.
r = self._call(cmd, **args)
if r:
# XXX needs to be made better
raise error.Abort(_('unexpected remote reply: %s') % r)
+
+ # The payload consists of frames with content followed by an empty
+ # frame.
for d in iter(lambda: fp.read(4096), ''):
- self._send(d)
- self._send("", flush=True)
+ self._writeframed(d)
+ self._writeframed("", flush=True)
+
return self._pipei
def _getamount(self):
@@ -359,10 +514,10 @@
except ValueError:
self._abort(error.ResponseError(_("unexpected response:"), l))
- def _recv(self):
+ def _readframed(self):
return self._pipei.read(self._getamount())
- def _send(self, data, flush=False):
+ def _writeframed(self, data, flush=False):
self._pipeo.write("%d\n" % len(data))
if data:
self._pipeo.write(data)
@@ -370,4 +525,57 @@
self._pipeo.flush()
self._readerr()
-instance = sshpeer
+class sshv2peer(sshv1peer):
+ """A peer that speakers version 2 of the transport protocol."""
+ # Currently version 2 is identical to version 1 post handshake.
+ # And handshake is performed before the peer is instantiated. So
+ # we need no custom code.
+
+def instance(ui, path, create):
+ """Create an SSH peer.
+
+ The returned object conforms to the ``wireproto.wirepeer`` interface.
+ """
+ u = util.url(path, parsequery=False, parsefragment=False)
+ if u.scheme != 'ssh' or not u.host or u.path is None:
+ raise error.RepoError(_("couldn't parse location %s") % path)
+
+ util.checksafessh(path)
+
+ if u.passwd is not None:
+ raise error.RepoError(_('password in URL not supported'))
+
+ sshcmd = ui.config('ui', 'ssh')
+ remotecmd = ui.config('ui', 'remotecmd')
+ sshaddenv = dict(ui.configitems('sshenv'))
+ sshenv = util.shellenviron(sshaddenv)
+ remotepath = u.path or '.'
+
+ args = util.sshargs(sshcmd, u.host, u.user, u.port)
+
+ if create:
+ cmd = '%s %s %s' % (sshcmd, args,
+ util.shellquote('%s init %s' %
+ (_serverquote(remotecmd), _serverquote(remotepath))))
+ ui.debug('running %s\n' % cmd)
+ res = ui.system(cmd, blockedtag='sshpeer', environ=sshenv)
+ if res != 0:
+ raise error.RepoError(_('could not create remote repo'))
+
+ proc, stdin, stdout, stderr = _makeconnection(ui, sshcmd, args, remotecmd,
+ remotepath, sshenv)
+
+ try:
+ protoname, caps = _performhandshake(ui, stdin, stdout, stderr)
+ except Exception:
+ _cleanuppipes(ui, stdout, stdin, stderr)
+ raise
+
+ if protoname == wireprotoserver.SSHV1:
+ return sshv1peer(ui, path, proc, stdin, stdout, stderr, caps)
+ elif protoname == wireprotoserver.SSHV2:
+ return sshv2peer(ui, path, proc, stdin, stdout, stderr, caps)
+ else:
+ _cleanuppipes(ui, stdout, stdin, stderr)
+ raise error.RepoError(_('unknown version of SSH protocol: %s') %
+ protoname)
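Both SSH peer versions keep the simple length-prefixed framing used by
_readframed/_writeframed: each frame is the decimal payload length, a newline,
then exactly that many bytes, with an empty frame acting as a terminator. A
standalone sketch of that framing over an in-memory buffer (not the sshpeer
API itself):

    import io

    def writeframed(out, data):
        out.write(b'%d\n' % len(data))
        if data:
            out.write(data)

    def readframed(pipe):
        length = int(pipe.readline())
        return pipe.read(length)

    buf = io.BytesIO()
    writeframed(buf, b'capabilities: lookup branchmap')
    writeframed(buf, b'')                   # empty frame terminates a payload
    buf.seek(0)
    print(readframed(buf))                  # b'capabilities: lookup branchmap'
    print(readframed(buf))                  # b''
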
--- a/mercurial/sshserver.py Fri Feb 23 17:57:04 2018 -0800
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,131 +0,0 @@
-# sshserver.py - ssh protocol server support for mercurial
-#
-# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
-# Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
-#
-# This software may be used and distributed according to the terms of the
-# GNU General Public License version 2 or any later version.
-
-from __future__ import absolute_import
-
-import sys
-
-from .i18n import _
-from . import (
- encoding,
- error,
- hook,
- util,
- wireproto,
-)
-
-class sshserver(wireproto.abstractserverproto):
- def __init__(self, ui, repo):
- self.ui = ui
- self.repo = repo
- self.lock = None
- self.fin = ui.fin
- self.fout = ui.fout
- self.name = 'ssh'
-
- hook.redirect(True)
- ui.fout = repo.ui.fout = ui.ferr
-
- # Prevent insertion/deletion of CRs
- util.setbinary(self.fin)
- util.setbinary(self.fout)
-
- def getargs(self, args):
- data = {}
- keys = args.split()
- for n in xrange(len(keys)):
- argline = self.fin.readline()[:-1]
- arg, l = argline.split()
- if arg not in keys:
- raise error.Abort(_("unexpected parameter %r") % arg)
- if arg == '*':
- star = {}
- for k in xrange(int(l)):
- argline = self.fin.readline()[:-1]
- arg, l = argline.split()
- val = self.fin.read(int(l))
- star[arg] = val
- data['*'] = star
- else:
- val = self.fin.read(int(l))
- data[arg] = val
- return [data[k] for k in keys]
-
- def getarg(self, name):
- return self.getargs(name)[0]
-
- def getfile(self, fpout):
- self.sendresponse('')
- count = int(self.fin.readline())
- while count:
- fpout.write(self.fin.read(count))
- count = int(self.fin.readline())
-
- def redirect(self):
- pass
-
- def sendresponse(self, v):
- self.fout.write("%d\n" % len(v))
- self.fout.write(v)
- self.fout.flush()
-
- def sendstream(self, source):
- write = self.fout.write
- for chunk in source.gen:
- write(chunk)
- self.fout.flush()
-
- def sendpushresponse(self, rsp):
- self.sendresponse('')
- self.sendresponse(str(rsp.res))
-
- def sendpusherror(self, rsp):
- self.sendresponse(rsp.res)
-
- def sendooberror(self, rsp):
- self.ui.ferr.write('%s\n-\n' % rsp.message)
- self.ui.ferr.flush()
- self.fout.write('\n')
- self.fout.flush()
-
- def serve_forever(self):
- try:
- while self.serve_one():
- pass
- finally:
- if self.lock is not None:
- self.lock.release()
- sys.exit(0)
-
- handlers = {
- str: sendresponse,
- wireproto.streamres: sendstream,
- wireproto.streamres_legacy: sendstream,
- wireproto.pushres: sendpushresponse,
- wireproto.pusherr: sendpusherror,
- wireproto.ooberror: sendooberror,
- }
-
- def serve_one(self):
- cmd = self.fin.readline()[:-1]
- if cmd and cmd in wireproto.commands:
- rsp = wireproto.dispatch(self.repo, self, cmd)
- self.handlers[rsp.__class__](self, rsp)
- elif cmd:
- impl = getattr(self, 'do_' + cmd, None)
- if impl:
- r = impl()
- if r is not None:
- self.sendresponse(r)
- else:
- self.sendresponse("")
- return cmd != ''
-
- def _client(self):
- client = encoding.environ.get('SSH_CLIENT', '').split(' ', 1)[0]
- return 'remote:ssh:' + client
--- a/mercurial/subrepo.py Fri Feb 23 17:57:04 2018 -0800
+++ b/mercurial/subrepo.py Sat Feb 24 17:49:10 2018 -0600
@@ -1,4 +1,4 @@
-# subrepo.py - sub-repository handling for Mercurial
+# subrepo.py - sub-repository classes and factory
#
# Copyright 2009-2010 Matt Mackall <mpm@selenic.com>
#
@@ -19,30 +19,30 @@
import tarfile
import xml.dom.minidom
-
from .i18n import _
from . import (
cmdutil,
- config,
encoding,
error,
exchange,
- filemerge,
+ logcmdutil,
match as matchmod,
node,
pathutil,
phases,
pycompat,
scmutil,
+ subrepoutil,
util,
vfs as vfsmod,
)
hg = None
+reporelpath = subrepoutil.reporelpath
+subrelpath = subrepoutil.subrelpath
+_abssource = subrepoutil._abssource
propertycache = util.propertycache
-nullstate = ('', '', 'empty')
-
def _expandedabspath(path):
'''
get a path or url and if it is a path expand it and return an absolute path
@@ -80,284 +80,6 @@
return res
return decoratedmethod
-def state(ctx, ui):
- """return a state dict, mapping subrepo paths configured in .hgsub
- to tuple: (source from .hgsub, revision from .hgsubstate, kind
- (key in types dict))
- """
- p = config.config()
- repo = ctx.repo()
- def read(f, sections=None, remap=None):
- if f in ctx:
- try:
- data = ctx[f].data()
- except IOError as err:
- if err.errno != errno.ENOENT:
- raise
- # handle missing subrepo spec files as removed
- ui.warn(_("warning: subrepo spec file \'%s\' not found\n") %
- repo.pathto(f))
- return
- p.parse(f, data, sections, remap, read)
- else:
- raise error.Abort(_("subrepo spec file \'%s\' not found") %
- repo.pathto(f))
- if '.hgsub' in ctx:
- read('.hgsub')
-
- for path, src in ui.configitems('subpaths'):
- p.set('subpaths', path, src, ui.configsource('subpaths', path))
-
- rev = {}
- if '.hgsubstate' in ctx:
- try:
- for i, l in enumerate(ctx['.hgsubstate'].data().splitlines()):
- l = l.lstrip()
- if not l:
- continue
- try:
- revision, path = l.split(" ", 1)
- except ValueError:
- raise error.Abort(_("invalid subrepository revision "
- "specifier in \'%s\' line %d")
- % (repo.pathto('.hgsubstate'), (i + 1)))
- rev[path] = revision
- except IOError as err:
- if err.errno != errno.ENOENT:
- raise
-
- def remap(src):
- for pattern, repl in p.items('subpaths'):
- # Turn r'C:\foo\bar' into r'C:\\foo\\bar' since re.sub
- # does a string decode.
- repl = util.escapestr(repl)
- # However, we still want to allow back references to go
- # through unharmed, so we turn r'\\1' into r'\1'. Again,
- # extra escapes are needed because re.sub string decodes.
- repl = re.sub(br'\\\\([0-9]+)', br'\\\1', repl)
- try:
- src = re.sub(pattern, repl, src, 1)
- except re.error as e:
- raise error.Abort(_("bad subrepository pattern in %s: %s")
- % (p.source('subpaths', pattern), e))
- return src
-
- state = {}
- for path, src in p[''].items():
- kind = 'hg'
- if src.startswith('['):
- if ']' not in src:
- raise error.Abort(_('missing ] in subrepository source'))
- kind, src = src.split(']', 1)
- kind = kind[1:]
- src = src.lstrip() # strip any extra whitespace after ']'
-
- if not util.url(src).isabs():
- parent = _abssource(repo, abort=False)
- if parent:
- parent = util.url(parent)
- parent.path = posixpath.join(parent.path or '', src)
- parent.path = posixpath.normpath(parent.path)
- joined = str(parent)
- # Remap the full joined path and use it if it changes,
- # else remap the original source.
- remapped = remap(joined)
- if remapped == joined:
- src = remap(src)
- else:
- src = remapped
-
- src = remap(src)
- state[util.pconvert(path)] = (src.strip(), rev.get(path, ''), kind)
-
- return state
-
-def writestate(repo, state):
- """rewrite .hgsubstate in (outer) repo with these subrepo states"""
- lines = ['%s %s\n' % (state[s][1], s) for s in sorted(state)
- if state[s][1] != nullstate[1]]
- repo.wwrite('.hgsubstate', ''.join(lines), '')
-
-def submerge(repo, wctx, mctx, actx, overwrite, labels=None):
- """delegated from merge.applyupdates: merging of .hgsubstate file
- in working context, merging context and ancestor context"""
- if mctx == actx: # backwards?
- actx = wctx.p1()
- s1 = wctx.substate
- s2 = mctx.substate
- sa = actx.substate
- sm = {}
-
- repo.ui.debug("subrepo merge %s %s %s\n" % (wctx, mctx, actx))
-
- def debug(s, msg, r=""):
- if r:
- r = "%s:%s:%s" % r
- repo.ui.debug(" subrepo %s: %s %s\n" % (s, msg, r))
-
- promptssrc = filemerge.partextras(labels)
- for s, l in sorted(s1.iteritems()):
- prompts = None
- a = sa.get(s, nullstate)
- ld = l # local state with possible dirty flag for compares
- if wctx.sub(s).dirty():
- ld = (l[0], l[1] + "+")
- if wctx == actx: # overwrite
- a = ld
-
- prompts = promptssrc.copy()
- prompts['s'] = s
- if s in s2:
- r = s2[s]
- if ld == r or r == a: # no change or local is newer
- sm[s] = l
- continue
- elif ld == a: # other side changed
- debug(s, "other changed, get", r)
- wctx.sub(s).get(r, overwrite)
- sm[s] = r
- elif ld[0] != r[0]: # sources differ
- prompts['lo'] = l[0]
- prompts['ro'] = r[0]
- if repo.ui.promptchoice(
- _(' subrepository sources for %(s)s differ\n'
- 'use (l)ocal%(l)s source (%(lo)s)'
- ' or (r)emote%(o)s source (%(ro)s)?'
- '$$ &Local $$ &Remote') % prompts, 0):
- debug(s, "prompt changed, get", r)
- wctx.sub(s).get(r, overwrite)
- sm[s] = r
- elif ld[1] == a[1]: # local side is unchanged
- debug(s, "other side changed, get", r)
- wctx.sub(s).get(r, overwrite)
- sm[s] = r
- else:
- debug(s, "both sides changed")
- srepo = wctx.sub(s)
- prompts['sl'] = srepo.shortid(l[1])
- prompts['sr'] = srepo.shortid(r[1])
- option = repo.ui.promptchoice(
- _(' subrepository %(s)s diverged (local revision: %(sl)s, '
- 'remote revision: %(sr)s)\n'
- '(M)erge, keep (l)ocal%(l)s or keep (r)emote%(o)s?'
- '$$ &Merge $$ &Local $$ &Remote')
- % prompts, 0)
- if option == 0:
- wctx.sub(s).merge(r)
- sm[s] = l
- debug(s, "merge with", r)
- elif option == 1:
- sm[s] = l
- debug(s, "keep local subrepo revision", l)
- else:
- wctx.sub(s).get(r, overwrite)
- sm[s] = r
- debug(s, "get remote subrepo revision", r)
- elif ld == a: # remote removed, local unchanged
- debug(s, "remote removed, remove")
- wctx.sub(s).remove()
- elif a == nullstate: # not present in remote or ancestor
- debug(s, "local added, keep")
- sm[s] = l
- continue
- else:
- if repo.ui.promptchoice(
- _(' local%(l)s changed subrepository %(s)s'
- ' which remote%(o)s removed\n'
- 'use (c)hanged version or (d)elete?'
- '$$ &Changed $$ &Delete') % prompts, 0):
- debug(s, "prompt remove")
- wctx.sub(s).remove()
-
- for s, r in sorted(s2.items()):
- prompts = None
- if s in s1:
- continue
- elif s not in sa:
- debug(s, "remote added, get", r)
- mctx.sub(s).get(r)
- sm[s] = r
- elif r != sa[s]:
- prompts = promptssrc.copy()
- prompts['s'] = s
- if repo.ui.promptchoice(
- _(' remote%(o)s changed subrepository %(s)s'
- ' which local%(l)s removed\n'
- 'use (c)hanged version or (d)elete?'
- '$$ &Changed $$ &Delete') % prompts, 0) == 0:
- debug(s, "prompt recreate", r)
- mctx.sub(s).get(r)
- sm[s] = r
-
- # record merged .hgsubstate
- writestate(repo, sm)
- return sm
-
-def precommit(ui, wctx, status, match, force=False):
- """Calculate .hgsubstate changes that should be applied before committing
-
- Returns (subs, commitsubs, newstate) where
- - subs: changed subrepos (including dirty ones)
- - commitsubs: dirty subrepos which the caller needs to commit recursively
- - newstate: new state dict which the caller must write to .hgsubstate
-
- This also updates the given status argument.
- """
- subs = []
- commitsubs = set()
- newstate = wctx.substate.copy()
-
- # only manage subrepos and .hgsubstate if .hgsub is present
- if '.hgsub' in wctx:
- # we'll decide whether to track this ourselves, thanks
- for c in status.modified, status.added, status.removed:
- if '.hgsubstate' in c:
- c.remove('.hgsubstate')
-
- # compare current state to last committed state
- # build new substate based on last committed state
- oldstate = wctx.p1().substate
- for s in sorted(newstate.keys()):
- if not match(s):
- # ignore working copy, use old state if present
- if s in oldstate:
- newstate[s] = oldstate[s]
- continue
- if not force:
- raise error.Abort(
- _("commit with new subrepo %s excluded") % s)
- dirtyreason = wctx.sub(s).dirtyreason(True)
- if dirtyreason:
- if not ui.configbool('ui', 'commitsubrepos'):
- raise error.Abort(dirtyreason,
- hint=_("use --subrepos for recursive commit"))
- subs.append(s)
- commitsubs.add(s)
- else:
- bs = wctx.sub(s).basestate()
- newstate[s] = (newstate[s][0], bs, newstate[s][2])
- if oldstate.get(s, (None, None, None))[1] != bs:
- subs.append(s)
-
- # check for removed subrepos
- for p in wctx.parents():
- r = [s for s in p.substate if s not in newstate]
- subs += [s for s in r if match(s)]
- if subs:
- if (not match('.hgsub') and
- '.hgsub' in (wctx.modified() + wctx.added())):
- raise error.Abort(_("can't commit subrepos without .hgsub"))
- status.modified.insert(0, '.hgsubstate')
-
- elif '.hgsub' in status.removed:
- # clean up .hgsubstate when .hgsub is removed
- if ('.hgsubstate' in wctx and
- '.hgsubstate' not in (status.modified + status.added +
- status.removed)):
- status.removed.insert(0, '.hgsubstate')
-
- return subs, commitsubs, newstate
-
def _updateprompt(ui, sub, dirty, local, remote):
if dirty:
msg = (_(' subrepository sources for %s differ\n'
@@ -372,64 +94,6 @@
% (subrelpath(sub), local, remote))
return ui.promptchoice(msg, 0)
-def reporelpath(repo):
- """return path to this (sub)repo as seen from outermost repo"""
- parent = repo
- while util.safehasattr(parent, '_subparent'):
- parent = parent._subparent
- return repo.root[len(pathutil.normasprefix(parent.root)):]
-
-def subrelpath(sub):
- """return path to this subrepo as seen from outermost repo"""
- return sub._relpath
-
-def _abssource(repo, push=False, abort=True):
- """return pull/push path of repo - either based on parent repo .hgsub info
- or on the top repo config. Abort or return None if no source found."""
- if util.safehasattr(repo, '_subparent'):
- source = util.url(repo._subsource)
- if source.isabs():
- return bytes(source)
- source.path = posixpath.normpath(source.path)
- parent = _abssource(repo._subparent, push, abort=False)
- if parent:
- parent = util.url(util.pconvert(parent))
- parent.path = posixpath.join(parent.path or '', source.path)
- parent.path = posixpath.normpath(parent.path)
- return bytes(parent)
- else: # recursion reached top repo
- path = None
- if util.safehasattr(repo, '_subtoppath'):
- path = repo._subtoppath
- elif push and repo.ui.config('paths', 'default-push'):
- path = repo.ui.config('paths', 'default-push')
- elif repo.ui.config('paths', 'default'):
- path = repo.ui.config('paths', 'default')
- elif repo.shared():
- # chop off the .hg component to get the default path form. This has
- # already run through vfsmod.vfs(..., realpath=True), so it doesn't
- # have problems with 'C:'
- return os.path.dirname(repo.sharedpath)
- if path:
- # issue5770: 'C:\' and 'C:' are not equivalent paths. The former is
- # as expected: an absolute path to the root of the C: drive. The
- # latter is a relative path, and works like so:
- #
- # C:\>cd C:\some\path
- # C:\>D:
- # D:\>python -c "import os; print os.path.abspath('C:')"
- # C:\some\path
- #
- # D:\>python -c "import os; print os.path.abspath('C:relative')"
- # C:\some\path\relative
- if util.hasdriveletter(path):
- if len(path) == 2 or path[2:3] not in br'\/':
- path = os.path.abspath(path)
- return path
-
- if abort:
- raise error.Abort(_("default path for subrepository not found"))
-
def _sanitize(ui, vfs, ignore):
for dirname, dirs, names in vfs.walk():
for i, d in enumerate(dirs):
@@ -508,37 +172,6 @@
subrev = "0" * 40
return types[state[2]](pctx, path, (state[0], subrev), True)
-def newcommitphase(ui, ctx):
- commitphase = phases.newcommitphase(ui)
- substate = getattr(ctx, "substate", None)
- if not substate:
- return commitphase
- check = ui.config('phases', 'checksubrepos')
- if check not in ('ignore', 'follow', 'abort'):
- raise error.Abort(_('invalid phases.checksubrepos configuration: %s')
- % (check))
- if check == 'ignore':
- return commitphase
- maxphase = phases.public
- maxsub = None
- for s in sorted(substate):
- sub = ctx.sub(s)
- subphase = sub.phase(substate[s][1])
- if maxphase < subphase:
- maxphase = subphase
- maxsub = s
- if commitphase < maxphase:
- if check == 'abort':
- raise error.Abort(_("can't commit in %s phase"
- " conflicting %s from subrepository %s") %
- (phases.phasenames[commitphase],
- phases.phasenames[maxphase], maxsub))
- ui.warn(_("warning: changes are committed in"
- " %s phase from subrepository %s\n") %
- (phases.phasenames[maxphase], maxsub))
- return maxphase
- return commitphase
-
# subrepo classes need to implement the following abstract class:
class abstractsubrepo(object):
@@ -907,10 +540,10 @@
# in hex format
if node2 is not None:
node2 = node.bin(node2)
- cmdutil.diffordiffstat(ui, self._repo, diffopts,
- node1, node2, match,
- prefix=posixpath.join(prefix, self._path),
- listsubrepos=True, **opts)
+ logcmdutil.diffordiffstat(ui, self._repo, diffopts,
+ node1, node2, match,
+ prefix=posixpath.join(prefix, self._path),
+ listsubrepos=True, **opts)
except error.RepoLookupError as inst:
self.ui.warn(_('warning: error "%s" in subrepository "%s"\n')
% (inst, subrelpath(self)))
@@ -918,9 +551,13 @@
@annotatesubrepoerror
def archive(self, archiver, prefix, match=None, decode=True):
self._get(self._state + ('hg',))
- total = abstractsubrepo.archive(self, archiver, prefix, match)
+ files = self.files()
+ if match:
+ files = [f for f in files if match(f)]
rev = self._state[1]
ctx = self._repo[rev]
+ scmutil.fileprefetchhooks(self._repo, ctx, files)
+ total = abstractsubrepo.archive(self, archiver, prefix, match)
for subpath in ctx.substate:
s = subrepo(ctx, subpath, True)
submatch = matchmod.subdirmatcher(subpath, match)
@@ -2005,8 +1642,7 @@
# TODO: add support for non-plain formatter (see cmdutil.cat())
for f in match.files():
output = self._gitcommand(["show", "%s:%s" % (rev, f)])
- fp = cmdutil.makefileobj(self._subparent, fntemplate,
- self._ctx.node(),
+ fp = cmdutil.makefileobj(self._ctx, fntemplate,
pathname=self.wvfs.reljoin(prefix, f))
fp.write(output)
fp.close()
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/mercurial/subrepoutil.py Sat Feb 24 17:49:10 2018 -0600
@@ -0,0 +1,392 @@
+# subrepoutil.py - sub-repository operations and substate handling
+#
+# Copyright 2009-2010 Matt Mackall <mpm@selenic.com>
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2 or any later version.
+
+from __future__ import absolute_import
+
+import errno
+import os
+import posixpath
+import re
+
+from .i18n import _
+from . import (
+ config,
+ error,
+ filemerge,
+ pathutil,
+ phases,
+ util,
+)
+
+nullstate = ('', '', 'empty')
+
+def state(ctx, ui):
+ """return a state dict, mapping subrepo paths configured in .hgsub
+ to tuple: (source from .hgsub, revision from .hgsubstate, kind
+ (key in types dict))
+ """
+ p = config.config()
+ repo = ctx.repo()
+ def read(f, sections=None, remap=None):
+ if f in ctx:
+ try:
+ data = ctx[f].data()
+ except IOError as err:
+ if err.errno != errno.ENOENT:
+ raise
+ # handle missing subrepo spec files as removed
+ ui.warn(_("warning: subrepo spec file \'%s\' not found\n") %
+ repo.pathto(f))
+ return
+ p.parse(f, data, sections, remap, read)
+ else:
+ raise error.Abort(_("subrepo spec file \'%s\' not found") %
+ repo.pathto(f))
+ if '.hgsub' in ctx:
+ read('.hgsub')
+
+ for path, src in ui.configitems('subpaths'):
+ p.set('subpaths', path, src, ui.configsource('subpaths', path))
+
+ rev = {}
+ if '.hgsubstate' in ctx:
+ try:
+ for i, l in enumerate(ctx['.hgsubstate'].data().splitlines()):
+ l = l.lstrip()
+ if not l:
+ continue
+ try:
+ revision, path = l.split(" ", 1)
+ except ValueError:
+ raise error.Abort(_("invalid subrepository revision "
+ "specifier in \'%s\' line %d")
+ % (repo.pathto('.hgsubstate'), (i + 1)))
+ rev[path] = revision
+ except IOError as err:
+ if err.errno != errno.ENOENT:
+ raise
+
+ def remap(src):
+ for pattern, repl in p.items('subpaths'):
+ # Turn r'C:\foo\bar' into r'C:\\foo\\bar' since re.sub
+ # does a string decode.
+ repl = util.escapestr(repl)
+ # However, we still want to allow back references to go
+ # through unharmed, so we turn r'\\1' into r'\1'. Again,
+ # extra escapes are needed because re.sub string decodes.
+ repl = re.sub(br'\\\\([0-9]+)', br'\\\1', repl)
+ try:
+ src = re.sub(pattern, repl, src, 1)
+ except re.error as e:
+ raise error.Abort(_("bad subrepository pattern in %s: %s")
+ % (p.source('subpaths', pattern), e))
+ return src
+
+ state = {}
+ for path, src in p[''].items():
+ kind = 'hg'
+ if src.startswith('['):
+ if ']' not in src:
+ raise error.Abort(_('missing ] in subrepository source'))
+ kind, src = src.split(']', 1)
+ kind = kind[1:]
+ src = src.lstrip() # strip any extra whitespace after ']'
+
+ if not util.url(src).isabs():
+ parent = _abssource(repo, abort=False)
+ if parent:
+ parent = util.url(parent)
+ parent.path = posixpath.join(parent.path or '', src)
+ parent.path = posixpath.normpath(parent.path)
+ joined = str(parent)
+ # Remap the full joined path and use it if it changes,
+ # else remap the original source.
+ remapped = remap(joined)
+ if remapped == joined:
+ src = remap(src)
+ else:
+ src = remapped
+
+ src = remap(src)
+ state[util.pconvert(path)] = (src.strip(), rev.get(path, ''), kind)
+
+ return state
+
+def writestate(repo, state):
+ """rewrite .hgsubstate in (outer) repo with these subrepo states"""
+ lines = ['%s %s\n' % (state[s][1], s) for s in sorted(state)
+ if state[s][1] != nullstate[1]]
+ repo.wwrite('.hgsubstate', ''.join(lines), '')
+
+def submerge(repo, wctx, mctx, actx, overwrite, labels=None):
+ """delegated from merge.applyupdates: merging of .hgsubstate file
+ in working context, merging context and ancestor context"""
+ if mctx == actx: # backwards?
+ actx = wctx.p1()
+ s1 = wctx.substate
+ s2 = mctx.substate
+ sa = actx.substate
+ sm = {}
+
+ repo.ui.debug("subrepo merge %s %s %s\n" % (wctx, mctx, actx))
+
+ def debug(s, msg, r=""):
+ if r:
+ r = "%s:%s:%s" % r
+ repo.ui.debug(" subrepo %s: %s %s\n" % (s, msg, r))
+
+ promptssrc = filemerge.partextras(labels)
+ for s, l in sorted(s1.iteritems()):
+ prompts = None
+ a = sa.get(s, nullstate)
+ ld = l # local state with possible dirty flag for compares
+ if wctx.sub(s).dirty():
+ ld = (l[0], l[1] + "+")
+ if wctx == actx: # overwrite
+ a = ld
+
+ prompts = promptssrc.copy()
+ prompts['s'] = s
+ if s in s2:
+ r = s2[s]
+ if ld == r or r == a: # no change or local is newer
+ sm[s] = l
+ continue
+ elif ld == a: # other side changed
+ debug(s, "other changed, get", r)
+ wctx.sub(s).get(r, overwrite)
+ sm[s] = r
+ elif ld[0] != r[0]: # sources differ
+ prompts['lo'] = l[0]
+ prompts['ro'] = r[0]
+ if repo.ui.promptchoice(
+ _(' subrepository sources for %(s)s differ\n'
+ 'use (l)ocal%(l)s source (%(lo)s)'
+ ' or (r)emote%(o)s source (%(ro)s)?'
+ '$$ &Local $$ &Remote') % prompts, 0):
+ debug(s, "prompt changed, get", r)
+ wctx.sub(s).get(r, overwrite)
+ sm[s] = r
+ elif ld[1] == a[1]: # local side is unchanged
+ debug(s, "other side changed, get", r)
+ wctx.sub(s).get(r, overwrite)
+ sm[s] = r
+ else:
+ debug(s, "both sides changed")
+ srepo = wctx.sub(s)
+ prompts['sl'] = srepo.shortid(l[1])
+ prompts['sr'] = srepo.shortid(r[1])
+ option = repo.ui.promptchoice(
+ _(' subrepository %(s)s diverged (local revision: %(sl)s, '
+ 'remote revision: %(sr)s)\n'
+ '(M)erge, keep (l)ocal%(l)s or keep (r)emote%(o)s?'
+ '$$ &Merge $$ &Local $$ &Remote')
+ % prompts, 0)
+ if option == 0:
+ wctx.sub(s).merge(r)
+ sm[s] = l
+ debug(s, "merge with", r)
+ elif option == 1:
+ sm[s] = l
+ debug(s, "keep local subrepo revision", l)
+ else:
+ wctx.sub(s).get(r, overwrite)
+ sm[s] = r
+ debug(s, "get remote subrepo revision", r)
+ elif ld == a: # remote removed, local unchanged
+ debug(s, "remote removed, remove")
+ wctx.sub(s).remove()
+ elif a == nullstate: # not present in remote or ancestor
+ debug(s, "local added, keep")
+ sm[s] = l
+ continue
+ else:
+ if repo.ui.promptchoice(
+ _(' local%(l)s changed subrepository %(s)s'
+ ' which remote%(o)s removed\n'
+ 'use (c)hanged version or (d)elete?'
+ '$$ &Changed $$ &Delete') % prompts, 0):
+ debug(s, "prompt remove")
+ wctx.sub(s).remove()
+
+ for s, r in sorted(s2.items()):
+ prompts = None
+ if s in s1:
+ continue
+ elif s not in sa:
+ debug(s, "remote added, get", r)
+ mctx.sub(s).get(r)
+ sm[s] = r
+ elif r != sa[s]:
+ prompts = promptssrc.copy()
+ prompts['s'] = s
+ if repo.ui.promptchoice(
+ _(' remote%(o)s changed subrepository %(s)s'
+ ' which local%(l)s removed\n'
+ 'use (c)hanged version or (d)elete?'
+ '$$ &Changed $$ &Delete') % prompts, 0) == 0:
+ debug(s, "prompt recreate", r)
+ mctx.sub(s).get(r)
+ sm[s] = r
+
+ # record merged .hgsubstate
+ writestate(repo, sm)
+ return sm
+
+def precommit(ui, wctx, status, match, force=False):
+ """Calculate .hgsubstate changes that should be applied before committing
+
+ Returns (subs, commitsubs, newstate) where
+ - subs: changed subrepos (including dirty ones)
+ - commitsubs: dirty subrepos which the caller needs to commit recursively
+ - newstate: new state dict which the caller must write to .hgsubstate
+
+ This also updates the given status argument.
+ """
+ subs = []
+ commitsubs = set()
+ newstate = wctx.substate.copy()
+
+ # only manage subrepos and .hgsubstate if .hgsub is present
+ if '.hgsub' in wctx:
+ # we'll decide whether to track this ourselves, thanks
+ for c in status.modified, status.added, status.removed:
+ if '.hgsubstate' in c:
+ c.remove('.hgsubstate')
+
+ # compare current state to last committed state
+ # build new substate based on last committed state
+ oldstate = wctx.p1().substate
+ for s in sorted(newstate.keys()):
+ if not match(s):
+ # ignore working copy, use old state if present
+ if s in oldstate:
+ newstate[s] = oldstate[s]
+ continue
+ if not force:
+ raise error.Abort(
+ _("commit with new subrepo %s excluded") % s)
+ dirtyreason = wctx.sub(s).dirtyreason(True)
+ if dirtyreason:
+ if not ui.configbool('ui', 'commitsubrepos'):
+ raise error.Abort(dirtyreason,
+ hint=_("use --subrepos for recursive commit"))
+ subs.append(s)
+ commitsubs.add(s)
+ else:
+ bs = wctx.sub(s).basestate()
+ newstate[s] = (newstate[s][0], bs, newstate[s][2])
+ if oldstate.get(s, (None, None, None))[1] != bs:
+ subs.append(s)
+
+ # check for removed subrepos
+ for p in wctx.parents():
+ r = [s for s in p.substate if s not in newstate]
+ subs += [s for s in r if match(s)]
+ if subs:
+ if (not match('.hgsub') and
+ '.hgsub' in (wctx.modified() + wctx.added())):
+ raise error.Abort(_("can't commit subrepos without .hgsub"))
+ status.modified.insert(0, '.hgsubstate')
+
+ elif '.hgsub' in status.removed:
+ # clean up .hgsubstate when .hgsub is removed
+ if ('.hgsubstate' in wctx and
+ '.hgsubstate' not in (status.modified + status.added +
+ status.removed)):
+ status.removed.insert(0, '.hgsubstate')
+
+ return subs, commitsubs, newstate
+
+def reporelpath(repo):
+ """return path to this (sub)repo as seen from outermost repo"""
+ parent = repo
+ while util.safehasattr(parent, '_subparent'):
+ parent = parent._subparent
+ return repo.root[len(pathutil.normasprefix(parent.root)):]
+
+def subrelpath(sub):
+ """return path to this subrepo as seen from outermost repo"""
+ return sub._relpath
+
+def _abssource(repo, push=False, abort=True):
+ """return pull/push path of repo - either based on parent repo .hgsub info
+ or on the top repo config. Abort or return None if no source found."""
+ if util.safehasattr(repo, '_subparent'):
+ source = util.url(repo._subsource)
+ if source.isabs():
+ return bytes(source)
+ source.path = posixpath.normpath(source.path)
+ parent = _abssource(repo._subparent, push, abort=False)
+ if parent:
+ parent = util.url(util.pconvert(parent))
+ parent.path = posixpath.join(parent.path or '', source.path)
+ parent.path = posixpath.normpath(parent.path)
+ return bytes(parent)
+ else: # recursion reached top repo
+ path = None
+ if util.safehasattr(repo, '_subtoppath'):
+ path = repo._subtoppath
+ elif push and repo.ui.config('paths', 'default-push'):
+ path = repo.ui.config('paths', 'default-push')
+ elif repo.ui.config('paths', 'default'):
+ path = repo.ui.config('paths', 'default')
+ elif repo.shared():
+ # chop off the .hg component to get the default path form. This has
+ # already run through vfsmod.vfs(..., realpath=True), so it doesn't
+ # have problems with 'C:'
+ return os.path.dirname(repo.sharedpath)
+ if path:
+ # issue5770: 'C:\' and 'C:' are not equivalent paths. The former is
+ # as expected: an absolute path to the root of the C: drive. The
+ # latter is a relative path, and works like so:
+ #
+ # C:\>cd C:\some\path
+ # C:\>D:
+ # D:\>python -c "import os; print os.path.abspath('C:')"
+ # C:\some\path
+ #
+ # D:\>python -c "import os; print os.path.abspath('C:relative')"
+ # C:\some\path\relative
+ if util.hasdriveletter(path):
+ if len(path) == 2 or path[2:3] not in br'\/':
+ path = os.path.abspath(path)
+ return path
+
+ if abort:
+ raise error.Abort(_("default path for subrepository not found"))
+
+def newcommitphase(ui, ctx):
+ commitphase = phases.newcommitphase(ui)
+ substate = getattr(ctx, "substate", None)
+ if not substate:
+ return commitphase
+ check = ui.config('phases', 'checksubrepos')
+ if check not in ('ignore', 'follow', 'abort'):
+ raise error.Abort(_('invalid phases.checksubrepos configuration: %s')
+ % (check))
+ if check == 'ignore':
+ return commitphase
+ maxphase = phases.public
+ maxsub = None
+ for s in sorted(substate):
+ sub = ctx.sub(s)
+ subphase = sub.phase(substate[s][1])
+ if maxphase < subphase:
+ maxphase = subphase
+ maxsub = s
+ if commitphase < maxphase:
+ if check == 'abort':
+ raise error.Abort(_("can't commit in %s phase"
+ " conflicting %s from subrepository %s") %
+ (phases.phasenames[commitphase],
+ phases.phasenames[maxphase], maxsub))
+ ui.warn(_("warning: changes are committed in"
+ " %s phase from subrepository %s\n") %
+ (phases.phasenames[maxphase], maxsub))
+ return maxphase
+ return commitphase
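
Context for the new subrepoutil module (my note, not part of the patch): the .hgsubstate file that state() parses and writestate() rewrites is plain text with one "<revision> <path>" pair per line, split on the first space. A minimal standalone sketch of that parsing, using a hypothetical helper name:

def parse_hgsubstate(data):
    # Mirror of the loop in subrepoutil.state(): one "<revision> <path>"
    # pair per line, split on the first space so the path may itself
    # contain spaces.
    rev = {}
    for i, line in enumerate(data.splitlines()):
        line = line.lstrip()
        if not line:
            continue
        try:
            revision, path = line.split(b" ", 1)
        except ValueError:
            raise ValueError("invalid subrepository revision specifier "
                             "on line %d" % (i + 1))
        rev[path] = revision
    return rev

sample = (b"0123456789abcdef0123456789abcdef01234567 vendor/lib\n"
          b"89abcdef0123456789abcdef0123456789abcdef docs/api\n")
assert parse_hgsubstate(sample)[b"vendor/lib"].startswith(b"0123456789")
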
--- a/mercurial/tags.py Fri Feb 23 17:57:04 2018 -0800
+++ b/mercurial/tags.py Sat Feb 24 17:49:10 2018 -0600
@@ -244,7 +244,7 @@
# remove tags pointing to invalid nodes
cl = repo.changelog
- for t in filetags.keys():
+ for t in list(filetags):
try:
cl.rev(filetags[t][0])
except (LookupError, ValueError):
@@ -739,7 +739,7 @@
entry = bytearray(prefix + fnode)
self._raw[offset:offset + _fnodesrecsize] = entry
# self._dirtyoffset could be None.
- self._dirtyoffset = min(self._dirtyoffset, offset) or 0
+ self._dirtyoffset = min(self._dirtyoffset or 0, offset or 0)
def write(self):
"""Perform all necessary writes to cache file.
@@ -783,6 +783,6 @@
except (IOError, OSError) as inst:
repo.ui.log('tagscache',
"couldn't write cache/%s: %s\n" % (
- _fnodescachefile, inst))
+ _fnodescachefile, util.forcebytestr(inst)))
finally:
lock.release()
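
A quick illustration (my sketch, not from the patch) of why the _dirtyoffset assignment changed: min() of None and an integer raises TypeError on Python 3, while the new form collapses the "no dirty range yet" case to 0 on both interpreters.

def old_update(dirtyoffset, offset):
    # Python 2 ordered None before every int; Python 3 raises TypeError here.
    return min(dirtyoffset, offset) or 0

def new_update(dirtyoffset, offset):
    # Treat a missing dirty offset (None) as 0 explicitly on both sides.
    return min(dirtyoffset or 0, offset or 0)

assert new_update(None, 128) == 0    # same result Python 2 gave via "or 0"
assert new_update(256, 128) == 128
try:
    old_update(None, 128)
except TypeError:
    print("old form breaks on Python 3")
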
--- a/mercurial/templatefilters.py Fri Feb 23 17:57:04 2018 -0800
+++ b/mercurial/templatefilters.py Sat Feb 24 17:49:10 2018 -0600
@@ -100,6 +100,13 @@
"""List or text. Returns the length as an integer."""
return len(i)
+@templatefilter('dirname')
+def dirname(path):
+ """Any text. Treats the text as a path, and strips the last
+ component of the path after splitting by the path separator.
+ """
+ return os.path.dirname(path)
+
@templatefilter('domain')
def domain(author):
"""Any text. Finds the first string that looks like an email
--- a/mercurial/templatekw.py Fri Feb 23 17:57:04 2018 -0800
+++ b/mercurial/templatekw.py Sat Feb 24 17:49:10 2018 -0600
@@ -192,11 +192,15 @@
def one(v, tag=name):
try:
vmapping.update(v)
- except (AttributeError, ValueError):
+ # Python 2 raises ValueError if the type of v is wrong. Python
+ # 3 raises TypeError.
+ except (AttributeError, TypeError, ValueError):
try:
+ # Python 2 raises ValueError trying to destructure an e.g.
+ # bytes. Python 3 raises TypeError.
for a, b in v:
vmapping[a] = b
- except ValueError:
+ except (TypeError, ValueError):
vmapping[name] = v
return templ(tag, **pycompat.strkwargs(vmapping))
lastname = 'last_' + name
@@ -722,6 +726,11 @@
lambda x: {'ctx': repo[x], 'revcache': {}},
lambda x: scmutil.formatchangeid(repo[x]))
+@templatekeyword('reporoot')
+def showreporoot(repo, **args):
+ """String. The root directory of the current repository."""
+ return repo.root
+
@templatekeyword("successorssets")
def showsuccessorssets(repo, ctx, **args):
"""Returns a string of sets of successors for a changectx. Format used
@@ -893,17 +902,6 @@
"""Integer. The width of the current terminal."""
return repo.ui.termwidth()
-@templatekeyword('troubles')
-def showtroubles(repo, **args):
- """List of strings. Evolution troubles affecting the changeset.
- (DEPRECATED)
- """
- msg = ("'troubles' is deprecated, "
- "use 'instabilities'")
- repo.ui.deprecwarn(msg, '4.4')
-
- return showinstabilities(repo=repo, **args)
-
@templatekeyword('instabilities')
def showinstabilities(**args):
"""List of strings. Evolution instabilities affecting the changeset.
@@ -917,7 +915,7 @@
def showverbosity(ui, **args):
"""String. The current output verbosity in 'debug', 'quiet', 'verbose',
or ''."""
- # see cmdutil.changeset_templater for priority of these flags
+ # see logcmdutil.changesettemplater for priority of these flags
if ui.debugflag:
return 'debug'
elif ui.quiet:
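
The widened except clauses above exist because the exception type changed between Python 2 and Python 3; a standalone sketch (mine) of the two failure modes the new comments describe:

vmapping = {}
value = b'abc'
try:
    vmapping.update(value)       # ValueError on Python 2, TypeError on Python 3
except (AttributeError, TypeError, ValueError):
    try:
        for a, b in value:       # ValueError on Python 2, TypeError on Python 3
            vmapping[a] = b
    except (TypeError, ValueError):
        vmapping['name'] = value
assert vmapping == {'name': b'abc'}
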
--- a/mercurial/templater.py Fri Feb 23 17:57:04 2018 -0800
+++ b/mercurial/templater.py Sat Feb 24 17:49:10 2018 -0600
@@ -161,6 +161,39 @@
([('string', 'foo\\')], 6)
"""
parsed = []
+ for typ, val, pos in _scantemplate(tmpl, start, stop, quote):
+ if typ == 'string':
+ parsed.append((typ, val))
+ elif typ == 'template':
+ parsed.append(val)
+ elif typ == 'end':
+ return parsed, pos
+ else:
+ raise error.ProgrammingError('unexpected type: %s' % typ)
+ raise error.ProgrammingError('unterminated scanning of template')
+
+def scantemplate(tmpl):
+ """Scan (type, start, end) positions of outermost elements in template
+
+ >>> list(scantemplate(b'foo{bar}"baz'))
+ [('string', 0, 3), ('template', 3, 8), ('string', 8, 12)]
+ >>> list(scantemplate(b'outer{"inner"}outer'))
+ [('string', 0, 5), ('template', 5, 14), ('string', 14, 19)]
+ >>> list(scantemplate(b'foo\\{escaped}'))
+ [('string', 0, 5), ('string', 5, 13)]
+ """
+ last = None
+ for typ, val, pos in _scantemplate(tmpl, 0, len(tmpl)):
+ if last:
+ yield last + (pos,)
+ if typ == 'end':
+ return
+ else:
+ last = (typ, pos)
+ raise error.ProgrammingError('unterminated scanning of template')
+
+def _scantemplate(tmpl, start, stop, quote=''):
+ """Parse template string into chunks of strings and template expressions"""
sepchars = '{' + quote
pos = start
p = parser.parser(elements)
@@ -168,29 +201,30 @@
n = min((tmpl.find(c, pos, stop) for c in sepchars),
key=lambda n: (n < 0, n))
if n < 0:
- parsed.append(('string', parser.unescapestr(tmpl[pos:stop])))
+ yield ('string', parser.unescapestr(tmpl[pos:stop]), pos)
pos = stop
break
c = tmpl[n:n + 1]
bs = (n - pos) - len(tmpl[pos:n].rstrip('\\'))
if bs % 2 == 1:
# escaped (e.g. '\{', '\\\{', but not '\\{')
- parsed.append(('string', parser.unescapestr(tmpl[pos:n - 1]) + c))
+ yield ('string', parser.unescapestr(tmpl[pos:n - 1]) + c, pos)
pos = n + 1
continue
if n > pos:
- parsed.append(('string', parser.unescapestr(tmpl[pos:n])))
+ yield ('string', parser.unescapestr(tmpl[pos:n]), pos)
if c == quote:
- return parsed, n + 1
+ yield ('end', None, n + 1)
+ return
parseres, pos = p.parse(tokenize(tmpl, n + 1, stop, '}'))
if not tmpl.endswith('}', n + 1, pos):
raise error.ParseError(_("invalid token"), pos)
- parsed.append(parseres)
+ yield ('template', parseres, n)
if quote:
raise error.ParseError(_("unterminated string"), start)
- return parsed, pos
+ yield ('end', None, pos)
def _unnesttemplatelist(tree):
"""Expand list of templates to node tuple
--- a/mercurial/templates/gitweb/changeset.tmpl Fri Feb 23 17:57:04 2018 -0800
+++ b/mercurial/templates/gitweb/changeset.tmpl Sat Feb 24 17:49:10 2018 -0600
@@ -44,7 +44,7 @@
<td>changeset {rev}</td>
<td style="font-family:monospace"><a class="list" href="{url|urlescape}rev/{node|short}{sessionvars%urlparameter}">{node|short}</a></td>
</tr>
-{if(obsolete, '<tr><td>obsolete</td><td>{succsandmarkers%obsfateentry}</td></tr>')}
+{if(obsolete, succsandmarkers%obsfateentry)}
{ifeq(count(parent), '2', parent%changesetparentdiff, parent%changesetparent)}
{child%changesetchild}
</table></div>
--- a/mercurial/templates/gitweb/map Fri Feb 23 17:57:04 2018 -0800
+++ b/mercurial/templates/gitweb/map Sat Feb 24 17:49:10 2018 -0600
@@ -275,7 +275,13 @@
obsfatesuccessors = '{if(successors, ' as ')}{successors%successorlink}'
obsfateverb = '{obsfateverb(successors, markers)}'
obsfateoperations = '{if(obsfateoperations(markers), ' using {join(obsfateoperations(markers), ', ')}')}'
-obsfateentry = '{obsfateverb}{obsfateoperations}{obsfatesuccessors}'
+obsfateusers = '{if(obsfateusers(markers), ' by {join(obsfateusers(markers)%'{user|obfuscate}', ', ')}')}'
+obsfatedate = '{if(obsfatedate(markers), ' {ifeq(min(obsfatedate(markers)), max(obsfatedate(markers)), '<span class="age">{min(obsfatedate(markers))|rfc822date}</span>', 'between <span class="age">{min(obsfatedate(markers))|rfc822date}</span> and <span class="age">{max(obsfatedate(markers))|rfc822date}</span>')}')}'
+obsfateentry = '
+ <tr>
+ <td>obsolete</td>
+ <td>{obsfateverb}{obsfateoperations}{obsfatesuccessors}{obsfateusers}{obsfatedate}</td>
+ </tr>'
shortlogentry = '
<tr class="parity{parity}">
<td class="age"><i class="age">{date|rfc822date}</i></td>
--- a/mercurial/templates/monoblue/changeset.tmpl Fri Feb 23 17:57:04 2018 -0800
+++ b/mercurial/templates/monoblue/changeset.tmpl Sat Feb 24 17:49:10 2018 -0600
@@ -48,7 +48,7 @@
{branch%changesetbranch}
<dt>changeset {rev}</dt>
<dd><a href="{url|urlescape}rev/{node|short}{sessionvars%urlparameter}">{node|short}</a></dd>
- {if(obsolete, '<dt>obsolete</dt><dd>{succsandmarkers%obsfateentry}</dd>')}
+ {if(obsolete, succsandmarkers%obsfateentry)}
{ifeq(count(parent), '2', parent%changesetparentdiff, parent%changesetparent)}
{child%changesetchild}
</dl>
--- a/mercurial/templates/monoblue/map Fri Feb 23 17:57:04 2018 -0800
+++ b/mercurial/templates/monoblue/map Sat Feb 24 17:49:10 2018 -0600
@@ -233,7 +233,11 @@
obsfatesuccessors = '{if(successors, ' as ')}{successors%successorlink}'
obsfateverb = '{obsfateverb(successors, markers)}'
obsfateoperations = '{if(obsfateoperations(markers), ' using {join(obsfateoperations(markers), ', ')}')}'
-obsfateentry = '{obsfateverb}{obsfateoperations}{obsfatesuccessors}'
+obsfateusers = '{if(obsfateusers(markers), ' by {join(obsfateusers(markers)%'{user|obfuscate}', ', ')}')}'
+obsfatedate = '{if(obsfatedate(markers), ' {ifeq(min(obsfatedate(markers)), max(obsfatedate(markers)), '<span class="age">{min(obsfatedate(markers))|rfc822date}</span>', 'between <span class="age">{min(obsfatedate(markers))|rfc822date}</span> and <span class="age">{max(obsfatedate(markers))|rfc822date}</span>')}')}'
+obsfateentry = '
+ <dt>obsolete</dt>
+ <dd>{obsfateverb}{obsfateoperations}{obsfatesuccessors}{obsfateusers}{obsfatedate}</dd>'
shortlogentry = '
<tr class="parity{parity}">
<td class="nowrap age">{date|rfc822date}</td>
--- a/mercurial/templates/paper/changeset.tmpl Fri Feb 23 17:57:04 2018 -0800
+++ b/mercurial/templates/paper/changeset.tmpl Sat Feb 24 17:49:10 2018 -0600
@@ -51,7 +51,7 @@
</tr>
{if(obsolete, '<tr>
<th>obsolete</th>
- <td>{succsandmarkers%obsfateentry}</td>
+ <td>{join(succsandmarkers%obsfateentry, '<br>\n')}</td>
</tr>')}
<tr>
<th class="author">parents</th>
--- a/mercurial/templates/paper/map Fri Feb 23 17:57:04 2018 -0800
+++ b/mercurial/templates/paper/map Sat Feb 24 17:49:10 2018 -0600
@@ -213,7 +213,9 @@
obsfatesuccessors = '{if(successors, ' as ')}{successors%successorlink}'
obsfateverb = '{obsfateverb(successors, markers)}'
obsfateoperations = '{if(obsfateoperations(markers), ' using {join(obsfateoperations(markers), ', ')}')}'
-obsfateentry = '{obsfateverb}{obsfateoperations}{obsfatesuccessors}'
+obsfateusers = '{if(obsfateusers(markers), ' by {join(obsfateusers(markers)%'{user|obfuscate}', ', ')}')}'
+obsfatedate = '{if(obsfatedate(markers), ' {ifeq(min(obsfatedate(markers)), max(obsfatedate(markers)), '<span class="age">{min(obsfatedate(markers))|rfc822date}</span>', 'between <span class="age">{min(obsfatedate(markers))|rfc822date}</span> and <span class="age">{max(obsfatedate(markers))|rfc822date}</span>')}')}'
+obsfateentry = '{obsfateverb}{obsfateoperations}{obsfatesuccessors}{obsfateusers}{obsfatedate}'
filediffparent = '
<tr>
--- a/mercurial/templates/spartan/changelogentry.tmpl Fri Feb 23 17:57:04 2018 -0800
+++ b/mercurial/templates/spartan/changelogentry.tmpl Sat Feb 24 17:49:10 2018 -0600
@@ -22,10 +22,7 @@
<th class="phase">phase:</th>
<td class="phase">{phase|escape}</td>
</tr>')}
- {if(obsolete, '<tr>
- <th class="obsolete">obsolete:</th>
- <td class="obsolete">{succsandmarkers%obsfateentry}</td>
- </tr>')}
+ {if(obsolete, succsandmarkers%obsfateentry)}
{ifeq(count(instabilities), '0', '', '<tr>
<th class="instabilities">instabilities:</th>
<td class="instabilities">{instabilities%"{instability} "|escape}</td>
--- a/mercurial/templates/spartan/changeset.tmpl Fri Feb 23 17:57:04 2018 -0800
+++ b/mercurial/templates/spartan/changeset.tmpl Sat Feb 24 17:49:10 2018 -0600
@@ -37,10 +37,7 @@
<th class="phase">phase:</th>
<td class="phase">{phase|escape}</td>
</tr>')}
-{if(obsolete, '<tr>
- <th class="obsolete">obsolete:</th>
- <td class="obsolete">{succsandmarkers%obsfateentry}</td>
-</tr>')}
+{if(obsolete, succsandmarkers%obsfateentry)}
{ifeq(count(instabilities), '0', '', '<tr>
<th class="instabilities">instabilities:</th>
<td class="instabilities">{instabilities%"{instability} "|escape}</td>
--- a/mercurial/templates/spartan/map Fri Feb 23 17:57:04 2018 -0800
+++ b/mercurial/templates/spartan/map Sat Feb 24 17:49:10 2018 -0600
@@ -170,7 +170,13 @@
obsfatesuccessors = '{if(successors, ' as ')}{successors%successorlink}'
obsfateverb = '{obsfateverb(successors, markers)}'
obsfateoperations = '{if(obsfateoperations(markers), ' using {join(obsfateoperations(markers), ', ')}')}'
-obsfateentry = '{obsfateverb}{obsfateoperations}{obsfatesuccessors}'
+obsfateusers = '{if(obsfateusers(markers), ' by {join(obsfateusers(markers)%'{user|obfuscate}', ', ')}')}'
+obsfatedate = '{if(obsfatedate(markers), ' {ifeq(min(obsfatedate(markers)), max(obsfatedate(markers)), '<span class="age">{min(obsfatedate(markers))|rfc822date}</span>', 'between <span class="age">{min(obsfatedate(markers))|rfc822date}</span> and <span class="age">{max(obsfatedate(markers))|rfc822date}</span>')}')}'
+obsfateentry = '
+ <tr>
+ <th class="obsolete">obsolete:</th>
+ <td class="obsolete">{obsfateverb}{obsfateoperations}{obsfatesuccessors}{obsfateusers}{obsfatedate}</td>
+ </tr>'
filediffparent = '
<tr>
<th class="parent">parent {rev}:</th>
--- a/mercurial/templates/static/style-gitweb.css Fri Feb 23 17:57:04 2018 -0800
+++ b/mercurial/templates/static/style-gitweb.css Sat Feb 24 17:49:10 2018 -0600
@@ -29,7 +29,7 @@
div.title_text { padding:6px 0px; border: solid #d9d8d1; border-width:0px 0px 1px; }
div.log_body { padding:8px 8px 8px 150px; }
.age { white-space:nowrap; }
-span.age { position:relative; float:left; width:142px; font-style:italic; }
+a.title span.age { position:relative; float:left; width:142px; font-style:italic; }
div.log_link {
padding:0px 8px;
font-size:10px; font-family:sans-serif; font-style:normal;
--- a/mercurial/transaction.py Fri Feb 23 17:57:04 2018 -0800
+++ b/mercurial/transaction.py Sat Feb 24 17:49:10 2018 -0600
@@ -612,7 +612,7 @@
lines = fp.readlines()
if lines:
ver = lines[0][:-1]
- if ver == str(version):
+ if ver == (b'%d' % version):
for line in lines[1:]:
if line:
# Shave off the trailing newline
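
The journal is read in binary mode, so on Python 3 str(version) (a unicode string) never compares equal to the bytes read from disk; b'%d' % version keeps both sides as bytes. A short illustration (mine, requires Python 2 or 3.5+):

version = 2
ver = b'2'                                       # as read from the on-disk journal
assert (ver == str(version)) is (str is bytes)   # equal only on Python 2
assert ver == b'%d' % version                    # equal on Python 2 and 3
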
--- a/mercurial/ui.py Fri Feb 23 17:57:04 2018 -0800
+++ b/mercurial/ui.py Sat Feb 24 17:49:10 2018 -0600
@@ -45,7 +45,7 @@
if not c.isalnum())
# The config knobs that will be altered (if unset) by ui.tweakdefaults.
-tweakrc = """
+tweakrc = b"""
[ui]
# The rollback command is dangerous. As a rule, don't use it.
rollback = False
@@ -148,14 +148,10 @@
}
def _maybestrurl(maybebytes):
- if maybebytes is None:
- return None
- return pycompat.strurl(maybebytes)
+ return util.rapply(pycompat.strurl, maybebytes)
def _maybebytesurl(maybestr):
- if maybestr is None:
- return None
- return pycompat.bytesurl(maybestr)
+ return util.rapply(pycompat.bytesurl, maybestr)
class httppasswordmgrdbproxy(object):
"""Delays loading urllib2 until it's needed."""
@@ -168,18 +164,14 @@
return self._mgr
def add_password(self, realm, uris, user, passwd):
- if isinstance(uris, tuple):
- uris = tuple(_maybestrurl(u) for u in uris)
- else:
- uris = _maybestrurl(uris)
return self._get_mgr().add_password(
- _maybestrurl(realm), uris,
+ _maybestrurl(realm), _maybestrurl(uris),
_maybestrurl(user), _maybestrurl(passwd))
def find_user_password(self, realm, uri):
- return tuple(_maybebytesurl(v) for v in
- self._get_mgr().find_user_password(_maybestrurl(realm),
- _maybestrurl(uri)))
+ mgr = self._get_mgr()
+ return _maybebytesurl(mgr.find_user_password(_maybestrurl(realm),
+ _maybestrurl(uri)))
def _catchterm(*args):
raise error.SignalInterrupt
@@ -506,7 +498,7 @@
and default != itemdefault):
msg = ("specifying a mismatched default value for a registered "
"config item: '%s.%s' '%s'")
- msg %= (section, name, default)
+ msg %= (section, name, pycompat.bytestr(default))
self.develwarn(msg, 2, 'warn-config-default')
for s, n in alternates:
@@ -816,8 +808,8 @@
hint=_("use 'hg config --edit' "
'to set your username'))
if "\n" in user:
- raise error.Abort(_("username %s contains a newline\n")
- % repr(user))
+ raise error.Abort(_("username %r contains a newline\n")
+ % pycompat.bytestr(user))
return user
def shortuser(self, user):
@@ -878,6 +870,17 @@
return "".join(self._buffers.pop())
+ def canwritewithoutlabels(self):
+ '''check if write skips the label'''
+ if self._buffers and not self._bufferapplylabels:
+ return True
+ return self._colormode is None
+
+ def canbatchlabeledwrites(self):
+ '''check if write calls with labels are batchable'''
+ # Windows color printing is special, see ``write``.
+ return self._colormode != 'win32'
+
def write(self, *args, **opts):
'''write args to output
@@ -894,13 +897,17 @@
"cmdname.type" is recommended. For example, status issues
a label of "status.modified" for modified files.
'''
- if self._buffers and not opts.get(r'prompt', False):
+ if self._buffers:
if self._bufferapplylabels:
label = opts.get(r'label', '')
self._buffers[-1].extend(self.label(a, label) for a in args)
else:
self._buffers[-1].extend(args)
- elif self._colormode == 'win32':
+ else:
+ self._writenobuf(*args, **opts)
+
+ def _writenobuf(self, *args, **opts):
+ if self._colormode == 'win32':
# windows color printing is its own can of crab, defer to
# the color module and that is it.
color.win32print(self, self._write, *args, **opts)
@@ -916,8 +923,7 @@
# opencode timeblockedsection because this is a critical path
starttime = util.timer()
try:
- for a in msgs:
- self.fout.write(a)
+ self.fout.write(''.join(msgs))
except IOError as err:
raise error.StdioError(err)
finally:
@@ -1255,7 +1261,7 @@
return i
- def _readline(self, prompt=''):
+ def _readline(self):
if self._isatty(self.fin):
try:
# magically add command line editing support, where
@@ -1267,11 +1273,6 @@
except Exception:
pass
- # call write() so output goes through subclassed implementation
- # e.g. color extension on Windows
- self.write(prompt, prompt=True)
- self.flush()
-
# prompt ' ' must exist; otherwise readline may delete entire line
# - http://bugs.python.org/issue12833
with self.timeblockedsection('stdio'):
@@ -1290,8 +1291,10 @@
if not self.interactive():
self.write(msg, ' ', default or '', "\n")
return default
+ self._writenobuf(msg, label='ui.prompt')
+ self.flush()
try:
- r = self._readline(self.label(msg, 'ui.prompt'))
+ r = self._readline()
if not r:
r = default
if self.configbool('ui', 'promptecho'):
@@ -1509,11 +1512,7 @@
''.join(exconly))
else:
output = traceback.format_exception(exc[0], exc[1], exc[2])
- data = r''.join(output)
- if pycompat.ispy3:
- enc = pycompat.sysstr(encoding.encoding)
- data = data.encode(enc, errors=r'replace')
- self.write_err(data)
+ self.write_err(encoding.strtolocal(r''.join(output)))
return self.tracebackflag or force
def geteditor(self):
@@ -1621,13 +1620,15 @@
else:
curframe = inspect.currentframe()
calframe = inspect.getouterframes(curframe, 2)
- self.write_err('%s at: %s:%s (%s)\n'
- % ((msg,) + calframe[stacklevel][1:4]))
- self.log('develwarn', '%s at: %s:%s (%s)\n',
- msg, *calframe[stacklevel][1:4])
+ fname, lineno, fmsg = calframe[stacklevel][1:4]
+ fname, fmsg = pycompat.sysbytes(fname), pycompat.sysbytes(fmsg)
+ self.write_err('%s at: %s:%d (%s)\n'
+ % (msg, fname, lineno, fmsg))
+ self.log('develwarn', '%s at: %s:%d (%s)\n',
+ msg, fname, lineno, fmsg)
curframe = calframe = None # avoid cycles
- def deprecwarn(self, msg, version):
+ def deprecwarn(self, msg, version, stacklevel=2):
"""issue a deprecation warning
- msg: message explaining what is deprecated and how to upgrade,
@@ -1638,7 +1639,7 @@
return
msg += ("\n(compatibility will be dropped after Mercurial-%s,"
" update your code.)") % version
- self.develwarn(msg, stacklevel=2, config='deprec-warn')
+ self.develwarn(msg, stacklevel=stacklevel, config='deprec-warn')
def exportableenviron(self):
"""The environment variables that are safe to export, e.g. through
--- a/mercurial/upgrade.py Fri Feb 23 17:57:04 2018 -0800
+++ b/mercurial/upgrade.py Sat Feb 24 17:49:10 2018 -0600
@@ -46,7 +46,6 @@
return {
# The upgrade code does not yet support these experimental features.
# This is an artificial limitation.
- 'manifestv2',
'treemanifest',
# This was a precursor to generaldelta and was never enabled by default.
# It should (hopefully) not exist in the wild.
--- a/mercurial/url.py Fri Feb 23 17:57:04 2018 -0800
+++ b/mercurial/url.py Sat Feb 24 17:49:10 2018 -0600
@@ -71,10 +71,10 @@
u.query = None
if not self.ui.interactive():
raise error.Abort(_('http authorization required for %s') %
- util.hidepassword(str(u)))
+ util.hidepassword(bytes(u)))
self.ui.write(_("http authorization required for %s\n") %
- util.hidepassword(str(u)))
+ util.hidepassword(bytes(u)))
self.ui.write(_("realm: %s\n") % realm)
if user:
self.ui.write(_("user: %s\n") % user)
@@ -124,10 +124,9 @@
else:
self.no_list = no_list
- proxyurl = str(proxy)
+ proxyurl = bytes(proxy)
proxies = {'http': proxyurl, 'https': proxyurl}
- ui.debug('proxying through http://%s:%s\n' %
- (proxy.host, proxy.port))
+ ui.debug('proxying through %s\n' % util.hidepassword(proxyurl))
else:
proxies = {}
--- a/mercurial/urllibcompat.py Fri Feb 23 17:57:04 2018 -0800
+++ b/mercurial/urllibcompat.py Sat Feb 24 17:49:10 2018 -0600
@@ -47,6 +47,7 @@
"urlparse",
"urlunparse",
))
+ urlreq._registeralias(urllib.parse, "parse_qs", "parseqs")
urlreq._registeralias(urllib.parse, "unquote_to_bytes", "unquote")
import urllib.request
urlreq._registeraliases(urllib.request, (
@@ -157,6 +158,7 @@
"urlparse",
"urlunparse",
))
+ urlreq._registeralias(urlparse, "parse_qs", "parseqs")
urlerr._registeraliases(urllib2, (
"HTTPError",
"URLError",
--- a/mercurial/util.py Fri Feb 23 17:57:04 2018 -0800
+++ b/mercurial/util.py Sat Feb 24 17:49:10 2018 -0600
@@ -183,6 +183,39 @@
def safehasattr(thing, attr):
return getattr(thing, attr, _notset) is not _notset
+def _rapply(f, xs):
+ if xs is None:
+ # assume None means non-value of optional data
+ return xs
+ if isinstance(xs, (list, set, tuple)):
+ return type(xs)(_rapply(f, x) for x in xs)
+ if isinstance(xs, dict):
+ return type(xs)((_rapply(f, k), _rapply(f, v)) for k, v in xs.items())
+ return f(xs)
+
+def rapply(f, xs):
+ """Apply function recursively to every item preserving the data structure
+
+ >>> def f(x):
+ ... return 'f(%s)' % x
+ >>> rapply(f, None) is None
+ True
+ >>> rapply(f, 'a')
+ 'f(a)'
+ >>> rapply(f, {'a'}) == {'f(a)'}
+ True
+ >>> rapply(f, ['a', 'b', None, {'c': 'd'}, []])
+ ['f(a)', 'f(b)', None, {'f(c)': 'f(d)'}, []]
+
+ >>> xs = [object()]
+ >>> rapply(pycompat.identity, xs) is xs
+ True
+ """
+ if f is pycompat.identity:
+ # fast path mainly for py2
+ return xs
+ return _rapply(f, xs)
+
def bytesinput(fin, fout, *args, **kwargs):
sin, sout = sys.stdin, sys.stdout
try:
@@ -220,7 +253,7 @@
if _dowarn:
msg += ("\n(compatibility will be dropped after Mercurial-%s,"
" update your code.)") % version
- warnings.warn(msg, DeprecationWarning, stacklevel + 1)
+ warnings.warn(pycompat.sysstr(msg), DeprecationWarning, stacklevel + 1)
DIGESTS = {
'md5': hashlib.md5,
@@ -1947,6 +1980,35 @@
limit -= len(s)
yield s
+class cappedreader(object):
+ """A file object proxy that allows reading up to N bytes.
+
+ Given a source file object, instances of this type allow reading up to
+ N bytes from that source file object. Attempts to read past the allowed
+ limit are treated as EOF.
+
+ It is assumed that I/O is not performed on the original file object
+ in addition to I/O that is performed by this instance. If there is,
+ state tracking will get out of sync and unexpected results will ensue.
+ """
+ def __init__(self, fh, limit):
+ """Allow reading up to <limit> bytes from <fh>."""
+ self._fh = fh
+ self._left = limit
+
+ def read(self, n=-1):
+ if not self._left:
+ return b''
+
+ if n < 0:
+ n = self._left
+
+ data = self._fh.read(min(n, self._left))
+ self._left -= len(data)
+ assert self._left >= 0
+
+ return data
+
def makedate(timestamp=None):
'''Return a unix timestamp (or the current time) as a (unixtime,
offset) tuple based off the local timezone.'''
@@ -2394,7 +2456,7 @@
def uirepr(s):
# Avoid double backslash in Windows path repr()
- return repr(s).replace('\\\\', '\\')
+ return pycompat.byterepr(pycompat.bytestr(s)).replace(b'\\\\', b'\\')
# delay import of textwrap
def MBTextWrapper(**kwargs):
@@ -2684,7 +2746,7 @@
pass
try:
- return socket.getservbyname(port)
+ return socket.getservbyname(pycompat.sysstr(port))
except socket.error:
raise Abort(_("no port number associated with service '%s'") % port)
@@ -3126,7 +3188,7 @@
results.append(hook(*args))
return results
-def getstackframes(skip=0, line=' %-*s in %s\n', fileline='%s:%s', depth=0):
+def getstackframes(skip=0, line=' %-*s in %s\n', fileline='%s:%d', depth=0):
'''Yields lines for a nicely formatted stacktrace.
Skips the 'skip' last entries, then return the last 'depth' entries.
Each file+linenumber is formatted according to fileline.
@@ -3138,7 +3200,7 @@
Not be used in production code but very convenient while developing.
'''
- entries = [(fileline % (fn, ln), func)
+ entries = [(fileline % (pycompat.sysbytes(fn), ln), pycompat.sysbytes(func))
for fn, ln, func, _text in traceback.extract_stack()[:-skip - 1]
][-depth:]
if entries:
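
A usage sketch for the new cappedreader (mine; it assumes the patched mercurial.util is importable, otherwise paste the class from the hunk above): reads past the cap look like EOF, and the underlying file keeps its own position.

import io
from mercurial import util   # cappedreader is added to util.py by this patch

fh = io.BytesIO(b'0123456789')
capped = util.cappedreader(fh, 4)   # allow reading at most 4 bytes
assert capped.read(2) == b'01'
assert capped.read() == b'23'       # only the remaining budget comes back
assert capped.read() == b''         # cap reached: behaves like EOF
assert fh.read() == b'456789'       # the source file continues from byte 4
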
--- a/mercurial/verify.py Fri Feb 23 17:57:04 2018 -0800
+++ b/mercurial/verify.py Sat Feb 24 17:49:10 2018 -0600
@@ -60,6 +60,7 @@
def err(self, linkrev, msg, filename=None):
if linkrev is not None:
self.badrevs.add(linkrev)
+ linkrev = "%d" % linkrev
else:
linkrev = '?'
msg = "%s: %s" % (linkrev, msg)
@@ -455,12 +456,7 @@
if rp:
if lr is not None and ui.verbose:
ctx = lrugetctx(lr)
- found = False
- for pctx in ctx.parents():
- if rp[0] in pctx:
- found = True
- break
- if not found:
+ if not any(rp[0] in pctx for pctx in ctx.parents()):
self.warn(_("warning: copy source of '%s' not"
" in parents of %s") % (f, ctx))
fl2 = repo.file(rp[0])
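
The verify.py change above is a pure simplification: the explicit found flag and the any() generator expression are equivalent, including the short-circuit on the first match. A tiny generic sketch (mine):

parents = [{'x', 'y'}, {'z'}]
needle = 'z'

found = False                          # old shape
for pctx in parents:
    if needle in pctx:
        found = True
        break

assert found == any(needle in pctx for pctx in parents)   # new shape
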
--- a/mercurial/wireproto.py Fri Feb 23 17:57:04 2018 -0800
+++ b/mercurial/wireproto.py Sat Feb 24 17:49:10 2018 -0600
@@ -31,56 +31,24 @@
repository,
streamclone,
util,
+ wireprototypes,
)
urlerr = util.urlerr
urlreq = util.urlreq
+bytesresponse = wireprototypes.bytesresponse
+ooberror = wireprototypes.ooberror
+pushres = wireprototypes.pushres
+pusherr = wireprototypes.pusherr
+streamres = wireprototypes.streamres
+streamres_legacy = wireprototypes.streamreslegacy
+
bundle2requiredmain = _('incompatible Mercurial client; bundle2 required')
bundle2requiredhint = _('see https://www.mercurial-scm.org/wiki/'
'IncompatibleClient')
bundle2required = '%s\n(%s)\n' % (bundle2requiredmain, bundle2requiredhint)
-class abstractserverproto(object):
- """abstract class that summarizes the protocol API
-
- Used as reference and documentation.
- """
-
- def getargs(self, args):
- """return the value for arguments in <args>
-
- returns a list of values (same order as <args>)"""
- raise NotImplementedError()
-
- def getfile(self, fp):
- """write the whole content of a file into a file like object
-
- The file is in the form::
-
- (<chunk-size>\n<chunk>)+0\n
-
- chunk size is the ascii version of the int.
- """
- raise NotImplementedError()
-
- def redirect(self):
- """may setup interception for stdout and stderr
-
- See also the `restore` method."""
- raise NotImplementedError()
-
- # If the `redirect` function does install interception, the `restore`
- # function MUST be defined. If interception is not used, this function
- # MUST NOT be defined.
- #
- # left commented here on purpose
- #
- #def restore(self):
- # """reinstall previous stdout and stderr and return intercepted stdout
- # """
- # raise NotImplementedError()
-
class remoteiterbatcher(peer.iterbatcher):
def __init__(self, remote):
super(remoteiterbatcher, self).__init__()
@@ -517,58 +485,6 @@
# server side
# wire protocol command can either return a string or one of these classes.
-class streamres(object):
- """wireproto reply: binary stream
-
- The call was successful and the result is a stream.
-
- Accepts a generator containing chunks of data to be sent to the client.
-
- ``prefer_uncompressed`` indicates that the data is expected to be
- uncompressable and that the stream should therefore use the ``none``
- engine.
- """
- def __init__(self, gen=None, prefer_uncompressed=False):
- self.gen = gen
- self.prefer_uncompressed = prefer_uncompressed
-
-class streamres_legacy(object):
- """wireproto reply: uncompressed binary stream
-
- The call was successful and the result is a stream.
-
- Accepts a generator containing chunks of data to be sent to the client.
-
- Like ``streamres``, but sends an uncompressed data for "version 1" clients
- using the application/mercurial-0.1 media type.
- """
- def __init__(self, gen=None):
- self.gen = gen
-
-class pushres(object):
- """wireproto reply: success with simple integer return
-
- The call was successful and returned an integer contained in `self.res`.
- """
- def __init__(self, res):
- self.res = res
-
-class pusherr(object):
- """wireproto reply: failure
-
- The call failed. The `self.res` attribute contains the error message.
- """
- def __init__(self, res):
- self.res = res
-
-class ooberror(object):
- """wireproto reply: failure of a batch of operation
-
- Something failed during a batch call. The error message is stored in
- `self.message`.
- """
- def __init__(self, message):
- self.message = message
def getdispatchrepo(repo, proto, command):
"""Obtain the repo used for processing wire protocol commands.
@@ -625,7 +541,7 @@
return ui.configbool('server', 'bundle1')
-def supportedcompengines(ui, proto, role):
+def supportedcompengines(ui, role):
"""Obtain the list of supported compression engines for a request."""
assert role in (util.CLIENTROLE, util.SERVERROLE)
@@ -674,13 +590,81 @@
return compengines
-# list of commands
-commands = {}
+class commandentry(object):
+ """Represents a declared wire protocol command."""
+ def __init__(self, func, args=''):
+ self.func = func
+ self.args = args
+
+ def _merge(self, func, args):
+ """Merge this instance with an incoming 2-tuple.
+
+ This is called when a caller using the old 2-tuple API attempts
+ to replace an instance. The incoming values are merged with
+ data not captured by the 2-tuple and a new instance containing
+ the union of the two objects is returned.
+ """
+ return commandentry(func, args)
+
+ # Old code treats instances as 2-tuples. So expose that interface.
+ def __iter__(self):
+ yield self.func
+ yield self.args
+
+ def __getitem__(self, i):
+ if i == 0:
+ return self.func
+ elif i == 1:
+ return self.args
+ else:
+ raise IndexError('can only access elements 0 and 1')
+
+class commanddict(dict):
+ """Container for registered wire protocol commands.
+
+ It behaves like a dict. But __setitem__ is overridden to allow silent
+ coercion of values from 2-tuples for API compatibility.
+ """
+ def __setitem__(self, k, v):
+ if isinstance(v, commandentry):
+ pass
+ # Cast 2-tuples to commandentry instances.
+ elif isinstance(v, tuple):
+ if len(v) != 2:
+ raise ValueError('command tuples must have exactly 2 elements')
+
+ # It is common for extensions to wrap wire protocol commands via
+ # e.g. ``wireproto.commands[x] = (newfn, args)``. Because callers
+ # doing this aren't aware of the new API that uses objects to store
+ # command entries, we automatically merge old state with new.
+ if k in self:
+ v = self[k]._merge(v[0], v[1])
+ else:
+ v = commandentry(v[0], v[1])
+ else:
+ raise ValueError('command entries must be commandentry instances '
+ 'or 2-tuples')
+
+ return super(commanddict, self).__setitem__(k, v)
+
+ def commandavailable(self, command, proto):
+ """Determine if a command is available for the requested protocol."""
+ # For now, commands are available for all protocols. So do a simple
+ # membership test.
+ return command in self
+
+commands = commanddict()
def wireprotocommand(name, args=''):
- """decorator for wire protocol command"""
+ """Decorator to declare a wire protocol command.
+
+ ``name`` is the name of the wire protocol command being provided.
+
+ ``args`` is a space-delimited list of named arguments that the command
+ accepts. ``*`` is a special value that says to accept all arguments.
+ """
def register(func):
- commands[name] = (func, args)
+ commands[name] = commandentry(func, args)
return func
return register
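
A trimmed standalone sketch (mine) of the compatibility behaviour the new commanddict/commandentry classes provide: extensions that still assign 2-tuples get them coerced into entries, and code that unpacks an entry as a tuple keeps working.

class commandentry(object):
    def __init__(self, func, args=''):
        self.func = func
        self.args = args
    def __iter__(self):                 # old code treats entries as 2-tuples
        yield self.func
        yield self.args

class commanddict(dict):
    def __setitem__(self, k, v):
        if isinstance(v, tuple):        # coerce old-style 2-tuple assignments
            v = commandentry(v[0], v[1])
        return super(commanddict, self).__setitem__(k, v)

commands = commanddict()

def heads(repo, proto):
    return 'dummy'

commands['heads'] = (heads, '')         # old extension-style registration
func, args = commands['heads']          # old tuple-style unpacking still works
assert func is heads and args == ''
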
@@ -713,8 +697,15 @@
result = func(repo, proto)
if isinstance(result, ooberror):
return result
+
+ # For now, all batchable commands must return bytesresponse or
+ # raw bytes (for backwards compatibility).
+ assert isinstance(result, (bytesresponse, bytes))
+ if isinstance(result, bytesresponse):
+ result = result.data
res.append(escapearg(result))
- return ';'.join(res)
+
+ return bytesresponse(';'.join(res))
@wireprotocommand('between', 'pairs')
def between(repo, proto, pairs):
@@ -722,7 +713,8 @@
r = []
for b in repo.between(pairs):
r.append(encodelist(b) + "\n")
- return "".join(r)
+
+ return bytesresponse(''.join(r))
@wireprotocommand('branchmap')
def branchmap(repo, proto):
@@ -732,7 +724,8 @@
branchname = urlreq.quote(encoding.fromlocal(branch))
branchnodes = encodelist(nodes)
heads.append('%s %s' % (branchname, branchnodes))
- return '\n'.join(heads)
+
+ return bytesresponse('\n'.join(heads))
@wireprotocommand('branches', 'nodes')
def branches(repo, proto, nodes):
@@ -740,7 +733,8 @@
r = []
for b in repo.branches(nodes):
r.append(encodelist(b) + "\n")
- return "".join(r)
+
+ return bytesresponse(''.join(r))
@wireprotocommand('clonebundles', '')
def clonebundles(repo, proto):
@@ -752,7 +746,7 @@
depending on the request. e.g. you could advertise URLs for the closest
data center given the client's IP address.
"""
- return repo.vfs.tryread('clonebundles.manifest')
+ return bytesresponse(repo.vfs.tryread('clonebundles.manifest'))
wireprotocaps = ['lookup', 'changegroupsubset', 'branchmap', 'pushkey',
'known', 'getbundle', 'unbundlehash', 'batch']
@@ -784,7 +778,7 @@
caps.append('bundle2=' + urlreq.quote(capsblob))
caps.append('unbundle=%s' % ','.join(bundle2.bundlepriority))
- if proto.name == 'http':
+ if proto.name == 'http-v1':
caps.append('httpheader=%d' %
repo.ui.configint('server', 'maxhttpheaderlen'))
if repo.ui.configbool('experimental', 'httppostargs'):
@@ -794,7 +788,7 @@
# FUTURE advertise minrx and mintx after consulting config option
caps.append('httpmediatype=0.1rx,0.1tx,0.2tx')
- compengines = supportedcompengines(repo.ui, proto, util.SERVERROLE)
+ compengines = supportedcompengines(repo.ui, util.SERVERROLE)
if compengines:
comptypes = ','.join(urlreq.quote(e.wireprotosupport().name)
for e in compengines)
@@ -806,7 +800,7 @@
# `_capabilities` instead.
@wireprotocommand('capabilities')
def capabilities(repo, proto):
- return ' '.join(_capabilities(repo, proto))
+ return bytesresponse(' '.join(_capabilities(repo, proto)))
@wireprotocommand('changegroup', 'roots')
def changegroup(repo, proto, roots):
@@ -831,7 +825,8 @@
def debugwireargs(repo, proto, one, two, others):
# only accept optional args from the known set
opts = options('debugwireargs', ['three', 'four'], others)
- return repo.debugwireargs(one, two, **pycompat.strkwargs(opts))
+ return bytesresponse(repo.debugwireargs(one, two,
+ **pycompat.strkwargs(opts)))
@wireprotocommand('getbundle', '*')
def getbundle(repo, proto, others):
@@ -857,7 +852,7 @@
if not bundle1allowed(repo, 'pull'):
if not exchange.bundle2requested(opts.get('bundlecaps')):
- if proto.name == 'http':
+ if proto.name == 'http-v1':
return ooberror(bundle2required)
raise error.Abort(bundle2requiredmain,
hint=bundle2requiredhint)
@@ -883,12 +878,12 @@
except error.Abort as exc:
# cleanly forward Abort error to the client
if not exchange.bundle2requested(opts.get('bundlecaps')):
- if proto.name == 'http':
- return ooberror(str(exc) + '\n')
+ if proto.name == 'http-v1':
+ return ooberror(pycompat.bytestr(exc) + '\n')
raise # cannot do better for bundle1 + ssh
# bundle2 request expect a bundle2 reply
bundler = bundle2.bundle20(repo.ui)
- manargs = [('message', str(exc))]
+ manargs = [('message', pycompat.bytestr(exc))]
advargs = []
if exc.hint is not None:
advargs.append(('hint', exc.hint))
@@ -902,23 +897,27 @@
@wireprotocommand('heads')
def heads(repo, proto):
h = repo.heads()
- return encodelist(h) + "\n"
+ return bytesresponse(encodelist(h) + '\n')
@wireprotocommand('hello')
def hello(repo, proto):
- '''the hello command returns a set of lines describing various
- interesting things about the server, in an RFC822-like format.
- Currently the only one defined is "capabilities", which
- consists of a line in the form:
+ """Called as part of SSH handshake to obtain server info.
+
+ Returns a list of lines describing interesting things about the
+ server, in an RFC822-like format.
- capabilities: space separated list of tokens
- '''
- return "capabilities: %s\n" % (capabilities(repo, proto))
+ Currently, the only one defined is ``capabilities``, which consists of a
+ line of space separated tokens describing server abilities:
+
+ capabilities: <token0> <token1> <token2>
+ """
+ caps = capabilities(repo, proto).data
+ return bytesresponse('capabilities: %s\n' % caps)
@wireprotocommand('listkeys', 'namespace')
def listkeys(repo, proto, namespace):
d = repo.listkeys(encoding.tolocal(namespace)).items()
- return pushkeymod.encodekeys(d)
+ return bytesresponse(pushkeymod.encodekeys(d))
@wireprotocommand('lookup', 'key')
def lookup(repo, proto, key):
@@ -928,13 +927,14 @@
r = c.hex()
success = 1
except Exception as inst:
- r = str(inst)
+ r = util.forcebytestr(inst)
success = 0
- return "%d %s\n" % (success, r)
+ return bytesresponse('%d %s\n' % (success, r))
@wireprotocommand('known', 'nodes *')
def known(repo, proto, nodes, others):
- return ''.join(b and "1" or "0" for b in repo.known(decodelist(nodes)))
+ v = ''.join(b and '1' or '0' for b in repo.known(decodelist(nodes)))
+ return bytesresponse(v)
@wireprotocommand('pushkey', 'namespace key old new')
def pushkey(repo, proto, namespace, key, old, new):
@@ -950,23 +950,12 @@
else:
new = encoding.tolocal(new) # normal path
- if util.safehasattr(proto, 'restore'):
-
- proto.redirect()
+ with proto.mayberedirectstdio() as output:
+ r = repo.pushkey(encoding.tolocal(namespace), encoding.tolocal(key),
+ encoding.tolocal(old), new) or False
- try:
- r = repo.pushkey(encoding.tolocal(namespace), encoding.tolocal(key),
- encoding.tolocal(old), new) or False
- except error.Abort:
- r = False
-
- output = proto.restore()
-
- return '%s\n%s' % (int(r), output)
-
- r = repo.pushkey(encoding.tolocal(namespace), encoding.tolocal(key),
- encoding.tolocal(old), new)
- return '%s\n' % int(r)
+ output = output.getvalue() if output else ''
+ return bytesresponse('%s\n%s' % (int(r), output))
@wireprotocommand('stream_out')
def stream(repo, proto):
@@ -980,97 +969,99 @@
def unbundle(repo, proto, heads):
their_heads = decodelist(heads)
- try:
- proto.redirect()
-
- exchange.check_heads(repo, their_heads, 'preparing changes')
-
- # write bundle data to temporary file because it can be big
- fd, tempname = tempfile.mkstemp(prefix='hg-unbundle-')
- fp = os.fdopen(fd, pycompat.sysstr('wb+'))
- r = 0
+ with proto.mayberedirectstdio() as output:
try:
- proto.getfile(fp)
- fp.seek(0)
- gen = exchange.readbundle(repo.ui, fp, None)
- if (isinstance(gen, changegroupmod.cg1unpacker)
- and not bundle1allowed(repo, 'push')):
- if proto.name == 'http':
- # need to special case http because stderr do not get to
- # the http client on failed push so we need to abuse some
- # other error type to make sure the message get to the
- # user.
- return ooberror(bundle2required)
- raise error.Abort(bundle2requiredmain,
- hint=bundle2requiredhint)
+ exchange.check_heads(repo, their_heads, 'preparing changes')
- r = exchange.unbundle(repo, gen, their_heads, 'serve',
- proto._client())
- if util.safehasattr(r, 'addpart'):
- # The return looks streamable, we are in the bundle2 case and
- # should return a stream.
- return streamres_legacy(gen=r.getchunks())
- return pushres(r)
-
- finally:
- fp.close()
- os.unlink(tempname)
-
- except (error.BundleValueError, error.Abort, error.PushRaced) as exc:
- # handle non-bundle2 case first
- if not getattr(exc, 'duringunbundle2', False):
+ # write bundle data to temporary file because it can be big
+ fd, tempname = tempfile.mkstemp(prefix='hg-unbundle-')
+ fp = os.fdopen(fd, pycompat.sysstr('wb+'))
+ r = 0
try:
- raise
- except error.Abort:
- # The old code we moved used util.stderr directly.
- # We did not change it to minimise code change.
- # This need to be moved to something proper.
- # Feel free to do it.
- util.stderr.write("abort: %s\n" % exc)
- if exc.hint is not None:
- util.stderr.write("(%s)\n" % exc.hint)
- return pushres(0)
- except error.PushRaced:
- return pusherr(str(exc))
+ proto.forwardpayload(fp)
+ fp.seek(0)
+ gen = exchange.readbundle(repo.ui, fp, None)
+ if (isinstance(gen, changegroupmod.cg1unpacker)
+ and not bundle1allowed(repo, 'push')):
+ if proto.name == 'http-v1':
+                        # need to special case http because stderr does not get
+                        # to the http client on a failed push, so we need to
+                        # abuse some other error type to make sure the message
+                        # gets to the user.
+ return ooberror(bundle2required)
+ raise error.Abort(bundle2requiredmain,
+ hint=bundle2requiredhint)
- bundler = bundle2.bundle20(repo.ui)
- for out in getattr(exc, '_bundle2salvagedoutput', ()):
- bundler.addpart(out)
- try:
- try:
- raise
- except error.PushkeyFailed as exc:
- # check client caps
- remotecaps = getattr(exc, '_replycaps', None)
- if (remotecaps is not None
- and 'pushkey' not in remotecaps.get('error', ())):
- # no support remote side, fallback to Abort handler.
+ r = exchange.unbundle(repo, gen, their_heads, 'serve',
+ proto.client())
+ if util.safehasattr(r, 'addpart'):
+ # The return looks streamable, we are in the bundle2 case
+ # and should return a stream.
+ return streamres_legacy(gen=r.getchunks())
+ return pushres(r, output.getvalue() if output else '')
+
+ finally:
+ fp.close()
+ os.unlink(tempname)
+
+ except (error.BundleValueError, error.Abort, error.PushRaced) as exc:
+ # handle non-bundle2 case first
+ if not getattr(exc, 'duringunbundle2', False):
+ try:
raise
- part = bundler.newpart('error:pushkey')
- part.addparam('in-reply-to', exc.partid)
- if exc.namespace is not None:
- part.addparam('namespace', exc.namespace, mandatory=False)
- if exc.key is not None:
- part.addparam('key', exc.key, mandatory=False)
- if exc.new is not None:
- part.addparam('new', exc.new, mandatory=False)
- if exc.old is not None:
- part.addparam('old', exc.old, mandatory=False)
- if exc.ret is not None:
- part.addparam('ret', exc.ret, mandatory=False)
- except error.BundleValueError as exc:
- errpart = bundler.newpart('error:unsupportedcontent')
- if exc.parttype is not None:
- errpart.addparam('parttype', exc.parttype)
- if exc.params:
- errpart.addparam('params', '\0'.join(exc.params))
- except error.Abort as exc:
- manargs = [('message', str(exc))]
- advargs = []
- if exc.hint is not None:
- advargs.append(('hint', exc.hint))
- bundler.addpart(bundle2.bundlepart('error:abort',
- manargs, advargs))
- except error.PushRaced as exc:
- bundler.newpart('error:pushraced', [('message', str(exc))])
- return streamres_legacy(gen=bundler.getchunks())
+ except error.Abort:
+ # The old code we moved used util.stderr directly.
+ # We did not change it to minimise code change.
+                    # This needs to be moved to something proper.
+ # Feel free to do it.
+ util.stderr.write("abort: %s\n" % exc)
+ if exc.hint is not None:
+ util.stderr.write("(%s)\n" % exc.hint)
+ return pushres(0, output.getvalue() if output else '')
+ except error.PushRaced:
+ return pusherr(str(exc),
+ output.getvalue() if output else '')
+
+ bundler = bundle2.bundle20(repo.ui)
+ for out in getattr(exc, '_bundle2salvagedoutput', ()):
+ bundler.addpart(out)
+ try:
+ try:
+ raise
+ except error.PushkeyFailed as exc:
+ # check client caps
+ remotecaps = getattr(exc, '_replycaps', None)
+ if (remotecaps is not None
+ and 'pushkey' not in remotecaps.get('error', ())):
+                        # remote side lacks support; fall back to Abort handler.
+ raise
+ part = bundler.newpart('error:pushkey')
+ part.addparam('in-reply-to', exc.partid)
+ if exc.namespace is not None:
+ part.addparam('namespace', exc.namespace,
+ mandatory=False)
+ if exc.key is not None:
+ part.addparam('key', exc.key, mandatory=False)
+ if exc.new is not None:
+ part.addparam('new', exc.new, mandatory=False)
+ if exc.old is not None:
+ part.addparam('old', exc.old, mandatory=False)
+ if exc.ret is not None:
+ part.addparam('ret', exc.ret, mandatory=False)
+ except error.BundleValueError as exc:
+ errpart = bundler.newpart('error:unsupportedcontent')
+ if exc.parttype is not None:
+ errpart.addparam('parttype', exc.parttype)
+ if exc.params:
+ errpart.addparam('params', '\0'.join(exc.params))
+ except error.Abort as exc:
+ manargs = [('message', util.forcebytestr(exc))]
+ advargs = []
+ if exc.hint is not None:
+ advargs.append(('hint', exc.hint))
+ bundler.addpart(bundle2.bundlepart('error:abort',
+ manargs, advargs))
+ except error.PushRaced as exc:
+ bundler.newpart('error:pushraced',
+ [('message', util.forcebytestr(exc))])
+ return streamres_legacy(gen=bundler.getchunks())
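
The ``proto.mayberedirectstdio()`` context manager used above replaces the old
``proto.redirect()`` / ``proto.restore()`` pair from the removed code. A minimal
sketch of the calling pattern, with a hypothetical ``runcommandbody()`` standing
in for ``repo.pushkey()`` or ``exchange.unbundle()``:

    # Sketch only: HTTP handlers yield a stringio capturing ui.fout/ui.ferr,
    # SSH handlers yield None because their output already streams to the client.
    with proto.mayberedirectstdio() as output:
        result = runcommandbody()                   # hypothetical command body
    captured = output.getvalue() if output else ''
    return pushres(result, captured)
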
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/mercurial/wireprotoserver.py Sat Feb 24 17:49:10 2018 -0600
@@ -0,0 +1,605 @@
+# Copyright 21 May 2005 - (c) 2005 Jake Edge <jake@edge2.net>
+# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2 or any later version.
+
+from __future__ import absolute_import
+
+import contextlib
+import struct
+import sys
+
+from .i18n import _
+from . import (
+ encoding,
+ error,
+ hook,
+ pycompat,
+ util,
+ wireproto,
+ wireprototypes,
+)
+
+stringio = util.stringio
+
+urlerr = util.urlerr
+urlreq = util.urlreq
+
+HTTP_OK = 200
+
+HGTYPE = 'application/mercurial-0.1'
+HGTYPE2 = 'application/mercurial-0.2'
+HGERRTYPE = 'application/hg-error'
+
+# Names of the SSH protocol implementations.
+SSHV1 = 'ssh-v1'
+# This is advertised over the wire. Increment the counter at the end
+# to reflect BC breakages.
+SSHV2 = 'exp-ssh-v2-0001'
+
+def decodevaluefromheaders(req, headerprefix):
+ """Decode a long value from multiple HTTP request headers.
+
+ Returns the value as a bytes, not a str.
+ """
+ chunks = []
+ i = 1
+ prefix = headerprefix.upper().replace(r'-', r'_')
+ while True:
+ v = req.env.get(r'HTTP_%s_%d' % (prefix, i))
+ if v is None:
+ break
+ chunks.append(pycompat.bytesurl(v))
+ i += 1
+
+ return ''.join(chunks)
+
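
For illustration, the inverse operation on the client side spreads one long
value across numbered headers (``X-HgArg-1``, ``X-HgArg-2``, ...) which
``decodevaluefromheaders()`` then concatenates in order. A hedged sketch of such
a splitter (the helper name and the size limit are assumptions):

    def encodevaluetoheaders(value, headerprefix, limit=1024):
        # Produce ('X-HgArg-1', chunk1), ('X-HgArg-2', chunk2), ... pairs whose
        # concatenated values reproduce the original string.
        return [('%s-%d' % (headerprefix, i + 1), value[start:start + limit])
                for i, start in enumerate(range(0, len(value), limit))]
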
+class httpv1protocolhandler(wireprototypes.baseprotocolhandler):
+ def __init__(self, req, ui):
+ self._req = req
+ self._ui = ui
+
+ @property
+ def name(self):
+ return 'http-v1'
+
+ def getargs(self, args):
+ knownargs = self._args()
+ data = {}
+ keys = args.split()
+ for k in keys:
+ if k == '*':
+ star = {}
+ for key in knownargs.keys():
+ if key != 'cmd' and key not in keys:
+ star[key] = knownargs[key][0]
+ data['*'] = star
+ else:
+ data[k] = knownargs[k][0]
+ return [data[k] for k in keys]
+
+ def _args(self):
+ args = util.rapply(pycompat.bytesurl, self._req.form.copy())
+ postlen = int(self._req.env.get(r'HTTP_X_HGARGS_POST', 0))
+ if postlen:
+ args.update(urlreq.parseqs(
+ self._req.read(postlen), keep_blank_values=True))
+ return args
+
+ argvalue = decodevaluefromheaders(self._req, r'X-HgArg')
+ args.update(urlreq.parseqs(argvalue, keep_blank_values=True))
+ return args
+
+ def forwardpayload(self, fp):
+ if r'HTTP_CONTENT_LENGTH' in self._req.env:
+ length = int(self._req.env[r'HTTP_CONTENT_LENGTH'])
+ else:
+ length = int(self._req.env[r'CONTENT_LENGTH'])
+ # If httppostargs is used, we need to read Content-Length
+ # minus the amount that was consumed by args.
+ length -= int(self._req.env.get(r'HTTP_X_HGARGS_POST', 0))
+ for s in util.filechunkiter(self._req, limit=length):
+ fp.write(s)
+
+ @contextlib.contextmanager
+ def mayberedirectstdio(self):
+ oldout = self._ui.fout
+ olderr = self._ui.ferr
+
+ out = util.stringio()
+
+ try:
+ self._ui.fout = out
+ self._ui.ferr = out
+ yield out
+ finally:
+ self._ui.fout = oldout
+ self._ui.ferr = olderr
+
+ def client(self):
+ return 'remote:%s:%s:%s' % (
+ self._req.env.get('wsgi.url_scheme') or 'http',
+ urlreq.quote(self._req.env.get('REMOTE_HOST', '')),
+ urlreq.quote(self._req.env.get('REMOTE_USER', '')))
+
+# This method exists mostly so that extensions like remotefilelog can
+# disable a kludgey legacy method only over http. As of early 2018,
+# there are no other known users, so with any luck we can discard this
+# hook if remotefilelog becomes a first-party extension.
+def iscmd(cmd):
+ return cmd in wireproto.commands
+
+def parsehttprequest(repo, req, query):
+ """Parse the HTTP request for a wire protocol request.
+
+ If the current request appears to be a wire protocol request, this
+ function returns a dict with details about that request, including
+ an ``abstractprotocolserver`` instance suitable for handling the
+ request. Otherwise, ``None`` is returned.
+
+ ``req`` is a ``wsgirequest`` instance.
+ """
+ # HTTP version 1 wire protocol requests are denoted by a "cmd" query
+ # string parameter. If it isn't present, this isn't a wire protocol
+ # request.
+ if r'cmd' not in req.form:
+ return None
+
+ cmd = pycompat.sysbytes(req.form[r'cmd'][0])
+
+ # The "cmd" request parameter is used by both the wire protocol and hgweb.
+ # While not all wire protocol commands are available for all transports,
+ # if we see a "cmd" value that resembles a known wire protocol command, we
+ # route it to a protocol handler. This is better than routing possible
+ # wire protocol requests to hgweb because it prevents hgweb from using
+ # known wire protocol commands and it is less confusing for machine
+ # clients.
+ if not iscmd(cmd):
+ return None
+
+ proto = httpv1protocolhandler(req, repo.ui)
+
+ return {
+ 'cmd': cmd,
+ 'proto': proto,
+ 'dispatch': lambda: _callhttp(repo, req, proto, cmd),
+ 'handleerror': lambda ex: _handlehttperror(ex, req, cmd),
+ }
+
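
A WSGI caller is expected to invoke ``parsehttprequest()`` early and, when a
dict comes back, run the command through the returned callables. A hedged
sketch of such a call site (variable names are assumptions; the caught
exception type is whatever hgweb raises for protocol-level failures):

    res = parsehttprequest(repo, req, query)
    if res is not None:
        try:
            return res['dispatch']()        # ends up in _callhttp() below
        except Exception as e:              # e.g. an hgweb ErrorResponse
            return res['handleerror'](e)
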
+def _httpresponsetype(ui, req, prefer_uncompressed):
+ """Determine the appropriate response type and compression settings.
+
+ Returns a tuple of (mediatype, compengine, engineopts).
+ """
+ # Determine the response media type and compression engine based
+ # on the request parameters.
+ protocaps = decodevaluefromheaders(req, r'X-HgProto').split(' ')
+
+ if '0.2' in protocaps:
+ # All clients are expected to support uncompressed data.
+ if prefer_uncompressed:
+ return HGTYPE2, util._noopengine(), {}
+
+ # Default as defined by wire protocol spec.
+ compformats = ['zlib', 'none']
+ for cap in protocaps:
+ if cap.startswith('comp='):
+ compformats = cap[5:].split(',')
+ break
+
+ # Now find an agreed upon compression format.
+ for engine in wireproto.supportedcompengines(ui, util.SERVERROLE):
+ if engine.wireprotosupport().name in compformats:
+ opts = {}
+ level = ui.configint('server', '%slevel' % engine.name())
+ if level is not None:
+ opts['level'] = level
+
+ return HGTYPE2, engine, opts
+
+ # No mutually supported compression format. Fall back to the
+ # legacy protocol.
+
+ # Don't allow untrusted settings because disabling compression or
+ # setting a very high compression level could lead to flooding
+ # the server's network or CPU.
+ opts = {'level': ui.configint('server', 'zliblevel')}
+ return HGTYPE, util.compengines['zlib'], opts
+
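
The negotiation above keys off the ``X-HgProto-<N>`` headers: a client that
advertises ``0.2 comp=zstd,zlib,none`` gets the first engine both sides
support, otherwise the server falls back to zlib over
``application/mercurial-0.1``. The same selection logic in isolation, as a
hedged sketch (engine names and capability strings are illustrative):

    def pickcompression(protocaps, serverengines=('zstd', 'zlib', 'none')):
        if '0.2' not in protocaps:
            return 'application/mercurial-0.1', 'zlib'   # legacy media type
        wanted = ['zlib', 'none']                        # wire protocol default
        for cap in protocaps:
            if cap.startswith('comp='):
                wanted = cap[5:].split(',')
                break
        for name in serverengines:                       # server priority order
            if name in wanted:
                return 'application/mercurial-0.2', name
        return 'application/mercurial-0.1', 'zlib'

    # pickcompression(['0.2', 'comp=zstd,none'])
    # -> ('application/mercurial-0.2', 'zstd')
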
+def _callhttp(repo, req, proto, cmd):
+ def genversion2(gen, engine, engineopts):
+ # application/mercurial-0.2 always sends a payload header
+ # identifying the compression engine.
+ name = engine.wireprotosupport().name
+ assert 0 < len(name) < 256
+ yield struct.pack('B', len(name))
+ yield name
+
+ for chunk in gen:
+ yield chunk
+
+ rsp = wireproto.dispatch(repo, proto, cmd)
+
+ if not wireproto.commands.commandavailable(cmd, proto):
+ req.respond(HTTP_OK, HGERRTYPE,
+ body=_('requested wire protocol command is not available '
+ 'over HTTP'))
+ return []
+
+ if isinstance(rsp, bytes):
+ req.respond(HTTP_OK, HGTYPE, body=rsp)
+ return []
+ elif isinstance(rsp, wireprototypes.bytesresponse):
+ req.respond(HTTP_OK, HGTYPE, body=rsp.data)
+ return []
+ elif isinstance(rsp, wireprototypes.streamreslegacy):
+ gen = rsp.gen
+ req.respond(HTTP_OK, HGTYPE)
+ return gen
+ elif isinstance(rsp, wireprototypes.streamres):
+ gen = rsp.gen
+
+ # This code for compression should not be streamres specific. It
+ # is here because we only compress streamres at the moment.
+ mediatype, engine, engineopts = _httpresponsetype(
+ repo.ui, req, rsp.prefer_uncompressed)
+ gen = engine.compressstream(gen, engineopts)
+
+ if mediatype == HGTYPE2:
+ gen = genversion2(gen, engine, engineopts)
+
+ req.respond(HTTP_OK, mediatype)
+ return gen
+ elif isinstance(rsp, wireprototypes.pushres):
+ rsp = '%d\n%s' % (rsp.res, rsp.output)
+ req.respond(HTTP_OK, HGTYPE, body=rsp)
+ return []
+ elif isinstance(rsp, wireprototypes.pusherr):
+ # This is the httplib workaround documented in _handlehttperror().
+ req.drain()
+
+ rsp = '0\n%s\n' % rsp.res
+ req.respond(HTTP_OK, HGTYPE, body=rsp)
+ return []
+ elif isinstance(rsp, wireprototypes.ooberror):
+ rsp = rsp.message
+ req.respond(HTTP_OK, HGERRTYPE, body=rsp)
+ return []
+ raise error.ProgrammingError('hgweb.protocol internal failure', rsp)
+
+def _handlehttperror(e, req, cmd):
+ """Called when an ErrorResponse is raised during HTTP request processing."""
+
+ # Clients using Python's httplib are stateful: the HTTP client
+ # won't process an HTTP response until all request data is
+ # sent to the server. The intent of this code is to ensure
+ # we always read HTTP request data from the client, thus
+ # ensuring httplib transitions to a state that allows it to read
+ # the HTTP response. In other words, it helps prevent deadlocks
+ # on clients using httplib.
+
+ if (req.env[r'REQUEST_METHOD'] == r'POST' and
+ # But not if Expect: 100-continue is being used.
+ (req.env.get('HTTP_EXPECT',
+ '').lower() != '100-continue') or
+ # Or the non-httplib HTTP library is being advertised by
+ # the client.
+ req.env.get('X-HgHttp2', '')):
+ req.drain()
+ else:
+ req.headers.append((r'Connection', r'Close'))
+
+ # TODO This response body assumes the failed command was
+ # "unbundle." That assumption is not always valid.
+ req.respond(e, HGTYPE, body='0\n%s\n' % pycompat.bytestr(e))
+
+ return ''
+
+def _sshv1respondbytes(fout, value):
+ """Send a bytes response for protocol version 1."""
+ fout.write('%d\n' % len(value))
+ fout.write(value)
+ fout.flush()
+
+def _sshv1respondstream(fout, source):
+ write = fout.write
+ for chunk in source.gen:
+ write(chunk)
+ fout.flush()
+
+def _sshv1respondooberror(fout, ferr, rsp):
+ ferr.write(b'%s\n-\n' % rsp)
+ ferr.flush()
+ fout.write(b'\n')
+ fout.flush()
+
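
These three helpers define the version 1 framing: a normal response is a
decimal byte count, a newline, then exactly that many payload bytes, while
out-of-band errors go to stderr followed by a lone ``-`` line. A hedged
client-side sketch of reading one normal frame:

    def readframe(fin):
        # "<decimal length>\n" followed by exactly that many payload bytes.
        length = int(fin.readline())
        return fin.read(length)

    # For a 'hello' reply the payload is the capabilities line itself, e.g.
    # b'capabilities: lookup branchmap ...\n' (value shown here is made up).
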
+class sshv1protocolhandler(wireprototypes.baseprotocolhandler):
+    """Handler for requests serviced via version 1 of the SSH protocol."""
+ def __init__(self, ui, fin, fout):
+ self._ui = ui
+ self._fin = fin
+ self._fout = fout
+
+ @property
+ def name(self):
+ return SSHV1
+
+ def getargs(self, args):
+ data = {}
+ keys = args.split()
+ for n in xrange(len(keys)):
+ argline = self._fin.readline()[:-1]
+ arg, l = argline.split()
+ if arg not in keys:
+ raise error.Abort(_("unexpected parameter %r") % arg)
+ if arg == '*':
+ star = {}
+ for k in xrange(int(l)):
+ argline = self._fin.readline()[:-1]
+ arg, l = argline.split()
+ val = self._fin.read(int(l))
+ star[arg] = val
+ data['*'] = star
+ else:
+ val = self._fin.read(int(l))
+ data[arg] = val
+ return [data[k] for k in keys]
+
+ def forwardpayload(self, fpout):
+ # We initially send an empty response. This tells the client it is
+ # OK to start sending data. If a client sees any other response, it
+ # interprets it as an error.
+ _sshv1respondbytes(self._fout, b'')
+
+ # The file is in the form:
+ #
+ # <chunk size>\n<chunk>
+ # ...
+ # 0\n
+ count = int(self._fin.readline())
+ while count:
+ fpout.write(self._fin.read(count))
+ count = int(self._fin.readline())
+
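
The framing documented in the comment above is a sequence of
``<chunk size>\n<chunk>`` records terminated by a zero-length chunk. For
illustration, the matching sender could look like this sketch (not the actual
client code):

    def writepayload(fout, fp, chunksize=4096):
        while True:
            chunk = fp.read(chunksize)
            if not chunk:
                break
            fout.write(b'%d\n' % len(chunk))    # size line
            fout.write(chunk)                   # raw chunk bytes
        fout.write(b'0\n')                      # terminator
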
+ @contextlib.contextmanager
+ def mayberedirectstdio(self):
+ yield None
+
+ def client(self):
+ client = encoding.environ.get('SSH_CLIENT', '').split(' ', 1)[0]
+ return 'remote:ssh:' + client
+
+class sshv2protocolhandler(sshv1protocolhandler):
+ """Protocol handler for version 2 of the SSH protocol."""
+
+def _runsshserver(ui, repo, fin, fout):
+ # This function operates like a state machine of sorts. The following
+ # states are defined:
+ #
+ # protov1-serving
+ # Server is in protocol version 1 serving mode. Commands arrive on
+ # new lines. These commands are processed in this state, one command
+ # after the other.
+ #
+ # protov2-serving
+ # Server is in protocol version 2 serving mode.
+ #
+ # upgrade-initial
+ # The server is going to process an upgrade request.
+ #
+ # upgrade-v2-filter-legacy-handshake
+ # The protocol is being upgraded to version 2. The server is expecting
+ # the legacy handshake from version 1.
+ #
+ # upgrade-v2-finish
+ # The upgrade to version 2 of the protocol is imminent.
+ #
+ # shutdown
+ # The server is shutting down, possibly in reaction to a client event.
+ #
+ # And here are their transitions:
+ #
+ # protov1-serving -> shutdown
+ # When server receives an empty request or encounters another
+ # error.
+ #
+ # protov1-serving -> upgrade-initial
+ # An upgrade request line was seen.
+ #
+ # upgrade-initial -> upgrade-v2-filter-legacy-handshake
+ # Upgrade to version 2 in progress. Server is expecting to
+ # process a legacy handshake.
+ #
+ # upgrade-v2-filter-legacy-handshake -> shutdown
+ # Client did not fulfill upgrade handshake requirements.
+ #
+ # upgrade-v2-filter-legacy-handshake -> upgrade-v2-finish
+ # Client fulfilled version 2 upgrade requirements. Finishing that
+ # upgrade.
+ #
+ # upgrade-v2-finish -> protov2-serving
+ # Protocol upgrade to version 2 complete. Server can now speak protocol
+ # version 2.
+ #
+ # protov2-serving -> protov1-serving
+    #    This happens by default since protocol version 2 is the same as
+ # version 1 except for the handshake.
+
+ state = 'protov1-serving'
+ proto = sshv1protocolhandler(ui, fin, fout)
+ protoswitched = False
+
+ while True:
+ if state == 'protov1-serving':
+ # Commands are issued on new lines.
+ request = fin.readline()[:-1]
+
+ # Empty lines signal to terminate the connection.
+ if not request:
+ state = 'shutdown'
+ continue
+
+ # It looks like a protocol upgrade request. Transition state to
+ # handle it.
+ if request.startswith(b'upgrade '):
+ if protoswitched:
+ _sshv1respondooberror(fout, ui.ferr,
+ b'cannot upgrade protocols multiple '
+ b'times')
+ state = 'shutdown'
+ continue
+
+ state = 'upgrade-initial'
+ continue
+
+ available = wireproto.commands.commandavailable(request, proto)
+
+ # This command isn't available. Send an empty response and go
+ # back to waiting for a new command.
+ if not available:
+ _sshv1respondbytes(fout, b'')
+ continue
+
+ rsp = wireproto.dispatch(repo, proto, request)
+
+ if isinstance(rsp, bytes):
+ _sshv1respondbytes(fout, rsp)
+ elif isinstance(rsp, wireprototypes.bytesresponse):
+ _sshv1respondbytes(fout, rsp.data)
+ elif isinstance(rsp, wireprototypes.streamres):
+ _sshv1respondstream(fout, rsp)
+ elif isinstance(rsp, wireprototypes.streamreslegacy):
+ _sshv1respondstream(fout, rsp)
+ elif isinstance(rsp, wireprototypes.pushres):
+ _sshv1respondbytes(fout, b'')
+ _sshv1respondbytes(fout, b'%d' % rsp.res)
+ elif isinstance(rsp, wireprototypes.pusherr):
+ _sshv1respondbytes(fout, rsp.res)
+ elif isinstance(rsp, wireprototypes.ooberror):
+ _sshv1respondooberror(fout, ui.ferr, rsp.message)
+ else:
+ raise error.ProgrammingError('unhandled response type from '
+ 'wire protocol command: %s' % rsp)
+
+ # For now, protocol version 2 serving just goes back to version 1.
+ elif state == 'protov2-serving':
+ state = 'protov1-serving'
+ continue
+
+ elif state == 'upgrade-initial':
+ # We should never transition into this state if we've switched
+ # protocols.
+ assert not protoswitched
+ assert proto.name == SSHV1
+
+ # Expected: upgrade <token> <capabilities>
+ # If we get something else, the request is malformed. It could be
+ # from a future client that has altered the upgrade line content.
+ # We treat this as an unknown command.
+ try:
+ token, caps = request.split(b' ')[1:]
+ except ValueError:
+ _sshv1respondbytes(fout, b'')
+ state = 'protov1-serving'
+ continue
+
+ # Send empty response if we don't support upgrading protocols.
+ if not ui.configbool('experimental', 'sshserver.support-v2'):
+ _sshv1respondbytes(fout, b'')
+ state = 'protov1-serving'
+ continue
+
+ try:
+ caps = urlreq.parseqs(caps)
+ except ValueError:
+ _sshv1respondbytes(fout, b'')
+ state = 'protov1-serving'
+ continue
+
+            # If we don't see an upgrade request to protocol version 2, ignore
+            # the upgrade request.
+ wantedprotos = caps.get(b'proto', [b''])[0]
+ if SSHV2 not in wantedprotos:
+ _sshv1respondbytes(fout, b'')
+ state = 'protov1-serving'
+ continue
+
+ # It looks like we can honor this upgrade request to protocol 2.
+ # Filter the rest of the handshake protocol request lines.
+ state = 'upgrade-v2-filter-legacy-handshake'
+ continue
+
+ elif state == 'upgrade-v2-filter-legacy-handshake':
+ # Client should have sent legacy handshake after an ``upgrade``
+ # request. Expected lines:
+ #
+ # hello
+ # between
+ # pairs 81
+ # 0000...-0000...
+
+ ok = True
+ for line in (b'hello', b'between', b'pairs 81'):
+ request = fin.readline()[:-1]
+
+ if request != line:
+ _sshv1respondooberror(fout, ui.ferr,
+ b'malformed handshake protocol: '
+ b'missing %s' % line)
+ ok = False
+ state = 'shutdown'
+ break
+
+ if not ok:
+ continue
+
+ request = fin.read(81)
+ if request != b'%s-%s' % (b'0' * 40, b'0' * 40):
+ _sshv1respondooberror(fout, ui.ferr,
+ b'malformed handshake protocol: '
+ b'missing between argument value')
+ state = 'shutdown'
+ continue
+
+ state = 'upgrade-v2-finish'
+ continue
+
+ elif state == 'upgrade-v2-finish':
+ # Send the upgrade response.
+ fout.write(b'upgraded %s %s\n' % (token, SSHV2))
+ servercaps = wireproto.capabilities(repo, proto)
+ rsp = b'capabilities: %s' % servercaps.data
+ fout.write(b'%d\n%s\n' % (len(rsp), rsp))
+ fout.flush()
+
+ proto = sshv2protocolhandler(ui, fin, fout)
+ protoswitched = True
+
+ state = 'protov2-serving'
+ continue
+
+ elif state == 'shutdown':
+ break
+
+ else:
+ raise error.ProgrammingError('unhandled ssh server state: %s' %
+ state)
+
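
Putting the state machine together, a successful upgrade to ``exp-ssh-v2-0001``
is a single round trip: the client sends the upgrade line plus the filtered
legacy handshake, and the server answers with an ``upgraded`` line followed by
a length-prefixed capabilities blob. A hedged client-side sketch (the token is
arbitrary, and the server must have ``experimental.sshserver.support-v2``
enabled):

    def upgradetov2(stdin, stdout, token=b'someclienttoken'):
        stdin.write(b'upgrade %s proto=exp-ssh-v2-0001\n' % token)
        stdin.write(b'hello\n')                    # legacy handshake lines,
        stdin.write(b'between\n')                  # consumed by the
        stdin.write(b'pairs 81\n')                 # filter-legacy-handshake state
        stdin.write(b'%s-%s' % (b'0' * 40, b'0' * 40))
        stdin.flush()
        assert stdout.readline() == b'upgraded %s exp-ssh-v2-0001\n' % token
        size = int(stdout.readline())              # length of capabilities payload
        return stdout.read(size + 1)               # b'capabilities: ...\n'
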
+class sshserver(object):
+ def __init__(self, ui, repo):
+ self._ui = ui
+ self._repo = repo
+ self._fin = ui.fin
+ self._fout = ui.fout
+
+ hook.redirect(True)
+ ui.fout = repo.ui.fout = ui.ferr
+
+ # Prevent insertion/deletion of CRs
+ util.setbinary(self._fin)
+ util.setbinary(self._fout)
+
+ def serve_forever(self):
+ _runsshserver(self._ui, self._repo, self._fin, self._fout)
+ sys.exit(0)
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/mercurial/wireprototypes.py Sat Feb 24 17:49:10 2018 -0600
@@ -0,0 +1,117 @@
+# Copyright 2018 Gregory Szorc <gregory.szorc@gmail.com>
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2 or any later version.
+
+from __future__ import absolute_import
+
+import abc
+
+class bytesresponse(object):
+ """A wire protocol response consisting of raw bytes."""
+ def __init__(self, data):
+ self.data = data
+
+class ooberror(object):
+ """wireproto reply: failure of a batch of operation
+
+ Something failed during a batch call. The error message is stored in
+ `self.message`.
+ """
+ def __init__(self, message):
+ self.message = message
+
+class pushres(object):
+ """wireproto reply: success with simple integer return
+
+ The call was successful and returned an integer contained in `self.res`.
+ """
+ def __init__(self, res, output):
+ self.res = res
+ self.output = output
+
+class pusherr(object):
+ """wireproto reply: failure
+
+ The call failed. The `self.res` attribute contains the error message.
+ """
+ def __init__(self, res, output):
+ self.res = res
+ self.output = output
+
+class streamres(object):
+ """wireproto reply: binary stream
+
+ The call was successful and the result is a stream.
+
+ Accepts a generator containing chunks of data to be sent to the client.
+
+ ``prefer_uncompressed`` indicates that the data is expected to be
+    incompressible and that the stream should therefore use the ``none``
+ engine.
+ """
+ def __init__(self, gen=None, prefer_uncompressed=False):
+ self.gen = gen
+ self.prefer_uncompressed = prefer_uncompressed
+
+class streamreslegacy(object):
+ """wireproto reply: uncompressed binary stream
+
+ The call was successful and the result is a stream.
+
+ Accepts a generator containing chunks of data to be sent to the client.
+
+    Like ``streamres``, but sends uncompressed data for "version 1" clients
+ using the application/mercurial-0.1 media type.
+ """
+ def __init__(self, gen=None):
+ self.gen = gen
+
+class baseprotocolhandler(object):
+ """Abstract base class for wire protocol handlers.
+
+ A wire protocol handler serves as an interface between protocol command
+ handlers and the wire protocol transport layer. Protocol handlers provide
+ methods to read command arguments, redirect stdio for the duration of
+ the request, handle response types, etc.
+ """
+
+ __metaclass__ = abc.ABCMeta
+
+ @abc.abstractproperty
+ def name(self):
+ """The name of the protocol implementation.
+
+ Used for uniquely identifying the transport type.
+ """
+
+ @abc.abstractmethod
+ def getargs(self, args):
+ """return the value for arguments in <args>
+
+ returns a list of values (same order as <args>)"""
+
+ @abc.abstractmethod
+ def forwardpayload(self, fp):
+ """Read the raw payload and forward to a file.
+
+ The payload is read in full before the function returns.
+ """
+
+ @abc.abstractmethod
+ def mayberedirectstdio(self):
+ """Context manager to possibly redirect stdio.
+
+        The context manager yields a file-like object that receives
+ stdout and stderr output when the context manager is active. Or it
+ yields ``None`` if no I/O redirection occurs.
+
+ The intent of this context manager is to capture stdio output
+ so it may be sent in the response. Some transports support streaming
+ stdio to the client in real time. For these transports, stdio output
+ won't be captured.
+ """
+
+ @abc.abstractmethod
+ def client(self):
+ """Returns a string representation of this client (as bytes)."""
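
For illustration, a minimal concrete implementation of this interface, say for
a hypothetical in-memory transport used in tests, could look like the sketch
below (the class and its behaviour are assumptions, not part of this patch):

    import contextlib

    class dummyprotocolhandler(baseprotocolhandler):
        """Hypothetical handler showing the shape of the interface."""
        def __init__(self, args, payload=b''):
            self._args = args          # dict of pre-parsed command arguments
            self._payload = payload    # raw bytes the "client" uploaded

        @property
        def name(self):
            return 'dummy'

        def getargs(self, args):
            # Real handlers also expand the '*' catch-all argument.
            return [self._args.get(k, b'') for k in args.split()]

        def forwardpayload(self, fp):
            fp.write(self._payload)

        @contextlib.contextmanager
        def mayberedirectstdio(self):
            yield None                 # no stdio capture for this transport

        def client(self):
            return b'remote:dummy'
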
--- a/setup.py Fri Feb 23 17:57:04 2018 -0800
+++ b/setup.py Sat Feb 24 17:49:10 2018 -0600
@@ -812,7 +812,8 @@
'mercurial.thirdparty.attr',
'hgext', 'hgext.convert', 'hgext.fsmonitor',
'hgext.fsmonitor.pywatchman', 'hgext.highlight',
- 'hgext.largefiles', 'hgext.lfs', 'hgext.zeroconf', 'hgext3rd',
+ 'hgext.largefiles', 'hgext.lfs', 'hgext.narrow',
+ 'hgext.zeroconf', 'hgext3rd',
'hgdemandimport']
common_depends = ['mercurial/bitmanipulation.h',
--- a/tests/badserverext.py Fri Feb 23 17:57:04 2018 -0800
+++ b/tests/badserverext.py Sat Feb 24 17:49:10 2018 -0600
@@ -44,16 +44,16 @@
configtable = {}
configitem = registrar.configitem(configtable)
-configitem('badserver', 'closeafteraccept',
+configitem(b'badserver', b'closeafteraccept',
default=False,
)
-configitem('badserver', 'closeafterrecvbytes',
+configitem(b'badserver', b'closeafterrecvbytes',
default=0,
)
-configitem('badserver', 'closeaftersendbytes',
+configitem(b'badserver', b'closeaftersendbytes',
default=0,
)
-configitem('badserver', 'closebeforeaccept',
+configitem(b'badserver', b'closebeforeaccept',
default=False,
)
--- a/tests/common-pattern.py Fri Feb 23 17:57:04 2018 -0800
+++ b/tests/common-pattern.py Sat Feb 24 17:49:10 2018 -0600
@@ -69,8 +69,8 @@
br'$USUAL_BUNDLE2_CAPS_SERVER$'
),
# HTTP log dates
- (br' - - \[\d\d/.../2\d\d\d \d\d:\d\d:\d\d] "GET',
- br' - - [$LOGDATE$] "GET'
+ (br' - - \[\d\d/.../2\d\d\d \d\d:\d\d:\d\d] "(GET|PUT|POST)',
+ lambda m: br' - - [$LOGDATE$] "' + m.group(1)
),
# Windows has an extra '/' in the following lines that get globbed away:
# pushing to file:/*/$TESTTMP/r2 (glob)
--- a/tests/dummyssh Fri Feb 23 17:57:04 2018 -0800
+++ b/tests/dummyssh Sat Feb 24 17:49:10 2018 -0600
@@ -15,8 +15,8 @@
log = open("dummylog", "ab")
log.write(b"Got arguments")
for i, arg in enumerate(sys.argv[1:]):
- log.write(b" %d:%s" % (i + 1, arg))
-log.write("\n")
+ log.write(b" %d:%s" % (i + 1, arg.encode('latin1')))
+log.write(b"\n")
log.close()
hgcmd = sys.argv[2]
if os.name == 'nt':
--- a/tests/f Fri Feb 23 17:57:04 2018 -0800
+++ b/tests/f Sat Feb 24 17:49:10 2018 -0600
@@ -25,6 +25,7 @@
from __future__ import absolute_import
+import binascii
import glob
import hashlib
import optparse
@@ -58,46 +59,47 @@
facts = []
if isfile:
if opts.type:
- facts.append('file')
+ facts.append(b'file')
if any((opts.hexdump, opts.dump, opts.md5, opts.sha1, opts.sha256)):
content = open(f, 'rb').read()
elif islink:
if opts.type:
- facts.append('link')
+ facts.append(b'link')
content = os.readlink(f)
elif isstdin:
content = getattr(sys.stdin, 'buffer', sys.stdin).read()
if opts.size:
- facts.append('size=%s' % len(content))
+ facts.append(b'size=%d' % len(content))
elif isdir:
if opts.recurse or opts.type:
dirfiles = glob.glob(f + '/*')
- facts.append('directory with %s files' % len(dirfiles))
+ facts.append(b'directory with %d files' % len(dirfiles))
elif opts.type:
- facts.append('type unknown')
+ facts.append(b'type unknown')
if not isstdin:
stat = os.lstat(f)
if opts.size and not isdir:
- facts.append('size=%s' % stat.st_size)
+ facts.append(b'size=%d' % stat.st_size)
if opts.mode and not islink:
- facts.append('mode=%o' % (stat.st_mode & 0o777))
+ facts.append(b'mode=%o' % (stat.st_mode & 0o777))
if opts.links:
- facts.append('links=%s' % stat.st_nlink)
+ facts.append(b'links=%s' % stat.st_nlink)
if opts.newer:
# mtime might be in whole seconds so newer file might be same
if stat.st_mtime >= os.stat(opts.newer).st_mtime:
- facts.append('newer than %s' % opts.newer)
+ facts.append(b'newer than %s' % opts.newer)
else:
- facts.append('older than %s' % opts.newer)
+ facts.append(b'older than %s' % opts.newer)
if opts.md5 and content is not None:
h = hashlib.md5(content)
- facts.append('md5=%s' % h.hexdigest()[:opts.bytes])
+ facts.append(b'md5=%s' % binascii.hexlify(h.digest())[:opts.bytes])
if opts.sha1 and content is not None:
h = hashlib.sha1(content)
- facts.append('sha1=%s' % h.hexdigest()[:opts.bytes])
+ facts.append(b'sha1=%s' % binascii.hexlify(h.digest())[:opts.bytes])
if opts.sha256 and content is not None:
h = hashlib.sha256(content)
- facts.append('sha256=%s' % h.hexdigest()[:opts.bytes])
+ facts.append(b'sha256=%s' %
+ binascii.hexlify(h.digest())[:opts.bytes])
if isstdin:
outfile.write(b', '.join(facts) + b'\n')
elif facts:
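
The switch from ``h.hexdigest()`` to ``binascii.hexlify(h.digest())`` matters
because, on Python 3, ``hexdigest()`` returns str while these facts are joined
into bytes output. A quick illustration:

    import binascii, hashlib

    h = hashlib.sha1(b'data')
    h.hexdigest()                              # 'a17c9aaa...' (str on Python 3)
    binascii.hexlify(h.digest())               # b'a17c9aaa...' (bytes everywhere)
    b'sha1=%s' % binascii.hexlify(h.digest())  # safe to join with other bytes
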
--- a/tests/fakedirstatewritetime.py Fri Feb 23 17:57:04 2018 -0800
+++ b/tests/fakedirstatewritetime.py Sat Feb 24 17:49:10 2018 -0600
@@ -19,7 +19,7 @@
configtable = {}
configitem = registrar.configitem(configtable)
-configitem('fakedirstatewritetime', 'fakenow',
+configitem(b'fakedirstatewritetime', b'fakenow',
default=None,
)
@@ -29,7 +29,7 @@
# execute what original parsers.pack_dirstate should do actually
# for consistency
actualnow = int(now)
- for f, e in dmap.iteritems():
+ for f, e in dmap.items():
if e[0] == 'n' and e[3] == actualnow:
e = parsers.dirstatetuple(e[0], e[1], e[2], -1)
dmap[f] = e
@@ -39,7 +39,7 @@
def fakewrite(ui, func):
# fake "now" of 'pack_dirstate' only if it is invoked while 'func'
- fakenow = ui.config('fakedirstatewritetime', 'fakenow')
+ fakenow = ui.config(b'fakedirstatewritetime', b'fakenow')
if not fakenow:
# Execute original one, if fakenow isn't configured. This is
# useful to prevent subrepos from executing replaced one,
@@ -49,7 +49,7 @@
# parsing 'fakenow' in YYYYmmddHHMM format makes comparison between
# 'fakenow' value and 'touch -t YYYYmmddHHMM' argument easy
- fakenow = util.parsedate(fakenow, ['%Y%m%d%H%M'])[0]
+ fakenow = util.parsedate(fakenow, [b'%Y%m%d%H%M'])[0]
orig_pack_dirstate = parsers.pack_dirstate
orig_dirstate_getfsnow = dirstate._getfsnow
--- a/tests/fakemergerecord.py Fri Feb 23 17:57:04 2018 -0800
+++ b/tests/fakemergerecord.py Sat Feb 24 17:49:10 2018 -0600
@@ -12,15 +12,15 @@
cmdtable = {}
command = registrar.command(cmdtable)
-@command('fakemergerecord',
- [('X', 'mandatory', None, 'add a fake mandatory record'),
- ('x', 'advisory', None, 'add a fake advisory record')], '')
+@command(b'fakemergerecord',
+ [(b'X', b'mandatory', None, b'add a fake mandatory record'),
+ (b'x', b'advisory', None, b'add a fake advisory record')], '')
def fakemergerecord(ui, repo, *pats, **opts):
with repo.wlock():
ms = merge.mergestate.read(repo)
records = ms._makerecords()
- if opts.get('mandatory'):
- records.append(('X', 'mandatory record'))
- if opts.get('advisory'):
- records.append(('x', 'advisory record'))
+ if opts.get(b'mandatory'):
+ records.append((b'X', b'mandatory record'))
+ if opts.get(b'advisory'):
+ records.append((b'x', b'advisory record'))
ms._writerecords(records)
--- a/tests/fakepatchtime.py Fri Feb 23 17:57:04 2018 -0800
+++ b/tests/fakepatchtime.py Sat Feb 24 17:49:10 2018 -0600
@@ -13,24 +13,24 @@
configtable = {}
configitem = registrar.configitem(configtable)
-configitem('fakepatchtime', 'fakenow',
+configitem(b'fakepatchtime', b'fakenow',
default=None,
)
def internalpatch(orig, ui, repo, patchobj, strip,
- prefix='', files=None,
- eolmode='strict', similarity=0):
+ prefix=b'', files=None,
+ eolmode=b'strict', similarity=0):
if files is None:
files = set()
r = orig(ui, repo, patchobj, strip,
prefix=prefix, files=files,
eolmode=eolmode, similarity=similarity)
- fakenow = ui.config('fakepatchtime', 'fakenow')
+ fakenow = ui.config(b'fakepatchtime', b'fakenow')
if fakenow:
# parsing 'fakenow' in YYYYmmddHHMM format makes comparison between
# 'fakenow' value and 'touch -t YYYYmmddHHMM' argument easy
- fakenow = util.parsedate(fakenow, ['%Y%m%d%H%M'])[0]
+ fakenow = util.parsedate(fakenow, [b'%Y%m%d%H%M'])[0]
for f in files:
repo.wvfs.utime(f, (fakenow, fakenow))
--- a/tests/flagprocessorext.py Fri Feb 23 17:57:04 2018 -0800
+++ b/tests/flagprocessorext.py Sat Feb 24 17:49:10 2018 -0600
@@ -45,14 +45,14 @@
def supportedoutgoingversions(orig, repo):
versions = orig(repo)
- versions.discard('01')
- versions.discard('02')
- versions.add('03')
+ versions.discard(b'01')
+ versions.discard(b'02')
+ versions.add(b'03')
return versions
def allsupportedversions(orig, ui):
versions = orig(ui)
- versions.add('03')
+ versions.add(b'03')
return versions
def noopaddrevision(orig, self, text, transaction, link, p1, p2,
@@ -106,7 +106,7 @@
# Teach exchange to use changegroup 3
for k in exchange._bundlespeccgversions.keys():
- exchange._bundlespeccgversions[k] = '03'
+ exchange._bundlespeccgversions[k] = b'03'
# Add wrappers for addrevision, responsible to set flags depending on the
# revision data contents.
--- a/tests/generate-working-copy-states.py Fri Feb 23 17:57:04 2018 -0800
+++ b/tests/generate-working-copy-states.py Sat Feb 24 17:49:10 2018 -0600
@@ -42,12 +42,12 @@
def generatestates(maxchangesets, parentcontents):
depth = len(parentcontents)
if depth == maxchangesets + 1:
- for tracked in ('untracked', 'tracked'):
- filename = "_".join([(content is None and 'missing' or content) for
- content in parentcontents]) + "-" + tracked
+ for tracked in (b'untracked', b'tracked'):
+ filename = b"_".join([(content is None and b'missing' or content)
+ for content in parentcontents]) + b"-" + tracked
yield (filename, parentcontents)
else:
- for content in ({None, 'content' + str(depth + 1)} |
+ for content in ({None, b'content' + (b"%d" % (depth + 1))} |
set(parentcontents)):
for combination in generatestates(maxchangesets,
parentcontents + [content]):
@@ -71,7 +71,7 @@
if depth == 'wc':
# Make sure there is content so the file gets written and can be
# tracked. It will be deleted outside of this script.
- content.append((filename, states[maxchangesets] or 'TOBEDELETED'))
+ content.append((filename, states[maxchangesets] or b'TOBEDELETED'))
else:
content.append((filename, states[int(depth) - 1]))
else:
@@ -82,7 +82,7 @@
for filename, data in content:
if data is not None:
f = open(filename, 'wb')
- f.write(data + '\n')
+ f.write(data + b'\n')
f.close()
elif os.path.exists(filename):
os.remove(filename)
--- a/tests/get-with-headers.py Fri Feb 23 17:57:04 2018 -0800
+++ b/tests/get-with-headers.py Sat Feb 24 17:49:10 2018 -0600
@@ -76,7 +76,7 @@
if args.bodyfile:
bodyfh = open(args.bodyfile, 'wb')
else:
- bodyfh = sys.stdout
+ bodyfh = getattr(sys.stdout, 'buffer', sys.stdout)
# Pretty print JSON. This also has the beneficial side-effect
# of verifying emitted JSON is well-formed.
--- a/tests/logexceptions.py Fri Feb 23 17:57:04 2018 -0800
+++ b/tests/logexceptions.py Sat Feb 24 17:49:10 2018 -0600
@@ -65,6 +65,7 @@
primaryframe,
hgframe,
hgline,
+ ui.environ[b'TESTNAME'].decode('utf-8', 'replace'),
]
fh.write(b'\0'.join(p.encode('utf-8', 'replace') for p in parts))
--- a/tests/mockblackbox.py Fri Feb 23 17:57:04 2018 -0800
+++ b/tests/mockblackbox.py Sat Feb 24 17:49:10 2018 -0600
@@ -5,7 +5,7 @@
# XXX: we should probably offer a devel option to do this in blackbox directly
def getuser():
- return 'bob'
+ return b'bob'
def getpid():
return 5000
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/tests/narrow-library.sh Sat Feb 24 17:49:10 2018 -0600
@@ -0,0 +1,8 @@
+cat >> $HGRCPATH <<EOF
+[extensions]
+narrow=
+[ui]
+ssh=python "$TESTDIR/dummyssh"
+[experimental]
+changegroup3 = True
+EOF
--- a/tests/printenv.py Fri Feb 23 17:57:04 2018 -0800
+++ b/tests/printenv.py Sat Feb 24 17:49:10 2018 -0600
@@ -35,7 +35,7 @@
# variables with empty values may not exist on all platforms, filter
# them now for portability sake.
-env = [(k, v) for k, v in os.environ.iteritems()
+env = [(k, v) for k, v in os.environ.items()
if k.startswith("HG_") and v]
env.sort()
--- a/tests/run-tests.py Fri Feb 23 17:57:04 2018 -0800
+++ b/tests/run-tests.py Sat Feb 24 17:49:10 2018 -0600
@@ -120,6 +120,7 @@
}
class TestRunnerLexer(lexer.RegexLexer):
+ testpattern = r'[\w-]+\.(t|py)( \(case [\w-]+\))?'
tokens = {
'root': [
(r'^Skipped', token.Generic.Skipped, 'skipped'),
@@ -127,11 +128,11 @@
(r'^ERROR: ', token.Generic.Failed, 'failed'),
],
'skipped': [
- (r'[\w-]+\.(t|py)', token.Generic.SName),
+ (testpattern, token.Generic.SName),
(r':.*', token.Generic.Skipped),
],
'failed': [
- (r'[\w-]+\.(t|py)', token.Generic.FName),
+ (testpattern, token.Generic.FName),
(r'(:| ).*', token.Generic.Failed),
]
}
@@ -989,7 +990,12 @@
# the intermediate 'compile' step help with debugging
code = compile(source.read(), replacementfile, 'exec')
exec(code, data)
- r.extend(data.get('substitutions', ()))
+ for value in data.get('substitutions', ()):
+ if len(value) != 2:
+ msg = 'malformatted substitution in %s: %r'
+ msg %= (replacementfile, value)
+ raise ValueError(msg)
+ r.append(value)
return r
def _escapepath(self, p):
@@ -1046,6 +1052,7 @@
env['PYTHONUSERBASE'] = sysconfig.get_config_var('userbase') or ''
env['HGEMITWARNINGS'] = '1'
env['TESTTMP'] = self._testtmp
+ env['TESTNAME'] = self.name
env['HOME'] = self._testtmp
# This number should match portneeded in _getport
for port in xrange(3):
@@ -1341,7 +1348,11 @@
if os.getenv('MSYSTEM'):
script.append(b'alias pwd="pwd -W"\n')
if self._case:
- script.append(b'TESTCASE=%s\n' % shellquote(self._case))
+ if isinstance(self._case, str):
+ quoted = shellquote(self._case)
+ else:
+ quoted = shellquote(self._case.decode('utf8')).encode('utf8')
+ script.append(b'TESTCASE=%s\n' % quoted)
script.append(b'export TESTCASE\n')
n = 0
@@ -2012,10 +2023,11 @@
def loadtimes(outputdir):
times = []
try:
- with open(os.path.join(outputdir, b'.testtimes-')) as fp:
+ with open(os.path.join(outputdir, b'.testtimes')) as fp:
for line in fp:
- ts = line.split()
- times.append((ts[0], [float(t) for t in ts[1:]]))
+ m = re.match('(.*?) ([0-9. ]+)', line)
+ times.append((m.group(1),
+ [float(t) for t in m.group(2).split()]))
except IOError as err:
if err.errno != errno.ENOENT:
raise
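
The new parsing in ``loadtimes()`` is needed because test names can now carry a
case suffix containing spaces (e.g. ``test-clone.t (case sshv2)``), so splitting
the line on whitespace no longer separates the name from the timings. A quick
check of the expression used above:

    import re

    line = 'test-clone.t (case sshv2) 1.21 1.30 1.18\n'
    m = re.match('(.*?) ([0-9. ]+)', line)
    m.group(1)                                # 'test-clone.t (case sshv2)'
    [float(t) for t in m.group(2).split()]    # [1.21, 1.3, 1.18]
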
@@ -2124,13 +2136,21 @@
if self._runner.options.exceptions:
exceptions = aggregateexceptions(
os.path.join(self._runner._outputdir, b'exceptions'))
- total = sum(exceptions.values())
self.stream.writeln('Exceptions Report:')
self.stream.writeln('%d total from %d frames' %
- (total, len(exceptions)))
- for (frame, line, exc), count in exceptions.most_common():
- self.stream.writeln('%d\t%s: %s' % (count, frame, exc))
+ (exceptions['total'],
+ len(exceptions['exceptioncounts'])))
+ combined = exceptions['combined']
+ for key in sorted(combined, key=combined.get, reverse=True):
+ frame, line, exc = key
+ totalcount, testcount, leastcount, leasttest = combined[key]
+
+ self.stream.writeln('%d (%d tests)\t%s: %s (%s - %d total)'
+ % (totalcount,
+ testcount,
+ frame, exc,
+ leasttest, leastcount))
self.stream.flush()
@@ -3001,22 +3021,57 @@
p.decode("utf-8"))
def aggregateexceptions(path):
- exceptions = collections.Counter()
+ exceptioncounts = collections.Counter()
+ testsbyfailure = collections.defaultdict(set)
+ failuresbytest = collections.defaultdict(set)
for f in os.listdir(path):
with open(os.path.join(path, f), 'rb') as fh:
data = fh.read().split(b'\0')
- if len(data) != 4:
+ if len(data) != 5:
continue
- exc, mainframe, hgframe, hgline = data
+ exc, mainframe, hgframe, hgline, testname = data
exc = exc.decode('utf-8')
mainframe = mainframe.decode('utf-8')
hgframe = hgframe.decode('utf-8')
hgline = hgline.decode('utf-8')
- exceptions[(hgframe, hgline, exc)] += 1
-
- return exceptions
+ testname = testname.decode('utf-8')
+
+ key = (hgframe, hgline, exc)
+ exceptioncounts[key] += 1
+ testsbyfailure[key].add(testname)
+ failuresbytest[testname].add(key)
+
+ # Find test having fewest failures for each failure.
+ leastfailing = {}
+ for key, tests in testsbyfailure.items():
+ fewesttest = None
+ fewestcount = 99999999
+ for test in sorted(tests):
+ if len(failuresbytest[test]) < fewestcount:
+ fewesttest = test
+ fewestcount = len(failuresbytest[test])
+
+ leastfailing[key] = (fewestcount, fewesttest)
+
+ # Create a combined counter so we can sort by total occurrences and
+ # impacted tests.
+ combined = {}
+ for key in exceptioncounts:
+ combined[key] = (exceptioncounts[key],
+ len(testsbyfailure[key]),
+ leastfailing[key][0],
+ leastfailing[key][1])
+
+ return {
+ 'exceptioncounts': exceptioncounts,
+ 'total': sum(exceptioncounts.values()),
+ 'combined': combined,
+ 'leastfailing': leastfailing,
+ 'byfailure': testsbyfailure,
+ 'bytest': failuresbytest,
+ }
if __name__ == '__main__':
runner = TestRunner()
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/tests/sshprotoext.py Sat Feb 24 17:49:10 2018 -0600
@@ -0,0 +1,98 @@
+# sshprotoext.py - Extension to test behavior of SSH protocol
+#
+# Copyright 2018 Gregory Szorc <gregory.szorc@gmail.com>
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2 or any later version.
+
+# This extension replaces the SSH server started via `hg serve --stdio`.
+# The server behaves differently depending on environment variables.
+
+from __future__ import absolute_import
+
+from mercurial import (
+ error,
+ extensions,
+ registrar,
+ sshpeer,
+ wireproto,
+ wireprotoserver,
+)
+
+configtable = {}
+configitem = registrar.configitem(configtable)
+
+configitem(b'sshpeer', b'mode', default=None)
+configitem(b'sshpeer', b'handshake-mode', default=None)
+
+class bannerserver(wireprotoserver.sshserver):
+ """Server that sends a banner to stdout."""
+ def serve_forever(self):
+ for i in range(10):
+ self._fout.write(b'banner: line %d\n' % i)
+
+ super(bannerserver, self).serve_forever()
+
+class prehelloserver(wireprotoserver.sshserver):
+ """Tests behavior when connecting to <0.9.1 servers.
+
+ The ``hello`` wire protocol command was introduced in Mercurial
+ 0.9.1. Modern clients send the ``hello`` command when connecting
+ to SSH servers. This mock server tests behavior of the handshake
+ when ``hello`` is not supported.
+ """
+ def serve_forever(self):
+ l = self._fin.readline()
+ assert l == b'hello\n'
+ # Respond to unknown commands with an empty reply.
+ wireprotoserver._sshv1respondbytes(self._fout, b'')
+ l = self._fin.readline()
+ assert l == b'between\n'
+ proto = wireprotoserver.sshv1protocolhandler(self._ui, self._fin,
+ self._fout)
+ rsp = wireproto.dispatch(self._repo, proto, b'between')
+ wireprotoserver._sshv1respondbytes(self._fout, rsp.data)
+
+ super(prehelloserver, self).serve_forever()
+
+def performhandshake(orig, ui, stdin, stdout, stderr):
+ """Wrapped version of sshpeer._performhandshake to send extra commands."""
+ mode = ui.config(b'sshpeer', b'handshake-mode')
+ if mode == b'pre-no-args':
+ ui.debug(b'sending no-args command\n')
+ stdin.write(b'no-args\n')
+ stdin.flush()
+ return orig(ui, stdin, stdout, stderr)
+ elif mode == b'pre-multiple-no-args':
+ ui.debug(b'sending unknown1 command\n')
+ stdin.write(b'unknown1\n')
+ ui.debug(b'sending unknown2 command\n')
+ stdin.write(b'unknown2\n')
+ ui.debug(b'sending unknown3 command\n')
+ stdin.write(b'unknown3\n')
+ stdin.flush()
+ return orig(ui, stdin, stdout, stderr)
+ else:
+ raise error.ProgrammingError(b'unknown HANDSHAKECOMMANDMODE: %s' %
+ mode)
+
+def extsetup(ui):
+ # It's easier for tests to define the server behavior via environment
+ # variables than config options. This is because `hg serve --stdio`
+ # has to be invoked with a certain form for security reasons and
+ # `dummyssh` can't just add `--config` flags to the command line.
+ servermode = ui.environ.get(b'SSHSERVERMODE')
+
+ if servermode == b'banner':
+ wireprotoserver.sshserver = bannerserver
+ elif servermode == b'no-hello':
+ wireprotoserver.sshserver = prehelloserver
+ elif servermode:
+ raise error.ProgrammingError(b'unknown server mode: %s' % servermode)
+
+ peermode = ui.config(b'sshpeer', b'mode')
+
+ if peermode == b'extra-handshake-commands':
+ extensions.wrapfunction(sshpeer, '_performhandshake', performhandshake)
+ elif peermode:
+ raise error.ProgrammingError(b'unknown peer mode: %s' % peermode)
--- a/tests/test-abort-checkin.t Fri Feb 23 17:57:04 2018 -0800
+++ b/tests/test-abort-checkin.t Sat Feb 24 17:49:10 2018 -0600
@@ -1,9 +1,9 @@
$ cat > abortcommit.py <<EOF
> from mercurial import error
> def hook(**args):
- > raise error.Abort("no commits allowed")
+ > raise error.Abort(b"no commits allowed")
> def reposetup(ui, repo):
- > repo.ui.setconfig("hooks", "pretxncommit.nocommits", hook)
+ > repo.ui.setconfig(b"hooks", b"pretxncommit.nocommits", hook)
> EOF
$ abspath=`pwd`/abortcommit.py
--- a/tests/test-add.t Fri Feb 23 17:57:04 2018 -0800
+++ b/tests/test-add.t Sat Feb 24 17:49:10 2018 -0600
@@ -146,6 +146,13 @@
M a
? a.orig
+excluded file shouldn't be added even if it is explicitly specified
+
+ $ hg add a.orig -X '*.orig'
+ $ hg st
+ M a
+ ? a.orig
+
Forgotten file can be added back (as either clean or modified)
$ hg forget b
--- a/tests/test-alias.t Fri Feb 23 17:57:04 2018 -0800
+++ b/tests/test-alias.t Sat Feb 24 17:49:10 2018 -0600
@@ -548,12 +548,12 @@
> from mercurial import cmdutil, commands, registrar
> cmdtable = {}
> command = registrar.command(cmdtable)
- > @command('expandalias')
+ > @command(b'expandalias')
> def expandalias(ui, repo, name):
> alias = cmdutil.findcmd(name, commands.table)[1][0]
- > ui.write('%s args: %s\n' % (name, ' '.join(alias.args)))
+ > ui.write(b'%s args: %s\n' % (name, b' '.join(alias.args)))
> os.environ['COUNT'] = '2'
- > ui.write('%s args: %s (with COUNT=2)\n' % (name, ' '.join(alias.args)))
+ > ui.write(b'%s args: %s (with COUNT=2)\n' % (name, b' '.join(alias.args)))
> EOF
$ cat >> $HGRCPATH <<'EOF'
--- a/tests/test-annotate.py Fri Feb 23 17:57:04 2018 -0800
+++ b/tests/test-annotate.py Sat Feb 24 17:49:10 2018 -0600
@@ -27,7 +27,7 @@
def decorate(text, rev):
return ([annotateline(fctx=rev, lineno=i)
- for i in xrange(1, text.count(b'\n') + 1)],
+ for i in range(1, text.count(b'\n') + 1)],
text)
# Basic usage
@@ -36,17 +36,17 @@
p1ann = decorate(p1data, p1fctx)
p1ann = _annotatepair([oldann], p1fctx, p1ann, False, diffopts)
self.assertEqual(p1ann[0], [
- annotateline('old', 1),
- annotateline('old', 2),
- annotateline('p1', 3),
+ annotateline(b'old', 1),
+ annotateline(b'old', 2),
+ annotateline(b'p1', 3),
])
p2ann = decorate(p2data, p2fctx)
p2ann = _annotatepair([oldann], p2fctx, p2ann, False, diffopts)
self.assertEqual(p2ann[0], [
- annotateline('old', 1),
- annotateline('p2', 2),
- annotateline('p2', 3),
+ annotateline(b'old', 1),
+ annotateline(b'p2', 2),
+ annotateline(b'p2', 3),
])
# Test with multiple parents (note the difference caused by ordering)
@@ -55,22 +55,22 @@
childann = _annotatepair([p1ann, p2ann], childfctx, childann, False,
diffopts)
self.assertEqual(childann[0], [
- annotateline('old', 1),
- annotateline('c', 2),
- annotateline('p2', 2),
- annotateline('c', 4),
- annotateline('p2', 3),
+ annotateline(b'old', 1),
+ annotateline(b'c', 2),
+ annotateline(b'p2', 2),
+ annotateline(b'c', 4),
+ annotateline(b'p2', 3),
])
childann = decorate(childdata, childfctx)
childann = _annotatepair([p2ann, p1ann], childfctx, childann, False,
diffopts)
self.assertEqual(childann[0], [
- annotateline('old', 1),
- annotateline('c', 2),
- annotateline('p1', 3),
- annotateline('c', 4),
- annotateline('p2', 3),
+ annotateline(b'old', 1),
+ annotateline(b'c', 2),
+ annotateline(b'p1', 3),
+ annotateline(b'c', 4),
+ annotateline(b'p2', 3),
])
# Test with skipchild (note the difference caused by ordering)
@@ -79,24 +79,24 @@
childann = _annotatepair([p1ann, p2ann], childfctx, childann, True,
diffopts)
self.assertEqual(childann[0], [
- annotateline('old', 1),
- annotateline('old', 2, True),
+ annotateline(b'old', 1),
+ annotateline(b'old', 2, True),
# note that this line was carried over from earlier so it is *not*
# marked skipped
- annotateline('p2', 2),
- annotateline('p2', 2, True),
- annotateline('p2', 3),
+ annotateline(b'p2', 2),
+ annotateline(b'p2', 2, True),
+ annotateline(b'p2', 3),
])
childann = decorate(childdata, childfctx)
childann = _annotatepair([p2ann, p1ann], childfctx, childann, True,
diffopts)
self.assertEqual(childann[0], [
- annotateline('old', 1),
- annotateline('old', 2, True),
- annotateline('p1', 3),
- annotateline('p1', 3, True),
- annotateline('p2', 3),
+ annotateline(b'old', 1),
+ annotateline(b'old', 2, True),
+ annotateline(b'p1', 3),
+ annotateline(b'p1', 3, True),
+ annotateline(b'p2', 3),
])
if __name__ == '__main__':
--- a/tests/test-arbitraryfilectx.t Fri Feb 23 17:57:04 2018 -0800
+++ b/tests/test-arbitraryfilectx.t Sat Feb 24 17:49:10 2018 -0600
@@ -5,11 +5,11 @@
> from mercurial import commands, context, registrar
> cmdtable = {}
> command = registrar.command(cmdtable)
- > @command(b'eval', [], 'hg eval CMD')
+ > @command(b'eval', [], b'hg eval CMD')
> def eval_(ui, repo, *cmds, **opts):
- > cmd = " ".join(cmds)
+ > cmd = b" ".join(cmds)
> res = str(eval(cmd, globals(), locals()))
- > ui.warn("%s" % res)
+ > ui.warn(b"%s" % res)
> EOF
$ echo "[extensions]" >> $HGRCPATH
--- a/tests/test-atomictempfile.py Fri Feb 23 17:57:04 2018 -0800
+++ b/tests/test-atomictempfile.py Sat Feb 24 17:49:10 2018 -0600
@@ -7,10 +7,14 @@
import unittest
from mercurial import (
+ pycompat,
util,
)
atomictempfile = util.atomictempfile
+if pycompat.ispy3:
+ xrange = range
+
class testatomictempfile(unittest.TestCase):
def setUp(self):
self._testdir = tempfile.mkdtemp('atomictempfiletest')
--- a/tests/test-basic.t Fri Feb 23 17:57:04 2018 -0800
+++ b/tests/test-basic.t Sat Feb 24 17:49:10 2018 -0600
@@ -87,6 +87,13 @@
checking files
1 files, 1 changesets, 1 total revisions
+Repository root:
+
+ $ hg root
+ $TESTTMP/t
+ $ hg log -l1 -T '{reporoot}\n'
+ $TESTTMP/t
+
At the end...
$ cd ..
--- a/tests/test-bundle2-exchange.t Fri Feb 23 17:57:04 2018 -0800
+++ b/tests/test-bundle2-exchange.t Sat Feb 24 17:49:10 2018 -0600
@@ -1,3 +1,13 @@
+#testcases sshv1 sshv2
+
+#if sshv2
+ $ cat >> $HGRCPATH << EOF
+ > [experimental]
+ > sshpeer.advertise-v2 = true
+ > sshserver.support-v2 = true
+ > EOF
+#endif
+
Test exchange of common information using bundle2
--- a/tests/test-bundle2-pushback.t Fri Feb 23 17:57:04 2018 -0800
+++ b/tests/test-bundle2-pushback.t Sat Feb 24 17:49:10 2018 -0600
@@ -1,3 +1,13 @@
+#testcases sshv1 sshv2
+
+#if sshv2
+ $ cat >> $HGRCPATH << EOF
+ > [experimental]
+ > sshpeer.advertise-v2 = true
+ > sshserver.support-v2 = true
+ > EOF
+#endif
+
$ cat > bundle2.py << EOF
> """A small extension to test bundle2 pushback parts.
> Current bundle2 implementation doesn't provide a way to generate those
--- a/tests/test-bundle2-remote-changegroup.t Fri Feb 23 17:57:04 2018 -0800
+++ b/tests/test-bundle2-remote-changegroup.t Sat Feb 24 17:49:10 2018 -0600
@@ -1,5 +1,15 @@
#require killdaemons
+#testcases sshv1 sshv2
+
+#if sshv2
+ $ cat >> $HGRCPATH << EOF
+ > [experimental]
+ > sshpeer.advertise-v2 = true
+ > sshserver.support-v2 = true
+ > EOF
+#endif
+
Create an extension to test bundle2 remote-changegroup parts
$ cat > bundle2.py << EOF
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/tests/test-cappedreader.py Sat Feb 24 17:49:10 2018 -0600
@@ -0,0 +1,91 @@
+from __future__ import absolute_import, print_function
+
+import io
+import unittest
+
+from mercurial import (
+ util,
+)
+
+class CappedReaderTests(unittest.TestCase):
+ def testreadfull(self):
+ source = io.BytesIO(b'x' * 100)
+
+ reader = util.cappedreader(source, 10)
+ res = reader.read(10)
+ self.assertEqual(res, b'x' * 10)
+ self.assertEqual(source.tell(), 10)
+ source.seek(0)
+
+ reader = util.cappedreader(source, 15)
+ res = reader.read(16)
+ self.assertEqual(res, b'x' * 15)
+ self.assertEqual(source.tell(), 15)
+ source.seek(0)
+
+ reader = util.cappedreader(source, 100)
+ res = reader.read(100)
+ self.assertEqual(res, b'x' * 100)
+ self.assertEqual(source.tell(), 100)
+ source.seek(0)
+
+ reader = util.cappedreader(source, 50)
+ res = reader.read()
+ self.assertEqual(res, b'x' * 50)
+ self.assertEqual(source.tell(), 50)
+ source.seek(0)
+
+ def testreadnegative(self):
+ source = io.BytesIO(b'x' * 100)
+
+ reader = util.cappedreader(source, 20)
+ res = reader.read(-1)
+ self.assertEqual(res, b'x' * 20)
+ self.assertEqual(source.tell(), 20)
+ source.seek(0)
+
+ reader = util.cappedreader(source, 100)
+ res = reader.read(-1)
+ self.assertEqual(res, b'x' * 100)
+ self.assertEqual(source.tell(), 100)
+ source.seek(0)
+
+ def testreadmultiple(self):
+ source = io.BytesIO(b'x' * 100)
+
+ reader = util.cappedreader(source, 10)
+ for i in range(10):
+ res = reader.read(1)
+ self.assertEqual(res, b'x')
+ self.assertEqual(source.tell(), i + 1)
+
+ self.assertEqual(source.tell(), 10)
+ res = reader.read(1)
+ self.assertEqual(res, b'')
+ self.assertEqual(source.tell(), 10)
+ source.seek(0)
+
+ reader = util.cappedreader(source, 45)
+ for i in range(4):
+ res = reader.read(10)
+ self.assertEqual(res, b'x' * 10)
+ self.assertEqual(source.tell(), (i + 1) * 10)
+
+ res = reader.read(10)
+ self.assertEqual(res, b'x' * 5)
+ self.assertEqual(source.tell(), 45)
+
+    def testreadlimitpasteof(self):
+ source = io.BytesIO(b'x' * 100)
+
+ reader = util.cappedreader(source, 1024)
+ res = reader.read(1000)
+ self.assertEqual(res, b'x' * 100)
+ self.assertEqual(source.tell(), 100)
+ res = reader.read(1000)
+ self.assertEqual(res, b'')
+ self.assertEqual(source.tell(), 100)
+
+if __name__ == '__main__':
+ import silenttestrunner
+ silenttestrunner.main(__name__)
--- a/tests/test-check-help.t Fri Feb 23 17:57:04 2018 -0800
+++ b/tests/test-check-help.t Sat Feb 24 17:49:10 2018 -0600
@@ -10,9 +10,9 @@
> import os, msvcrt
> msvcrt.setmode(sys.stdout.fileno(), os.O_BINARY)
> topics = set()
- > topicre = re.compile(r':hg:`help ([a-z0-9\-.]+)`')
+ > topicre = re.compile(br':hg:`help ([a-z0-9\-.]+)`')
> for fname in sys.argv:
- > with open(fname) as f:
+ > with open(fname, 'rb') as f:
> topics.update(m.group(1) for m in topicre.finditer(f.read()))
> for s in sorted(topics):
> print(s)
--- a/tests/test-check-interfaces.py Fri Feb 23 17:57:04 2018 -0800
+++ b/tests/test-check-interfaces.py Sat Feb 24 17:49:10 2018 -0600
@@ -51,10 +51,6 @@
pass
# Facilitates testing sshpeer without requiring an SSH server.
-class testingsshpeer(sshpeer.sshpeer):
- def _validaterepo(self, *args, **kwargs):
- pass
-
class badpeer(httppeer.httppeer):
def __init__(self):
super(badpeer, self).__init__(uimod.ui(), 'http://localhost')
@@ -63,13 +59,20 @@
def badmethod(self):
pass
+class dummypipe(object):
+ def close(self):
+ pass
+
def main():
ui = uimod.ui()
checkobject(badpeer())
checkobject(httppeer.httppeer(ui, 'http://localhost'))
checkobject(localrepo.localpeer(dummyrepo()))
- checkobject(testingsshpeer(ui, 'ssh://localhost/foo'))
+ checkobject(sshpeer.sshv1peer(ui, 'ssh://localhost/foo', None, dummypipe(),
+ dummypipe(), None, None))
+ checkobject(sshpeer.sshv2peer(ui, 'ssh://localhost/foo', None, dummypipe(),
+ dummypipe(), None, None))
checkobject(bundlerepo.bundlepeer(dummyrepo()))
checkobject(statichttprepo.statichttppeer(dummyrepo()))
checkobject(unionrepo.unionpeer(dummyrepo()))
--- a/tests/test-clone.t Fri Feb 23 17:57:04 2018 -0800
+++ b/tests/test-clone.t Sat Feb 24 17:49:10 2018 -0600
@@ -1,3 +1,13 @@
+#testcases sshv1 sshv2
+
+#if sshv2
+ $ cat >> $HGRCPATH << EOF
+ > [experimental]
+ > sshpeer.advertise-v2 = true
+ > sshserver.support-v2 = true
+ > EOF
+#endif
+
Prepare repo a:
$ hg init a
@@ -10,7 +20,7 @@
Create a non-inlined filelog:
- $ $PYTHON -c 'file("data1", "wb").write("".join("%s\n" % x for x in range(10000)))'
+ $ $PYTHON -c 'open("data1", "wb").write(b"".join(b"%d\n" % x for x in range(10000)))'
$ for j in 0 1 2 3 4 5 6 7 8 9; do
> cat data1 >> b
> hg commit -m test
@@ -1142,12 +1152,14 @@
#if windows
$ hg clone "ssh://%26touch%20owned%20/" --debug
running sh -c "read l; read l; read l" "&touch owned " "hg -R . serve --stdio"
+ sending upgrade request: * proto=exp-ssh-v2-0001 (glob) (sshv2 !)
sending hello command
sending between command
abort: no suitable response from remote hg!
[255]
$ hg clone "ssh://example.com:%26touch%20owned%20/" --debug
running sh -c "read l; read l; read l" -p "&touch owned " example.com "hg -R . serve --stdio"
+ sending upgrade request: * proto=exp-ssh-v2-0001 (glob) (sshv2 !)
sending hello command
sending between command
abort: no suitable response from remote hg!
@@ -1155,12 +1167,14 @@
#else
$ hg clone "ssh://%3btouch%20owned%20/" --debug
running sh -c "read l; read l; read l" ';touch owned ' 'hg -R . serve --stdio'
+ sending upgrade request: * proto=exp-ssh-v2-0001 (glob) (sshv2 !)
sending hello command
sending between command
abort: no suitable response from remote hg!
[255]
$ hg clone "ssh://example.com:%3btouch%20owned%20/" --debug
running sh -c "read l; read l; read l" -p ';touch owned ' example.com 'hg -R . serve --stdio'
+ sending upgrade request: * proto=exp-ssh-v2-0001 (glob) (sshv2 !)
sending hello command
sending between command
abort: no suitable response from remote hg!
@@ -1169,6 +1183,7 @@
$ hg clone "ssh://v-alid.example.com/" --debug
running sh -c "read l; read l; read l" v-alid\.example\.com ['"]hg -R \. serve --stdio['"] (re)
+ sending upgrade request: * proto=exp-ssh-v2-0001 (glob) (sshv2 !)
sending hello command
sending between command
abort: no suitable response from remote hg!
--- a/tests/test-clonebundles.t Fri Feb 23 17:57:04 2018 -0800
+++ b/tests/test-clonebundles.t Sat Feb 24 17:49:10 2018 -0600
@@ -53,7 +53,7 @@
$ echo 'http://does.not.exist/bundle.hg' > server/.hg/clonebundles.manifest
$ hg clone http://localhost:$HGPORT 404-url
applying clone bundle from http://does.not.exist/bundle.hg
- error fetching bundle: (.* not known|No address associated with hostname) (re) (no-windows !)
+ error fetching bundle: (.* not known|(\[Errno -?\d+])? No address associated with hostname) (re) (no-windows !)
error fetching bundle: [Errno 11004] getaddrinfo failed (windows !)
abort: error applying bundle
(if this error persists, consider contacting the server operator or disable clone bundles via "--config ui.clonebundles=false")
--- a/tests/test-command-template.t Fri Feb 23 17:57:04 2018 -0800
+++ b/tests/test-command-template.t Sat Feb 24 17:49:10 2018 -0600
@@ -2232,6 +2232,10 @@
$ hg debugtemplate '{"foo/bar"|basename}|{"foo/"|basename}|{"foo"|basename}|\n'
bar||foo|
+ $ hg debugtemplate '{"foo/bar"|dirname}|{"foo/"|dirname}|{"foo"|dirname}|\n'
+ foo|foo||
+ $ hg debugtemplate '{"foo/bar"|stripdir}|{"foo/"|stripdir}|{"foo"|stripdir}|\n'
+ foo|foo|foo|
Add a dummy commit to make up for the instability of the above:
--- a/tests/test-commandserver.t Fri Feb 23 17:57:04 2018 -0800
+++ b/tests/test-commandserver.t Sat Feb 24 17:49:10 2018 -0600
@@ -411,7 +411,7 @@
... # load _phasecache._phaserevs and _phasesets
... runcommand(server, ['log', '-qr', 'draft()'])
... # create draft commits by another process
- ... for i in xrange(5, 7):
+ ... for i in range(5, 7):
... f = open('a', 'ab')
... f.seek(0, os.SEEK_END)
... f.write('a\n')
--- a/tests/test-completion.t Fri Feb 23 17:57:04 2018 -0800
+++ b/tests/test-completion.t Sat Feb 24 17:49:10 2018 -0600
@@ -102,6 +102,7 @@
debugnamecomplete
debugobsolete
debugpathcomplete
+ debugpeer
debugpickmergetool
debugpushkey
debugpvec
@@ -281,6 +282,7 @@
debugnamecomplete:
debugobsolete: flags, record-parents, rev, exclusive, index, delete, date, user, template
debugpathcomplete: full, normal, added, removed
+ debugpeer:
debugpickmergetool: rev, changedelete, include, exclude, tool
debugpushkey:
debugpvec:
--- a/tests/test-conflict.t Fri Feb 23 17:57:04 2018 -0800
+++ b/tests/test-conflict.t Sat Feb 24 17:49:10 2018 -0600
@@ -138,9 +138,9 @@
$ hg up -q --clean .
$ $PYTHON <<EOF
- > fp = open('logfile', 'w')
- > fp.write('12345678901234567890123456789012345678901234567890' +
- > '1234567890') # there are 5 more columns for 80 columns
+ > fp = open('logfile', 'wb')
+ > fp.write(b'12345678901234567890123456789012345678901234567890' +
+ > b'1234567890') # there are 5 more columns for 80 columns
>
> # 2 x 4 = 8 columns, but 3 x 4 = 12 bytes
> fp.write(u'\u3042\u3044\u3046\u3048'.encode('utf-8'))
--- a/tests/test-context-metadata.t Fri Feb 23 17:57:04 2018 -0800
+++ b/tests/test-context-metadata.t Sat Feb 24 17:49:10 2018 -0600
@@ -13,18 +13,19 @@
$ cat > metaedit.py <<EOF
> from __future__ import absolute_import
- > from mercurial import context, registrar
+ > from mercurial import context, pycompat, registrar
> cmdtable = {}
> command = registrar.command(cmdtable)
- > @command('metaedit')
+ > @command(b'metaedit')
> def metaedit(ui, repo, arg):
> # Modify commit message to "FOO"
- > with repo.wlock(), repo.lock(), repo.transaction('metaedit'):
- > old = repo['.']
- > kwargs = dict(s.split('=', 1) for s in arg.split(';'))
+ > with repo.wlock(), repo.lock(), repo.transaction(b'metaedit'):
+ > old = repo[b'.']
+ > kwargs = dict(s.split(b'=', 1) for s in arg.split(b';'))
> if 'parents' in kwargs:
- > kwargs['parents'] = kwargs['parents'].split(',')
- > new = context.metadataonlyctx(repo, old, **kwargs)
+ > kwargs[b'parents'] = kwargs[b'parents'].split(b',')
+ > new = context.metadataonlyctx(repo, old,
+ > **pycompat.strkwargs(kwargs))
> new.commit()
> EOF
$ hg --config extensions.metaedit=$TESTTMP/metaedit.py metaedit 'text=Changed'
--- a/tests/test-contrib-perf.t Fri Feb 23 17:57:04 2018 -0800
+++ b/tests/test-contrib-perf.t Sat Feb 24 17:49:10 2018 -0600
@@ -114,6 +114,7 @@
perftags (no help text available)
perftemplating
(no help text available)
+ perfunidiff benchmark a unified diff between revisions
perfvolatilesets
benchmark the computation of various volatile set
perfwalk (no help text available)
@@ -126,6 +127,8 @@
$ hg perfannotate a
$ hg perfbdiff -c 1
$ hg perfbdiff --alldata 1
+ $ hg perfunidiff -c 1
+ $ hg perfunidiff --alldata 1
$ hg perfbookmarks
$ hg perfbranchmap
$ hg perfcca
--- a/tests/test-contrib.t Fri Feb 23 17:57:04 2018 -0800
+++ b/tests/test-contrib.t Sat Feb 24 17:49:10 2018 -0600
@@ -201,7 +201,7 @@
binary file
- $ $PYTHON -c "f = file('binary-local', 'w'); f.write('\x00'); f.close()"
+ $ $PYTHON -c "f = open('binary-local', 'w'); f.write('\x00'); f.close()"
$ cat orig >> binary-local
$ $PYTHON simplemerge -p binary-local base other
warning: binary-local looks like a binary file.
--- a/tests/test-convert-git.t Fri Feb 23 17:57:04 2018 -0800
+++ b/tests/test-convert-git.t Sat Feb 24 17:49:10 2018 -0600
@@ -420,7 +420,7 @@
$ mkdir git-repo3
$ cd git-repo3
$ git init-db >/dev/null 2>/dev/null
- $ $PYTHON -c 'file("b", "wb").write("".join([chr(i) for i in range(256)])*16)'
+ $ $PYTHON -c 'import struct; open("b", "wb").write(b"".join([struct.Struct(">B").pack(i) for i in range(256)])*16)'
$ git add b
$ commit -a -m addbinary
$ cd ..
@@ -437,7 +437,7 @@
$ cd git-repo3-hg
$ hg up -C
1 files updated, 0 files merged, 0 files removed, 0 files unresolved
- $ $PYTHON -c 'print len(file("b", "rb").read())'
+ $ $PYTHON -c 'print len(open("b", "rb").read())'
4096
$ cd ..
--- a/tests/test-convert-hg-source.t Fri Feb 23 17:57:04 2018 -0800
+++ b/tests/test-convert-hg-source.t Sat Feb 24 17:49:10 2018 -0600
@@ -126,9 +126,9 @@
$ cat > rewrite.py <<EOF
> import sys
> # Interlace LF and CRLF
- > lines = [(l.rstrip() + ((i % 2) and '\n' or '\r\n'))
- > for i, l in enumerate(file(sys.argv[1]))]
- > file(sys.argv[1], 'wb').write(''.join(lines))
+ > lines = [(l.rstrip() + ((i % 2) and b'\n' or b'\r\n'))
+ > for i, l in enumerate(open(sys.argv[1], 'rb'))]
+ > open(sys.argv[1], 'wb').write(b''.join(lines))
> EOF
$ $PYTHON rewrite.py new/.hg/shamap
$ cd orig
--- a/tests/test-convert-mtn.t Fri Feb 23 17:57:04 2018 -0800
+++ b/tests/test-convert-mtn.t Sat Feb 24 17:49:10 2018 -0600
@@ -43,7 +43,7 @@
$ mkdir dir
$ echo b > dir/b
$ echo d > dir/d
- $ $PYTHON -c 'file("bin", "wb").write("a\\x00b")'
+ $ $PYTHON -c 'open("bin", "wb").write(b"a\\x00b")'
$ echo c > c
$ mtn add a dir/b dir/d c bin
mtn: adding 'a' to workspace manifest
@@ -65,7 +65,7 @@
$ echo b >> dir/b
$ mtn drop c
mtn: dropping 'c' from workspace manifest
- $ $PYTHON -c 'file("bin", "wb").write("b\\x00c")'
+ $ $PYTHON -c 'open("bin", "wb").write(b"b\\x00c")'
$ mtn ci -m update1
mtn: beginning commit on branch 'com.selenic.test'
mtn: committed revision 51d0a982464573a2a2cf5ee2c9219c652aaebeff
@@ -217,8 +217,8 @@
test large file support (> 32kB)
- >>> fp = file('large-file', 'wb')
- >>> for x in xrange(10000): fp.write('%d\n' % x)
+ >>> fp = open('large-file', 'wb')
+ >>> for x in range(10000): fp.write(b'%d\n' % x)
>>> fp.close()
$ md5sum.py large-file
5d6de8a95c3b6bf9e0ffb808ba5299c1 large-file
--- a/tests/test-convert-p4-filetypes.t Fri Feb 23 17:57:04 2018 -0800
+++ b/tests/test-convert-p4-filetypes.t Sat Feb 24 17:49:10 2018 -0600
@@ -52,7 +52,7 @@
> p4 add -t $T file_$T2
> ;;
> binary*)
- > $PYTHON -c "file('file_$T2', 'wb').write('this is $T')"
+ > $PYTHON -c "open('file_$T2', 'wb').write(b'this is $T')"
> p4 add -t $T file_$T2
> ;;
> *)
--- a/tests/test-debugcommands.t Fri Feb 23 17:57:04 2018 -0800
+++ b/tests/test-debugcommands.t Sat Feb 24 17:49:10 2018 -0600
@@ -381,3 +381,25 @@
https
stream
v2
+
+Test debugpeer
+
+ $ hg --config ui.ssh="\"$PYTHON\" \"$TESTDIR/dummyssh\"" debugpeer ssh://user@dummy/debugrevlog
+ url: ssh://user@dummy/debugrevlog
+ local: no
+ pushable: yes
+
+ $ hg --config ui.ssh="\"$PYTHON\" \"$TESTDIR/dummyssh\"" --debug debugpeer ssh://user@dummy/debugrevlog
+ running "*" "*/tests/dummyssh" 'user@dummy' 'hg -R debugrevlog serve --stdio' (glob) (no-windows !)
+ running "*" "*\tests/dummyssh" "user@dummy" "hg -R debugrevlog serve --stdio" (glob) (windows !)
+ devel-peer-request: hello
+ sending hello command
+ devel-peer-request: between
+ devel-peer-request: pairs: 81 bytes
+ sending between command
+ remote: 384
+ remote: capabilities: lookup changegroupsubset branchmap pushkey known getbundle unbundlehash batch streamreqs=generaldelta,revlogv1 $USUAL_BUNDLE2_CAPS_SERVER$ unbundle=HG10GZ,HG10BZ,HG10UN
+ remote: 1
+ url: ssh://user@dummy/debugrevlog
+ local: no
+ pushable: yes
--- a/tests/test-demandimport.py Fri Feb 23 17:57:04 2018 -0800
+++ b/tests/test-demandimport.py Sat Feb 24 17:49:10 2018 -0600
@@ -31,6 +31,27 @@
l = rsub("'<[a-z]*>'", "'<whatever>'", l)
return l
+demandimport.disable()
+os.environ['HGDEMANDIMPORT'] = 'disable'
+# this enable call should not actually enable demandimport!
+demandimport.enable()
+from mercurial import node
+print("node =", f(node))
+# now enable it for real
+del os.environ['HGDEMANDIMPORT']
+demandimport.enable()
+
+# Test access to special attributes through demandmod proxy
+from mercurial import error as errorproxy
+print("errorproxy =", f(errorproxy))
+print("errorproxy.__doc__ = %r"
+ % (' '.join(errorproxy.__doc__.split()[:3]) + ' ...'))
+print("errorproxy.__name__ = %r" % errorproxy.__name__)
+# __name__ must be accessible via __dict__ so the relative imports can be
+# resolved
+print("errorproxy.__dict__['__name__'] = %r" % errorproxy.__dict__['__name__'])
+print("errorproxy =", f(errorproxy))
+
import os
print("os =", f(os))
@@ -69,17 +90,6 @@
print("re.stderr =", f(re.stderr))
print("re =", f(re))
-# Test access to special attributes through demandmod proxy
-from mercurial import pvec as pvecproxy
-print("pvecproxy =", f(pvecproxy))
-print("pvecproxy.__doc__ = %r"
- % (' '.join(pvecproxy.__doc__.split()[:3]) + ' ...'))
-print("pvecproxy.__name__ = %r" % pvecproxy.__name__)
-# __name__ must be accessible via __dict__ so the relative imports can be
-# resolved
-print("pvecproxy.__dict__['__name__'] = %r" % pvecproxy.__dict__['__name__'])
-print("pvecproxy =", f(pvecproxy))
-
import contextlib
print("contextlib =", f(contextlib))
try:
@@ -97,10 +107,3 @@
print("__import__('contextlib', ..., ['unknownattr']) =", f(contextlibimp))
print("hasattr(contextlibimp, 'unknownattr') =",
util.safehasattr(contextlibimp, 'unknownattr'))
-
-demandimport.disable()
-os.environ['HGDEMANDIMPORT'] = 'disable'
-# this enable call should not actually enable demandimport!
-demandimport.enable()
-from mercurial import node
-print("node =", f(node))
--- a/tests/test-demandimport.py.out Fri Feb 23 17:57:04 2018 -0800
+++ b/tests/test-demandimport.py.out Sat Feb 24 17:49:10 2018 -0600
@@ -1,3 +1,9 @@
+node = <module 'mercurial.node' from '?'>
+errorproxy = <unloaded module 'error'>
+errorproxy.__doc__ = 'Mercurial exceptions. This ...'
+errorproxy.__name__ = 'mercurial.error'
+errorproxy.__dict__['__name__'] = 'mercurial.error'
+errorproxy = <proxied module 'error'>
os = <unloaded module 'os'>
os.system = <built-in function system>
os = <module 'os' from '?'>
@@ -18,13 +24,7 @@
re = <unloaded module 'sys'>
re.stderr = <open file '<whatever>', mode 'w' at 0x?>
re = <proxied module 'sys'>
-pvecproxy = <unloaded module 'pvec'>
-pvecproxy.__doc__ = 'A "pvec" is ...'
-pvecproxy.__name__ = 'mercurial.pvec'
-pvecproxy.__dict__['__name__'] = 'mercurial.pvec'
-pvecproxy = <proxied module 'pvec'>
contextlib = <unloaded module 'contextlib'>
contextlib.unknownattr = ImportError: cannot import name unknownattr
__import__('contextlib', ..., ['unknownattr']) = <module 'contextlib' from '?'>
hasattr(contextlibimp, 'unknownattr') = False
-node = <module 'mercurial.node' from '?'>
--- a/tests/test-devel-warnings.t Fri Feb 23 17:57:04 2018 -0800
+++ b/tests/test-devel-warnings.t Sat Feb 24 17:49:10 2018 -0600
@@ -17,7 +17,7 @@
>
> @command(b'buggytransaction', [], '')
> def buggylocking(ui, repo):
- > tr = repo.transaction('buggy')
+ > tr = repo.transaction(b'buggy')
> # make sure we rollback the transaction as we don't want to rely on the__del__
> tr.release()
>
@@ -26,8 +26,8 @@
> """check that reentrance is fine"""
> wl = repo.wlock()
> lo = repo.lock()
- > tr = repo.transaction('proper')
- > tr2 = repo.transaction('proper')
+ > tr = repo.transaction(b'proper')
+ > tr2 = repo.transaction(b'proper')
> lo2 = repo.lock()
> wl2 = repo.wlock()
> wl2.release()
@@ -46,34 +46,34 @@
>
> @command(b'no-wlock-write', [], '')
> def nowlockwrite(ui, repo):
- > with repo.vfs(b'branch', 'a'):
+ > with repo.vfs(b'branch', b'a'):
> pass
>
> @command(b'no-lock-write', [], '')
> def nolockwrite(ui, repo):
- > with repo.svfs(b'fncache', 'a'):
+ > with repo.svfs(b'fncache', b'a'):
> pass
>
> @command(b'stripintr', [], '')
> def stripintr(ui, repo):
> lo = repo.lock()
- > tr = repo.transaction('foobar')
+ > tr = repo.transaction(b'foobar')
> try:
- > repair.strip(repo.ui, repo, [repo['.'].node()])
+ > repair.strip(repo.ui, repo, [repo[b'.'].node()])
> finally:
> lo.release()
> @command(b'oldanddeprecated', [], '')
> def oldanddeprecated(ui, repo):
> """test deprecation warning API"""
> def foobar(ui):
- > ui.deprecwarn('foorbar is deprecated, go shopping', '42.1337')
+ > ui.deprecwarn(b'foorbar is deprecated, go shopping', b'42.1337')
> foobar(ui)
> @command(b'nouiwarning', [], '')
> def nouiwarning(ui, repo):
- > util.nouideprecwarn('this is a test', '13.37')
+ > util.nouideprecwarn(b'this is a test', b'13.37')
> @command(b'programmingerror', [], '')
> def programmingerror(ui, repo):
- > raise error.ProgrammingError('something went wrong', hint='try again')
+ > raise error.ProgrammingError(b'something went wrong', hint=b'try again')
> EOF
$ cat << EOF >> $HGRCPATH
@@ -331,7 +331,7 @@
$ hg nouiwarning
$TESTTMP/buggylocking.py:*: DeprecationWarning: this is a test (glob)
(compatibility will be dropped after Mercurial-13.37, update your code.)
- util.nouideprecwarn('this is a test', '13.37')
+ util.nouideprecwarn(b'this is a test', b'13.37')
(disabled outside of test run)
@@ -350,25 +350,25 @@
> configtable = {}
> configitem = registrar.configitem(configtable)
>
- > configitem('test', 'some', default='foo')
- > configitem('test', 'dynamic', default=configitems.dynamicdefault)
- > configitem('test', 'callable', default=list)
+ > configitem(b'test', b'some', default=b'foo')
+ > configitem(b'test', b'dynamic', default=configitems.dynamicdefault)
+ > configitem(b'test', b'callable', default=list)
> # overwrite a core config
- > configitem('ui', 'quiet', default=False)
- > configitem('ui', 'interactive', default=None)
+ > configitem(b'ui', b'quiet', default=False)
+ > configitem(b'ui', b'interactive', default=None)
>
> @command(b'buggyconfig')
> def cmdbuggyconfig(ui, repo):
- > repo.ui.config('ui', 'quiet', True)
- > repo.ui.config('ui', 'interactive', False)
- > repo.ui.config('test', 'some', 'bar')
- > repo.ui.config('test', 'some', 'foo')
- > repo.ui.config('test', 'dynamic', 'some-required-default')
- > repo.ui.config('test', 'dynamic')
- > repo.ui.config('test', 'callable', [])
- > repo.ui.config('test', 'callable', 'foo')
- > repo.ui.config('test', 'unregistered')
- > repo.ui.config('unregistered', 'unregistered')
+ > repo.ui.config(b'ui', b'quiet', True)
+ > repo.ui.config(b'ui', b'interactive', False)
+ > repo.ui.config(b'test', b'some', b'bar')
+ > repo.ui.config(b'test', b'some', b'foo')
+ > repo.ui.config(b'test', b'dynamic', b'some-required-default')
+ > repo.ui.config(b'test', b'dynamic')
+ > repo.ui.config(b'test', b'callable', [])
+ > repo.ui.config(b'test', b'callable', b'foo')
+ > repo.ui.config(b'test', b'unregistered')
+ > repo.ui.config(b'unregistered', b'unregistered')
> EOF
$ hg --config "extensions.buggyconfig=${TESTTMP}/buggyconfig.py" buggyconfig
--- a/tests/test-diff-binary-file.t Fri Feb 23 17:57:04 2018 -0800
+++ b/tests/test-diff-binary-file.t Sat Feb 24 17:49:10 2018 -0600
@@ -81,7 +81,7 @@
$ cat > writebin.py <<EOF
> import sys
> path = sys.argv[1]
- > open(path, 'wb').write('\x00\x01\x02\x03')
+ > open(path, 'wb').write(b'\x00\x01\x02\x03')
> EOF
$ $PYTHON writebin.py binfile.bin
$ hg add binfile.bin
--- a/tests/test-dispatch.py Fri Feb 23 17:57:04 2018 -0800
+++ b/tests/test-dispatch.py Sat Feb 24 17:49:10 2018 -0600
@@ -9,27 +9,27 @@
Prints command and result value, but does not handle quoting.
"""
- print("running: %s" % (cmd,))
+ print(b"running: %s" % (cmd,))
req = dispatch.request(cmd.split())
result = dispatch.dispatch(req)
- print("result: %r" % (result,))
+ print(b"result: %r" % (result,))
-testdispatch("init test1")
+testdispatch(b"init test1")
os.chdir('test1')
# create file 'foo', add and commit
f = open('foo', 'wb')
-f.write('foo\n')
+f.write(b'foo\n')
f.close()
-testdispatch("add foo")
-testdispatch("commit -m commit1 -d 2000-01-01 foo")
+testdispatch(b"add foo")
+testdispatch(b"commit -m commit1 -d 2000-01-01 foo")
# append to file 'foo' and commit
f = open('foo', 'ab')
-f.write('bar\n')
+f.write(b'bar\n')
f.close()
-testdispatch("commit -m commit2 -d 2000-01-02 foo")
+testdispatch(b"commit -m commit2 -d 2000-01-02 foo")
# check 88803a69b24 (fancyopts modified command table)
-testdispatch("log -r 0")
-testdispatch("log -r tip")
+testdispatch(b"log -r 0")
+testdispatch(b"log -r tip")
--- a/tests/test-encoding-align.t Fri Feb 23 17:57:04 2018 -0800
+++ b/tests/test-encoding-align.t Sat Feb 24 17:49:10 2018 -0600
@@ -6,16 +6,16 @@
$ cd t
$ $PYTHON << EOF
> # (byte, width) = (6, 4)
- > s = "\xe7\x9f\xad\xe5\x90\x8d"
+ > s = b"\xe7\x9f\xad\xe5\x90\x8d"
> # (byte, width) = (7, 7): odd width is good for alignment test
- > m = "MIDDLE_"
+ > m = b"MIDDLE_"
> # (byte, width) = (18, 12)
- > l = "\xe9\x95\xb7\xe3\x81\x84\xe9\x95\xb7\xe3\x81\x84\xe5\x90\x8d\xe5\x89\x8d"
- > f = file('s', 'w'); f.write(s); f.close()
- > f = file('m', 'w'); f.write(m); f.close()
- > f = file('l', 'w'); f.write(l); f.close()
+ > l = b"\xe9\x95\xb7\xe3\x81\x84\xe9\x95\xb7\xe3\x81\x84\xe5\x90\x8d\xe5\x89\x8d"
+ > f = open('s', 'wb'); f.write(s); f.close()
+ > f = open('m', 'wb'); f.write(m); f.close()
+ > f = open('l', 'wb'); f.write(l); f.close()
> # instant extension to show list of options
- > f = file('showoptlist.py', 'w'); f.write("""# encoding: utf-8
+ > f = open('showoptlist.py', 'wb'); f.write(b"""# encoding: utf-8
> from mercurial import registrar
> cmdtable = {}
> command = registrar.command(cmdtable)
--- a/tests/test-encoding.t Fri Feb 23 17:57:04 2018 -0800
+++ b/tests/test-encoding.t Sat Feb 24 17:49:10 2018 -0600
@@ -15,9 +15,9 @@
$ hg co
1 files updated, 0 files merged, 0 files removed, 0 files unresolved
$ $PYTHON << EOF
- > f = file('latin-1', 'w'); f.write("latin-1 e' encoded: \xe9"); f.close()
- > f = file('utf-8', 'w'); f.write("utf-8 e' encoded: \xc3\xa9"); f.close()
- > f = file('latin-1-tag', 'w'); f.write("\xe9"); f.close()
+ > f = open('latin-1', 'wb'); f.write(b"latin-1 e' encoded: \xe9"); f.close()
+ > f = open('utf-8', 'wb'); f.write(b"utf-8 e' encoded: \xc3\xa9"); f.close()
+ > f = open('latin-1-tag', 'wb'); f.write(b"\xe9"); f.close()
> EOF
should fail with encoding error
--- a/tests/test-eol.t Fri Feb 23 17:57:04 2018 -0800
+++ b/tests/test-eol.t Sat Feb 24 17:49:10 2018 -0600
@@ -17,12 +17,12 @@
> msvcrt.setmode(sys.stdout.fileno(), os.O_BINARY)
> except ImportError:
> pass
- > (old, new) = sys.argv[1] == 'LF' and ('\n', '\r\n') or ('\r\n', '\n')
+ > (old, new) = sys.argv[1] == 'LF' and (b'\n', b'\r\n') or (b'\r\n', b'\n')
> print("%% switching encoding from %r to %r" % (old, new))
> for path in sys.argv[2:]:
- > data = file(path, 'rb').read()
+ > data = open(path, 'rb').read()
> data = data.replace(old, new)
- > file(path, 'wb').write(data)
+ > open(path, 'wb').write(data)
> EOF
$ seteol () {
--- a/tests/test-export.t Fri Feb 23 17:57:04 2018 -0800
+++ b/tests/test-export.t Sat Feb 24 17:49:10 2018 -0600
@@ -186,6 +186,12 @@
exporting patch:
____________0123456789_______ABCDEFGHIJKLMNOPQRSTUVWXYZ______abcdefghijklmnopqrstuvwxyz____.patch
+Invalid pattern in file name:
+
+ $ hg export -o '%x.patch' tip
+ abort: invalid format spec '%x' in output filename
+ [255]
+
Catch exporting unknown revisions (especially empty revsets, see issue3353)
$ hg export
--- a/tests/test-extdiff.t Fri Feb 23 17:57:04 2018 -0800
+++ b/tests/test-extdiff.t Sat Feb 24 17:49:10 2018 -0600
@@ -252,8 +252,8 @@
> #!$PYTHON
> import time
> time.sleep(1) # avoid unchanged-timestamp problems
- > file('a/a', 'ab').write('edited\n')
- > file('a/b', 'ab').write('edited\n')
+ > open('a/a', 'ab').write(b'edited\n')
+ > open('a/b', 'ab').write(b'edited\n')
> EOT
#if execbit
@@ -424,7 +424,8 @@
Test handling of non-ASCII paths in generated docstrings (issue5301)
- >>> open("u", "w").write("\xa5\xa5")
+ >>> with open("u", "wb") as f:
+ ... n = f.write(b"\xa5\xa5")
$ U=`cat u`
$ HGPLAIN=1 hg --config hgext.extdiff= --config extdiff.cmd.td=hi help -k xyzzy
--- a/tests/test-extension.t Fri Feb 23 17:57:04 2018 -0800
+++ b/tests/test-extension.t Sat Feb 24 17:49:10 2018 -0600
@@ -1707,8 +1707,8 @@
> test_unicode_default_value = $TESTTMP/test_unicode_default_value.py
> EOF
$ hg -R $TESTTMP/opt-unicode-default dummy
- *** failed to import extension test_unicode_default_value from $TESTTMP/test_unicode_default_value.py: option 'dummy.opt' has a unicode default value
- *** (change the dummy.opt default value to a non-unicode string)
+ *** failed to import extension test_unicode_default_value from $TESTTMP/test_unicode_default_value.py: unicode u'value' found in cmdtable.dummy
+ *** (use b'' to make it byte string)
hg: unknown command 'dummy'
(did you mean summary?)
[255]
--- a/tests/test-filecache.py Fri Feb 23 17:57:04 2018 -0800
+++ b/tests/test-filecache.py Sat Feb 24 17:49:10 2018 -0600
@@ -11,11 +11,15 @@
extensions,
hg,
localrepo,
+ pycompat,
ui as uimod,
util,
vfs as vfsmod,
)
+if pycompat.ispy3:
+ xrange = range
+
class fakerepo(object):
def __init__(self):
self._filecache = {}
--- a/tests/test-fileset.t Fri Feb 23 17:57:04 2018 -0800
+++ b/tests/test-fileset.t Sat Feb 24 17:49:10 2018 -0600
@@ -180,7 +180,7 @@
Test files properties
- >>> file('bin', 'wb').write('\0a')
+ >>> open('bin', 'wb').write(b'\0a')
$ fileset 'binary()'
$ fileset 'binary() and unknown()'
bin
@@ -219,8 +219,8 @@
$ hg --config ui.portablefilenames=ignore add con.xml
#endif
- >>> file('1k', 'wb').write(' '*1024)
- >>> file('2k', 'wb').write(' '*2048)
+ >>> open('1k', 'wb').write(b' '*1024)
+ >>> open('2k', 'wb').write(b' '*2048)
$ hg add 1k 2k
$ fileset 'size("bar")'
hg: parse error: couldn't parse size: bar
--- a/tests/test-glog.t Fri Feb 23 17:57:04 2018 -0800
+++ b/tests/test-glog.t Sat Feb 24 17:49:10 2018 -0600
@@ -87,21 +87,22 @@
> cmdutil,
> commands,
> extensions,
+ > logcmdutil,
> revsetlang,
> smartset,
> )
>
> def logrevset(repo, pats, opts):
- > revs = cmdutil._logrevs(repo, opts)
+ > revs = logcmdutil._initialrevs(repo, opts)
> if not revs:
> return None
- > match, pats, slowpath = cmdutil._makelogmatcher(repo, revs, pats, opts)
- > return cmdutil._makelogrevset(repo, match, pats, slowpath, opts)
+ > match, pats, slowpath = logcmdutil._makematcher(repo, revs, pats, opts)
+ > return logcmdutil._makerevset(repo, match, pats, slowpath, opts)
>
> def uisetup(ui):
> def printrevset(orig, repo, pats, opts):
> revs, filematcher = orig(repo, pats, opts)
- > if opts.get('print_revset'):
+ > if opts.get(b'print_revset'):
> expr = logrevset(repo, pats, opts)
> if expr:
> tree = revsetlang.parse(expr)
@@ -109,15 +110,15 @@
> else:
> tree = []
> ui = repo.ui
- > ui.write('%r\n' % (opts.get('rev', []),))
- > ui.write(revsetlang.prettyformat(tree) + '\n')
- > ui.write(smartset.prettyformat(revs) + '\n')
+ > ui.write(b'%r\n' % (opts.get(b'rev', []),))
+ > ui.write(revsetlang.prettyformat(tree) + b'\n')
+ > ui.write(smartset.prettyformat(revs) + b'\n')
> revs = smartset.baseset() # display no revisions
> return revs, filematcher
- > extensions.wrapfunction(cmdutil, 'getlogrevs', printrevset)
- > aliases, entry = cmdutil.findcmd('log', commands.table)
- > entry[1].append(('', 'print-revset', False,
- > 'print generated revset and exit (DEPRECATED)'))
+ > extensions.wrapfunction(logcmdutil, 'getrevs', printrevset)
+ > aliases, entry = cmdutil.findcmd(b'log', commands.table)
+ > entry[1].append((b'', b'print-revset', False,
+ > b'print generated revset and exit (DEPRECATED)'))
> EOF
$ echo "[extensions]" >> $HGRCPATH
@@ -2420,7 +2421,7 @@
|
~
-node template with changeset_printer:
+node template with changesetprinter:
$ hg log -Gqr 5:7 --config ui.graphnodetemplate='"{rev}"'
7 7:02dbb8e276b8
@@ -2432,7 +2433,7 @@
|
~
-node template with changeset_templater (shared cache variable):
+node template with changesettemplater (shared cache variable):
$ hg log -Gr 5:7 -T '{latesttag % "{rev} {tag}+{distance}"}\n' \
> --config ui.graphnodetemplate='{ifeq(latesttagdistance, 0, "#", graphnode)}'
--- a/tests/test-help.t Fri Feb 23 17:57:04 2018 -0800
+++ b/tests/test-help.t Sat Feb 24 17:49:10 2018 -0600
@@ -274,6 +274,7 @@
purge command to delete untracked files from the working
directory
relink recreates hardlinks between repository clones
+ remotenames showing remotebookmarks and remotebranches in UI
schemes extend schemes with shortcuts to repository swarms
share share a common history between several working directories
shelve save and restore changes to the working directory
@@ -282,6 +283,11 @@
win32mbcs allow the use of MBCS paths with problematic encodings
zeroconf discover and advertise repositories on the local network
+Verify that deprecated extensions are included if --verbose:
+
+ $ hg -v help extensions | grep children
+ children command to display child changesets (DEPRECATED)
+
Verify that extension keywords appear in help templates
$ hg help --config extensions.transplant= templating|grep transplant > /dev/null
@@ -948,6 +954,7 @@
debugoptEXP (no help text available)
debugpathcomplete
complete part or all of a tracked path
+ debugpeer establish a connection to a peer repository
debugpickmergetool
examine which merge tool is chosen for specified file
debugpushkey access the pushkey key/value protocol
@@ -1492,6 +1499,8 @@
Extensions:
clonebundles advertise pre-generated bundles to seed clones
+ narrow create clones which fetch history data for subset of files
+ (EXPERIMENTAL)
prefixedname matched against word "clone"
relink recreates hardlinks between repository clones
@@ -3387,6 +3396,70 @@
</html>
+ $ get-with-headers.py 127.0.0.1:$HGPORT "help/unknowntopic"
+ 404 Not Found
+
+ <!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.1//EN" "http://www.w3.org/TR/xhtml11/DTD/xhtml11.dtd">
+ <html xmlns="http://www.w3.org/1999/xhtml" xml:lang="en-US">
+ <head>
+ <link rel="icon" href="/static/hgicon.png" type="image/png" />
+ <meta name="robots" content="index, nofollow" />
+ <link rel="stylesheet" href="/static/style-paper.css" type="text/css" />
+ <script type="text/javascript" src="/static/mercurial.js"></script>
+
+ <title>test: error</title>
+ </head>
+ <body>
+
+ <div class="container">
+ <div class="menu">
+ <div class="logo">
+ <a href="https://mercurial-scm.org/">
+ <img src="/static/hglogo.png" width=75 height=90 border=0 alt="mercurial" /></a>
+ </div>
+ <ul>
+ <li><a href="/shortlog">log</a></li>
+ <li><a href="/graph">graph</a></li>
+ <li><a href="/tags">tags</a></li>
+ <li><a href="/bookmarks">bookmarks</a></li>
+ <li><a href="/branches">branches</a></li>
+ </ul>
+ <ul>
+ <li><a href="/help">help</a></li>
+ </ul>
+ </div>
+
+ <div class="main">
+
+ <h2 class="breadcrumb"><a href="/">Mercurial</a> </h2>
+ <h3>error</h3>
+
+
+ <form class="search" action="/log">
+
+ <p><input name="rev" id="search1" type="text" size="30" value="" /></p>
+ <div id="hint">Find changesets by keywords (author, files, the commit message), revision
+ number or hash, or <a href="/help/revsets">revset expression</a>.</div>
+ </form>
+
+ <div class="description">
+ <p>
+ An error occurred while processing your request:
+ </p>
+ <p>
+ Not Found
+ </p>
+ </div>
+ </div>
+ </div>
+
+
+
+ </body>
+ </html>
+
+ [1]
+
$ killdaemons.py
#endif
--- a/tests/test-hgweb-auth.py Fri Feb 23 17:57:04 2018 -0800
+++ b/tests/test-hgweb-auth.py Sat Feb 24 17:49:10 2018 -0600
@@ -19,7 +19,7 @@
def writeauth(items):
ui = origui.copy()
- for name, value in items.iteritems():
+ for name, value in items.items():
ui.setconfig('auth', name, value)
return ui
@@ -36,7 +36,7 @@
for name in ('.username', '.password'):
if (p + name) not in auth:
auth[p + name] = p
- auth = dict((k, v) for k, v in auth.iteritems() if v is not None)
+ auth = dict((k, v) for k, v in auth.items() if v is not None)
ui = writeauth(auth)
--- a/tests/test-hgweb.t Fri Feb 23 17:57:04 2018 -0800
+++ b/tests/test-hgweb.t Sat Feb 24 17:49:10 2018 -0600
@@ -333,14 +333,14 @@
Test the access/error files are opened in append mode
- $ $PYTHON -c "print len(file('access.log').readlines()), 'log lines written'"
+ $ $PYTHON -c "print len(open('access.log', 'rb').readlines()), 'log lines written'"
14 log lines written
static file
$ get-with-headers.py --twice localhost:$HGPORT 'static/style-gitweb.css' - date etag server
200 Script output follows
- content-length: 9118
+ content-length: 9126
content-type: text/css
body { font-family: sans-serif; font-size: 12px; border:solid #d9d8d1; border-width:1px; margin:10px; background: white; color: black; }
@@ -374,7 +374,7 @@
div.title_text { padding:6px 0px; border: solid #d9d8d1; border-width:0px 0px 1px; }
div.log_body { padding:8px 8px 8px 150px; }
.age { white-space:nowrap; }
- span.age { position:relative; float:left; width:142px; font-style:italic; }
+ a.title span.age { position:relative; float:left; width:142px; font-style:italic; }
div.log_link {
padding:0px 8px;
font-size:10px; font-family:sans-serif; font-style:normal;
--- a/tests/test-histedit-arguments.t Fri Feb 23 17:57:04 2018 -0800
+++ b/tests/test-histedit-arguments.t Sat Feb 24 17:49:10 2018 -0600
@@ -280,9 +280,9 @@
--------------------------------------------------------------------
$ $PYTHON <<EOF
- > fp = open('logfile', 'w')
- > fp.write('12345678901234567890123456789012345678901234567890' +
- > '12345') # there are 5 more columns for 80 columns
+ > fp = open('logfile', 'wb')
+ > fp.write(b'12345678901234567890123456789012345678901234567890' +
+ > b'12345') # there are 5 more columns for 80 columns
>
> # 2 x 4 = 8 columns, but 3 x 4 = 12 bytes
> fp.write(u'\u3042\u3044\u3046\u3048'.encode('utf-8'))
--- a/tests/test-histedit-fold.t Fri Feb 23 17:57:04 2018 -0800
+++ b/tests/test-histedit-fold.t Sat Feb 24 17:49:10 2018 -0600
@@ -154,9 +154,9 @@
> from mercurial import util
> def abortfolding(ui, repo, hooktype, **kwargs):
> ctx = repo[kwargs.get('node')]
- > if set(ctx.files()) == {'c', 'd', 'f'}:
+ > if set(ctx.files()) == {b'c', b'd', b'f'}:
> return True # abort folding commit only
- > ui.warn('allow non-folding commit\\n')
+ > ui.warn(b'allow non-folding commit\\n')
> EOF
$ cat > .hg/hgrc <<EOF
> [hooks]
--- a/tests/test-hook.t Fri Feb 23 17:57:04 2018 -0800
+++ b/tests/test-hook.t Sat Feb 24 17:49:10 2018 -0600
@@ -417,9 +417,9 @@
> def printargs(ui, args):
> a = list(args.items())
> a.sort()
- > ui.write('hook args:\n')
+ > ui.write(b'hook args:\n')
> for k, v in a:
- > ui.write(' %s %s\n' % (k, v))
+ > ui.write(b' %s %s\n' % (k, v))
>
> def passhook(ui, repo, **args):
> printargs(ui, args)
@@ -432,19 +432,19 @@
> pass
>
> def raisehook(**args):
- > raise LocalException('exception from hook')
+ > raise LocalException(b'exception from hook')
>
> def aborthook(**args):
- > raise error.Abort('raise abort from hook')
+ > raise error.Abort(b'raise abort from hook')
>
> def brokenhook(**args):
> return 1 + {}
>
> def verbosehook(ui, **args):
- > ui.note('verbose output from hook\n')
+ > ui.note(b'verbose output from hook\n')
>
> def printtags(ui, repo, **args):
- > ui.write('%s\n' % sorted(repo.tags()))
+ > ui.write(b'%s\n' % sorted(repo.tags()))
>
> class container:
> unreachable = 1
@@ -667,7 +667,7 @@
$ cd hooks
$ cat > testhooks.py <<EOF
> def testhook(ui, **args):
- > ui.write('hook works\n')
+ > ui.write(b'hook works\n')
> EOF
$ echo '[hooks]' > ../repo/.hg/hgrc
$ echo "pre-commit.test = python:`pwd`/testhooks.py:testhook" >> ../repo/.hg/hgrc
@@ -886,7 +886,7 @@
> def uisetup(ui):
> class untrustedui(ui.__class__):
> def _trusted(self, fp, f):
- > if util.normpath(fp.name).endswith('untrusted/.hg/hgrc'):
+ > if util.normpath(fp.name).endswith(b'untrusted/.hg/hgrc'):
> return False
> return super(untrustedui, self)._trusted(fp, f)
> ui.__class__ = untrustedui
--- a/tests/test-http-branchmap.t Fri Feb 23 17:57:04 2018 -0800
+++ b/tests/test-http-branchmap.t Sat Feb 24 17:49:10 2018 -0600
@@ -68,22 +68,22 @@
> self._file = stdout
>
> def write(self, data):
- > if data == '47\n':
+ > if data == b'47\n':
> # latin1 encoding is one %xx (3 bytes) shorter
- > data = '44\n'
- > elif data.startswith('%C3%A6 '):
+ > data = b'44\n'
+ > elif data.startswith(b'%C3%A6 '):
> # translate to latin1 encoding
- > data = '%%E6 %s' % data[7:]
+ > data = b'%%E6 %s' % data[7:]
> self._file.write(data)
>
> def __getattr__(self, name):
> return getattr(self._file, name)
>
- > sys.stdout = StdoutWrapper(sys.stdout)
- > sys.stderr = StdoutWrapper(sys.stderr)
+ > sys.stdout = StdoutWrapper(getattr(sys.stdout, 'buffer', sys.stdout))
+ > sys.stderr = StdoutWrapper(getattr(sys.stderr, 'buffer', sys.stderr))
>
> myui = ui.ui.load()
- > repo = hg.repository(myui, 'a')
+ > repo = hg.repository(myui, b'a')
> commands.serve(myui, repo, stdio=True, cmdserver=False)
> EOF
$ echo baz >> b/foo
--- a/tests/test-http-bundle1.t Fri Feb 23 17:57:04 2018 -0800
+++ b/tests/test-http-bundle1.t Sat Feb 24 17:49:10 2018 -0600
@@ -68,7 +68,7 @@
$ cat > $TESTTMP/removesupportedformat.py << EOF
> from mercurial import localrepo
> def extsetup(ui):
- > localrepo.localrepository.supportedformats.remove('generaldelta')
+ > localrepo.localrepository.supportedformats.remove(b'generaldelta')
> EOF
$ hg clone --config extensions.rsf=$TESTTMP/removesupportedformat.py --stream http://localhost:$HGPORT/ copy3
@@ -181,7 +181,8 @@
> if not auth:
> raise common.ErrorResponse(common.HTTP_UNAUTHORIZED, 'who',
> [('WWW-Authenticate', 'Basic Realm="mercurial"')])
- > if base64.b64decode(auth.split()[1]).split(':', 1) != ['user', 'pass']:
+ > if base64.b64decode(auth.split()[1]).split(b':', 1) != [b'user',
+ > b'pass']:
> raise common.ErrorResponse(common.HTTP_FORBIDDEN, 'no')
> def extsetup():
> common.permhooks.insert(0, perform_authentication)
--- a/tests/test-impexp-branch.t Fri Feb 23 17:57:04 2018 -0800
+++ b/tests/test-impexp-branch.t Sat Feb 24 17:49:10 2018 -0600
@@ -74,9 +74,9 @@
$ hg strip --no-backup .
1 files updated, 0 files merged, 0 files removed, 0 files unresolved
>>> import re
- >>> p = file('../r1.patch', 'rb').read()
+ >>> p = open('../r1.patch', 'rb').read()
>>> p = re.sub(r'Parent\s+', 'Parent ', p)
- >>> file('../r1-ws.patch', 'wb').write(p)
+ >>> open('../r1-ws.patch', 'wb').write(p)
$ hg import --exact ../r1-ws.patch
applying ../r1-ws.patch
--- a/tests/test-import-bypass.t Fri Feb 23 17:57:04 2018 -0800
+++ b/tests/test-import-bypass.t Sat Feb 24 17:49:10 2018 -0600
@@ -227,7 +227,7 @@
(this also tests that editor is not invoked for '--bypass', if the
commit message is explicitly specified, regardless of '--edit')
- $ $PYTHON -c 'file("a", "wb").write("a\r\n")'
+ $ $PYTHON -c 'open("a", "wb").write(b"a\r\n")'
$ hg ci -m makeacrlf
$ HGEDITOR=cat hg import -m 'should fail because of eol' --edit --bypass ../test.diff
applying ../test.diff
--- a/tests/test-import-context.t Fri Feb 23 17:57:04 2018 -0800
+++ b/tests/test-import-context.t Sat Feb 24 17:49:10 2018 -0600
@@ -7,7 +7,7 @@
> lasteol = sys.argv[2] == '1'
> patterns = sys.argv[3:]
>
- > fp = file(path, 'wb')
+ > fp = open(path, 'wb')
> for i, pattern in enumerate(patterns):
> count = int(pattern[0:-1])
> char = pattern[-1] + '\n'
@@ -19,7 +19,7 @@
> EOF
$ cat > cat.py <<EOF
> import sys
- > sys.stdout.write(repr(file(sys.argv[1], 'rb').read()) + '\n')
+ > sys.stdout.write(repr(open(sys.argv[1], 'rb').read()) + '\n')
> EOF
Initialize the test repository
--- a/tests/test-import-eol.t Fri Feb 23 17:57:04 2018 -0800
+++ b/tests/test-import-eol.t Sat Feb 24 17:49:10 2018 -0600
@@ -1,21 +1,21 @@
$ cat > makepatch.py <<EOF
- > f = file('eol.diff', 'wb')
+ > f = open('eol.diff', 'wb')
> w = f.write
- > w('test message\n')
- > w('diff --git a/a b/a\n')
- > w('--- a/a\n')
- > w('+++ b/a\n')
- > w('@@ -1,5 +1,5 @@\n')
- > w(' a\n')
- > w('-bbb\r\n')
- > w('+yyyy\r\n')
- > w(' cc\r\n')
- > w(' \n')
- > w(' d\n')
- > w('-e\n')
- > w('\ No newline at end of file\n')
- > w('+z\r\n')
- > w('\ No newline at end of file\r\n')
+ > w(b'test message\n')
+ > w(b'diff --git a/a b/a\n')
+ > w(b'--- a/a\n')
+ > w(b'+++ b/a\n')
+ > w(b'@@ -1,5 +1,5 @@\n')
+ > w(b' a\n')
+ > w(b'-bbb\r\n')
+ > w(b'+yyyy\r\n')
+ > w(b' cc\r\n')
+ > w(b' \n')
+ > w(b' d\n')
+ > w(b'-e\n')
+ > w(b'\ No newline at end of file\n')
+ > w(b'+z\r\n')
+ > w(b'\ No newline at end of file\r\n')
> EOF
$ hg init repo
@@ -25,7 +25,7 @@
Test different --eol values
- $ $PYTHON -c 'file("a", "wb").write("a\nbbb\ncc\n\nd\ne")'
+ $ $PYTHON -c 'open("a", "wb").write(b"a\nbbb\ncc\n\nd\ne")'
$ hg ci -Am adda
adding .hgignore
adding a
@@ -89,7 +89,7 @@
auto EOL on CRLF file
- $ $PYTHON -c 'file("a", "wb").write("a\r\nbbb\r\ncc\r\n\r\nd\r\ne")'
+ $ $PYTHON -c 'open("a", "wb").write(b"a\r\nbbb\r\ncc\r\n\r\nd\r\ne")'
$ hg commit -m 'switch EOLs in a'
$ hg --traceback --config patch.eol='auto' import eol.diff
applying eol.diff
@@ -105,11 +105,11 @@
auto EOL on new file or source without any EOL
- $ $PYTHON -c 'file("noeol", "wb").write("noeol")'
+ $ $PYTHON -c 'open("noeol", "wb").write(b"noeol")'
$ hg add noeol
$ hg commit -m 'add noeol'
- $ $PYTHON -c 'file("noeol", "wb").write("noeol\r\nnoeol\n")'
- $ $PYTHON -c 'file("neweol", "wb").write("neweol\nneweol\r\n")'
+ $ $PYTHON -c 'open("noeol", "wb").write(b"noeol\r\nnoeol\n")'
+ $ $PYTHON -c 'open("neweol", "wb").write(b"neweol\nneweol\r\n")'
$ hg add neweol
$ hg diff --git > noeol.diff
$ hg revert --no-backup noeol neweol
@@ -127,10 +127,10 @@
Test --eol and binary patches
- $ $PYTHON -c 'file("b", "wb").write("a\x00\nb\r\nd")'
+ $ $PYTHON -c 'open("b", "wb").write(b"a\x00\nb\r\nd")'
$ hg ci -Am addb
adding b
- $ $PYTHON -c 'file("b", "wb").write("a\x00\nc\r\nd")'
+ $ $PYTHON -c 'open("b", "wb").write(b"a\x00\nc\r\nd")'
$ hg diff --git > bin.diff
$ hg revert --no-backup b
--- a/tests/test-import-git.t Fri Feb 23 17:57:04 2018 -0800
+++ b/tests/test-import-git.t Sat Feb 24 17:49:10 2018 -0600
@@ -563,10 +563,10 @@
> Mc$`b*O5$Pw00T?_*Z=?k
>
> EOF
- >>> fp = file('binary.diff', 'rb')
+ >>> fp = open('binary.diff', 'rb')
>>> data = fp.read()
>>> fp.close()
- >>> file('binary.diff', 'wb').write(data.replace('\n', '\r\n'))
+ >>> open('binary.diff', 'wb').write(data.replace(b'\n', b'\r\n'))
$ rm binary2
$ hg import --no-commit binary.diff
applying binary.diff
--- a/tests/test-import.t Fri Feb 23 17:57:04 2018 -0800
+++ b/tests/test-import.t Sat Feb 24 17:49:10 2018 -0600
@@ -56,7 +56,7 @@
$ cat > dummypatch.py <<EOF
> from __future__ import print_function
> print('patching file a')
- > file('a', 'wb').write('line2\n')
+ > open('a', 'wb').write(b'line2\n')
> EOF
$ hg clone -r0 a b
adding changesets
@@ -291,7 +291,7 @@
> msg.set_payload('email commit message\n' + patch)
> msg['Subject'] = 'email patch'
> msg['From'] = 'email patcher'
- > file(sys.argv[2], 'wb').write(msg.as_string())
+ > open(sys.argv[2], 'wb').write(msg.as_string())
> EOF
@@ -389,7 +389,7 @@
> msg.set_payload('email patch\n\nnext line\n---\n' + patch)
> msg['Subject'] = '[PATCH] email patch'
> msg['From'] = 'email patcher'
- > file(sys.argv[2], 'wb').write(msg.as_string())
+ > open(sys.argv[2], 'wb').write(msg.as_string())
> EOF
@@ -829,7 +829,7 @@
$ hg init binaryremoval
$ cd binaryremoval
$ echo a > a
- $ $PYTHON -c "file('b', 'wb').write('a\x00b')"
+ $ $PYTHON -c "open('b', 'wb').write(b'a\x00b')"
$ hg ci -Am addall
adding a
adding b
--- a/tests/test-install.t Fri Feb 23 17:57:04 2018 -0800
+++ b/tests/test-install.t Sat Feb 24 17:49:10 2018 -0600
@@ -17,7 +17,7 @@
checking "re2" regexp engine \((available|missing)\) (re)
checking templates (*mercurial?templates)... (glob)
checking default template (*mercurial?templates?map-cmdline.default) (glob)
- checking commit editor... (* -c "import sys; sys.exit(0)") (glob)
+ checking commit editor... (*) (glob)
checking username (test)
no problems detected
@@ -31,7 +31,7 @@
"defaulttemplate": "*mercurial?templates?map-cmdline.default", (glob)
"defaulttemplateerror": null,
"defaulttemplatenotfound": "default",
- "editor": "* -c \"import sys; sys.exit(0)\"", (glob)
+ "editor": "*", (glob)
"editornotfound": false,
"encoding": "ascii",
"encodingerror": null,
@@ -72,7 +72,7 @@
checking "re2" regexp engine \((available|missing)\) (re)
checking templates (*mercurial?templates)... (glob)
checking default template (*mercurial?templates?map-cmdline.default) (glob)
- checking commit editor... (* -c "import sys; sys.exit(0)") (glob)
+ checking commit editor... (*) (glob)
checking username...
no username supplied
(specify a username in your configuration file)
@@ -120,6 +120,35 @@
checking username (test)
no problems detected
+Print out the binary post-shlexsplit in the error message when the commit editor
+is not found (this intentionally uses backslashes to mimic a Windows use case).
+ $ HGEDITOR="c:\foo\bar\baz.exe -y -z" hg debuginstall
+ checking encoding (ascii)...
+ checking Python executable (*) (glob)
+ checking Python version (*) (glob)
+ checking Python lib (*lib*)... (glob)
+ checking Python security support (*) (glob)
+ TLS 1.2 not supported by Python install; network connections lack modern security (?)
+ SNI not supported by Python install; may have connectivity issues with some servers (?)
+ checking Mercurial version (*) (glob)
+ checking Mercurial custom build (*) (glob)
+ checking module policy (*) (glob)
+ checking installed modules (*mercurial)... (glob)
+ checking registered compression engines (*zlib*) (glob)
+ checking available compression engines (*zlib*) (glob)
+ checking available compression engines for wire protocol (*zlib*) (glob)
+ checking "re2" regexp engine \((available|missing)\) (re)
+ checking templates (*mercurial?templates)... (glob)
+ checking default template (*mercurial?templates?map-cmdline.default) (glob)
+ checking commit editor... (c:\foo\bar\baz.exe) (windows !)
+ Can't find editor 'c:\foo\bar\baz.exe' in PATH (windows !)
+ checking commit editor... (c:foobarbaz.exe) (no-windows !)
+ Can't find editor 'c:foobarbaz.exe' in PATH (no-windows !)
+ (specify a commit editor in your configuration file)
+ checking username (test)
+ 1 problems detected, please check your install!
+ [1]
+
#if test-repo
$ . "$TESTDIR/helpers-testrepo.sh"
--- a/tests/test-issue4074.t Fri Feb 23 17:57:04 2018 -0800
+++ b/tests/test-issue4074.t Sat Feb 24 17:49:10 2018 -0600
@@ -4,7 +4,7 @@
$ cat > s.py <<EOF
> import random
- > for x in xrange(100000):
+ > for x in range(100000):
> print
> if random.randint(0, 100) >= 50:
> x += 1
--- a/tests/test-largefiles-small-disk.t Fri Feb 23 17:57:04 2018 -0800
+++ b/tests/test-largefiles-small-disk.t Sat Feb 24 17:49:10 2018 -0600
@@ -11,7 +11,7 @@
> _origcopyfileobj = shutil.copyfileobj
> def copyfileobj(fsrc, fdst, length=16*1024):
> # allow journal files (used by transaction) to be written
- > if 'journal.' in fdst.name:
+ > if b'journal.' in fdst.name:
> return _origcopyfileobj(fsrc, fdst, length)
> fdst.write(fsrc.read(4))
> raise IOError(errno.ENOSPC, os.strerror(errno.ENOSPC))
--- a/tests/test-largefiles-wireproto.t Fri Feb 23 17:57:04 2018 -0800
+++ b/tests/test-largefiles-wireproto.t Sat Feb 24 17:49:10 2018 -0600
@@ -1,3 +1,13 @@
+#testcases sshv1 sshv2
+
+#if sshv2
+ $ cat >> $HGRCPATH << EOF
+ > [experimental]
+ > sshpeer.advertise-v2 = true
+ > sshserver.support-v2 = true
+ > EOF
+#endif
+
This file contains testcases that tend to be related to the wire protocol part
of largefiles.
--- a/tests/test-lfs-largefiles.t Fri Feb 23 17:57:04 2018 -0800
+++ b/tests/test-lfs-largefiles.t Sat Feb 24 17:49:10 2018 -0600
@@ -298,7 +298,7 @@
$TESTTMP/nolargefiles/.hg/hgrc:*: extensions.lfs= (glob)
$ hg log -r 'all()' -G -T '{rev} {join(lfs_files, ", ")} ({desc})\n'
- o 8 (remove large_by_size.bin)
+ o 8 large_by_size.bin (remove large_by_size.bin)
|
o 7 large_by_size.bin (large by size)
|
--- a/tests/test-lfs-test-server.t Fri Feb 23 17:57:04 2018 -0800
+++ b/tests/test-lfs-test-server.t Sat Feb 24 17:49:10 2018 -0600
@@ -48,6 +48,7 @@
searching for changes
lfs: uploading 31cf46fbc4ecd458a0943c5b4881f1f5a6dd36c53d6167d5b69ac45149b38e5b (12 bytes)
lfs: processed: 31cf46fbc4ecd458a0943c5b4881f1f5a6dd36c53d6167d5b69ac45149b38e5b
+ lfs: uploaded 1 files (12 bytes)
1 changesets found
uncompressed size of bundle content:
* (changelog) (glob)
@@ -65,10 +66,10 @@
$ cd ../repo2
$ hg update tip -v
resolving manifests
- getting a
lfs: downloading 31cf46fbc4ecd458a0943c5b4881f1f5a6dd36c53d6167d5b69ac45149b38e5b (12 bytes)
lfs: adding 31cf46fbc4ecd458a0943c5b4881f1f5a6dd36c53d6167d5b69ac45149b38e5b to the usercache
lfs: processed: 31cf46fbc4ecd458a0943c5b4881f1f5a6dd36c53d6167d5b69ac45149b38e5b
+ getting a
lfs: found 31cf46fbc4ecd458a0943c5b4881f1f5a6dd36c53d6167d5b69ac45149b38e5b in the local lfs store
1 files updated, 0 files merged, 0 files removed, 0 files unresolved
@@ -86,6 +87,7 @@
lfs: processed: 37a65ab78d5ecda767e8622c248b5dbff1e68b1678ab0e730d5eb8601ec8ad19
lfs: uploading d11e1a642b60813aee592094109b406089b8dff4cb157157f753418ec7857998 (19 bytes)
lfs: processed: d11e1a642b60813aee592094109b406089b8dff4cb157157f753418ec7857998
+ lfs: uploaded 2 files (39 bytes)
1 changesets found
uncompressed size of bundle content:
adding changesets
@@ -97,17 +99,18 @@
$ rm -rf `hg config lfs.usercache`
$ hg --repo ../repo1 update tip -v
resolving manifests
- getting b
- lfs: found 31cf46fbc4ecd458a0943c5b4881f1f5a6dd36c53d6167d5b69ac45149b38e5b in the local lfs store
- getting c
+ lfs: need to transfer 2 objects (39 bytes)
+ lfs: downloading 37a65ab78d5ecda767e8622c248b5dbff1e68b1678ab0e730d5eb8601ec8ad19 (20 bytes)
+ lfs: adding 37a65ab78d5ecda767e8622c248b5dbff1e68b1678ab0e730d5eb8601ec8ad19 to the usercache
+ lfs: processed: 37a65ab78d5ecda767e8622c248b5dbff1e68b1678ab0e730d5eb8601ec8ad19
lfs: downloading d11e1a642b60813aee592094109b406089b8dff4cb157157f753418ec7857998 (19 bytes)
lfs: adding d11e1a642b60813aee592094109b406089b8dff4cb157157f753418ec7857998 to the usercache
lfs: processed: d11e1a642b60813aee592094109b406089b8dff4cb157157f753418ec7857998
+ getting b
+ lfs: found 31cf46fbc4ecd458a0943c5b4881f1f5a6dd36c53d6167d5b69ac45149b38e5b in the local lfs store
+ getting c
lfs: found d11e1a642b60813aee592094109b406089b8dff4cb157157f753418ec7857998 in the local lfs store
getting d
- lfs: downloading 37a65ab78d5ecda767e8622c248b5dbff1e68b1678ab0e730d5eb8601ec8ad19 (20 bytes)
- lfs: adding 37a65ab78d5ecda767e8622c248b5dbff1e68b1678ab0e730d5eb8601ec8ad19 to the usercache
- lfs: processed: 37a65ab78d5ecda767e8622c248b5dbff1e68b1678ab0e730d5eb8601ec8ad19
lfs: found 37a65ab78d5ecda767e8622c248b5dbff1e68b1678ab0e730d5eb8601ec8ad19 in the local lfs store
3 files updated, 0 files merged, 0 files removed, 0 files unresolved
@@ -121,11 +124,6 @@
$ hg --repo ../repo1 update -C tip -v
resolving manifests
- getting a
- lfs: found 31cf46fbc4ecd458a0943c5b4881f1f5a6dd36c53d6167d5b69ac45149b38e5b in the local lfs store
- getting b
- lfs: found 31cf46fbc4ecd458a0943c5b4881f1f5a6dd36c53d6167d5b69ac45149b38e5b in the local lfs store
- getting c
lfs: downloading d11e1a642b60813aee592094109b406089b8dff4cb157157f753418ec7857998 (19 bytes)
abort: corrupt remote lfs object: d11e1a642b60813aee592094109b406089b8dff4cb157157f753418ec7857998
[255]
@@ -151,6 +149,75 @@
(run hg verify)
[255]
+Archive will prefetch blobs in a group
+
+ $ rm -rf .hg/store/lfs `hg config lfs.usercache`
+ $ hg archive -vr 1 ../archive
+ lfs: need to transfer 3 objects (51 bytes)
+ lfs: downloading 31cf46fbc4ecd458a0943c5b4881f1f5a6dd36c53d6167d5b69ac45149b38e5b (12 bytes)
+ lfs: adding 31cf46fbc4ecd458a0943c5b4881f1f5a6dd36c53d6167d5b69ac45149b38e5b to the usercache
+ lfs: processed: 31cf46fbc4ecd458a0943c5b4881f1f5a6dd36c53d6167d5b69ac45149b38e5b
+ lfs: downloading 37a65ab78d5ecda767e8622c248b5dbff1e68b1678ab0e730d5eb8601ec8ad19 (20 bytes)
+ lfs: adding 37a65ab78d5ecda767e8622c248b5dbff1e68b1678ab0e730d5eb8601ec8ad19 to the usercache
+ lfs: processed: 37a65ab78d5ecda767e8622c248b5dbff1e68b1678ab0e730d5eb8601ec8ad19
+ lfs: downloading d11e1a642b60813aee592094109b406089b8dff4cb157157f753418ec7857998 (19 bytes)
+ lfs: adding d11e1a642b60813aee592094109b406089b8dff4cb157157f753418ec7857998 to the usercache
+ lfs: processed: d11e1a642b60813aee592094109b406089b8dff4cb157157f753418ec7857998
+ lfs: found 31cf46fbc4ecd458a0943c5b4881f1f5a6dd36c53d6167d5b69ac45149b38e5b in the local lfs store
+ lfs: found 31cf46fbc4ecd458a0943c5b4881f1f5a6dd36c53d6167d5b69ac45149b38e5b in the local lfs store
+ lfs: found d11e1a642b60813aee592094109b406089b8dff4cb157157f753418ec7857998 in the local lfs store
+ lfs: found 37a65ab78d5ecda767e8622c248b5dbff1e68b1678ab0e730d5eb8601ec8ad19 in the local lfs store
+ $ find ../archive | sort
+ ../archive
+ ../archive/.hg_archival.txt
+ ../archive/a
+ ../archive/b
+ ../archive/c
+ ../archive/d
+
+Cat will prefetch blobs in a group
+
+ $ rm -rf .hg/store/lfs `hg config lfs.usercache`
+ $ hg cat -vr 1 a b c
+ lfs: need to transfer 2 objects (31 bytes)
+ lfs: downloading 31cf46fbc4ecd458a0943c5b4881f1f5a6dd36c53d6167d5b69ac45149b38e5b (12 bytes)
+ lfs: adding 31cf46fbc4ecd458a0943c5b4881f1f5a6dd36c53d6167d5b69ac45149b38e5b to the usercache
+ lfs: processed: 31cf46fbc4ecd458a0943c5b4881f1f5a6dd36c53d6167d5b69ac45149b38e5b
+ lfs: downloading d11e1a642b60813aee592094109b406089b8dff4cb157157f753418ec7857998 (19 bytes)
+ lfs: adding d11e1a642b60813aee592094109b406089b8dff4cb157157f753418ec7857998 to the usercache
+ lfs: processed: d11e1a642b60813aee592094109b406089b8dff4cb157157f753418ec7857998
+ lfs: found 31cf46fbc4ecd458a0943c5b4881f1f5a6dd36c53d6167d5b69ac45149b38e5b in the local lfs store
+ THIS-IS-LFS
+ lfs: found 31cf46fbc4ecd458a0943c5b4881f1f5a6dd36c53d6167d5b69ac45149b38e5b in the local lfs store
+ THIS-IS-LFS
+ lfs: found d11e1a642b60813aee592094109b406089b8dff4cb157157f753418ec7857998 in the local lfs store
+ ANOTHER-LARGE-FILE
+
+Revert will prefetch blobs in a group
+
+ $ rm -rf .hg/store/lfs
+ $ rm -rf `hg config lfs.usercache`
+ $ rm *
+ $ hg revert --all -r 1 -v
+ adding a
+ reverting b
+ reverting c
+ reverting d
+ lfs: need to transfer 3 objects (51 bytes)
+ lfs: downloading 31cf46fbc4ecd458a0943c5b4881f1f5a6dd36c53d6167d5b69ac45149b38e5b (12 bytes)
+ lfs: adding 31cf46fbc4ecd458a0943c5b4881f1f5a6dd36c53d6167d5b69ac45149b38e5b to the usercache
+ lfs: processed: 31cf46fbc4ecd458a0943c5b4881f1f5a6dd36c53d6167d5b69ac45149b38e5b
+ lfs: downloading 37a65ab78d5ecda767e8622c248b5dbff1e68b1678ab0e730d5eb8601ec8ad19 (20 bytes)
+ lfs: adding 37a65ab78d5ecda767e8622c248b5dbff1e68b1678ab0e730d5eb8601ec8ad19 to the usercache
+ lfs: processed: 37a65ab78d5ecda767e8622c248b5dbff1e68b1678ab0e730d5eb8601ec8ad19
+ lfs: downloading d11e1a642b60813aee592094109b406089b8dff4cb157157f753418ec7857998 (19 bytes)
+ lfs: adding d11e1a642b60813aee592094109b406089b8dff4cb157157f753418ec7857998 to the usercache
+ lfs: processed: d11e1a642b60813aee592094109b406089b8dff4cb157157f753418ec7857998
+ lfs: found 31cf46fbc4ecd458a0943c5b4881f1f5a6dd36c53d6167d5b69ac45149b38e5b in the local lfs store
+ lfs: found d11e1a642b60813aee592094109b406089b8dff4cb157157f753418ec7857998 in the local lfs store
+ lfs: found 37a65ab78d5ecda767e8622c248b5dbff1e68b1678ab0e730d5eb8601ec8ad19 in the local lfs store
+ lfs: found 31cf46fbc4ecd458a0943c5b4881f1f5a6dd36c53d6167d5b69ac45149b38e5b in the local lfs store
+
Check error message when the remote missed a blob:
$ echo FFFFF > b
--- a/tests/test-lfs.t Fri Feb 23 17:57:04 2018 -0800
+++ b/tests/test-lfs.t Sat Feb 24 17:49:10 2018 -0600
@@ -154,10 +154,32 @@
$ hg add . -q
$ hg commit -m 'commit with lfs content'
+ $ hg files -r . 'set:added()'
+ large
+ small
+ $ hg files -r . 'set:added() & lfs()'
+ large
+
$ hg mv large l
$ hg mv small s
+ $ hg status 'set:removed()'
+ R large
+ R small
+ $ hg status 'set:removed() & lfs()'
+ R large
$ hg commit -m 'renames'
+ $ hg files -r . 'set:copied()'
+ l
+ s
+ $ hg files -r . 'set:copied() & lfs()'
+ l
+ $ hg status --change . 'set:removed()'
+ R large
+ R small
+ $ hg status --change . 'set:removed() & lfs()'
+ R large
+
$ echo SHORT > l
$ echo BECOME-LARGER-FROM-SHORTER > s
$ hg commit -m 'large to small, small to large'
@@ -174,7 +196,7 @@
$ hg log -r 'all()' -T '{rev} {join(lfs_files, ", ")}\n'
0 large
- 1 l
+ 1 l, large
2 s
3 s
4 l
@@ -760,7 +782,6 @@
$ hg --config lfs.usercache=emptycache clone -v repo5 fromcorrupt2
updating to branch default
resolving manifests
- getting l
abort: corrupt remote lfs object: 22f66a3fc0b9bf3f012c814303995ec07099b3a9ce02a7af84b5970811074a3b
[255]
@@ -1007,7 +1028,7 @@
The LFS policy stops when the .hglfs is gone
- $ hg rm .hglfs
+ $ mv .hglfs .hglfs_
$ echo 'largefile3' > lfs.test
$ echo '012345678901234567890abc' > nolfs.exclude
$ echo '01234567890123456abc' > lfs.catchall
@@ -1015,6 +1036,28 @@
$ hg log -r . -T '{rev}: {lfs_files % "{file}: {lfsoid}\n"}\n'
4:
+ $ mv .hglfs_ .hglfs
+ $ echo '012345678901234567890abc' > lfs.test
+ $ hg ci -m 'back to lfs'
+ $ hg rm lfs.test
+ $ hg ci -qm 'remove lfs'
+
+{lfs_files} will list deleted files too
+
+ $ hg log -T "{lfs_files % '{rev} {file}: {lfspointer.oid}\n'}"
+ 6 lfs.test:
+ 5 lfs.test: sha256:43f8f41171b6f62a6b61ba4ce98a8a6c1649240a47ebafd43120aa215ac9e7f6
+ 3 lfs.catchall: sha256:31f43b9c62b540126b0ad5884dc013d21a61c9329b77de1fceeae2fc58511573
+ 3 lfs.test: sha256:8acd23467967bc7b8cc5a280056589b0ba0b17ff21dbd88a7b6474d6290378a6
+ 2 lfs.catchall: sha256:d4ec46c2869ba22eceb42a729377432052d9dd75d82fc40390ebaadecee87ee9
+ 2 lfs.test: sha256:5489e6ced8c36a7b267292bde9fd5242a5f80a7482e8f23fa0477393dfaa4d6c
+
+ $ hg log -r 'file("set:lfs()")' -T '{rev} {join(lfs_files, ", ")}\n'
+ 2 lfs.catchall, lfs.test
+ 3 lfs.catchall, lfs.test
+ 5 lfs.test
+ 6 lfs.test
+
$ cd ..
Unbundling adds a requirement to a non-lfs repo, if necessary.
--- a/tests/test-log-exthook.t Fri Feb 23 17:57:04 2018 -0800
+++ b/tests/test-log-exthook.t Sat Feb 24 17:49:10 2018 -0600
@@ -4,8 +4,8 @@
$ cat > $TESTTMP/logexthook.py <<EOF
> from __future__ import absolute_import
> from mercurial import (
- > cmdutil,
> commands,
+ > logcmdutil,
> repair,
> )
> def rot13description(self, ctx):
@@ -13,7 +13,7 @@
> description = ctx.description().strip().splitlines()[0].encode('rot13')
> self.ui.write("%s: %s\n" % (summary, description))
> def reposetup(ui, repo):
- > cmdutil.changeset_printer._exthook = rot13description
+ > logcmdutil.changesetprinter._exthook = rot13description
> EOF
Prepare the repository
--- a/tests/test-log-linerange.t Fri Feb 23 17:57:04 2018 -0800
+++ b/tests/test-log-linerange.t Sat Feb 24 17:49:10 2018 -0600
@@ -172,6 +172,77 @@
+3
+4
+ $ hg log -f --graph -L foo,5:7 -p
+ @ changeset: 5:cfdf972b3971
+ | tag: tip
+ | user: test
+ | date: Thu Jan 01 00:00:00 1970 +0000
+ | summary: foo: 3 -> 3+ and 11+ -> 11-; bar: a -> a+
+ |
+ | diff --git a/foo b/foo
+ | --- a/foo
+ | +++ b/foo
+ | @@ -4,7 +4,7 @@
+ | 0
+ | 1
+ | 2+
+ | -3
+ | +3+
+ | 4
+ | 5
+ | 6
+ |
+ o changeset: 4:eaec41c1a0c9
+ : user: test
+ : date: Thu Jan 01 00:00:00 1970 +0000
+ : summary: 11 -> 11+; leading space before "1"
+ :
+ : diff --git a/foo b/foo
+ : --- a/foo
+ : +++ b/foo
+ : @@ -2,7 +2,7 @@
+ : 0
+ : 0
+ : 0
+ : -1
+ : + 1
+ : 2+
+ : 3
+ : 4
+ :
+ o changeset: 2:63a884426fd0
+ : user: test
+ : date: Thu Jan 01 00:00:00 1970 +0000
+ : summary: 2 -> 2+; added bar
+ :
+ : diff --git a/foo b/foo
+ : --- a/foo
+ : +++ b/foo
+ : @@ -3,6 +3,6 @@
+ : 0
+ : 0
+ : 1
+ : -2
+ : +2+
+ : 3
+ : 4
+ :
+ o changeset: 0:5ae1f82b9a00
+ user: test
+ date: Thu Jan 01 00:00:00 1970 +0000
+ summary: init
+
+ diff --git a/foo b/foo
+ new file mode 100644
+ --- /dev/null
+ +++ b/foo
+ @@ -0,0 +1,5 @@
+ +0
+ +1
+ +2
+ +3
+ +4
+
With --template.
@@ -849,9 +920,3 @@
$ hg log -f -L dir/baz,5:7 -p
abort: cannot follow file not in parent revision: "dir/baz"
[255]
-
-Graph log does work yet.
-
- $ hg log -f -L dir/baz,5:7 --graph
- abort: graph not supported with line range patterns
- [255]
--- a/tests/test-log.t Fri Feb 23 17:57:04 2018 -0800
+++ b/tests/test-log.t Sat Feb 24 17:49:10 2018 -0600
@@ -2258,7 +2258,7 @@
> foo = {'foo': repo[0].node()}
> names = lambda r: foo.keys()
> namemap = lambda r, name: foo.get(name)
- > nodemap = lambda r, node: [name for name, n in foo.iteritems()
+ > nodemap = lambda r, node: [name for name, n in foo.items()
> if n == node]
> ns = namespaces.namespace(
> "bars", templatename="bar", logname="barlog",
--- a/tests/test-logexchange.t Fri Feb 23 17:57:04 2018 -0800
+++ b/tests/test-logexchange.t Sat Feb 24 17:49:10 2018 -0600
@@ -6,6 +6,9 @@
> glog = log -G -T '{rev}:{node|short} {desc}'
> [experimental]
> remotenames = True
+ > [extensions]
+ > remotenames =
+ > show =
> EOF
Making a server repo
@@ -57,14 +60,27 @@
$ cat .hg/logexchange/bookmarks
0
- 87d6d66763085b629e6d7ed56778c79827273022\x00file:$TESTTMP/server\x00bar (esc)
- 62615734edd52f06b6fb9c2beb429e4fe30d57b8\x00file:$TESTTMP/server\x00foo (esc)
+ 87d6d66763085b629e6d7ed56778c79827273022\x00default\x00bar (esc)
+ 62615734edd52f06b6fb9c2beb429e4fe30d57b8\x00default\x00foo (esc)
$ cat .hg/logexchange/branches
0
- ec2426147f0e39dbc9cef599b066be6035ce691d\x00file:$TESTTMP/server\x00default (esc)
- 3e1487808078543b0af6d10dadf5d46943578db0\x00file:$TESTTMP/server\x00wat (esc)
+ ec2426147f0e39dbc9cef599b066be6035ce691d\x00default\x00default (esc)
+ 3e1487808078543b0af6d10dadf5d46943578db0\x00default\x00wat (esc)
+
+ $ hg show work
+ o 3e14 (wat) (default/wat) added bar
+ |
+ ~
+ @ ec24 (default/default) Added h
+ |
+ ~
+
+ $ hg update "default/wat"
+ 1 files updated, 0 files merged, 3 files removed, 0 files unresolved
+ $ hg identify
+ 3e1487808078 (wat) tip
Making a new server
-------------------
@@ -94,15 +110,152 @@
$ cat .hg/logexchange/bookmarks
0
- 62615734edd52f06b6fb9c2beb429e4fe30d57b8\x00file:$TESTTMP/server\x00foo (esc)
- 87d6d66763085b629e6d7ed56778c79827273022\x00file:$TESTTMP/server\x00bar (esc)
- 87d6d66763085b629e6d7ed56778c79827273022\x00file:$TESTTMP/server2\x00bar (esc)
- 62615734edd52f06b6fb9c2beb429e4fe30d57b8\x00file:$TESTTMP/server2\x00foo (esc)
+ 62615734edd52f06b6fb9c2beb429e4fe30d57b8\x00default\x00foo (esc)
+ 87d6d66763085b629e6d7ed56778c79827273022\x00default\x00bar (esc)
+ 87d6d66763085b629e6d7ed56778c79827273022\x00$TESTTMP/server2\x00bar (esc)
+ 62615734edd52f06b6fb9c2beb429e4fe30d57b8\x00$TESTTMP/server2\x00foo (esc)
$ cat .hg/logexchange/branches
0
- 3e1487808078543b0af6d10dadf5d46943578db0\x00file:$TESTTMP/server\x00wat (esc)
- ec2426147f0e39dbc9cef599b066be6035ce691d\x00file:$TESTTMP/server\x00default (esc)
- ec2426147f0e39dbc9cef599b066be6035ce691d\x00file:$TESTTMP/server2\x00default (esc)
- 3e1487808078543b0af6d10dadf5d46943578db0\x00file:$TESTTMP/server2\x00wat (esc)
+ 3e1487808078543b0af6d10dadf5d46943578db0\x00default\x00wat (esc)
+ ec2426147f0e39dbc9cef599b066be6035ce691d\x00default\x00default (esc)
+ ec2426147f0e39dbc9cef599b066be6035ce691d\x00$TESTTMP/server2\x00default (esc)
+ 3e1487808078543b0af6d10dadf5d46943578db0\x00$TESTTMP/server2\x00wat (esc)
+
+ $ hg log -G
+ @ changeset: 8:3e1487808078
+ | branch: wat
+ | tag: tip
+ | remote branch: $TESTTMP/server2/wat
+ | remote branch: default/wat
+ | parent: 4:aa98ab95a928
+ | user: test
+ | date: Thu Jan 01 00:00:00 1970 +0000
+ | summary: added bar
+ |
+ | o changeset: 7:ec2426147f0e
+ | | remote branch: $TESTTMP/server2/default
+ | | remote branch: default/default
+ | | user: test
+ | | date: Thu Jan 01 00:00:00 1970 +0000
+ | | summary: Added h
+ | |
+ | o changeset: 6:87d6d6676308
+ | | bookmark: bar
+ | | remote bookmark: $TESTTMP/server2/bar
+ | | remote bookmark: default/bar
+ | | user: test
+ | | date: Thu Jan 01 00:00:00 1970 +0000
+ | | summary: Added g
+ | |
+ | o changeset: 5:825660c69f0c
+ |/ user: test
+ | date: Thu Jan 01 00:00:00 1970 +0000
+ | summary: Added f
+ |
+ o changeset: 4:aa98ab95a928
+ | user: test
+ | date: Thu Jan 01 00:00:00 1970 +0000
+ | summary: Added e
+ |
+ o changeset: 3:62615734edd5
+ | bookmark: foo
+ | remote bookmark: $TESTTMP/server2/foo
+ | remote bookmark: default/foo
+ | user: test
+ | date: Thu Jan 01 00:00:00 1970 +0000
+ | summary: Added d
+ |
+ o changeset: 2:28ad74487de9
+ | user: test
+ | date: Thu Jan 01 00:00:00 1970 +0000
+ | summary: Added c
+ |
+ o changeset: 1:29becc82797a
+ | user: test
+ | date: Thu Jan 01 00:00:00 1970 +0000
+ | summary: Added b
+ |
+ o changeset: 0:18d04c59bb5d
+ user: test
+ date: Thu Jan 01 00:00:00 1970 +0000
+ summary: Added a
+
+Testing the templates provided by remotenames extension
+
+`remotenames` keyword
+
+ $ hg log -G -T "{rev}:{node|short} {remotenames}\n"
+ @ 8:3e1487808078 $TESTTMP/server2/wat default/wat
+ |
+ | o 7:ec2426147f0e $TESTTMP/server2/default default/default
+ | |
+ | o 6:87d6d6676308 $TESTTMP/server2/bar default/bar
+ | |
+ | o 5:825660c69f0c
+ |/
+ o 4:aa98ab95a928
+ |
+ o 3:62615734edd5 $TESTTMP/server2/foo default/foo
+ |
+ o 2:28ad74487de9
+ |
+ o 1:29becc82797a
+ |
+ o 0:18d04c59bb5d
+
+`remotebookmarks` and `remotebranches` keywords
+
+ $ hg log -G -T "{rev}:{node|short} [{remotebookmarks}] ({remotebranches})"
+ @ 8:3e1487808078 [] ($TESTTMP/server2/wat default/wat)
+ |
+ | o 7:ec2426147f0e [] ($TESTTMP/server2/default default/default)
+ | |
+ | o 6:87d6d6676308 [$TESTTMP/server2/bar default/bar] ()
+ | |
+ | o 5:825660c69f0c [] ()
+ |/
+ o 4:aa98ab95a928 [] ()
+ |
+ o 3:62615734edd5 [$TESTTMP/server2/foo default/foo] ()
+ |
+ o 2:28ad74487de9 [] ()
+ |
+ o 1:29becc82797a [] ()
+ |
+ o 0:18d04c59bb5d [] ()
+
+Testing the revsets provided by remotenames extension
+
+`remotenames` revset
+
+ $ hg log -r "remotenames()" -GT "{rev}:{node|short} {remotenames}\n"
+ @ 8:3e1487808078 $TESTTMP/server2/wat default/wat
+ :
+ : o 7:ec2426147f0e $TESTTMP/server2/default default/default
+ : |
+ : o 6:87d6d6676308 $TESTTMP/server2/bar default/bar
+ :/
+ o 3:62615734edd5 $TESTTMP/server2/foo default/foo
+ |
+ ~
+
+`remotebranches` revset
+
+ $ hg log -r "remotebranches()" -GT "{rev}:{node|short} {remotenames}\n"
+ @ 8:3e1487808078 $TESTTMP/server2/wat default/wat
+ |
+ ~
+ o 7:ec2426147f0e $TESTTMP/server2/default default/default
+ |
+ ~
+
+`remotebookmarks` revset
+
+ $ hg log -r "remotebookmarks()" -GT "{rev}:{node|short} {remotenames}\n"
+ o 6:87d6d6676308 $TESTTMP/server2/bar default/bar
+ :
+ o 3:62615734edd5 $TESTTMP/server2/foo default/foo
+ |
+ ~
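The .hg/logexchange/bookmarks and .hg/logexchange/branches files dumped earlier in this test share one layout: a first line carrying the format version (0), then one record per line with node, remote path alias, and name separated by NUL bytes (shown escaped as \x00 ... (esc) above). A rough reader for that layout, for illustration only (not the logexchange module itself):

    def read_logexchange(path):
        # Returns (version, [(node, remote, name), ...]) for the
        # version-0 layout shown in the test output above.
        with open(path, 'rb') as fh:
            lines = fh.read().splitlines()
        version = int(lines[0])
        entries = [tuple(l.split(b'\0')) for l in lines[1:] if l]
        return version, entries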
--- a/tests/test-mactext.t Fri Feb 23 17:57:04 2018 -0800
+++ b/tests/test-mactext.t Sat Feb 24 17:49:10 2018 -0600
@@ -3,9 +3,9 @@
> import sys
>
> for path in sys.argv[1:]:
- > data = file(path, 'rb').read()
- > data = data.replace('\n', '\r')
- > file(path, 'wb').write(data)
+ > data = open(path, 'rb').read()
+ > data = data.replace(b'\n', b'\r')
+ > open(path, 'wb').write(data)
> EOF
$ cat > print.py <<EOF
> import sys
--- a/tests/test-manifest.py Fri Feb 23 17:57:04 2018 -0800
+++ b/tests/test-manifest.py Sat Feb 24 17:49:10 2018 -0600
@@ -11,7 +11,6 @@
)
EMTPY_MANIFEST = b''
-EMTPY_MANIFEST_V2 = b'\0\n'
HASH_1 = b'1' * 40
BIN_HASH_1 = binascii.unhexlify(HASH_1)
@@ -28,42 +27,6 @@
b'flag2': b'l',
}
-# Same data as A_SHORT_MANIFEST
-A_SHORT_MANIFEST_V2 = (
- b'\0\n'
- b'\x00bar/baz/qux.py\0%(flag2)s\n%(hash2)s\n'
- b'\x00foo\0%(flag1)s\n%(hash1)s\n'
- ) % {b'hash1': BIN_HASH_1,
- b'flag1': b'',
- b'hash2': BIN_HASH_2,
- b'flag2': b'l',
- }
-
-# Same data as A_SHORT_MANIFEST
-A_METADATA_MANIFEST = (
- b'\0foo\0bar\n'
- b'\x00bar/baz/qux.py\0%(flag2)s\0foo\0bar\n%(hash2)s\n' # flag and metadata
- b'\x00foo\0%(flag1)s\0foo\n%(hash1)s\n' # no flag, but metadata
- ) % {b'hash1': BIN_HASH_1,
- b'flag1': b'',
- b'hash2': BIN_HASH_2,
- b'flag2': b'l',
- }
-
-A_STEM_COMPRESSED_MANIFEST = (
- b'\0\n'
- b'\x00bar/baz/qux.py\0%(flag2)s\n%(hash2)s\n'
- b'\x04qux/foo.py\0%(flag1)s\n%(hash1)s\n' # simple case of 4 stem chars
- b'\x0az.py\0%(flag1)s\n%(hash1)s\n' # tricky newline = 10 stem characters
- b'\x00%(verylongdir)sx/x\0\n%(hash1)s\n'
- b'\xffx/y\0\n%(hash2)s\n' # more than 255 stem chars
- ) % {b'hash1': BIN_HASH_1,
- b'flag1': b'',
- b'hash2': BIN_HASH_2,
- b'flag2': b'l',
- b'verylongdir': 255 * b'x',
- }
-
A_DEEPER_MANIFEST = (
b'a/b/c/bar.py\0%(hash3)s%(flag1)s\n'
b'a/b/c/bar.txt\0%(hash1)s%(flag1)s\n'
@@ -111,11 +74,6 @@
self.assertEqual(0, len(m))
self.assertEqual([], list(m))
- def testEmptyManifestv2(self):
- m = self.parsemanifest(EMTPY_MANIFEST_V2)
- self.assertEqual(0, len(m))
- self.assertEqual([], list(m))
-
def testManifest(self):
m = self.parsemanifest(A_SHORT_MANIFEST)
self.assertEqual([b'bar/baz/qux.py', b'foo'], list(m))
@@ -126,31 +84,6 @@
with self.assertRaises(KeyError):
m[b'wat']
- def testParseManifestV2(self):
- m1 = self.parsemanifest(A_SHORT_MANIFEST)
- m2 = self.parsemanifest(A_SHORT_MANIFEST_V2)
- # Should have same content as A_SHORT_MANIFEST
- self.assertEqual(m1.text(), m2.text())
-
- def testParseManifestMetadata(self):
- # Metadata is for future-proofing and should be accepted but ignored
- m = self.parsemanifest(A_METADATA_MANIFEST)
- self.assertEqual(A_SHORT_MANIFEST, m.text())
-
- def testParseManifestStemCompression(self):
- m = self.parsemanifest(A_STEM_COMPRESSED_MANIFEST)
- self.assertIn(b'bar/baz/qux.py', m)
- self.assertIn(b'bar/qux/foo.py', m)
- self.assertIn(b'bar/qux/foz.py', m)
- self.assertIn(256 * b'x' + b'/x', m)
- self.assertIn(256 * b'x' + b'/y', m)
- self.assertEqual(A_STEM_COMPRESSED_MANIFEST, m.text(usemanifestv2=True))
-
- def testTextV2(self):
- m1 = self.parsemanifest(A_SHORT_MANIFEST)
- v2text = m1.text(usemanifestv2=True)
- self.assertEqual(A_SHORT_MANIFEST_V2, v2text)
-
def testSetItem(self):
want = BIN_HASH_1
@@ -223,7 +156,7 @@
self.assertEqual(want, m[b'foo'])
self.assertEqual([(b'bar/baz/qux.py', BIN_HASH_2),
(b'foo', BIN_HASH_1 + b'a')],
- list(m.iteritems()))
+ list(m.items()))
# Sometimes it even tries a 22-byte fake hash, but we can
# return 21 and it'll work out
m[b'foo'] = want + b'+'
@@ -238,7 +171,7 @@
# suffix with iteration
self.assertEqual([(b'bar/baz/qux.py', BIN_HASH_2),
(b'foo', want)],
- list(m.iteritems()))
+ list(m.items()))
# shows up in diff
self.assertEqual({b'foo': ((want, f), (h, b''))}, m.diff(clean))
--- a/tests/test-manifestv2.t Fri Feb 23 17:57:04 2018 -0800
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,102 +0,0 @@
-Create repo with old manifest
-
- $ cat << EOF >> $HGRCPATH
- > [format]
- > usegeneraldelta=yes
- > EOF
-
- $ hg init existing
- $ cd existing
- $ echo footext > foo
- $ hg add foo
- $ hg commit -m initial
-
-We're using v1, so no manifestv2 entry is in requires yet.
-
- $ grep manifestv2 .hg/requires
- [1]
-
-Let's clone this with manifestv2 enabled to switch to the new format for
-future commits.
-
- $ cd ..
- $ hg clone --pull existing new --config experimental.manifestv2=1
- requesting all changes
- adding changesets
- adding manifests
- adding file changes
- added 1 changesets with 1 changes to 1 files
- new changesets 0fc9a4fafa44
- updating to branch default
- 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
- $ cd new
-
-Check that entry was added to .hg/requires.
-
- $ grep manifestv2 .hg/requires
- manifestv2
-
-Make a new commit.
-
- $ echo newfootext > foo
- $ hg commit -m new
-
-Check that the manifest actually switched to v2.
-
- $ hg debugdata -m 0
- foo\x0021e958b1dca695a60ee2e9cf151753204ee0f9e9 (esc)
-
- $ hg debugdata -m 1
- \x00 (esc)
- \x00foo\x00 (esc)
- I\xab\x7f\xb8(\x83\xcas\x15\x9d\xc2\xd3\xd3:5\x08\xbad5_ (esc)
-
-Check that manifestv2 is used if the requirement is present, even if it's
-disabled in the config.
-
- $ echo newerfootext > foo
- $ hg --config experimental.manifestv2=False commit -m newer
-
- $ hg debugdata -m 2
- \x00 (esc)
- \x00foo\x00 (esc)
- \xa6\xb1\xfb\xef]\x91\xa1\x19`\xf3.#\x90S\xf8\x06 \xe2\x19\x00 (esc)
-
-Check that we can still read v1 manifests.
-
- $ hg files -r 0
- foo
-
- $ cd ..
-
-Check that entry is added to .hg/requires on repo creation
-
- $ hg --config experimental.manifestv2=True init repo
- $ cd repo
- $ grep manifestv2 .hg/requires
- manifestv2
-
-Set up simple repo
-
- $ echo a > file1
- $ echo b > file2
- $ echo c > file3
- $ hg ci -Aqm 'initial'
- $ echo d > file2
- $ hg ci -m 'modify file2'
-
-Check that 'hg verify', which uses manifest.readdelta(), works
-
- $ hg verify
- checking changesets
- checking manifests
- crosschecking files in changesets and manifests
- checking files
- 3 files, 2 changesets, 4 total revisions
-
-Check that manifest revlog is smaller than for v1
-
- $ hg debugindex -m
- rev offset length delta linkrev nodeid p1 p2
- 0 0 81 -1 0 57361477c778 000000000000 000000000000
- 1 81 33 0 1 aeaab5a2ef74 57361477c778 000000000000
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/tests/test-mdiff.py Sat Feb 24 17:49:10 2018 -0600
@@ -0,0 +1,24 @@
+from __future__ import absolute_import
+from __future__ import print_function
+
+import unittest
+
+from mercurial import (
+ mdiff,
+)
+
+class splitnewlinesTests(unittest.TestCase):
+
+ def test_splitnewlines(self):
+ cases = {b'a\nb\nc\n': [b'a\n', b'b\n', b'c\n'],
+ b'a\nb\nc': [b'a\n', b'b\n', b'c'],
+ b'a\nb\nc\n\n': [b'a\n', b'b\n', b'c\n', b'\n'],
+ b'': [],
+ b'abcabc': [b'abcabc'],
+ }
+ for inp, want in cases.items():
+ self.assertEqual(mdiff.splitnewlines(inp), want)
+
+if __name__ == '__main__':
+ import silenttestrunner
+ silenttestrunner.main(__name__)
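The cases above pin down the contract of mdiff.splitnewlines: split a byte string into lines, keep the trailing newline on every piece, and preserve a missing final newline. A rough pure-Python equivalent reconstructed from those cases (illustrative, not the actual mdiff implementation):

    def splitnewlines_sketch(text):
        # b'a\nb\nc' -> [b'a\n', b'b\n', b'c'];  b'' -> []
        lines = [l + b'\n' for l in text.split(b'\n')]
        if lines:
            if lines[-1] == b'\n':
                lines.pop()                  # input ended with a newline
            else:
                lines[-1] = lines[-1][:-1]   # drop the newline we added
        return lines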
--- a/tests/test-merge-tools.t Fri Feb 23 17:57:04 2018 -0800
+++ b/tests/test-merge-tools.t Sat Feb 24 17:49:10 2018 -0600
@@ -1059,6 +1059,150 @@
# hg resolve --list
R f
+premerge=keep respects ui.mergemarkers=basic:
+
+ $ beforemerge
+ [merge-tools]
+ false.whatever=
+ true.priority=1
+ true.executable=cat
+ # hg update -C 1
+ $ hg merge -r 4 --config merge-tools.true.premerge=keep --config ui.mergemarkers=basic
+ merging f
+ <<<<<<< working copy
+ revision 1
+ space
+ =======
+ revision 4
+ >>>>>>> merge rev
+ revision 0
+ space
+ revision 4
+ 0 files updated, 1 files merged, 0 files removed, 0 files unresolved
+ (branch merge, don't forget to commit)
+ $ aftermerge
+ # cat f
+ <<<<<<< working copy
+ revision 1
+ space
+ =======
+ revision 4
+ >>>>>>> merge rev
+ # hg stat
+ M f
+ # hg resolve --list
+ R f
+
+premerge=keep ignores ui.mergemarkers=basic if true.mergemarkers=detailed:
+
+ $ beforemerge
+ [merge-tools]
+ false.whatever=
+ true.priority=1
+ true.executable=cat
+ # hg update -C 1
+ $ hg merge -r 4 --config merge-tools.true.premerge=keep \
+ > --config ui.mergemarkers=basic \
+ > --config merge-tools.true.mergemarkers=detailed
+ merging f
+ <<<<<<< working copy: ef83787e2614 - test: revision 1
+ revision 1
+ space
+ =======
+ revision 4
+ >>>>>>> merge rev: 81448d39c9a0 - test: revision 4
+ revision 0
+ space
+ revision 4
+ 0 files updated, 1 files merged, 0 files removed, 0 files unresolved
+ (branch merge, don't forget to commit)
+ $ aftermerge
+ # cat f
+ <<<<<<< working copy: ef83787e2614 - test: revision 1
+ revision 1
+ space
+ =======
+ revision 4
+ >>>>>>> merge rev: 81448d39c9a0 - test: revision 4
+ # hg stat
+ M f
+ # hg resolve --list
+ R f
+
+premerge=keep respects ui.mergemarkertemplate instead of
+true.mergemarkertemplate if true.mergemarkers=basic:
+
+ $ beforemerge
+ [merge-tools]
+ false.whatever=
+ true.priority=1
+ true.executable=cat
+ # hg update -C 1
+ $ hg merge -r 4 --config merge-tools.true.premerge=keep \
+ > --config ui.mergemarkertemplate='uitmpl {rev}' \
+ > --config merge-tools.true.mergemarkertemplate='tooltmpl {short(node)}'
+ merging f
+ <<<<<<< working copy: uitmpl 1
+ revision 1
+ space
+ =======
+ revision 4
+ >>>>>>> merge rev: uitmpl 4
+ revision 0
+ space
+ revision 4
+ 0 files updated, 1 files merged, 0 files removed, 0 files unresolved
+ (branch merge, don't forget to commit)
+ $ aftermerge
+ # cat f
+ <<<<<<< working copy: uitmpl 1
+ revision 1
+ space
+ =======
+ revision 4
+ >>>>>>> merge rev: uitmpl 4
+ # hg stat
+ M f
+ # hg resolve --list
+ R f
+
+premerge=keep respects true.mergemarkertemplate instead of
+ui.mergemarkertemplate if true.mergemarkers=detailed:
+
+ $ beforemerge
+ [merge-tools]
+ false.whatever=
+ true.priority=1
+ true.executable=cat
+ # hg update -C 1
+ $ hg merge -r 4 --config merge-tools.true.premerge=keep \
+ > --config ui.mergemarkertemplate='uitmpl {rev}' \
+ > --config merge-tools.true.mergemarkertemplate='tooltmpl {short(node)}' \
+ > --config merge-tools.true.mergemarkers=detailed
+ merging f
+ <<<<<<< working copy: tooltmpl ef83787e2614
+ revision 1
+ space
+ =======
+ revision 4
+ >>>>>>> merge rev: tooltmpl 81448d39c9a0
+ revision 0
+ space
+ revision 4
+ 0 files updated, 1 files merged, 0 files removed, 0 files unresolved
+ (branch merge, don't forget to commit)
+ $ aftermerge
+ # cat f
+ <<<<<<< working copy: tooltmpl ef83787e2614
+ revision 1
+ space
+ =======
+ revision 4
+ >>>>>>> merge rev: tooltmpl 81448d39c9a0
+ # hg stat
+ M f
+ # hg resolve --list
+ R f
Tool execution
@@ -1190,6 +1334,142 @@
# hg resolve --list
R f
+Merge using a tool that supports labellocal, labelother, and labelbase, checking
+that they're quoted properly as well. This is using the default 'basic'
+mergemarkers even though ui.mergemarkers is 'detailed', so it's ignoring both
+mergemarkertemplate settings:
+
+ $ beforemerge
+ [merge-tools]
+ false.whatever=
+ true.priority=1
+ true.executable=cat
+ # hg update -C 1
+ $ cat <<EOF > printargs_merge_tool
+ > while test \$# -gt 0; do echo arg: \"\$1\"; shift; done
+ > EOF
+ $ hg --config merge-tools.true.executable='sh' \
+ > --config merge-tools.true.args='./printargs_merge_tool ll:$labellocal lo: $labelother lb:$labelbase": "$base' \
+ > --config merge-tools.true.mergemarkertemplate='tooltmpl {short(node)}' \
+ > --config ui.mergemarkertemplate='uitmpl {rev}' \
+ > --config ui.mergemarkers=detailed \
+ > merge -r 2
+ merging f
+ arg: "ll:working copy"
+ arg: "lo:"
+ arg: "merge rev"
+ arg: "lb:base: */f~base.*" (glob)
+ 0 files updated, 1 files merged, 0 files removed, 0 files unresolved
+ (branch merge, don't forget to commit)
+ $ rm -f 'printargs_merge_tool'
+
+Merge using a tool that supports labellocal, labelother, and labelbase, checking
+that they're quoted properly as well. This is using 'detailed' mergemarkers,
+even though ui.mergemarkers is 'basic', and using the tool's
+mergemarkertemplate:
+
+ $ beforemerge
+ [merge-tools]
+ false.whatever=
+ true.priority=1
+ true.executable=cat
+ # hg update -C 1
+ $ cat <<EOF > printargs_merge_tool
+ > while test \$# -gt 0; do echo arg: \"\$1\"; shift; done
+ > EOF
+ $ hg --config merge-tools.true.executable='sh' \
+ > --config merge-tools.true.args='./printargs_merge_tool ll:$labellocal lo: $labelother lb:$labelbase": "$base' \
+ > --config merge-tools.true.mergemarkers=detailed \
+ > --config merge-tools.true.mergemarkertemplate='tooltmpl {short(node)}' \
+ > --config ui.mergemarkertemplate='uitmpl {rev}' \
+ > --config ui.mergemarkers=basic \
+ > merge -r 2
+ merging f
+ arg: "ll:working copy: tooltmpl ef83787e2614"
+ arg: "lo:"
+ arg: "merge rev: tooltmpl 0185f4e0cf02"
+ arg: "lb:base: */f~base.*" (glob)
+ 0 files updated, 1 files merged, 0 files removed, 0 files unresolved
+ (branch merge, don't forget to commit)
+ $ rm -f 'printargs_merge_tool'
+
+The merge tool still gets labellocal and labelother as 'basic' even when
+premerge=keep is used and has 'detailed' markers:
+
+ $ beforemerge
+ [merge-tools]
+ false.whatever=
+ true.priority=1
+ true.executable=cat
+ # hg update -C 1
+ $ cat <<EOF > mytool
+ > echo labellocal: \"\$1\"
+ > echo labelother: \"\$2\"
+ > echo "output (arg)": \"\$3\"
+ > echo "output (contents)":
+ > cat "\$3"
+ > EOF
+ $ hg --config merge-tools.true.executable='sh' \
+ > --config merge-tools.true.args='mytool $labellocal $labelother $output' \
+ > --config merge-tools.true.premerge=keep \
+ > --config merge-tools.true.mergemarkertemplate='tooltmpl {short(node)}' \
+ > --config ui.mergemarkertemplate='uitmpl {rev}' \
+ > --config ui.mergemarkers=detailed \
+ > merge -r 2
+ merging f
+ labellocal: "working copy"
+ labelother: "merge rev"
+ output (arg): "$TESTTMP/f"
+ output (contents):
+ <<<<<<< working copy: uitmpl 1
+ revision 1
+ =======
+ revision 2
+ >>>>>>> merge rev: uitmpl 2
+ space
+ 0 files updated, 1 files merged, 0 files removed, 0 files unresolved
+ (branch merge, don't forget to commit)
+ $ rm -f 'mytool'
+
+premerge=keep uses the *tool's* mergemarkertemplate if tool's
+mergemarkers=detailed; labellocal and labelother also use the tool's template
+
+ $ beforemerge
+ [merge-tools]
+ false.whatever=
+ true.priority=1
+ true.executable=cat
+ # hg update -C 1
+ $ cat <<EOF > mytool
+ > echo labellocal: \"\$1\"
+ > echo labelother: \"\$2\"
+ > echo "output (arg)": \"\$3\"
+ > echo "output (contents)":
+ > cat "\$3"
+ > EOF
+ $ hg --config merge-tools.true.executable='sh' \
+ > --config merge-tools.true.args='mytool $labellocal $labelother $output' \
+ > --config merge-tools.true.premerge=keep \
+ > --config merge-tools.true.mergemarkers=detailed \
+ > --config merge-tools.true.mergemarkertemplate='tooltmpl {short(node)}' \
+ > --config ui.mergemarkertemplate='uitmpl {rev}' \
+ > --config ui.mergemarkers=detailed \
+ > merge -r 2
+ merging f
+ labellocal: "working copy: tooltmpl ef83787e2614"
+ labelother: "merge rev: tooltmpl 0185f4e0cf02"
+ output (arg): "$TESTTMP/f"
+ output (contents):
+ <<<<<<< working copy: tooltmpl ef83787e2614
+ revision 1
+ =======
+ revision 2
+ >>>>>>> merge rev: tooltmpl 0185f4e0cf02
+ space
+ 0 files updated, 1 files merged, 0 files removed, 0 files unresolved
+ (branch merge, don't forget to commit)
+ $ rm -f 'mytool'
+
Issue3581: Merging a filename that needs to be quoted
(This test doesn't work on Windows filesystems even on Linux, so check
for Unix-like permission)
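Taken together, the premerge=keep and label cases added above pin down a precedence rule: the per-tool mergemarkers setting decides whether detailed markers are used, and when the tool asks for detailed markers its own mergemarkertemplate wins over ui.mergemarkertemplate; otherwise ui.mergemarkers/ui.mergemarkertemplate apply to the premerge markers, while the labels handed to the tool stay basic. A compact restatement of that observed behaviour (a paraphrase for readability, not Mercurial's implementation; template defaults are assumed to be filled in by the caller):

    def premerge_markers(ui_markers, ui_template, tool_markers, tool_template):
        # Conflict markers written by the internal premerge step.
        if tool_markers == 'detailed':
            return 'detailed', (tool_template or ui_template)
        if ui_markers == 'detailed':
            return 'detailed', ui_template
        return 'basic', None

    def tool_labels(ui_template, tool_markers, tool_template):
        # $labellocal / $labelother passed to the external tool stay
        # 'basic' unless the tool itself asks for detailed markers.
        if tool_markers == 'detailed':
            return tool_template or ui_template
        return None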
--- a/tests/test-mq-eol.t Fri Feb 23 17:57:04 2018 -0800
+++ b/tests/test-mq-eol.t Sat Feb 24 17:49:10 2018 -0600
@@ -10,29 +10,29 @@
> EOF
$ cat > makepatch.py <<EOF
- > f = file('eol.diff', 'wb')
+ > f = open('eol.diff', 'wb')
> w = f.write
- > w('test message\n')
- > w('diff --git a/a b/a\n')
- > w('--- a/a\n')
- > w('+++ b/a\n')
- > w('@@ -1,5 +1,5 @@\n')
- > w(' a\n')
- > w('-b\r\n')
- > w('+y\r\n')
- > w(' c\r\n')
- > w(' d\n')
- > w('-e\n')
- > w('\ No newline at end of file\n')
- > w('+z\r\n')
- > w('\ No newline at end of file\r\n')
+ > w(b'test message\n')
+ > w(b'diff --git a/a b/a\n')
+ > w(b'--- a/a\n')
+ > w(b'+++ b/a\n')
+ > w(b'@@ -1,5 +1,5 @@\n')
+ > w(b' a\n')
+ > w(b'-b\r\n')
+ > w(b'+y\r\n')
+ > w(b' c\r\n')
+ > w(b' d\n')
+ > w(b'-e\n')
+ > w(b'\ No newline at end of file\n')
+ > w(b'+z\r\n')
+ > w(b'\ No newline at end of file\r\n')
> EOF
$ cat > cateol.py <<EOF
> import sys
- > for line in file(sys.argv[1], 'rb'):
- > line = line.replace('\r', '<CR>')
- > line = line.replace('\n', '<LF>')
+ > for line in open(sys.argv[1], 'rb'):
+ > line = line.replace(b'\r', b'<CR>')
+ > line = line.replace(b'\n', b'<LF>')
> print(line)
> EOF
@@ -44,7 +44,7 @@
Test different --eol values
- $ $PYTHON -c 'file("a", "wb").write("a\nb\nc\nd\ne")'
+ $ $PYTHON -c 'open("a", "wb").write(b"a\nb\nc\nd\ne")'
$ hg ci -Am adda
adding .hgignore
adding a
@@ -152,15 +152,15 @@
$ hg init testeol
$ cd testeol
- $ $PYTHON -c "file('a', 'wb').write('1\r\n2\r\n3\r\n4')"
+ $ $PYTHON -c "open('a', 'wb').write(b'1\r\n2\r\n3\r\n4')"
$ hg ci -Am adda
adding a
- $ $PYTHON -c "file('a', 'wb').write('1\r\n2\r\n33\r\n4')"
+ $ $PYTHON -c "open('a', 'wb').write(b'1\r\n2\r\n33\r\n4')"
$ hg qnew patch1
$ hg qpop
popping patch1
patch queue now empty
- $ $PYTHON -c "file('a', 'wb').write('1\r\n22\r\n33\r\n4')"
+ $ $PYTHON -c "open('a', 'wb').write(b'1\r\n22\r\n33\r\n4')"
$ hg ci -m changea
$ hg --config 'patch.eol=LF' qpush
--- a/tests/test-mq-missingfiles.t Fri Feb 23 17:57:04 2018 -0800
+++ b/tests/test-mq-missingfiles.t Sat Feb 24 17:49:10 2018 -0600
@@ -9,8 +9,8 @@
> args = sys.argv[2:]
> assert (len(args) % 2) == 0
>
- > f = file(path, 'wb')
- > for i in xrange(len(args)/2):
+ > f = open(path, 'wb')
+ > for i in range(len(args) // 2):
> count, s = args[2*i:2*i+2]
> count = int(count)
> s = s.decode('string_escape')
--- a/tests/test-mq-qimport.t Fri Feb 23 17:57:04 2018 -0800
+++ b/tests/test-mq-qimport.t Sat Feb 24 17:49:10 2018 -0600
@@ -6,8 +6,8 @@
> args = sys.argv[2:]
> assert (len(args) % 2) == 0
>
- > f = file(path, 'wb')
- > for i in xrange(len(args)/2):
+ > f = open(path, 'wb')
+ > for i in range(len(args) // 2):
> count, s = args[2*i:2*i+2]
> count = int(count)
> s = s.decode('string_escape')
--- a/tests/test-mq-qrefresh-replace-log-message.t Fri Feb 23 17:57:04 2018 -0800
+++ b/tests/test-mq-qrefresh-replace-log-message.t Sat Feb 24 17:49:10 2018 -0600
@@ -119,7 +119,7 @@
> def reposetup(ui, repo):
> class commitfailure(repo.__class__):
> def commit(self, *args, **kwargs):
- > raise error.Abort('emulating unexpected abort')
+ > raise error.Abort(b'emulating unexpected abort')
> repo.__class__ = commitfailure
> EOF
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/tests/test-narrow-acl.t Sat Feb 24 17:49:10 2018 -0600
@@ -0,0 +1,42 @@
+Test that a server-side narrow ACL limits what a plain clone receives
+ $ . "$TESTDIR/narrow-library.sh"
+
+ $ hg init master
+ $ cd master
+
+ $ for x in `$TESTDIR/seq.py 3`; do
+ > echo $x > "f$x"
+ > hg add "f$x"
+ > hg commit -m "Add $x"
+ > done
+ $ cat >> .hg/hgrc << EOF
+ > [narrowhgacl]
+ > default.includes=f1 f2
+ > EOF
+ $ hg serve -a localhost -p $HGPORT1 -d --pid-file=hg.pid
+ $ cat hg.pid >> "$DAEMON_PIDS"
+
+ $ cd ..
+ $ hg clone http://localhost:$HGPORT1 narrowclone1
+ requesting all changes
+ adding changesets
+ adding manifests
+ adding file changes
+ added 3 changesets with 2 changes to 2 files
+ new changesets * (glob)
+ updating to branch default
+ 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
+
+The clone directory should only contain f1 and f2
+ $ ls -1 narrowclone1 | sort
+ f1
+ f2
+
+Requirements should contain narrowhg
+ $ cat narrowclone1/.hg/requires | grep narrowhg
+ narrowhg-experimental
+
+NarrowHG should track f1 and f2
+ $ hg -R narrowclone1 tracked
+ I path:f1
+ I path:f2
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/tests/test-narrow-archive.t Sat Feb 24 17:49:10 2018 -0600
@@ -0,0 +1,32 @@
+Make a narrow clone then archive it
+ $ . "$TESTDIR/narrow-library.sh"
+
+ $ hg init master
+ $ cd master
+
+ $ for x in `$TESTDIR/seq.py 3`; do
+ > echo $x > "f$x"
+ > hg add "f$x"
+ > hg commit -m "Add $x"
+ > done
+
+ $ hg serve -a localhost -p $HGPORT1 -d --pid-file=hg.pid
+ $ cat hg.pid >> "$DAEMON_PIDS"
+
+ $ cd ..
+ $ hg clone --narrow --include f1 --include f2 http://localhost:$HGPORT1/ narrowclone1
+ requesting all changes
+ adding changesets
+ adding manifests
+ adding file changes
+ added 3 changesets with 2 changes to 2 files
+ new changesets * (glob)
+ updating to branch default
+ 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
+
+The tar should only contain f1 and f2
+ $ cd narrowclone1
+ $ hg archive -t tgz repo.tgz
+ $ tar tfz repo.tgz
+ repo/f1
+ repo/f2
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/tests/test-narrow-clone-no-ellipsis.t Sat Feb 24 17:49:10 2018 -0600
@@ -0,0 +1,130 @@
+ $ . "$TESTDIR/narrow-library.sh"
+
+ $ hg init master
+ $ cd master
+ $ mkdir dir
+ $ mkdir dir/src
+ $ cd dir/src
+ $ for x in `$TESTDIR/seq.py 20`; do echo $x > "f$x"; hg add "f$x"; hg commit -m "Commit src $x"; done
+ $ cd ..
+ $ mkdir tests
+ $ cd tests
+ $ for x in `$TESTDIR/seq.py 20`; do echo $x > "t$x"; hg add "t$x"; hg commit -m "Commit test $x"; done
+ $ cd ../../..
+
+narrow clone a file, f10
+
+ $ hg clone --narrow ssh://user@dummy/master narrow --noupdate --include "dir/src/f10"
+ requesting all changes
+ adding changesets
+ adding manifests
+ adding file changes
+ added 40 changesets with 1 changes to 1 files
+ new changesets *:* (glob)
+ $ cd narrow
+ $ cat .hg/requires | grep -v generaldelta
+ dotencode
+ fncache
+ narrowhg-experimental
+ revlogv1
+ store
+
+ $ cat .hg/narrowspec
+ [includes]
+ path:dir/src/f10
+ [excludes]
+ $ hg update
+ 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
+ $ find * | sort
+ dir
+ dir/src
+ dir/src/f10
+ $ cat dir/src/f10
+ 10
+
+ $ cd ..
+
+narrow clone a directory, tests/, except tests/t19
+
+ $ hg clone --narrow ssh://user@dummy/master narrowdir --noupdate --include "dir/tests/" --exclude "dir/tests/t19"
+ requesting all changes
+ adding changesets
+ adding manifests
+ adding file changes
+ added 40 changesets with 19 changes to 19 files
+ new changesets *:* (glob)
+ $ cd narrowdir
+ $ cat .hg/narrowspec
+ [includes]
+ path:dir/tests
+ [excludes]
+ path:dir/tests/t19
+ $ hg update
+ 19 files updated, 0 files merged, 0 files removed, 0 files unresolved
+ $ find * | sort
+ dir
+ dir/tests
+ dir/tests/t1
+ dir/tests/t10
+ dir/tests/t11
+ dir/tests/t12
+ dir/tests/t13
+ dir/tests/t14
+ dir/tests/t15
+ dir/tests/t16
+ dir/tests/t17
+ dir/tests/t18
+ dir/tests/t2
+ dir/tests/t20
+ dir/tests/t3
+ dir/tests/t4
+ dir/tests/t5
+ dir/tests/t6
+ dir/tests/t7
+ dir/tests/t8
+ dir/tests/t9
+
+ $ cd ..
+
+narrow clone everything but a directory (tests/)
+
+ $ hg clone --narrow ssh://user@dummy/master narrowroot --noupdate --exclude "dir/tests"
+ requesting all changes
+ adding changesets
+ adding manifests
+ adding file changes
+ added 40 changesets with 20 changes to 20 files
+ new changesets *:* (glob)
+ $ cd narrowroot
+ $ cat .hg/narrowspec
+ [includes]
+ path:.
+ [excludes]
+ path:dir/tests
+ $ hg update
+ 20 files updated, 0 files merged, 0 files removed, 0 files unresolved
+ $ find * | sort
+ dir
+ dir/src
+ dir/src/f1
+ dir/src/f10
+ dir/src/f11
+ dir/src/f12
+ dir/src/f13
+ dir/src/f14
+ dir/src/f15
+ dir/src/f16
+ dir/src/f17
+ dir/src/f18
+ dir/src/f19
+ dir/src/f2
+ dir/src/f20
+ dir/src/f3
+ dir/src/f4
+ dir/src/f5
+ dir/src/f6
+ dir/src/f7
+ dir/src/f8
+ dir/src/f9
+
+ $ cd ..
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/tests/test-narrow-clone-non-narrow-server.t Sat Feb 24 17:49:10 2018 -0600
@@ -0,0 +1,53 @@
+Test attempting a narrow clone against a server that doesn't support narrowhg.
+
+ $ . "$TESTDIR/narrow-library.sh"
+
+ $ hg init master
+ $ cd master
+
+ $ for x in `$TESTDIR/seq.py 10`; do
+ > echo $x > "f$x"
+ > hg add "f$x"
+ > hg commit -m "Add $x"
+ > done
+
+ $ hg serve -a localhost -p $HGPORT1 --config extensions.narrow=! -d \
+ > --pid-file=hg.pid
+ $ cat hg.pid >> "$DAEMON_PIDS"
+ $ hg serve -a localhost -p $HGPORT2 -d --pid-file=hg.pid
+ $ cat hg.pid >> "$DAEMON_PIDS"
+
+Verify that narrow is advertised in the bundle2 capabilities:
+ $ echo hello | hg -R . serve --stdio | \
+ > python -c "import sys, urllib; print urllib.unquote_plus(list(sys.stdin)[1])" | grep narrow
+ narrow=v0
+
+ $ cd ..
+
+ $ hg clone --narrow --include f1 http://localhost:$HGPORT1/ narrowclone
+ requesting all changes
+ abort: server doesn't support narrow clones
+ [255]
+
+Make a narrow clone (via HGPORT2), then try to narrow and widen
+into it (from HGPORT1) to prove that narrowing is fine and widening fails
+gracefully:
+ $ hg clone -r 0 --narrow --include f1 http://localhost:$HGPORT2/ narrowclone
+ adding changesets
+ adding manifests
+ adding file changes
+ added 1 changesets with 1 changes to 1 files
+ new changesets * (glob)
+ updating to branch default
+ 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
+ $ cd narrowclone
+ $ hg tracked --addexclude f2 http://localhost:$HGPORT1/
+ comparing with http://localhost:$HGPORT1/
+ searching for changes
+ looking for local changes to affected paths
+ $ hg tracked --addinclude f1 http://localhost:$HGPORT1/
+ comparing with http://localhost:$HGPORT1/
+ searching for changes
+ no changes found
+ abort: server doesn't support narrow clones
+ [255]
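The capability check earlier in this test uses a Python 2 one-liner (print statement, urllib.unquote_plus). A roughly equivalent snippet that also runs on Python 3, for reference (same idea: URL-decode the second line of the handshake and look for the narrow capability):

    import sys
    from urllib.parse import unquote_plus

    # Decode the capabilities line (second line of the stdio handshake)
    # so that "narrow=v0" can be grepped for, as the test does above.
    lines = list(sys.stdin)
    print(unquote_plus(lines[1]))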
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/tests/test-narrow-clone-nonlinear.t Sat Feb 24 17:49:10 2018 -0600
@@ -0,0 +1,148 @@
+Testing narrow clones when changesets modifying a matching file exist on
+multiple branches
+
+ $ . "$TESTDIR/narrow-library.sh"
+
+ $ hg init master
+ $ cd master
+ $ cat >> .hg/hgrc <<EOF
+ > [narrow]
+ > serveellipses=True
+ > EOF
+
+ $ hg branch default
+ marked working directory as branch default
+ (branches are permanent and global, did you want a bookmark?)
+ $ for x in `$TESTDIR/seq.py 10`; do
+ > echo $x > "f$x"
+ > hg add "f$x"
+ > hg commit -m "Add $x"
+ > done
+
+ $ hg branch release-v1
+ marked working directory as branch release-v1
+ (branches are permanent and global, did you want a bookmark?)
+ $ hg commit -m "Start release for v1"
+
+ $ hg update default
+ 0 files updated, 0 files merged, 0 files removed, 0 files unresolved
+ $ for x in `$TESTDIR/seq.py 10`; do
+ > echo "$x v2" > "f$x"
+ > hg commit -m "Update $x to v2"
+ > done
+
+ $ hg update release-v1
+ 10 files updated, 0 files merged, 0 files removed, 0 files unresolved
+ $ hg branch release-v1
+ marked working directory as branch release-v1
+ $ for x in `$TESTDIR/seq.py 1 5`; do
+ > echo "$x v1 hotfix" > "f$x"
+ > hg commit -m "Hotfix $x in v1"
+ > done
+
+ $ hg update default
+ 10 files updated, 0 files merged, 0 files removed, 0 files unresolved
+ $ hg branch release-v2
+ marked working directory as branch release-v2
+ $ hg commit -m "Start release for v2"
+
+ $ hg update default
+ 0 files updated, 0 files merged, 0 files removed, 0 files unresolved
+ $ hg branch default
+ marked working directory as branch default
+ $ for x in `$TESTDIR/seq.py 10`; do
+ > echo "$x v3" > "f$x"
+ > hg commit -m "Update $x to v3"
+ > done
+
+ $ hg update release-v2
+ 10 files updated, 0 files merged, 0 files removed, 0 files unresolved
+ $ hg branch release-v2
+ marked working directory as branch release-v2
+ $ for x in `$TESTDIR/seq.py 4 9`; do
+ > echo "$x v2 hotfix" > "f$x"
+ > hg commit -m "Hotfix $x in v2"
+ > done
+
+ $ hg heads -T '{rev} <- {p1rev} ({branch}): {desc}\n'
+ 42 <- 41 (release-v2): Hotfix 9 in v2
+ 36 <- 35 (default): Update 10 to v3
+ 25 <- 24 (release-v1): Hotfix 5 in v1
+
+ $ cd ..
+
+We now have 3 branches: default, which has v3 of all files, release-v1 which
+has v1 of all files, and release-v2 with v2 of all files.
+
+Narrow clone which should get all branches
+
+ $ hg clone --narrow ssh://user@dummy/master narrow --include "f5"
+ requesting all changes
+ adding changesets
+ adding manifests
+ adding file changes
+ added 12 changesets with 5 changes to 1 files (+2 heads)
+ new changesets *:* (glob)
+ updating to branch default
+ 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
+ $ cd narrow
+ $ hg log -G -T "{if(ellipsis, '...')}{node|short} ({branch}): {desc}\n"
+ o ...031f516143fe (release-v2): Hotfix 9 in v2
+ |
+ o 9cd7f7bb9ca1 (release-v2): Hotfix 5 in v2
+ |
+ o ...37bbc88f3ef0 (release-v2): Hotfix 4 in v2
+ |
+ | @ ...dae2f368ca07 (default): Update 10 to v3
+ | |
+ | o 9c224e89cb31 (default): Update 5 to v3
+ | |
+ | o ...04fb59c7c9dc (default): Update 4 to v3
+ |/
+ | o b2253e82401f (release-v1): Hotfix 5 in v1
+ | |
+ | o ...960ac37d74fd (release-v1): Hotfix 4 in v1
+ | |
+ o | 986298e3f347 (default): Update 5 to v2
+ | |
+ o | ...75d539c667ec (default): Update 4 to v2
+ |/
+ o 04c71bd5707f (default): Add 5
+ |
+ o ...881b3891d041 (default): Add 4
+
+
+Narrow clone the first file, hitting an edge condition where unaligned
+changeset and manifest revnums cross branches.
+
+ $ hg clone --narrow ssh://user@dummy/master narrow --include "f1"
+ requesting all changes
+ adding changesets
+ adding manifests
+ adding file changes
+ added 10 changesets with 4 changes to 1 files (+2 heads)
+ new changesets *:* (glob)
+ updating to branch default
+ 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
+ $ cd narrow
+ $ hg log -G -T "{if(ellipsis, '...')}{node|short} ({branch}): {desc}\n"
+ o ...031f516143fe (release-v2): Hotfix 9 in v2
+ |
+ | @ ...dae2f368ca07 (default): Update 10 to v3
+ | |
+ | o 1f5d184b8e96 (default): Update 1 to v3
+ |/
+ | o ...b2253e82401f (release-v1): Hotfix 5 in v1
+ | |
+ | o 133502f6b7e5 (release-v1): Hotfix 1 in v1
+ | |
+ o | ...79165c83d644 (default): Update 10 to v2
+ | |
+ o | c7b7a5f2f088 (default): Update 1 to v2
+ | |
+ | o ...f0531a3db7a9 (release-v1): Start release for v1
+ |/
+ o ...6a3f0f0abef3 (default): Add 10
+ |
+ o e012ac15eaaa (default): Add 1
+
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/tests/test-narrow-clone.t Sat Feb 24 17:49:10 2018 -0600
@@ -0,0 +1,225 @@
+ $ . "$TESTDIR/narrow-library.sh"
+
+ $ hg init master
+ $ cd master
+ $ cat >> .hg/hgrc <<EOF
+ > [narrow]
+ > serveellipses=True
+ > EOF
+ $ mkdir dir
+ $ mkdir dir/src
+ $ cd dir/src
+ $ for x in `$TESTDIR/seq.py 20`; do echo $x > "f$x"; hg add "f$x"; hg commit -m "Commit src $x"; done
+ $ cd ..
+ $ mkdir tests
+ $ cd tests
+ $ for x in `$TESTDIR/seq.py 20`; do echo $x > "t$x"; hg add "t$x"; hg commit -m "Commit test $x"; done
+ $ cd ../../..
+
+narrow clone a file, f10
+
+ $ hg clone --narrow ssh://user@dummy/master narrow --noupdate --include "dir/src/f10"
+ requesting all changes
+ adding changesets
+ adding manifests
+ adding file changes
+ added 3 changesets with 1 changes to 1 files
+ new changesets *:* (glob)
+ $ cd narrow
+ $ cat .hg/requires | grep -v generaldelta
+ dotencode
+ fncache
+ narrowhg-experimental
+ revlogv1
+ store
+
+ $ cat .hg/narrowspec
+ [includes]
+ path:dir/src/f10
+ [excludes]
+ $ hg tracked
+ I path:dir/src/f10
+ $ hg update
+ 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
+ $ find * | sort
+ dir
+ dir/src
+ dir/src/f10
+ $ cat dir/src/f10
+ 10
+
+ $ cd ..
+
+narrow clone with a newline should fail
+
+ $ hg clone --narrow ssh://user@dummy/master narrow_fail --noupdate --include 'dir/src/f10
+ > '
+ requesting all changes
+ abort: newlines are not allowed in narrowspec paths
+ [255]
+
+narrow clone a directory, tests/, except tests/t19
+
+ $ hg clone --narrow ssh://user@dummy/master narrowdir --noupdate --include "dir/tests/" --exclude "dir/tests/t19"
+ requesting all changes
+ adding changesets
+ adding manifests
+ adding file changes
+ added 21 changesets with 19 changes to 19 files
+ new changesets *:* (glob)
+ $ cd narrowdir
+ $ cat .hg/narrowspec
+ [includes]
+ path:dir/tests
+ [excludes]
+ path:dir/tests/t19
+ $ hg tracked
+ I path:dir/tests
+ X path:dir/tests/t19
+ $ hg update
+ 19 files updated, 0 files merged, 0 files removed, 0 files unresolved
+ $ find * | sort
+ dir
+ dir/tests
+ dir/tests/t1
+ dir/tests/t10
+ dir/tests/t11
+ dir/tests/t12
+ dir/tests/t13
+ dir/tests/t14
+ dir/tests/t15
+ dir/tests/t16
+ dir/tests/t17
+ dir/tests/t18
+ dir/tests/t2
+ dir/tests/t20
+ dir/tests/t3
+ dir/tests/t4
+ dir/tests/t5
+ dir/tests/t6
+ dir/tests/t7
+ dir/tests/t8
+ dir/tests/t9
+
+ $ cd ..
+
+narrow clone everything but a directory (tests/)
+
+ $ hg clone --narrow ssh://user@dummy/master narrowroot --noupdate --exclude "dir/tests"
+ requesting all changes
+ adding changesets
+ adding manifests
+ adding file changes
+ added 21 changesets with 20 changes to 20 files
+ new changesets *:* (glob)
+ $ cd narrowroot
+ $ cat .hg/narrowspec
+ [includes]
+ path:.
+ [excludes]
+ path:dir/tests
+ $ hg tracked
+ I path:.
+ X path:dir/tests
+ $ hg update
+ 20 files updated, 0 files merged, 0 files removed, 0 files unresolved
+ $ find * | sort
+ dir
+ dir/src
+ dir/src/f1
+ dir/src/f10
+ dir/src/f11
+ dir/src/f12
+ dir/src/f13
+ dir/src/f14
+ dir/src/f15
+ dir/src/f16
+ dir/src/f17
+ dir/src/f18
+ dir/src/f19
+ dir/src/f2
+ dir/src/f20
+ dir/src/f3
+ dir/src/f4
+ dir/src/f5
+ dir/src/f6
+ dir/src/f7
+ dir/src/f8
+ dir/src/f9
+
+ $ cd ..
+
+narrow clone no paths at all
+
+ $ hg clone --narrow ssh://user@dummy/master narrowempty --noupdate
+ requesting all changes
+ adding changesets
+ adding manifests
+ adding file changes
+ added 1 changesets with 0 changes to 0 files
+ new changesets * (glob)
+ $ cd narrowempty
+ $ hg tracked
+ $ hg update
+ 0 files updated, 0 files merged, 0 files removed, 0 files unresolved
+ $ ls
+
+ $ cd ..
+
+simple clone
+ $ hg clone ssh://user@dummy/master simpleclone
+ requesting all changes
+ adding changesets
+ adding manifests
+ adding file changes
+ added 40 changesets with 40 changes to 40 files
+ new changesets * (glob)
+ updating to branch default
+ 40 files updated, 0 files merged, 0 files removed, 0 files unresolved
+ $ cd simpleclone
+ $ find * | sort
+ dir
+ dir/src
+ dir/src/f1
+ dir/src/f10
+ dir/src/f11
+ dir/src/f12
+ dir/src/f13
+ dir/src/f14
+ dir/src/f15
+ dir/src/f16
+ dir/src/f17
+ dir/src/f18
+ dir/src/f19
+ dir/src/f2
+ dir/src/f20
+ dir/src/f3
+ dir/src/f4
+ dir/src/f5
+ dir/src/f6
+ dir/src/f7
+ dir/src/f8
+ dir/src/f9
+ dir/tests
+ dir/tests/t1
+ dir/tests/t10
+ dir/tests/t11
+ dir/tests/t12
+ dir/tests/t13
+ dir/tests/t14
+ dir/tests/t15
+ dir/tests/t16
+ dir/tests/t17
+ dir/tests/t18
+ dir/tests/t19
+ dir/tests/t2
+ dir/tests/t20
+ dir/tests/t3
+ dir/tests/t4
+ dir/tests/t5
+ dir/tests/t6
+ dir/tests/t7
+ dir/tests/t8
+ dir/tests/t9
+
+ $ cd ..
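The clones above exercise the narrowspec matching rule: a path belongs to the narrow clone when it falls under some path: include (with path:. meaning everything, and no includes meaning nothing) and under no exclude. A simplified matcher capturing that rule (prefix matching only; a hypothetical helper, not the extension's matcher):

    def in_narrow(path, includes, excludes):
        def under(p, prefix):
            return prefix == '.' or p == prefix or p.startswith(prefix + '/')
        return (any(under(path, i) for i in includes)
                and not any(under(path, e) for e in excludes))

    # e.g. in_narrow('dir/tests/t19', ['dir/tests'], ['dir/tests/t19']) -> False
    #      in_narrow('dir/src/f10', ['dir/src/f10'], [])                -> True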
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/tests/test-narrow-commit.t Sat Feb 24 17:49:10 2018 -0600
@@ -0,0 +1,102 @@
+#testcases flat tree
+
+ $ . "$TESTDIR/narrow-library.sh"
+
+#if tree
+ $ cat << EOF >> $HGRCPATH
+ > [experimental]
+ > treemanifest = 1
+ > EOF
+#endif
+
+create full repo
+
+ $ hg init master
+ $ cd master
+
+ $ mkdir inside
+ $ echo inside > inside/f1
+ $ mkdir outside
+ $ echo outside > outside/f1
+ $ hg ci -Aqm 'initial'
+
+ $ echo modified > inside/f1
+ $ hg ci -qm 'modify inside'
+
+ $ echo modified > outside/f1
+ $ hg ci -qm 'modify outside'
+
+ $ cd ..
+
+ $ hg clone --narrow ssh://user@dummy/master narrow --include inside
+ requesting all changes
+ adding changesets
+ adding manifests
+ adding file changes
+ added 3 changesets with 2 changes to 1 files
+ new changesets *:* (glob)
+ updating to branch default
+ 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
+ $ cd narrow
+
+ $ hg update -q 0
+
+Can not modify dirstate outside
+
+ $ mkdir outside
+ $ touch outside/f1
+ $ hg debugwalk -I 'relglob:f1'
+ matcher: <includematcher includes='(?:(?:|.*/)f1(?:/|$))'>
+ f inside/f1 inside/f1
+ $ hg add outside/f1
+ abort: cannot track 'outside/f1' - it is outside the narrow clone
+ [255]
+ $ touch outside/f3
+ $ hg add outside/f3
+ abort: cannot track 'outside/f3' - it is outside the narrow clone
+ [255]
+
+But adding a truly excluded file shouldn't count
+
+ $ hg add outside/f3 -X outside/f3
+
+ $ rm -r outside
+
+Can modify dirstate inside
+
+ $ echo modified > inside/f1
+ $ touch inside/f3
+ $ hg add inside/f3
+ $ hg status
+ M inside/f1
+ A inside/f3
+ $ hg revert -qC .
+ $ rm inside/f3
+
+Can commit changes inside. Leaves outside unchanged.
+
+ $ hg update -q 'desc("initial")'
+ $ echo modified2 > inside/f1
+ $ hg manifest --debug
+ 4d6a634d5ba06331a60c29ee0db8412490a54fcd 644 inside/f1
+ 7fb3bb6356d28d4dc352c5ba52d7350a81b6bd46 644 outside/f1 (flat !)
+ d0f2f706468ab0e8bec7af87446835fb1b13511b 755 d outside/ (tree !)
+ $ hg commit -m 'modify inside/f1'
+ created new head
+ $ hg files -r .
+ inside/f1
+ outside/f1 (flat !)
+ outside/ (tree !)
+ $ hg manifest --debug
+ 3f4197b4a11b9016e77ebc47fe566944885fd11b 644 inside/f1
+ 7fb3bb6356d28d4dc352c5ba52d7350a81b6bd46 644 outside/f1 (flat !)
+ d0f2f706468ab0e8bec7af87446835fb1b13511b 755 d outside/ (tree !)
+Some filesystems (notably FAT/exFAT) only store timestamps with 2
+seconds of precision, so by sleeping for 3 seconds, we can ensure that
+the timestamps of files stored by dirstate will appear older than the
+dirstate file, and therefore we'll be able to get stable output from
+debugdirstate. If we don't do this, the test can be slightly flaky.
+ $ sleep 3
+ $ hg status
+ $ hg debugdirstate --nodates
+ n 644 10 set inside/f1
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/tests/test-narrow-copies.t Sat Feb 24 17:49:10 2018 -0600
@@ -0,0 +1,57 @@
+
+ $ . "$TESTDIR/narrow-library.sh"
+
+create full repo
+
+ $ hg init master
+ $ cd master
+
+ $ mkdir inside
+ $ echo inside > inside/f1
+ $ mkdir outside
+ $ echo outside > outside/f2
+ $ hg ci -Aqm 'initial'
+
+ $ hg mv outside/f2 inside/f2
+ $ hg ci -qm 'move f2 from outside'
+
+ $ echo modified > inside/f2
+ $ hg ci -qm 'modify inside/f2'
+
+ $ cd ..
+
+ $ hg clone --narrow ssh://user@dummy/master narrow --include inside
+ requesting all changes
+ adding changesets
+ adding manifests
+ adding file changes
+ added 3 changesets with 3 changes to 2 files
+ new changesets *:* (glob)
+ updating to branch default
+ 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
+ $ cd narrow
+
+ $ hg co 'desc("move f2")'
+ 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
+ $ hg status
+ $ hg diff
+ $ hg diff --change . --git
+ diff --git a/inside/f2 b/inside/f2
+ new file mode 100644
+ --- /dev/null
+ +++ b/inside/f2
+ @@ -0,0 +1,1 @@
+ +outside
+
+ $ hg log --follow inside/f2 -r tip
+ changeset: 2:bcfb756e0ca9
+ tag: tip
+ user: test
+ date: Thu Jan 01 00:00:00 1970 +0000
+ summary: modify inside/f2
+
+ changeset: 1:5a016133b2bb
+ user: test
+ date: Thu Jan 01 00:00:00 1970 +0000
+ summary: move f2 from outside
+
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/tests/test-narrow-debugcommands.t Sat Feb 24 17:49:10 2018 -0600
@@ -0,0 +1,43 @@
+ $ . "$TESTDIR/narrow-library.sh"
+ $ hg init repo
+ $ cd repo
+ $ cat << EOF > .hg/narrowspec
+ > [includes]
+ > path:foo
+ > [excludes]
+ > EOF
+ $ echo treemanifest >> .hg/requires
+ $ echo narrowhg-experimental >> .hg/requires
+ $ mkdir -p foo/bar
+ $ echo b > foo/f
+ $ echo c > foo/bar/f
+ $ hg commit -Am hi
+ adding foo/bar/f
+ adding foo/f
+ $ hg debugindex -m
+ rev offset length delta linkrev nodeid p1 p2
+ 0 0 47 -1 0 14a5d056d75a 000000000000 000000000000
+ $ hg debugindex --dir foo
+ rev offset length delta linkrev nodeid p1 p2
+ 0 0 77 -1 0 e635c7857aef 000000000000 000000000000
+ $ hg debugindex --dir foo/
+ rev offset length delta linkrev nodeid p1 p2
+ 0 0 77 -1 0 e635c7857aef 000000000000 000000000000
+ $ hg debugindex --dir foo/bar
+ rev offset length delta linkrev nodeid p1 p2
+ 0 0 44 -1 0 e091d4224761 000000000000 000000000000
+ $ hg debugindex --dir foo/bar/
+ rev offset length delta linkrev nodeid p1 p2
+ 0 0 44 -1 0 e091d4224761 000000000000 000000000000
+ $ hg debugdata -m 0
+ foo\x00e635c7857aef92ac761ce5741a99da159abbbb24t (esc)
+ $ hg debugdata --dir foo 0
+ bar\x00e091d42247613adff5d41b67f15fe7189ee97b39t (esc)
+ f\x001e88685f5ddec574a34c70af492f95b6debc8741 (esc)
+ $ hg debugdata --dir foo/ 0
+ bar\x00e091d42247613adff5d41b67f15fe7189ee97b39t (esc)
+ f\x001e88685f5ddec574a34c70af492f95b6debc8741 (esc)
+ $ hg debugdata --dir foo/bar 0
+ f\x00149da44f2a4e14f488b7bd4157945a9837408c00 (esc)
+ $ hg debugdata --dir foo/bar/ 0
+ f\x00149da44f2a4e14f488b7bd4157945a9837408c00 (esc)
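The debugdata output above shows the raw (tree)manifest entry format: one <name>\0<40-char hex nodeid><flags> record per line, where a trailing 't' flag marks a sub-directory entry and the plain file carries no flag (the "(esc)" suffix is just the test harness escaping the NUL bytes). A tiny decoder for such a line, for illustration only:

    def parse_manifest_entry(line):
        # line: b'<name>\x00<40 hex nodeid><flags>', e.g.
        #       b'bar\x00e091d42247613adff5d41b67f15fe7189ee97b39t'
        name, rest = line.split(b'\0', 1)
        nodeid, flags = rest[:40], rest[40:]
        return name, nodeid, flags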
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/tests/test-narrow-debugrebuilddirstate.t Sat Feb 24 17:49:10 2018 -0600
@@ -0,0 +1,31 @@
+ $ . "$TESTDIR/narrow-library.sh"
+ $ hg init master
+ $ cd master
+ $ echo treemanifest >> .hg/requires
+ $ echo 'contents of file' > file
+ $ mkdir foo
+ $ echo 'contents of foo/bar' > foo/bar
+ $ hg ci -Am 'some change'
+ adding file
+ adding foo/bar
+
+ $ cd ..
+ $ hg clone --narrow ssh://user@dummy/master copy --include=foo
+ requesting all changes
+ adding changesets
+ adding manifests
+ adding file changes
+ added 1 changesets with 1 changes to 1 files
+ new changesets * (glob)
+ updating to branch default
+ 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
+ $ cd copy
+
+ $ hg debugdirstate
+ n * 20 unset foo/bar (glob)
+ $ mv .hg/dirstate .hg/old_dirstate
+ $ dd bs=40 count=1 if=.hg/old_dirstate of=.hg/dirstate 2>/dev/null
+ $ hg debugdirstate
+ $ hg debugrebuilddirstate
+ $ hg debugdirstate
+ n * * unset foo/bar (glob)
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/tests/test-narrow-exchange-merges.t Sat Feb 24 17:49:10 2018 -0600
@@ -0,0 +1,207 @@
+
+ $ . "$TESTDIR/narrow-library.sh"
+
+create full repo
+
+ $ hg init master
+ $ cd master
+ $ cat >> .hg/hgrc <<EOF
+ > [narrow]
+ > serveellipses=True
+ > EOF
+
+ $ mkdir inside
+ $ echo 1 > inside/f
+ $ hg commit -Aqm 'initial inside'
+
+ $ mkdir outside
+ $ echo 1 > outside/f
+ $ hg commit -Aqm 'initial outside'
+
+ $ echo 2a > outside/f
+ $ hg commit -Aqm 'outside 2a'
+ $ echo 3 > inside/f
+ $ hg commit -Aqm 'inside 3'
+ $ echo 4a > outside/f
+ $ hg commit -Aqm 'outside 4a'
+ $ hg update '.~3'
+ 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
+
+ $ echo 2b > outside/f
+ $ hg commit -Aqm 'outside 2b'
+ $ echo 3 > inside/f
+ $ hg commit -Aqm 'inside 3'
+ $ echo 4b > outside/f
+ $ hg commit -Aqm 'outside 4b'
+ $ hg update '.~3'
+ 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
+
+ $ echo 2c > outside/f
+ $ hg commit -Aqm 'outside 2c'
+ $ echo 3 > inside/f
+ $ hg commit -Aqm 'inside 3'
+ $ echo 4c > outside/f
+ $ hg commit -Aqm 'outside 4c'
+ $ hg update '.~3'
+ 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
+
+ $ echo 2d > outside/f
+ $ hg commit -Aqm 'outside 2d'
+ $ echo 3 > inside/f
+ $ hg commit -Aqm 'inside 3'
+ $ echo 4d > outside/f
+ $ hg commit -Aqm 'outside 4d'
+
+ $ hg update -r 'desc("outside 4a")'
+ 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
+ $ hg merge -r 'desc("outside 4b")' 2>&1 | egrep -v '(warning:|incomplete!)'
+ merging outside/f
+ 0 files updated, 0 files merged, 0 files removed, 1 files unresolved
+ use 'hg resolve' to retry unresolved file merges or 'hg merge --abort' to abandon
+ $ echo 5 > outside/f
+ $ rm outside/f.orig
+ $ hg resolve --mark outside/f
+ (no more unresolved files)
+ $ hg commit -m 'merge a/b 5'
+ $ echo 6 > outside/f
+ $ hg commit -Aqm 'outside 6'
+
+ $ hg merge -r 'desc("outside 4c")' 2>&1 | egrep -v '(warning:|incomplete!)'
+ merging outside/f
+ 0 files updated, 0 files merged, 0 files removed, 1 files unresolved
+ use 'hg resolve' to retry unresolved file merges or 'hg merge --abort' to abandon
+ $ echo 7 > outside/f
+ $ rm outside/f.orig
+ $ hg resolve --mark outside/f
+ (no more unresolved files)
+ $ hg commit -Aqm 'merge a/b/c 7'
+ $ echo 8 > outside/f
+ $ hg commit -Aqm 'outside 8'
+
+ $ hg merge -r 'desc("outside 4d")' 2>&1 | egrep -v '(warning:|incomplete!)'
+ merging outside/f
+ 0 files updated, 0 files merged, 0 files removed, 1 files unresolved
+ use 'hg resolve' to retry unresolved file merges or 'hg merge --abort' to abandon
+ $ echo 9 > outside/f
+ $ rm outside/f.orig
+ $ hg resolve --mark outside/f
+ (no more unresolved files)
+ $ hg commit -Aqm 'merge a/b/c/d 9'
+ $ echo 10 > outside/f
+ $ hg commit -Aqm 'outside 10'
+
+ $ echo 11 > inside/f
+ $ hg commit -Aqm 'inside 11'
+ $ echo 12 > outside/f
+ $ hg commit -Aqm 'outside 12'
+
+ $ hg log -G -T '{rev} {node|short} {desc}\n'
+ @ 21 8d874d57adea outside 12
+ |
+ o 20 7ef88b4dd4fa inside 11
+ |
+ o 19 2a20009de83e outside 10
+ |
+ o 18 3ac1f5779de3 merge a/b/c/d 9
+ |\
+ | o 17 38a9c2f7e546 outside 8
+ | |
+ | o 16 094aa62fc898 merge a/b/c 7
+ | |\
+ | | o 15 f29d083d32e4 outside 6
+ | | |
+ | | o 14 2dc11382541d merge a/b 5
+ | | |\
+ o | | | 13 27d07ef97221 outside 4d
+ | | | |
+ o | | | 12 465567bdfb2d inside 3
+ | | | |
+ o | | | 11 d1c61993ec83 outside 2d
+ | | | |
+ | o | | 10 56859a8e33b9 outside 4c
+ | | | |
+ | o | | 9 bb96a08b062a inside 3
+ | | | |
+ | o | | 8 b844052e7b3b outside 2c
+ |/ / /
+ | | o 7 9db2d8fcc2a6 outside 4b
+ | | |
+ | | o 6 6418167787a6 inside 3
+ | | |
+ +---o 5 77344f344d83 outside 2b
+ | |
+ | o 4 9cadde08dc9f outside 4a
+ | |
+ | o 3 019ef06f125b inside 3
+ | |
+ | o 2 75e40c075a19 outside 2a
+ |/
+ o 1 906d6c682641 initial outside
+ |
+ o 0 9f8e82b51004 initial inside
+
+
+Now narrow clone this and get a hopefully correct graph
+
+ $ cd ..
+ $ hg clone --narrow ssh://user@dummy/master narrow --include inside
+ requesting all changes
+ adding changesets
+ adding manifests
+ adding file changes
+ added 14 changesets with 3 changes to 1 files
+ new changesets *:* (glob)
+ updating to branch default
+ 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
+ $ cd narrow
+
+To make updating the tests easier, we print the emitted nodes
+sorted. That way the same node structure is still recognizable when it
+is emitted in a different order.
+
+ $ hg log -T '{if(ellipsis,"...")}{node|short} {p1node|short} {p2node|short} {desc}\n' | sort
+ ...094aa62fc898 6418167787a6 bb96a08b062a merge a/b/c 7
+ ...2a20009de83e 019ef06f125b 3ac1f5779de3 outside 10
+ ...3ac1f5779de3 465567bdfb2d 094aa62fc898 merge a/b/c/d 9
+ ...75e40c075a19 9f8e82b51004 000000000000 outside 2a
+ ...77344f344d83 9f8e82b51004 000000000000 outside 2b
+ ...8d874d57adea 7ef88b4dd4fa 000000000000 outside 12
+ ...b844052e7b3b 9f8e82b51004 000000000000 outside 2c
+ ...d1c61993ec83 9f8e82b51004 000000000000 outside 2d
+ 019ef06f125b 75e40c075a19 000000000000 inside 3
+ 465567bdfb2d d1c61993ec83 000000000000 inside 3
+ 6418167787a6 77344f344d83 000000000000 inside 3
+ 7ef88b4dd4fa 2a20009de83e 000000000000 inside 11
+ 9f8e82b51004 000000000000 000000000000 initial inside
+ bb96a08b062a b844052e7b3b 000000000000 inside 3
+
+But seeing the graph is also nice:
+ $ hg log -G -T '{if(ellipsis,"...")}{node|short} {desc}\n'
+ @ ...8d874d57adea outside 12
+ |
+ o 7ef88b4dd4fa inside 11
+ |
+ o ...2a20009de83e outside 10
+ |\
+ | o ...3ac1f5779de3 merge a/b/c/d 9
+ | |\
+ | | o ...094aa62fc898 merge a/b/c 7
+ | | |\
+ | o | | 465567bdfb2d inside 3
+ | | | |
+ | o | | ...d1c61993ec83 outside 2d
+ | | | |
+ | | | o bb96a08b062a inside 3
+ | | | |
+ | +---o ...b844052e7b3b outside 2c
+ | | |
+ | | o 6418167787a6 inside 3
+ | | |
+ | | o ...77344f344d83 outside 2b
+ | |/
+ o | 019ef06f125b inside 3
+ | |
+ o | ...75e40c075a19 outside 2a
+ |/
+ o 9f8e82b51004 initial inside
+
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/tests/test-narrow-exchange.t Sat Feb 24 17:49:10 2018 -0600
@@ -0,0 +1,210 @@
+
+ $ . "$TESTDIR/narrow-library.sh"
+
+create full repo
+
+ $ hg init master
+ $ cd master
+ $ cat >> .hg/hgrc <<EOF
+ > [narrow]
+ > serveellipses=True
+ > EOF
+
+ $ mkdir inside
+ $ echo 1 > inside/f
+ $ mkdir inside2
+ $ echo 1 > inside2/f
+ $ mkdir outside
+ $ echo 1 > outside/f
+ $ hg ci -Aqm 'initial'
+
+ $ echo 2 > inside/f
+ $ hg ci -qm 'inside 2'
+
+ $ echo 2 > inside2/f
+ $ hg ci -qm 'inside2 2'
+
+ $ echo 2 > outside/f
+ $ hg ci -qm 'outside 2'
+
+ $ cd ..
+
+ $ hg clone --narrow ssh://user@dummy/master narrow --include inside
+ requesting all changes
+ adding changesets
+ adding manifests
+ adding file changes
+ added 3 changesets with 2 changes to 1 files
+ new changesets *:* (glob)
+ updating to branch default
+ 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
+
+ $ hg clone --narrow ssh://user@dummy/master narrow2 --include inside --include inside2
+ requesting all changes
+ adding changesets
+ adding manifests
+ adding file changes
+ added 4 changesets with 4 changes to 2 files
+ new changesets *:* (glob)
+ updating to branch default
+ 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
+
+Can push to wider repo if change does not affect paths in wider repo that are
+not also in narrower repo
+
+ $ cd narrow
+ $ echo 3 > inside/f
+ $ hg ci -m 'inside 3'
+ $ hg push ssh://user@dummy/narrow2
+ pushing to ssh://user@dummy/narrow2
+ searching for changes
+ remote: adding changesets
+ remote: adding manifests
+ remote: adding file changes
+ remote: added 1 changesets with 1 changes to 1 files
+
+Can push to narrower repo if change affects only paths within remote's
+narrow spec
+
+ $ cd ../narrow2
+ $ cat >> .hg/hgrc <<EOF
+ > [narrow]
+ > serveellipses=True
+ > EOF
+ $ hg co -r 'desc("inside 3")'
+ 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
+ $ echo 4 > inside/f
+ $ hg ci -m 'inside 4'
+ $ hg push ssh://user@dummy/narrow
+ pushing to ssh://user@dummy/narrow
+ searching for changes
+ remote: adding changesets
+ remote: adding manifests
+ remote: adding file changes
+ remote: added 1 changesets with 1 changes to 1 files
+
+Can push to narrow repo if change affects only paths outside remote's
+narrow spec
+
+ $ echo 3 > inside2/f
+ $ hg ci -m 'inside2 3'
+TODO: this should be successful
+ $ hg push ssh://user@dummy/narrow
+ pushing to ssh://user@dummy/narrow
+ searching for changes
+ remote: adding changesets
+ remote: adding manifests
+ remote: adding file changes
+ remote: transaction abort!
+ remote: rollback completed
+ remote: abort: data/inside2/f.i@4a1aa07735e6: unknown parent!
+ abort: stream ended unexpectedly (got 0 bytes, expected 4)
+ [255]
+
+Can pull from wider repo if change affects only paths outside remote's
+narrow spec
+ $ echo 4 > inside2/f
+ $ hg ci -m 'inside2 4'
+ $ hg log -G -T '{rev} {node|short} {files}\n'
+ @ 7 d78a96df731d inside2/f
+ |
+ o 6 8c26f5218962 inside2/f
+ |
+ o 5 ba3480e2f9de inside/f
+ |
+ o 4 4e5edd526618 inside/f
+ |
+ o 3 81e7e07b7ab0 outside/f
+ |
+ o 2 f3993b8c0c2b inside2/f
+ |
+ o 1 8cd66ca966b4 inside/f
+ |
+ o 0 c8057d6f53ab inside/f inside2/f outside/f
+
+ $ cd ../narrow
+ $ hg log -G -T '{rev} {node|short} {files}\n'
+ o 4 ba3480e2f9de inside/f
+ |
+ @ 3 4e5edd526618 inside/f
+ |
+ o 2 81e7e07b7ab0 outside/f
+ |
+ o 1 8cd66ca966b4 inside/f
+ |
+ o 0 c8057d6f53ab inside/f inside2/f outside/f
+
+ $ hg pull ssh://user@dummy/narrow2
+ pulling from ssh://user@dummy/narrow2
+ searching for changes
+ remote: abort: unable to resolve parent while packing 'data/inside2/f.i' 3 for changeset 5 (?)
+ adding changesets
+ remote: abort: unexpected error: unable to resolve parent while packing 'data/inside2/f.i' 3 for changeset 5
+ transaction abort!
+ rollback completed
+ abort: pull failed on remote
+ [255]
+
+Check that the resulting history is valid in the full repo
+
+ $ cd ../narrow2
+ $ hg push ssh://user@dummy/master
+ pushing to ssh://user@dummy/master
+ searching for changes
+ remote: adding changesets
+ remote: adding manifests
+ remote: adding file changes
+ remote: added 4 changesets with 4 changes to 2 files
+ $ cd ../master
+ $ hg verify
+ checking changesets
+ checking manifests
+ crosschecking files in changesets and manifests
+ checking files
+ 3 files, 8 changesets, 10 total revisions
+
+Cannot push to wider repo if change affects paths in wider repo that are
+not also in narrower repo
+ $ cd ../master
+ $ hg co -r 'desc("inside2 4")'
+ 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
+ $ echo 5 > inside2/f
+ $ hg ci -m 'inside2 5'
+ $ hg log -G -T '{rev} {node|short} {files}\n'
+ @ 8 5970befb64ba inside2/f
+ |
+ o 7 d78a96df731d inside2/f
+ |
+ o 6 8c26f5218962 inside2/f
+ |
+ o 5 ba3480e2f9de inside/f
+ |
+ o 4 4e5edd526618 inside/f
+ |
+ o 3 81e7e07b7ab0 outside/f
+ |
+ o 2 f3993b8c0c2b inside2/f
+ |
+ o 1 8cd66ca966b4 inside/f
+ |
+ o 0 c8057d6f53ab inside/f inside2/f outside/f
+
+ $ cd ../narrow
+ $ hg pull
+ pulling from ssh://user@dummy/master
+ searching for changes
+ adding changesets
+ adding manifests
+ adding file changes
+ added 1 changesets with 0 changes to 0 files
+ new changesets * (glob)
+ (run 'hg update' to get a working copy)
+TODO: this should tell the user that their narrow clone does not have the
+necessary content to be able to push to the target
+ $ hg push ssh://user@dummy/narrow2
+ pushing to ssh://user@dummy/narrow2
+ searching for changes
+ remote has heads on branch 'default' that are not known locally: d78a96df731d
+ abort: push creates new remote head 5970befb64ba!
+ (pull and merge or see 'hg help push' for details about pushing new heads)
+ [255]
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/tests/test-narrow-expanddirstate.t Sat Feb 24 17:49:10 2018 -0600
@@ -0,0 +1,162 @@
+ $ . "$TESTDIR/narrow-library.sh"
+
+ $ hg init master
+ $ cd master
+
+ $ mkdir inside
+ $ echo inside > inside/f1
+ $ mkdir outside
+ $ echo outside > outside/f2
+ $ mkdir patchdir
+ $ echo patch_this > patchdir/f3
+ $ hg ci -Aqm 'initial'
+
+ $ cd ..
+
+ $ hg clone --narrow ssh://user@dummy/master narrow --include inside
+ requesting all changes
+ adding changesets
+ adding manifests
+ adding file changes
+ added 1 changesets with 1 changes to 1 files
+ new changesets dff6a2a6d433
+ updating to branch default
+ 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
+
+ $ cd narrow
+
+ $ mkdir outside
+ $ echo other_contents > outside/f2
+ $ grep outside .hg/narrowspec
+ [1]
+ $ grep outside .hg/dirstate
+ [1]
+ $ hg status
+
+`hg status` did not add outside.
+ $ grep outside .hg/narrowspec
+ [1]
+ $ grep outside .hg/dirstate
+ [1]
+
+Unfortunately this is not really a candidate for adding to narrowhg proper,
+since it depends on some other source, such as a virtual filesystem and/or
+remotefilelog, to provide the manifests (when using treemanifests) and file
+contents. We want narrowhg to be useful without those systems, so this method
+is not available in narrowhg proper at the moment.
+ $ cat > "$TESTTMP/expand_extension.py" <<EOF
+ > import os
+ > import sys
+ >
+ > from mercurial import encoding
+ > from mercurial import extensions
+ > from mercurial import localrepo
+ > from mercurial import match as matchmod
+ > from mercurial import narrowspec
+ > from mercurial import patch
+ > from mercurial import util as hgutil
+ >
+ > def expandnarrowspec(ui, repo, newincludes=None):
+ > if not newincludes:
+ > return
+ > import sys
+ > newincludes = set([newincludes])
+ > includes, excludes = repo.narrowpats
+ > currentmatcher = narrowspec.match(repo.root, includes, excludes)
+ > includes = includes | newincludes
+ > if not repo.currenttransaction():
+ > ui.develwarn(b'expandnarrowspec called outside of transaction!')
+ > repo.setnarrowpats(includes, excludes)
+ > newmatcher = narrowspec.match(repo.root, includes, excludes)
+ > added = matchmod.differencematcher(newmatcher, currentmatcher)
+ > for f in repo[b'.'].manifest().walk(added):
+ > repo.dirstate.normallookup(f)
+ >
+ > def makeds(ui, repo):
+ > def wrapds(orig, self):
+ > ds = orig(self)
+ > class expandingdirstate(ds.__class__):
+ > @hgutil.propertycache
+ > def _map(self):
+ > ret = super(expandingdirstate, self)._map
+ > with repo.wlock(), repo.lock(), repo.transaction(
+ > b'expandnarrowspec'):
+ > expandnarrowspec(ui, repo,
+ > encoding.environ.get(b'DIRSTATEINCLUDES'))
+ > return ret
+ > ds.__class__ = expandingdirstate
+ > return ds
+ > return wrapds
+ >
+ > def reposetup(ui, repo):
+ > extensions.wrapfilecache(localrepo.localrepository, b'dirstate',
+ > makeds(ui, repo))
+ > def overridepatch(orig, *args, **kwargs):
+ > with repo.wlock():
+ > expandnarrowspec(ui, repo, encoding.environ.get(b'PATCHINCLUDES'))
+ > return orig(*args, **kwargs)
+ >
+ > extensions.wrapfunction(patch, b'patch', overridepatch)
+ > EOF
+ $ cat >> ".hg/hgrc" <<EOF
+ > [extensions]
+ > expand_extension = $TESTTMP/expand_extension.py
+ > EOF
+
+Since we do not have the ability to rely on a virtual filesystem or
+remotefilelog in the test, we just fake it by copying the data from the 'master'
+repo.
+ $ cp -a ../master/.hg/store/data/* .hg/store/data
+Do that for patchdir as well.
+ $ cp -a ../master/patchdir .
+
+`hg status` will now add outside, but not patchdir.
+ $ DIRSTATEINCLUDES=path:outside hg status
+ M outside/f2
+ $ grep outside .hg/narrowspec
+ path:outside
+ $ grep outside .hg/dirstate > /dev/null
+ $ grep patchdir .hg/narrowspec
+ [1]
+ $ grep patchdir .hg/dirstate
+ [1]
+
+Get rid of the modification to outside/f2.
+ $ hg update -C .
+ 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
+
+This patch will not apply cleanly at the moment, so `hg import` will fail
+ $ cat > "$TESTTMP/foo.patch" <<EOF
+ > --- patchdir/f3
+ > +++ patchdir/f3
+ > @@ -1,1 +1,1 @@
+ > -this should be "patch_this", but its not, so patch fails
+ > +this text is irrelevant
+ > EOF
+ $ PATCHINCLUDES=path:patchdir hg import -p0 -e "$TESTTMP/foo.patch" -m ignored
+ applying $TESTTMP/foo.patch
+ patching file patchdir/f3
+ Hunk #1 FAILED at 0
+ 1 out of 1 hunks FAILED -- saving rejects to file patchdir/f3.rej
+ abort: patch failed to apply
+ [255]
+ $ grep patchdir .hg/narrowspec
+ [1]
+ $ grep patchdir .hg/dirstate > /dev/null
+ [1]
+
+Let's make it apply cleanly and see that it *did* expand properly
+ $ cat > "$TESTTMP/foo.patch" <<EOF
+ > --- patchdir/f3
+ > +++ patchdir/f3
+ > @@ -1,1 +1,1 @@
+ > -patch_this
+ > +patched_this
+ > EOF
+ $ PATCHINCLUDES=path:patchdir hg import -p0 -e "$TESTTMP/foo.patch" -m message
+ applying $TESTTMP/foo.patch
+ $ cat patchdir/f3
+ patched_this
+ $ grep patchdir .hg/narrowspec
+ path:patchdir
+ $ grep patchdir .hg/dirstate > /dev/null
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/tests/test-narrow-merge.t Sat Feb 24 17:49:10 2018 -0600
@@ -0,0 +1,104 @@
+#testcases flat tree
+
+ $ . "$TESTDIR/narrow-library.sh"
+
+#if tree
+ $ cat << EOF >> $HGRCPATH
+ > [experimental]
+ > treemanifest = 1
+ > EOF
+#endif
+
+create full repo
+
+ $ hg init master
+ $ cd master
+ $ cat >> .hg/hgrc <<EOF
+ > [narrow]
+ > serveellipses=True
+ > EOF
+
+ $ mkdir inside
+ $ echo inside1 > inside/f1
+ $ echo inside2 > inside/f2
+ $ mkdir outside
+ $ echo outside1 > outside/f1
+ $ echo outside2 > outside/f2
+ $ hg ci -Aqm 'initial'
+
+ $ echo modified > inside/f1
+ $ hg ci -qm 'modify inside/f1'
+
+ $ hg update -q 0
+ $ echo modified > inside/f2
+ $ hg ci -qm 'modify inside/f2'
+
+ $ hg update -q 0
+ $ echo modified2 > inside/f1
+ $ hg ci -qm 'conflicting inside/f1'
+
+ $ hg update -q 0
+ $ echo modified > outside/f1
+ $ hg ci -qm 'modify outside/f1'
+
+ $ hg update -q 0
+ $ echo modified2 > outside/f1
+ $ hg ci -qm 'conflicting outside/f1'
+
+ $ cd ..
+
+ $ hg clone --narrow ssh://user@dummy/master narrow --include inside
+ requesting all changes
+ adding changesets
+ adding manifests
+ adding file changes
+ added 6 changesets with 5 changes to 2 files (+4 heads)
+ new changesets *:* (glob)
+ updating to branch default
+ 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
+ $ cd narrow
+
+ $ hg update -q 0
+
+Can merge when no files outside the narrow spec are involved
+
+ $ hg update -q 'desc("modify inside/f1")'
+ $ hg merge 'desc("modify inside/f2")'
+ 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
+ (branch merge, don't forget to commit)
+ $ hg commit -m 'merge inside changes'
+
+Can merge conflicting changes inside narrow spec
+
+ $ hg update -q 'desc("modify inside/f1")'
+ $ hg merge 'desc("conflicting inside/f1")' 2>&1 | egrep -v '(warning:|incomplete!)'
+ merging inside/f1
+ 0 files updated, 0 files merged, 0 files removed, 1 files unresolved
+ use 'hg resolve' to retry unresolved file merges or 'hg merge --abort' to abandon
+ $ echo modified3 > inside/f1
+ $ hg resolve -m
+ (no more unresolved files)
+ $ hg commit -m 'merge inside/f1'
+
+TODO: Can merge non-conflicting changes outside narrow spec
+
+ $ hg update -q 'desc("modify inside/f1")'
+ $ hg merge 'desc("modify outside/f1")'
+ abort: merge affects file 'outside/f1' outside narrow, which is not yet supported (flat !)
+ abort: merge affects file 'outside/' outside narrow, which is not yet supported (tree !)
+ (merging in the other direction may work)
+ [255]
+
+ $ hg update -q 'desc("modify outside/f1")'
+ $ hg merge 'desc("modify inside/f1")'
+ 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
+ (branch merge, don't forget to commit)
+ $ hg ci -m 'merge from inside to outside'
+
+Refuses merge of conflicting outside changes
+
+ $ hg update -q 'desc("modify outside/f1")'
+ $ hg merge 'desc("conflicting outside/f1")'
+ abort: conflict in file 'outside/f1' is outside narrow clone (flat !)
+ abort: conflict in file 'outside/' is outside narrow clone (tree !)
+ [255]
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/tests/test-narrow-patch.t Sat Feb 24 17:49:10 2018 -0600
@@ -0,0 +1,84 @@
+#testcases flat tree
+
+ $ . "$TESTDIR/narrow-library.sh"
+
+#if tree
+ $ cat << EOF >> $HGRCPATH
+ > [experimental]
+ > treemanifest = 1
+ > EOF
+#endif
+
+create full repo
+
+ $ hg init master
+ $ cd master
+
+ $ mkdir inside
+ $ echo inside > inside/f1
+ $ mkdir outside
+ $ echo outside > outside/f1
+ $ hg ci -Aqm 'initial'
+
+ $ echo modified > inside/f1
+ $ hg ci -qm 'modify inside'
+
+ $ echo modified > outside/f1
+ $ hg ci -qm 'modify outside'
+
+ $ cd ..
+
+ $ hg clone --narrow ssh://user@dummy/master narrow --include inside
+ requesting all changes
+ adding changesets
+ adding manifests
+ adding file changes
+ added 3 changesets with 2 changes to 1 files
+ new changesets *:* (glob)
+ updating to branch default
+ 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
+ $ cd narrow
+
+Can show patch touching paths outside
+
+ $ hg log -p
+ changeset: 2:* (glob)
+ tag: tip
+ user: test
+ date: Thu Jan 01 00:00:00 1970 +0000
+ summary: modify outside
+
+
+ changeset: 1:* (glob)
+ user: test
+ date: Thu Jan 01 00:00:00 1970 +0000
+ summary: modify inside
+
+ diff -r * -r * inside/f1 (glob)
+ --- a/inside/f1 Thu Jan 01 00:00:00 1970 +0000
+ +++ b/inside/f1 Thu Jan 01 00:00:00 1970 +0000
+ @@ -1,1 +1,1 @@
+ -inside
+ +modified
+
+ changeset: 0:* (glob)
+ user: test
+ date: Thu Jan 01 00:00:00 1970 +0000
+ summary: initial
+
+ diff -r 000000000000 -r * inside/f1 (glob)
+ --- /dev/null Thu Jan 01 00:00:00 1970 +0000
+ +++ b/inside/f1 Thu Jan 01 00:00:00 1970 +0000
+ @@ -0,0 +1,1 @@
+ +inside
+
+
+ $ hg status --rev 1 --rev 2
+
+Can show copies inside the narrow clone
+
+ $ hg cp inside/f1 inside/f2
+ $ hg diff --git
+ diff --git a/inside/f1 b/inside/f2
+ copy from inside/f1
+ copy to inside/f2
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/tests/test-narrow-patterns.t Sat Feb 24 17:49:10 2018 -0600
@@ -0,0 +1,435 @@
+ $ . "$TESTDIR/narrow-library.sh"
+
+initialize nested directories to validate complex include/exclude patterns
+
+ $ hg init master
+ $ cd master
+ $ cat >> .hg/hgrc <<EOF
+ > [narrow]
+ > serveellipses=True
+ > EOF
+
+ $ echo root > root
+ $ hg add root
+ $ hg commit -m 'add root'
+
+ $ for d in dir1 dir2 dir1/dirA dir1/dirB dir2/dirA dir2/dirB
+ > do
+ > mkdir -p $d
+ > echo $d/foo > $d/foo
+ > hg add $d/foo
+ > hg commit -m "add $d/foo"
+ > echo $d/bar > $d/bar
+ > hg add $d/bar
+ > hg commit -m "add $d/bar"
+ > done
+#if execbit
+ $ chmod +x dir1/dirA/foo
+ $ hg commit -m "make dir1/dirA/foo executable"
+#else
+ $ hg import --bypass - <<EOF
+ > # HG changeset patch
+ > make dir1/dirA/foo executable
+ >
+ > diff --git a/dir1/dirA/foo b/dir1/dirA/foo
+ > old mode 100644
+ > new mode 100755
+ > EOF
+ applying patch from stdin
+ $ hg update -qr tip
+#endif
+ $ hg log -G -T '{rev} {node|short} {files}\n'
+ @ 13 c87ca422d521 dir1/dirA/foo
+ |
+ o 12 951b8a83924e dir2/dirB/bar
+ |
+ o 11 01ae5a51b563 dir2/dirB/foo
+ |
+ o 10 5eababdf0ac5 dir2/dirA/bar
+ |
+ o 9 99d690663739 dir2/dirA/foo
+ |
+ o 8 8e80155d5445 dir1/dirB/bar
+ |
+ o 7 406760310428 dir1/dirB/foo
+ |
+ o 6 623466a5f475 dir1/dirA/bar
+ |
+ o 5 06ff3a5be997 dir1/dirA/foo
+ |
+ o 4 33227af02764 dir2/bar
+ |
+ o 3 5e1f9d8d7c69 dir2/foo
+ |
+ o 2 594bc4b13d4a dir1/bar
+ |
+ o 1 47f480a08324 dir1/foo
+ |
+ o 0 2a4f0c3b67da root
+
+ $ cd ..
+
+clone a narrow portion of the master, such that we can widen it later
+
+ $ hg clone --narrow ssh://user@dummy/master narrow \
+ > --include dir1 \
+ > --include dir2 \
+ > --exclude dir1/dirA \
+ > --exclude dir1/dirB \
+ > --exclude dir2/dirA \
+ > --exclude dir2/dirB
+ requesting all changes
+ adding changesets
+ adding manifests
+ adding file changes
+ added 6 changesets with 4 changes to 4 files
+ new changesets *:* (glob)
+ updating to branch default
+ 4 files updated, 0 files merged, 0 files removed, 0 files unresolved
+
+ $ cd narrow
+ $ cat .hg/narrowspec
+ [includes]
+ path:dir1
+ path:dir2
+ [excludes]
+ path:dir1/dirA
+ path:dir1/dirB
+ path:dir2/dirA
+ path:dir2/dirB
+ $ hg manifest -r tip
+ dir1/bar
+ dir1/dirA/bar
+ dir1/dirA/foo
+ dir1/dirB/bar
+ dir1/dirB/foo
+ dir1/foo
+ dir2/bar
+ dir2/dirA/bar
+ dir2/dirA/foo
+ dir2/dirB/bar
+ dir2/dirB/foo
+ dir2/foo
+ root
+ $ find * | sort
+ dir1
+ dir1/bar
+ dir1/foo
+ dir2
+ dir2/bar
+ dir2/foo
+ $ hg log -G -T '{rev} {node|short}{if(ellipsis, "...")} {files}\n'
+ @ 5 c87ca422d521... dir1/dirA/foo
+ |
+ o 4 33227af02764 dir2/bar
+ |
+ o 3 5e1f9d8d7c69 dir2/foo
+ |
+ o 2 594bc4b13d4a dir1/bar
+ |
+ o 1 47f480a08324 dir1/foo
+ |
+ o 0 2a4f0c3b67da... root
+
+
+widen the narrow checkout
+
+ $ hg tracked --removeexclude dir1/dirA
+ comparing with ssh://user@dummy/master
+ searching for changes
+ no changes found
+ saved backup bundle to $TESTTMP/narrow/.hg/strip-backup/*-widen.hg (glob)
+ adding changesets
+ adding manifests
+ adding file changes
+ added 9 changesets with 6 changes to 6 files
+ new changesets *:* (glob)
+ $ cat .hg/narrowspec
+ [includes]
+ path:dir1
+ path:dir2
+ [excludes]
+ path:dir1/dirB
+ path:dir2/dirA
+ path:dir2/dirB
+ $ find * | sort
+ dir1
+ dir1/bar
+ dir1/dirA
+ dir1/dirA/bar
+ dir1/dirA/foo
+ dir1/foo
+ dir2
+ dir2/bar
+ dir2/foo
+
+#if execbit
+ $ test -x dir1/dirA/foo && echo executable
+ executable
+ $ test -x dir1/dirA/bar || echo not executable
+ not executable
+#endif
+
+ $ hg log -G -T '{rev} {node|short}{if(ellipsis, "...")} {files}\n'
+ @ 8 c87ca422d521 dir1/dirA/foo
+ |
+ o 7 951b8a83924e... dir2/dirB/bar
+ |
+ o 6 623466a5f475 dir1/dirA/bar
+ |
+ o 5 06ff3a5be997 dir1/dirA/foo
+ |
+ o 4 33227af02764 dir2/bar
+ |
+ o 3 5e1f9d8d7c69 dir2/foo
+ |
+ o 2 594bc4b13d4a dir1/bar
+ |
+ o 1 47f480a08324 dir1/foo
+ |
+ o 0 2a4f0c3b67da... root
+
+
+widen narrow spec again, but exclude a file in previously included spec
+
+ $ hg tracked --removeexclude dir2/dirB --addexclude dir1/dirA/bar
+ comparing with ssh://user@dummy/master
+ searching for changes
+ looking for local changes to affected paths
+ deleting data/dir1/dirA/bar.i
+ no changes found
+ saved backup bundle to $TESTTMP/narrow/.hg/strip-backup/*-widen.hg (glob)
+ adding changesets
+ adding manifests
+ adding file changes
+ added 11 changesets with 7 changes to 7 files
+ new changesets *:* (glob)
+ $ cat .hg/narrowspec
+ [includes]
+ path:dir1
+ path:dir2
+ [excludes]
+ path:dir1/dirA/bar
+ path:dir1/dirB
+ path:dir2/dirA
+ $ find * | sort
+ dir1
+ dir1/bar
+ dir1/dirA
+ dir1/dirA/foo
+ dir1/foo
+ dir2
+ dir2/bar
+ dir2/dirB
+ dir2/dirB/bar
+ dir2/dirB/foo
+ dir2/foo
+ $ hg log -G -T '{rev} {node|short}{if(ellipsis, "...")} {files}\n'
+ @ 10 c87ca422d521 dir1/dirA/foo
+ |
+ o 9 951b8a83924e dir2/dirB/bar
+ |
+ o 8 01ae5a51b563 dir2/dirB/foo
+ |
+ o 7 5eababdf0ac5... dir2/dirA/bar
+ |
+ o 6 623466a5f475... dir1/dirA/bar
+ |
+ o 5 06ff3a5be997 dir1/dirA/foo
+ |
+ o 4 33227af02764 dir2/bar
+ |
+ o 3 5e1f9d8d7c69 dir2/foo
+ |
+ o 2 594bc4b13d4a dir1/bar
+ |
+ o 1 47f480a08324 dir1/foo
+ |
+ o 0 2a4f0c3b67da... root
+
+
+widen narrow spec yet again, excluding a directory in previous spec
+
+ $ hg tracked --removeexclude dir2/dirA --addexclude dir1/dirA
+ comparing with ssh://user@dummy/master
+ searching for changes
+ looking for local changes to affected paths
+ deleting data/dir1/dirA/foo.i
+ no changes found
+ saved backup bundle to $TESTTMP/narrow/.hg/strip-backup/*-widen.hg (glob)
+ adding changesets
+ adding manifests
+ adding file changes
+ added 13 changesets with 8 changes to 8 files
+ new changesets *:* (glob)
+ $ cat .hg/narrowspec
+ [includes]
+ path:dir1
+ path:dir2
+ [excludes]
+ path:dir1/dirA
+ path:dir1/dirA/bar
+ path:dir1/dirB
+ $ find * | sort
+ dir1
+ dir1/bar
+ dir1/foo
+ dir2
+ dir2/bar
+ dir2/dirA
+ dir2/dirA/bar
+ dir2/dirA/foo
+ dir2/dirB
+ dir2/dirB/bar
+ dir2/dirB/foo
+ dir2/foo
+ $ hg log -G -T '{rev} {node|short}{if(ellipsis, "...")} {files}\n'
+ @ 12 c87ca422d521... dir1/dirA/foo
+ |
+ o 11 951b8a83924e dir2/dirB/bar
+ |
+ o 10 01ae5a51b563 dir2/dirB/foo
+ |
+ o 9 5eababdf0ac5 dir2/dirA/bar
+ |
+ o 8 99d690663739 dir2/dirA/foo
+ |
+ o 7 8e80155d5445... dir1/dirB/bar
+ |
+ o 6 623466a5f475... dir1/dirA/bar
+ |
+ o 5 06ff3a5be997... dir1/dirA/foo
+ |
+ o 4 33227af02764 dir2/bar
+ |
+ o 3 5e1f9d8d7c69 dir2/foo
+ |
+ o 2 594bc4b13d4a dir1/bar
+ |
+ o 1 47f480a08324 dir1/foo
+ |
+ o 0 2a4f0c3b67da... root
+
+
+include a directory that was previously explicitly excluded
+
+ $ hg tracked --removeexclude dir1/dirA
+ comparing with ssh://user@dummy/master
+ searching for changes
+ no changes found
+ saved backup bundle to $TESTTMP/narrow/.hg/strip-backup/*-widen.hg (glob)
+ adding changesets
+ adding manifests
+ adding file changes
+ added 13 changesets with 9 changes to 9 files
+ new changesets *:* (glob)
+ $ cat .hg/narrowspec
+ [includes]
+ path:dir1
+ path:dir2
+ [excludes]
+ path:dir1/dirA/bar
+ path:dir1/dirB
+ $ find * | sort
+ dir1
+ dir1/bar
+ dir1/dirA
+ dir1/dirA/foo
+ dir1/foo
+ dir2
+ dir2/bar
+ dir2/dirA
+ dir2/dirA/bar
+ dir2/dirA/foo
+ dir2/dirB
+ dir2/dirB/bar
+ dir2/dirB/foo
+ dir2/foo
+ $ hg log -G -T '{rev} {node|short}{if(ellipsis, "...")} {files}\n'
+ @ 12 c87ca422d521 dir1/dirA/foo
+ |
+ o 11 951b8a83924e dir2/dirB/bar
+ |
+ o 10 01ae5a51b563 dir2/dirB/foo
+ |
+ o 9 5eababdf0ac5 dir2/dirA/bar
+ |
+ o 8 99d690663739 dir2/dirA/foo
+ |
+ o 7 8e80155d5445... dir1/dirB/bar
+ |
+ o 6 623466a5f475... dir1/dirA/bar
+ |
+ o 5 06ff3a5be997 dir1/dirA/foo
+ |
+ o 4 33227af02764 dir2/bar
+ |
+ o 3 5e1f9d8d7c69 dir2/foo
+ |
+ o 2 594bc4b13d4a dir1/bar
+ |
+ o 1 47f480a08324 dir1/foo
+ |
+ o 0 2a4f0c3b67da... root
+
+
+ $ cd ..
+
+clone a narrow portion of the master, such that we can widen it later
+
+ $ hg clone --narrow ssh://user@dummy/master narrow2 --include dir1/dirA
+ requesting all changes
+ adding changesets
+ adding manifests
+ adding file changes
+ added 5 changesets with 2 changes to 2 files
+ new changesets *:* (glob)
+ updating to branch default
+ 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
+ $ cd narrow2
+ $ find * | sort
+ dir1
+ dir1/dirA
+ dir1/dirA/bar
+ dir1/dirA/foo
+ $ hg tracked --addinclude dir1
+ comparing with ssh://user@dummy/master
+ searching for changes
+ no changes found
+ saved backup bundle to $TESTTMP/narrow2/.hg/strip-backup/*-widen.hg (glob)
+ adding changesets
+ adding manifests
+ adding file changes
+ added 10 changesets with 6 changes to 6 files
+ new changesets *:* (glob)
+ $ find * | sort
+ dir1
+ dir1/bar
+ dir1/dirA
+ dir1/dirA/bar
+ dir1/dirA/foo
+ dir1/dirB
+ dir1/dirB/bar
+ dir1/dirB/foo
+ dir1/foo
+ $ hg log -G -T '{rev} {node|short}{if(ellipsis, "...")} {files}\n'
+ @ 9 c87ca422d521 dir1/dirA/foo
+ |
+ o 8 951b8a83924e... dir2/dirB/bar
+ |
+ o 7 8e80155d5445 dir1/dirB/bar
+ |
+ o 6 406760310428 dir1/dirB/foo
+ |
+ o 5 623466a5f475 dir1/dirA/bar
+ |
+ o 4 06ff3a5be997 dir1/dirA/foo
+ |
+ o 3 33227af02764... dir2/bar
+ |
+ o 2 594bc4b13d4a dir1/bar
+ |
+ o 1 47f480a08324 dir1/foo
+ |
+ o 0 2a4f0c3b67da... root
+
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/tests/test-narrow-pull.t Sat Feb 24 17:49:10 2018 -0600
@@ -0,0 +1,175 @@
+ $ . "$TESTDIR/narrow-library.sh"
+
+ $ hg init master
+ $ cd master
+ $ cat >> .hg/hgrc <<EOF
+ > [narrow]
+ > serveellipses=True
+ > EOF
+ $ for x in `$TESTDIR/seq.py 10`
+ > do
+ > echo $x > "f$x"
+ > hg add "f$x"
+ > hg commit -m "Commit f$x"
+ > done
+ $ cd ..
+
+narrow clone a couple files, f2 and f8
+
+ $ hg clone --narrow ssh://user@dummy/master narrow --include "f2" --include "f8"
+ requesting all changes
+ adding changesets
+ adding manifests
+ adding file changes
+ added 5 changesets with 2 changes to 2 files
+ new changesets *:* (glob)
+ updating to branch default
+ 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
+ $ cd narrow
+ $ ls
+ f2
+ f8
+ $ cat f2 f8
+ 2
+ 8
+
+ $ cd ..
+
+change every upstream file twice
+
+ $ cd master
+ $ for x in `$TESTDIR/seq.py 10`
+ > do
+ > echo "update#1 $x" >> "f$x"
+ > hg commit -m "Update#1 to f$x" "f$x"
+ > done
+ $ for x in `$TESTDIR/seq.py 10`
+ > do
+ > echo "update#2 $x" >> "f$x"
+ > hg commit -m "Update#2 to f$x" "f$x"
+ > done
+ $ cd ..
+
+look for incoming changes
+
+ $ cd narrow
+ $ hg incoming --limit 3
+ comparing with ssh://user@dummy/master
+ searching for changes
+ changeset: 5:ddc055582556
+ user: test
+ date: Thu Jan 01 00:00:00 1970 +0000
+ summary: Update#1 to f1
+
+ changeset: 6:f66eb5ad621d
+ user: test
+ date: Thu Jan 01 00:00:00 1970 +0000
+ summary: Update#1 to f2
+
+ changeset: 7:c42ecff04e99
+ user: test
+ date: Thu Jan 01 00:00:00 1970 +0000
+ summary: Update#1 to f3
+
+
+Interrupting the pull is safe
+ $ hg --config hooks.pretxnchangegroup.bad=false pull -q
+ transaction abort!
+ rollback completed
+ abort: pretxnchangegroup.bad hook exited with status 1
+ [255]
+ $ hg id
+ 223311e70a6f tip
+
+pull new changes down to the narrow clone. Should get 9 new changesets: 4
+relevant to the narrow spec, and 5 ellipsis nodes gluing them all together.
+
+ $ hg pull
+ pulling from ssh://user@dummy/master
+ searching for changes
+ adding changesets
+ adding manifests
+ adding file changes
+ added 9 changesets with 4 changes to 2 files
+ new changesets *:* (glob)
+ (run 'hg update' to get a working copy)
+ $ hg log -T '{rev}: {desc}\n'
+ 13: Update#2 to f10
+ 12: Update#2 to f8
+ 11: Update#2 to f7
+ 10: Update#2 to f2
+ 9: Update#2 to f1
+ 8: Update#1 to f8
+ 7: Update#1 to f7
+ 6: Update#1 to f2
+ 5: Update#1 to f1
+ 4: Commit f10
+ 3: Commit f8
+ 2: Commit f7
+ 1: Commit f2
+ 0: Commit f1
+ $ hg update tip
+ 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
+
+add a change and push it
+
+ $ echo "update#3 2" >> f2
+ $ hg commit -m "Update#3 to f2" f2
+ $ hg log f2 -T '{rev}: {desc}\n'
+ 14: Update#3 to f2
+ 10: Update#2 to f2
+ 6: Update#1 to f2
+ 1: Commit f2
+ $ hg push
+ pushing to ssh://user@dummy/master
+ searching for changes
+ remote: adding changesets
+ remote: adding manifests
+ remote: adding file changes
+ remote: added 1 changesets with 1 changes to 1 files
+ $ cd ..
+
+ $ cd master
+ $ hg log f2 -T '{rev}: {desc}\n'
+ 30: Update#3 to f2
+ 21: Update#2 to f2
+ 11: Update#1 to f2
+ 1: Commit f2
+ $ hg log -l 3 -T '{rev}: {desc}\n'
+ 30: Update#3 to f2
+ 29: Update#2 to f10
+ 28: Update#2 to f9
+
+Can pull into repo with a single commit
+
+ $ cd ..
+ $ hg clone -q --narrow ssh://user@dummy/master narrow2 --include "f1" -r 0
+ $ cd narrow2
+ $ hg pull -q -r 1
+ transaction abort!
+ rollback completed
+ abort: pull failed on remote
+ [255]
+
+Can use 'hg share':
+ $ cat >> $HGRCPATH <<EOF
+ > [extensions]
+ > share=
+ > EOF
+
+ $ cd ..
+ $ hg share narrow2 narrow2-share
+ updating working directory
+ 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
+ $ cd narrow2-share
+ $ hg status
+
+We should also be able to unshare without breaking everything:
+ $ hg unshare
+ devel-warn: write with no wlock: "narrowspec" at: */hgext/narrow/narrowrepo.py:* (unsharenarrowspec) (glob)
+ $ hg verify
+ checking changesets
+ checking manifests
+ crosschecking files in changesets and manifests
+ checking files
+ 1 files, 1 changesets, 1 total revisions
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/tests/test-narrow-rebase.t Sat Feb 24 17:49:10 2018 -0600
@@ -0,0 +1,93 @@
+
+ $ . "$TESTDIR/narrow-library.sh"
+
+create full repo
+
+ $ hg init master
+ $ cd master
+
+ $ mkdir inside
+ $ echo inside1 > inside/f1
+ $ echo inside2 > inside/f2
+ $ mkdir outside
+ $ echo outside1 > outside/f1
+ $ echo outside2 > outside/f2
+ $ hg ci -Aqm 'initial'
+
+ $ echo modified > inside/f1
+ $ hg ci -qm 'modify inside/f1'
+
+ $ hg update -q 0
+ $ echo modified2 > inside/f2
+ $ hg ci -qm 'modify inside/f2'
+
+ $ hg update -q 0
+ $ echo modified > outside/f1
+ $ hg ci -qm 'modify outside/f1'
+
+ $ hg update -q 0
+ $ echo modified2 > outside/f1
+ $ hg ci -qm 'conflicting outside/f1'
+
+ $ cd ..
+
+ $ hg clone --narrow ssh://user@dummy/master narrow --include inside
+ requesting all changes
+ adding changesets
+ adding manifests
+ adding file changes
+ added 5 changesets with 4 changes to 2 files (+3 heads)
+ new changesets *:* (glob)
+ updating to branch default
+ 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
+ $ cd narrow
+ $ cat >> $HGRCPATH <<EOF
+ > [extensions]
+ > rebase=
+ > EOF
+
+ $ hg update -q 0
+
+Can rebase onto a commit where no files outside the narrow spec are involved
+
+ $ hg update -q 0
+ $ echo modified > inside/f2
+ $ hg ci -qm 'modify inside/f2'
+ $ hg rebase -d 'desc("modify inside/f1")'
+ rebasing 5:c2f36d04e05d "modify inside/f2" (tip)
+ saved backup bundle to $TESTTMP/narrow/.hg/strip-backup/*-rebase.hg (glob)
+
+Can rebase onto conflicting changes inside narrow spec
+
+ $ hg update -q 0
+ $ echo conflicting > inside/f1
+ $ hg ci -qm 'conflicting inside/f1'
+ $ hg rebase -d 'desc("modify inside/f1")' 2>&1 | egrep -v '(warning:|incomplete!)'
+ rebasing 6:cdce97fbf653 "conflicting inside/f1" (tip)
+ merging inside/f1
+ unresolved conflicts (see hg resolve, then hg rebase --continue)
+ $ echo modified3 > inside/f1
+ $ hg resolve -m 2>&1 | grep -v continue:
+ (no more unresolved files)
+ $ hg rebase --continue
+ rebasing 6:cdce97fbf653 "conflicting inside/f1" (tip)
+ saved backup bundle to $TESTTMP/narrow/.hg/strip-backup/*-rebase.hg (glob)
+
+Can rebase onto non-conflicting changes outside narrow spec
+
+ $ hg update -q 0
+ $ echo modified > inside/f2
+ $ hg ci -qm 'modify inside/f2'
+ $ hg rebase -d 'desc("modify outside/f1")'
+ rebasing 7:c2f36d04e05d "modify inside/f2" (tip)
+ saved backup bundle to $TESTTMP/narrow/.hg/strip-backup/*-rebase.hg (glob)
+
+Rebase aborts on conflicting changes outside narrow spec
+
+ $ hg update -q 'desc("conflicting outside/f1")'
+ $ hg phase -f -d .
+ no phases changed
+ $ hg rebase -d 'desc("modify outside/f1")'
+ rebasing 4:707c035aadb6 "conflicting outside/f1"
+ abort: conflict in file 'outside/f1' is outside narrow clone
+ [255]
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/tests/test-narrow-shallow-merges.t Sat Feb 24 17:49:10 2018 -0600
@@ -0,0 +1,345 @@
+ $ . "$TESTDIR/narrow-library.sh"
+
+create full repo
+
+ $ hg init master
+ $ cd master
+ $ cat >> .hg/hgrc <<EOF
+ > [narrow]
+ > serveellipses=True
+ > EOF
+
+ $ mkdir inside
+ $ echo 1 > inside/f
+ $ hg commit -Aqm 'initial inside'
+
+ $ mkdir outside
+ $ echo 1 > outside/f
+ $ hg commit -Aqm 'initial outside'
+
+ $ echo 2a > outside/f
+ $ hg commit -Aqm 'outside 2a'
+ $ echo 3 > inside/f
+ $ hg commit -Aqm 'inside 3'
+ $ echo 4a > outside/f
+ $ hg commit -Aqm 'outside 4a'
+ $ hg update '.~3'
+ 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
+
+ $ echo 2b > outside/f
+ $ hg commit -Aqm 'outside 2b'
+ $ echo 3 > inside/f
+ $ hg commit -Aqm 'inside 3'
+ $ echo 4b > outside/f
+ $ hg commit -Aqm 'outside 4b'
+ $ hg update '.~3'
+ 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
+
+ $ echo 2c > outside/f
+ $ hg commit -Aqm 'outside 2c'
+ $ echo 3 > inside/f
+ $ hg commit -Aqm 'inside 3'
+ $ echo 4c > outside/f
+ $ hg commit -Aqm 'outside 4c'
+ $ hg update '.~3'
+ 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
+
+ $ echo 2d > outside/f
+ $ hg commit -Aqm 'outside 2d'
+ $ echo 3 > inside/f
+ $ hg commit -Aqm 'inside 3'
+ $ echo 4d > outside/f
+ $ hg commit -Aqm 'outside 4d'
+
+ $ hg update -r 'desc("outside 4a")'
+ 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
+ $ hg merge -r 'desc("outside 4b")' 2>&1 | egrep -v '(warning:|incomplete!)'
+ merging outside/f
+ 0 files updated, 0 files merged, 0 files removed, 1 files unresolved
+ use 'hg resolve' to retry unresolved file merges or 'hg merge --abort' to abandon
+ $ echo 5 > outside/f
+ $ rm outside/f.orig
+ $ hg resolve --mark outside/f
+ (no more unresolved files)
+ $ hg commit -m 'merge a/b 5'
+ $ echo 6 > outside/f
+ $ hg commit -Aqm 'outside 6'
+
+ $ hg merge -r 'desc("outside 4c")' 2>&1 | egrep -v '(warning:|incomplete!)'
+ merging outside/f
+ 0 files updated, 0 files merged, 0 files removed, 1 files unresolved
+ use 'hg resolve' to retry unresolved file merges or 'hg merge --abort' to abandon
+ $ echo 7 > outside/f
+ $ rm outside/f.orig
+ $ hg resolve --mark outside/f
+ (no more unresolved files)
+ $ hg commit -Aqm 'merge a/b/c 7'
+ $ echo 8 > outside/f
+ $ hg commit -Aqm 'outside 8'
+
+ $ hg merge -r 'desc("outside 4d")' 2>&1 | egrep -v '(warning:|incomplete!)'
+ merging outside/f
+ 0 files updated, 0 files merged, 0 files removed, 1 files unresolved
+ use 'hg resolve' to retry unresolved file merges or 'hg merge --abort' to abandon
+ $ echo 9 > outside/f
+ $ rm outside/f.orig
+ $ hg resolve --mark outside/f
+ (no more unresolved files)
+ $ hg commit -Aqm 'merge a/b/c/d 9'
+ $ echo 10 > outside/f
+ $ hg commit -Aqm 'outside 10'
+
+ $ echo 11 > inside/f
+ $ hg commit -Aqm 'inside 11'
+ $ echo 12 > outside/f
+ $ hg commit -Aqm 'outside 12'
+
+ $ hg log -G -T '{rev} {node|short} {desc}\n'
+ @ 21 8d874d57adea outside 12
+ |
+ o 20 7ef88b4dd4fa inside 11
+ |
+ o 19 2a20009de83e outside 10
+ |
+ o 18 3ac1f5779de3 merge a/b/c/d 9
+ |\
+ | o 17 38a9c2f7e546 outside 8
+ | |
+ | o 16 094aa62fc898 merge a/b/c 7
+ | |\
+ | | o 15 f29d083d32e4 outside 6
+ | | |
+ | | o 14 2dc11382541d merge a/b 5
+ | | |\
+ o | | | 13 27d07ef97221 outside 4d
+ | | | |
+ o | | | 12 465567bdfb2d inside 3
+ | | | |
+ o | | | 11 d1c61993ec83 outside 2d
+ | | | |
+ | o | | 10 56859a8e33b9 outside 4c
+ | | | |
+ | o | | 9 bb96a08b062a inside 3
+ | | | |
+ | o | | 8 b844052e7b3b outside 2c
+ |/ / /
+ | | o 7 9db2d8fcc2a6 outside 4b
+ | | |
+ | | o 6 6418167787a6 inside 3
+ | | |
+ +---o 5 77344f344d83 outside 2b
+ | |
+ | o 4 9cadde08dc9f outside 4a
+ | |
+ | o 3 019ef06f125b inside 3
+ | |
+ | o 2 75e40c075a19 outside 2a
+ |/
+ o 1 906d6c682641 initial outside
+ |
+ o 0 9f8e82b51004 initial inside
+
+
+Now narrow and shallow clone this and get a hopefully correct graph
+
+ $ cd ..
+ $ hg clone --narrow ssh://user@dummy/master narrow --include inside --depth 7
+ requesting all changes
+ adding changesets
+ adding manifests
+ adding file changes
+ added 8 changesets with 3 changes to 1 files
+ new changesets *:* (glob)
+ updating to branch default
+ 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
+ $ cd narrow
+
+To make updating the tests easier, we print the emitted nodes
+sorted. This makes it easier to identify when the same node structure
+has been emitted, just in a different order.
+
+ $ hg log -G -T '{rev} {node|short}{if(ellipsis,"...")} {desc}\n'
+ @ 7 8d874d57adea... outside 12
+ |
+ o 6 7ef88b4dd4fa inside 11
+ |
+ o 5 2a20009de83e... outside 10
+ |
+ o 4 3ac1f5779de3... merge a/b/c/d 9
+ |\
+ | o 3 465567bdfb2d inside 3
+ | |
+ | o 2 d1c61993ec83... outside 2d
+ |
+ o 1 bb96a08b062a inside 3
+ |
+ o 0 b844052e7b3b... outside 2c
+
+
+ $ hg log -T '{if(ellipsis,"...")}{node|short} {p1node|short} {p2node|short} {desc}\n' | sort
+ ...2a20009de83e 000000000000 3ac1f5779de3 outside 10
+ ...3ac1f5779de3 bb96a08b062a 465567bdfb2d merge a/b/c/d 9
+ ...8d874d57adea 7ef88b4dd4fa 000000000000 outside 12
+ ...b844052e7b3b 000000000000 000000000000 outside 2c
+ ...d1c61993ec83 000000000000 000000000000 outside 2d
+ 465567bdfb2d d1c61993ec83 000000000000 inside 3
+ 7ef88b4dd4fa 2a20009de83e 000000000000 inside 11
+ bb96a08b062a b844052e7b3b 000000000000 inside 3
+
+ $ cd ..
+
+Incremental test case: show that a pull can bring in a conflicted merge even if elided
+
+ $ hg init pullmaster
+ $ cd pullmaster
+ $ cat >> .hg/hgrc <<EOF
+ > [narrow]
+ > serveellipses=True
+ > EOF
+ $ mkdir inside outside
+ $ echo v1 > inside/f
+ $ echo v1 > outside/f
+ $ hg add inside/f outside/f
+ $ hg commit -m init
+
+ $ for line in a b c d
+ > do
+ > hg update -r 0
+ > echo v2$line > outside/f
+ > hg commit -m "outside 2$line"
+ > echo v2$line > inside/f
+ > hg commit -m "inside 2$line"
+ > echo v3$line > outside/f
+ > hg commit -m "outside 3$line"
+ > echo v4$line > outside/f
+ > hg commit -m "outside 4$line"
+ > done
+ 0 files updated, 0 files merged, 0 files removed, 0 files unresolved
+ 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
+ created new head
+ 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
+ created new head
+ 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
+ created new head
+
+ $ cd ..
+ $ hg clone --narrow ssh://user@dummy/pullmaster pullshallow \
+ > --include inside --depth 3
+ requesting all changes
+ adding changesets
+ adding manifests
+ adding file changes
+ added 12 changesets with 5 changes to 1 files (+3 heads)
+ new changesets *:* (glob)
+ updating to branch default
+ 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
+ $ cd pullshallow
+
+ $ hg log -G -T '{rev} {node|short}{if(ellipsis,"...")} {desc}\n'
+ @ 11 0ebbd712a0c8... outside 4d
+ |
+ o 10 0d4c867aeb23 inside 2d
+ |
+ o 9 e932969c3961... outside 2d
+
+ o 8 33d530345455... outside 4c
+ |
+ o 7 0ce6481bfe07 inside 2c
+ |
+ o 6 caa65c940632... outside 2c
+
+ o 5 3df233defecc... outside 4b
+ |
+ o 4 7162cc6d11a4 inside 2b
+ |
+ o 3 f2a632f0082d... outside 2b
+
+ o 2 b8a3da16ba49... outside 4a
+ |
+ o 1 53f543eb8e45 inside 2a
+ |
+ o 0 1be3e5221c6a... outside 2a
+
+ $ hg log -T '{if(ellipsis,"...")}{node|short} {p1node|short} {p2node|short} {desc}\n' | sort
+ ...0ebbd712a0c8 0d4c867aeb23 000000000000 outside 4d
+ ...1be3e5221c6a 000000000000 000000000000 outside 2a
+ ...33d530345455 0ce6481bfe07 000000000000 outside 4c
+ ...3df233defecc 7162cc6d11a4 000000000000 outside 4b
+ ...b8a3da16ba49 53f543eb8e45 000000000000 outside 4a
+ ...caa65c940632 000000000000 000000000000 outside 2c
+ ...e932969c3961 000000000000 000000000000 outside 2d
+ ...f2a632f0082d 000000000000 000000000000 outside 2b
+ 0ce6481bfe07 caa65c940632 000000000000 inside 2c
+ 0d4c867aeb23 e932969c3961 000000000000 inside 2d
+ 53f543eb8e45 1be3e5221c6a 000000000000 inside 2a
+ 7162cc6d11a4 f2a632f0082d 000000000000 inside 2b
+
+ $ cd ../pullmaster
+ $ hg update -r 'desc("outside 4a")'
+ 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
+ $ hg merge -r 'desc("outside 4b")' 2>&1 | egrep -v '(warning:|incomplete!)'
+ merging inside/f
+ merging outside/f
+ 0 files updated, 0 files merged, 0 files removed, 2 files unresolved
+ use 'hg resolve' to retry unresolved file merges or 'hg merge --abort' to abandon
+ $ echo 3 > inside/f
+ $ echo 5 > outside/f
+ $ rm -f {in,out}side/f.orig
+ $ hg resolve --mark inside/f outside/f
+ (no more unresolved files)
+ $ hg commit -m 'merge a/b 5'
+
+ $ hg update -r 'desc("outside 4c")'
+ 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
+ $ hg merge -r 'desc("outside 4d")' 2>&1 | egrep -v '(warning:|incomplete!)'
+ merging inside/f
+ merging outside/f
+ 0 files updated, 0 files merged, 0 files removed, 2 files unresolved
+ use 'hg resolve' to retry unresolved file merges or 'hg merge --abort' to abandon
+ $ echo 3 > inside/f
+ $ echo 5 > outside/f
+ $ rm -f {in,out}side/f.orig
+ $ hg resolve --mark inside/f outside/f
+ (no more unresolved files)
+ $ hg commit -m 'merge c/d 5'
+
+ $ hg update -r 'desc("merge a/b 5")'
+ 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
+ $ hg merge -r 'desc("merge c/d 5")'
+ 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
+ (branch merge, don't forget to commit)
+ $ echo 6 > outside/f
+ $ hg commit -m 'outside 6'
+ $ echo 7 > outside/f
+ $ hg commit -m 'outside 7'
+ $ echo 8 > outside/f
+ $ hg commit -m 'outside 8'
+
+ $ cd ../pullshallow
+ $ hg pull --depth 3
+ pulling from ssh://user@dummy/pullmaster
+ searching for changes
+ adding changesets
+ adding manifests
+ adding file changes
+ added 4 changesets with 3 changes to 1 files (-3 heads)
+ new changesets *:* (glob)
+ (run 'hg update' to get a working copy)
+
+ $ hg log -T '{if(ellipsis,"...")}{node|short} {p1node|short} {p2node|short} {desc}\n' | sort
+ ...0ebbd712a0c8 0d4c867aeb23 000000000000 outside 4d
+ ...1be3e5221c6a 000000000000 000000000000 outside 2a
+ ...33d530345455 0ce6481bfe07 000000000000 outside 4c
+ ...3df233defecc 7162cc6d11a4 000000000000 outside 4b
+ ...b8a3da16ba49 53f543eb8e45 000000000000 outside 4a
+ ...bf545653453e 968003d40c60 000000000000 outside 8
+ ...caa65c940632 000000000000 000000000000 outside 2c
+ ...e932969c3961 000000000000 000000000000 outside 2d
+ ...f2a632f0082d 000000000000 000000000000 outside 2b
+ 0ce6481bfe07 caa65c940632 000000000000 inside 2c
+ 0d4c867aeb23 e932969c3961 000000000000 inside 2d
+ 53f543eb8e45 1be3e5221c6a 000000000000 inside 2a
+ 67d49c0bdbda b8a3da16ba49 3df233defecc merge a/b 5
+ 7162cc6d11a4 f2a632f0082d 000000000000 inside 2b
+ 968003d40c60 67d49c0bdbda e867021d52c2 outside 6
+ e867021d52c2 33d530345455 0ebbd712a0c8 merge c/d 5
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/tests/test-narrow-shallow.t Sat Feb 24 17:49:10 2018 -0600
@@ -0,0 +1,122 @@
+ $ . "$TESTDIR/narrow-library.sh"
+
+ $ hg init master
+ $ cd master
+ $ cat >> .hg/hgrc <<EOF
+ > [narrow]
+ > serveellipses=True
+ > EOF
+ $ for x in `$TESTDIR/seq.py 10`
+ > do
+ > echo $x > "f$x"
+ > hg add "f$x"
+ > done
+ $ hg commit -m "Add root files"
+ $ mkdir d1 d2
+ $ for x in `$TESTDIR/seq.py 10`
+ > do
+ > echo d1/$x > "d1/f$x"
+ > hg add "d1/f$x"
+ > echo d2/$x > "d2/f$x"
+ > hg add "d2/f$x"
+ > done
+ $ hg commit -m "Add d1 and d2"
+ $ for x in `$TESTDIR/seq.py 10`
+ > do
+ > echo f$x rev2 > "f$x"
+ > echo d1/f$x rev2 > "d1/f$x"
+ > echo d2/f$x rev2 > "d2/f$x"
+ > hg commit -m "Commit rev2 of f$x, d1/f$x, d2/f$x"
+ > done
+ $ cd ..
+
+narrow and shallow clone the d2 directory
+
+ $ hg clone --narrow ssh://user@dummy/master shallow --include "d2" --depth 2
+ requesting all changes
+ adding changesets
+ adding manifests
+ adding file changes
+ added 4 changesets with 13 changes to 10 files
+ new changesets *:* (glob)
+ updating to branch default
+ 10 files updated, 0 files merged, 0 files removed, 0 files unresolved
+ $ cd shallow
+ $ hg log -T '{rev}{if(ellipsis,"...")}: {desc}\n'
+ 3: Commit rev2 of f10, d1/f10, d2/f10
+ 2: Commit rev2 of f9, d1/f9, d2/f9
+ 1: Commit rev2 of f8, d1/f8, d2/f8
+ 0...: Commit rev2 of f7, d1/f7, d2/f7
+ $ hg update 0
+ 3 files updated, 0 files merged, 0 files removed, 0 files unresolved
+ $ cat d2/f7 d2/f8
+ d2/f7 rev2
+ d2/8
+
+ $ cd ..
+
+change every upstream file once
+
+ $ cd master
+ $ for x in `$TESTDIR/seq.py 10`
+ > do
+ > echo f$x rev3 > "f$x"
+ > echo d1/f$x rev3 > "d1/f$x"
+ > echo d2/f$x rev3 > "d2/f$x"
+ > hg commit -m "Commit rev3 of f$x, d1/f$x, d2/f$x"
+ > done
+ $ cd ..
+
+pull new changes with --depth specified. There were 10 new upstream commits
+touching the d2 directory, but the shallow pull should fetch only the last 3 in full.
+
+ $ cd shallow
+ $ hg pull --depth 2
+ pulling from ssh://user@dummy/master
+ searching for changes
+ adding changesets
+ adding manifests
+ adding file changes
+ added 4 changesets with 10 changes to 10 files
+ new changesets *:* (glob)
+ (run 'hg update' to get a working copy)
+ $ hg log -T '{rev}{if(ellipsis,"...")}: {desc}\n'
+ 7: Commit rev3 of f10, d1/f10, d2/f10
+ 6: Commit rev3 of f9, d1/f9, d2/f9
+ 5: Commit rev3 of f8, d1/f8, d2/f8
+ 4...: Commit rev3 of f7, d1/f7, d2/f7
+ 3: Commit rev2 of f10, d1/f10, d2/f10
+ 2: Commit rev2 of f9, d1/f9, d2/f9
+ 1: Commit rev2 of f8, d1/f8, d2/f8
+ 0...: Commit rev2 of f7, d1/f7, d2/f7
+ $ hg update 4
+ merging d2/f1
+ merging d2/f2
+ merging d2/f3
+ merging d2/f4
+ merging d2/f5
+ merging d2/f6
+ merging d2/f7
+ 3 files updated, 7 files merged, 0 files removed, 0 files unresolved
+ $ cat d2/f7 d2/f8
+ d2/f7 rev3
+ d2/f8 rev2
+ $ hg update 7
+ 3 files updated, 0 files merged, 0 files removed, 0 files unresolved
+ $ cat d2/f10
+ d2/f10 rev3
+
+ $ cd ..
+
+cannot clone with zero or negative depth
+
+ $ hg clone --narrow ssh://user@dummy/master bad --include "d2" --depth 0
+ requesting all changes
+ remote: abort: depth must be positive, got 0
+ abort: pull failed on remote
+ [255]
+ $ hg clone --narrow ssh://user@dummy/master bad --include "d2" --depth -1
+ requesting all changes
+ remote: abort: depth must be positive, got -1
+ abort: pull failed on remote
+ [255]
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/tests/test-narrow-strip.t Sat Feb 24 17:49:10 2018 -0600
@@ -0,0 +1,163 @@
+#testcases flat tree
+
+ $ . "$TESTDIR/narrow-library.sh"
+
+#if tree
+ $ cat << EOF >> $HGRCPATH
+ > [experimental]
+ > treemanifest = 1
+ > EOF
+#endif
+
+create full repo
+
+ $ hg init master
+ $ cd master
+ $ cat >> .hg/hgrc <<EOF
+ > [narrow]
+ > serveellipses=True
+ > EOF
+
+ $ mkdir inside
+ $ echo inside > inside/f1
+ $ mkdir outside
+ $ echo outside > outside/f1
+ $ hg ci -Aqm 'initial'
+
+ $ echo modified > inside/f1
+ $ hg ci -qm 'modify inside'
+
+ $ hg co -q 0
+ $ echo modified > outside/f1
+ $ hg ci -qm 'modify outside'
+
+ $ echo modified again >> outside/f1
+ $ hg ci -qm 'modify outside again'
+
+ $ cd ..
+
+ $ hg clone --narrow ssh://user@dummy/master narrow --include inside
+ requesting all changes
+ adding changesets
+ adding manifests
+ adding file changes
+ added 3 changesets with 2 changes to 1 files (+1 heads)
+ new changesets *:* (glob)
+ updating to branch default
+ 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
+ $ cd narrow
+ $ cat >> $HGRCPATH <<EOF
+ > [extensions]
+ > strip=
+ > EOF
+
+Can strip and recover changesets affecting only files within narrow spec
+
+ $ hg co -r 'desc("modify inside")'
+ 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
+ $ rm -f $TESTTMP/narrow/.hg/strip-backup/*-backup.hg
+ $ hg strip .
+ 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
+ saved backup bundle to $TESTTMP/narrow/.hg/strip-backup/*-backup.hg (glob)
+ $ hg unbundle .hg/strip-backup/*-backup.hg
+ adding changesets
+ adding manifests
+ adding file changes
+ added 1 changesets with 1 changes to 1 files (+1 heads)
+ new changesets * (glob)
+ (run 'hg heads' to see heads, 'hg merge' to merge)
+
+Can strip and recover changesets affecting files outside of narrow spec
+
+ $ hg co -r 'desc("modify outside")'
+ 0 files updated, 0 files merged, 0 files removed, 0 files unresolved
+ $ hg log -G -T '{rev} {desc}\n'
+ o 2 modify inside
+ |
+ | @ 1 modify outside again
+ |/
+ o 0 initial
+
+ $ hg debugdata -m 1
+ inside/f1\x004d6a634d5ba06331a60c29ee0db8412490a54fcd (esc) (flat !)
+ outside/f1\x0084ba604d54dee1f13310ce3d4ac2e8a36636691a (esc) (flat !)
+ inside\x006a8bc41df94075d501f9740587a0c0e13c170dc5t (esc) (tree !)
+ outside\x00255c2627ebdd3c7dcaa6945246f9b9f02bd45a09t (esc) (tree !)
+
+ $ rm -f $TESTTMP/narrow/.hg/strip-backup/*-backup.hg
+ $ hg strip .
+ 0 files updated, 0 files merged, 0 files removed, 0 files unresolved
+ saved backup bundle to $TESTTMP/narrow/.hg/strip-backup/*-backup.hg (glob)
+ $ hg unbundle .hg/strip-backup/*-backup.hg
+ adding changesets
+ adding manifests
+ adding file changes
+ added 1 changesets with 0 changes to 0 files (+1 heads)
+ new changesets * (glob)
+ (run 'hg heads' to see heads, 'hg merge' to merge)
+ $ hg log -G -T '{rev} {desc}\n'
+ o 2 modify outside again
+ |
+ | o 1 modify inside
+ |/
+ @ 0 initial
+
+Check that hash of file outside narrow spec got restored
+ $ hg debugdata -m 2
+ inside/f1\x004d6a634d5ba06331a60c29ee0db8412490a54fcd (esc) (flat !)
+ outside/f1\x0084ba604d54dee1f13310ce3d4ac2e8a36636691a (esc) (flat !)
+ inside\x006a8bc41df94075d501f9740587a0c0e13c170dc5t (esc) (tree !)
+ outside\x00255c2627ebdd3c7dcaa6945246f9b9f02bd45a09t (esc) (tree !)
+
+Also verify we can apply the bundle with 'hg pull':
+ $ hg co -r 'desc("modify inside")'
+ 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
+ $ rm .hg/strip-backup/*-backup.hg
+ $ hg strip .
+ 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
+ saved backup bundle to $TESTTMP/narrow/.hg/strip-backup/*-backup.hg (glob)
+ $ hg pull .hg/strip-backup/*-backup.hg
+ pulling from .hg/strip-backup/*-backup.hg (glob)
+ searching for changes
+ adding changesets
+ adding manifests
+ adding file changes
+ added 1 changesets with 1 changes to 1 files (+1 heads)
+ new changesets * (glob)
+ (run 'hg heads' to see heads, 'hg merge' to merge)
+
+ $ rm .hg/strip-backup/*-backup.hg
+ $ hg strip 0
+ 0 files updated, 0 files merged, 1 files removed, 0 files unresolved
+ saved backup bundle to $TESTTMP/narrow/.hg/strip-backup/*-backup.hg (glob)
+ $ hg incoming .hg/strip-backup/*-backup.hg
+ comparing with .hg/strip-backup/*-backup.hg (glob)
+ changeset: 0:* (glob)
+ user: test
+ date: Thu Jan 01 00:00:00 1970 +0000
+ summary: initial
+
+ changeset: 1:9e48d953700d (flat !)
+ changeset: 1:3888164bccf0 (tree !)
+ user: test
+ date: Thu Jan 01 00:00:00 1970 +0000
+ summary: modify outside again
+
+ changeset: 2:f505d5e96aa8 (flat !)
+ changeset: 2:40b66f95a209 (tree !)
+ tag: tip
+ parent: 0:a99f4d53924d (flat !)
+ parent: 0:c2a5fabcca3c (tree !)
+ user: test
+ date: Thu Jan 01 00:00:00 1970 +0000
+ summary: modify inside
+
+ $ hg pull .hg/strip-backup/*-backup.hg
+ pulling from .hg/strip-backup/*-backup.hg (glob)
+ requesting all changes
+ adding changesets
+ adding manifests
+ adding file changes
+ added 3 changesets with 2 changes to 1 files (+1 heads)
+ new changesets *:* (glob)
+ (run 'hg heads' to see heads, 'hg merge' to merge)
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/tests/test-narrow-update.t Sat Feb 24 17:49:10 2018 -0600
@@ -0,0 +1,76 @@
+
+ $ . "$TESTDIR/narrow-library.sh"
+
+create full repo
+
+ $ hg init master
+ $ cd master
+ $ echo init > init
+ $ hg ci -Aqm 'initial'
+
+ $ mkdir inside
+ $ echo inside > inside/f1
+ $ mkdir outside
+ $ echo outside > outside/f1
+ $ hg ci -Aqm 'add inside and outside'
+
+ $ echo modified > inside/f1
+ $ hg ci -qm 'modify inside'
+
+ $ echo modified > outside/f1
+ $ hg ci -qm 'modify outside'
+
+ $ cd ..
+
+ $ hg clone --narrow ssh://user@dummy/master narrow --include inside
+ requesting all changes
+ adding changesets
+ adding manifests
+ adding file changes
+ added 4 changesets with 2 changes to 1 files
+ new changesets *:* (glob)
+ updating to branch default
+ 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
+ $ cd narrow
+ $ hg debugindex -c
+ rev offset length base linkrev nodeid p1 p2
+ 0 0 64 0 0 9958b1af2add 000000000000 000000000000
+ 1 64 81 1 1 2db4ce2a3bfe 9958b1af2add 000000000000
+ 2 145 75 2 2 0980ee31a742 2db4ce2a3bfe 000000000000
+ 3 220 (76|77) 3 3 4410145019b7 0980ee31a742 000000000000 (re)
+
+ $ hg update -q 0
+
+Can update to revision with changes inside
+
+ $ hg update -q 'desc("add inside and outside")'
+ $ hg update -q 'desc("modify inside")'
+ $ find *
+ inside
+ inside/f1
+ $ cat inside/f1
+ modified
+
+Can update to revision with changes outside
+
+ $ hg update -q 'desc("modify outside")'
+ $ find *
+ inside
+ inside/f1
+ $ cat inside/f1
+ modified
+
+Can update with a deleted file inside
+
+ $ hg rm inside/f1
+ $ hg update -q 'desc("modify inside")'
+ $ hg update -q 'desc("modify outside")'
+ $ hg update -q 'desc("initial")'
+ $ hg update -q 'desc("modify inside")'
+
+Can update with a moved file inside
+
+ $ hg mv inside/f1 inside/f2
+ $ hg update -q 'desc("modify outside")'
+ $ hg update -q 'desc("initial")'
+ $ hg update -q 'desc("modify inside")'
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/tests/test-narrow-widen.t Sat Feb 24 17:49:10 2018 -0600
@@ -0,0 +1,365 @@
+#testcases flat tree
+ $ . "$TESTDIR/narrow-library.sh"
+
+#if tree
+ $ cat << EOF >> $HGRCPATH
+ > [experimental]
+ > treemanifest = 1
+ > EOF
+#endif
+
+ $ hg init master
+ $ cd master
+ $ cat >> .hg/hgrc <<EOF
+ > [narrow]
+ > serveellipses=True
+ > EOF
+
+ $ mkdir inside
+ $ echo 'inside' > inside/f
+ $ hg add inside/f
+ $ hg commit -m 'add inside'
+
+ $ mkdir widest
+ $ echo 'widest' > widest/f
+ $ hg add widest/f
+ $ hg commit -m 'add widest'
+
+ $ mkdir outside
+ $ echo 'outside' > outside/f
+ $ hg add outside/f
+ $ hg commit -m 'add outside'
+
+ $ cd ..
+
+narrow clone the inside file
+
+ $ hg clone --narrow ssh://user@dummy/master narrow --include inside
+ requesting all changes
+ adding changesets
+ adding manifests
+ adding file changes
+ added 2 changesets with 1 changes to 1 files
+ new changesets *:* (glob)
+ updating to branch default
+ 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
+ $ cd narrow
+ $ hg tracked
+ I path:inside
+ $ ls
+ inside
+ $ cat inside/f
+ inside
+ $ cd ..
+
+add more upstream files which we will include in a wider narrow spec
+
+ $ cd master
+
+ $ mkdir wider
+ $ echo 'wider' > wider/f
+ $ hg add wider/f
+ $ echo 'widest v2' > widest/f
+ $ hg commit -m 'add wider, update widest'
+
+ $ echo 'widest v3' > widest/f
+ $ hg commit -m 'update widest v3'
+
+ $ echo 'inside v2' > inside/f
+ $ hg commit -m 'update inside'
+
+ $ mkdir outside2
+ $ echo 'outside2' > outside2/f
+ $ hg add outside2/f
+ $ hg commit -m 'add outside2'
+
+ $ echo 'widest v4' > widest/f
+ $ hg commit -m 'update widest v4'
+
+ $ hg log -T "{if(ellipsis, '...')}{node|short}: {desc}\n"
+ *: update widest v4 (glob)
+ *: add outside2 (glob)
+ *: update inside (glob)
+ *: update widest v3 (glob)
+ *: add wider, update widest (glob)
+ *: add outside (glob)
+ *: add widest (glob)
+ *: add inside (glob)
+
+ $ cd ..
+
+Widen the narrow spec to see the wider file. This should not get the newly
+added upstream revisions.
+
+ $ cd narrow
+ $ hg tracked --addinclude wider/f
+ comparing with ssh://user@dummy/master
+ searching for changes
+ no changes found
+ saved backup bundle to $TESTTMP/narrow/.hg/strip-backup/*-widen.hg (glob)
+ adding changesets
+ adding manifests
+ adding file changes
+ added 2 changesets with 1 changes to 1 files
+ new changesets *:* (glob)
+ $ hg tracked
+ I path:inside
+ I path:wider/f
+
+Pull down the newly added upstream revision.
+
+ $ hg pull
+ pulling from ssh://user@dummy/master
+ searching for changes
+ adding changesets
+ adding manifests
+ adding file changes
+ added 4 changesets with 2 changes to 2 files
+ new changesets *:* (glob)
+ (run 'hg update' to get a working copy)
+ $ hg update -r 'desc("add wider")'
+ 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
+ $ cat wider/f
+ wider
+
+ $ hg update -r 'desc("update inside")'
+ 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
+ $ cat wider/f
+ wider
+ $ cat inside/f
+ inside v2
+
+ $ hg log -T "{if(ellipsis, '...')}{node|short}: {desc}\n"
+ ...*: update widest v4 (glob)
+ *: update inside (glob)
+ ...*: update widest v3 (glob)
+ *: add wider, update widest (glob)
+ ...*: add outside (glob)
+ *: add inside (glob)
+
+Check that widening with a newline fails
+
+ $ hg tracked --addinclude 'widest
+ > '
+ abort: newlines are not allowed in narrowspec paths
+ [255]
+
+widen the narrow spec to include the widest file
+
+ $ hg tracked --addinclude widest
+ comparing with ssh://user@dummy/master
+ searching for changes
+ no changes found
+ saved backup bundle to $TESTTMP/narrow/.hg/strip-backup/*-widen.hg (glob)
+ adding changesets
+ adding manifests
+ adding file changes
+ added 8 changesets with 7 changes to 3 files
+ new changesets *:* (glob)
+ $ hg tracked
+ I path:inside
+ I path:wider/f
+ I path:widest
+ $ hg update 'desc("add widest")'
+ 2 files updated, 0 files merged, 1 files removed, 0 files unresolved
+ $ cat widest/f
+ widest
+ $ hg update 'desc("add wider, update widest")'
+ 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
+ $ cat wider/f
+ wider
+ $ cat widest/f
+ widest v2
+ $ hg update 'desc("update widest v3")'
+ 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
+ $ cat widest/f
+ widest v3
+ $ hg update 'desc("update widest v4")'
+ 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
+ $ cat widest/f
+ widest v4
+
+ $ hg log -T "{if(ellipsis, '...')}{node|short}: {desc}\n"
+ *: update widest v4 (glob)
+ ...*: add outside2 (glob)
+ *: update inside (glob)
+ *: update widest v3 (glob)
+ *: add wider, update widest (glob)
+ ...*: add outside (glob)
+ *: add widest (glob)
+ *: add inside (glob)
+
+separate suite of tests: directories d0-d10, each added in its own changeset
+(revs 0-10). This allows more precise tests that tickle particular corner cases.
+
+ $ cd ..
+ $ hg init upstream
+ $ cd upstream
+ $ cat >> .hg/hgrc <<EOF
+ > [narrow]
+ > serveellipses=True
+ > EOF
+ $ for x in `$TESTDIR/seq.py 0 10`
+ > do
+ > mkdir d$x
+ > echo $x > d$x/f
+ > hg add d$x/f
+ > hg commit -m "add d$x/f"
+ > done
+ $ hg log -T "{node|short}: {desc}\n"
+ *: add d10/f (glob)
+ *: add d9/f (glob)
+ *: add d8/f (glob)
+ *: add d7/f (glob)
+ *: add d6/f (glob)
+ *: add d5/f (glob)
+ *: add d4/f (glob)
+ *: add d3/f (glob)
+ *: add d2/f (glob)
+ *: add d1/f (glob)
+ *: add d0/f (glob)
+
+Make a narrow clone with every third node.
+
+ $ cd ..
+ $ hg clone --narrow ssh://user@dummy/upstream narrow2 --include d0 --include d3 --include d6 --include d9
+ requesting all changes
+ adding changesets
+ adding manifests
+ adding file changes
+ added 8 changesets with 4 changes to 4 files
+ new changesets *:* (glob)
+ updating to branch default
+ 4 files updated, 0 files merged, 0 files removed, 0 files unresolved
+ $ cd narrow2
+ $ hg tracked
+ I path:d0
+ I path:d3
+ I path:d6
+ I path:d9
+ $ hg verify
+ checking changesets
+ checking manifests
+ checking directory manifests (tree !)
+ crosschecking files in changesets and manifests
+ checking files
+ 4 files, 8 changesets, 4 total revisions
+ $ hg log -T "{if(ellipsis, '...')}{node|short}: {desc}\n"
+ ...*: add d10/f (glob)
+ *: add d9/f (glob)
+ ...*: add d8/f (glob)
+ *: add d6/f (glob)
+ ...*: add d5/f (glob)
+ *: add d3/f (glob)
+ ...*: add d2/f (glob)
+ *: add d0/f (glob)
+ $ hg tracked --addinclude d1
+ comparing with ssh://user@dummy/upstream
+ searching for changes
+ no changes found
+ saved backup bundle to $TESTTMP/narrow2/.hg/strip-backup/*-widen.hg (glob)
+ adding changesets
+ adding manifests
+ adding file changes
+ added 9 changesets with 5 changes to 5 files
+ new changesets *:* (glob)
+ $ hg tracked
+ I path:d0
+ I path:d1
+ I path:d3
+ I path:d6
+ I path:d9
+ $ hg log -T "{if(ellipsis, '...')}{node|short}: {desc}\n"
+ ...*: add d10/f (glob)
+ *: add d9/f (glob)
+ ...*: add d8/f (glob)
+ *: add d6/f (glob)
+ ...*: add d5/f (glob)
+ *: add d3/f (glob)
+ ...*: add d2/f (glob)
+ *: add d1/f (glob)
+ *: add d0/f (glob)
+
+Verify shouldn't claim the repo is corrupt after a widen.
+
+ $ hg verify
+ checking changesets
+ checking manifests
+ checking directory manifests (tree !)
+ crosschecking files in changesets and manifests
+ checking files
+ 5 files, 9 changesets, 5 total revisions
+
+Widening preserves parent of local commit
+
+ $ cd ..
+ $ hg clone -q --narrow ssh://user@dummy/upstream narrow3 --include d2 -r 2
+ $ cd narrow3
+ $ hg log -T "{if(ellipsis, '...')}{node|short}: {desc}\n"
+ *: add d2/f (glob)
+ ...*: add d1/f (glob)
+ $ hg pull -q -r 3
+ $ hg co -q tip
+ $ hg pull -q -r 4
+ $ echo local > d2/f
+ $ hg ci -m local
+ created new head
+ $ hg tracked -q --addinclude d0 --addinclude d9
+
+Widening preserves bookmarks
+
+ $ cd ..
+ $ hg clone -q --narrow ssh://user@dummy/upstream narrow-bookmarks --include d4
+ $ cd narrow-bookmarks
+ $ echo local > d4/f
+ $ hg ci -m local
+ $ hg bookmarks bookmark
+ $ hg bookmarks
+ * bookmark 3:* (glob)
+ $ hg -q tracked --addinclude d2
+ $ hg bookmarks
+ * bookmark 5:* (glob)
+ $ hg log -r bookmark -T '{desc}\n'
+ local
+
+Widening that fails can be recovered from
+
+ $ cd ..
+ $ hg clone -q --narrow ssh://user@dummy/upstream interrupted --include d0
+ $ cd interrupted
+ $ echo local > d0/f
+ $ hg ci -m local
+ $ hg log -T "{if(ellipsis, '...')}{rev}: {desc}\n"
+ 2: local
+ ...1: add d10/f
+ 0: add d0/f
+ $ hg bookmarks bookmark
+ $ hg --config hooks.pretxnchangegroup.bad=false tracked --addinclude d1
+ comparing with ssh://user@dummy/upstream
+ searching for changes
+ no changes found
+ saved backup bundle to $TESTTMP/interrupted/.hg/strip-backup/*-widen.hg (glob)
+ adding changesets
+ adding manifests
+ adding file changes
+ added 3 changesets with 2 changes to 2 files
+ transaction abort!
+ rollback completed
+ abort: pretxnchangegroup.bad hook exited with status 1
+ [255]
+ $ hg log -T "{if(ellipsis, '...')}{rev}: {desc}\n"
+ $ hg bookmarks
+ no bookmarks set
+ $ hg unbundle .hg/strip-backup/*-widen.hg
+ adding changesets
+ adding manifests
+ adding file changes
+ added 3 changesets with 2 changes to 1 files
+ new changesets *:* (glob)
+ (run 'hg update' to get a working copy)
+ $ hg log -T "{if(ellipsis, '...')}{rev}: {desc}\n"
+ 2: local
+ ...1: add d10/f
+ 0: add d0/f
+ $ hg bookmarks
+ * bookmark 2:* (glob)
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/tests/test-narrow.t Sat Feb 24 17:49:10 2018 -0600
@@ -0,0 +1,374 @@
+#testcases flat tree
+
+ $ . "$TESTDIR/narrow-library.sh"
+
+#if tree
+ $ cat << EOF >> $HGRCPATH
+ > [experimental]
+ > treemanifest = 1
+ > EOF
+#endif
+
+ $ hg init master
+ $ cd master
+ $ cat >> .hg/hgrc <<EOF
+ > [narrow]
+ > serveellipses=True
+ > EOF
+ $ for x in `$TESTDIR/seq.py 0 10`
+ > do
+ > mkdir d$x
+ > echo $x > d$x/f
+ > hg add d$x/f
+ > hg commit -m "add d$x/f"
+ > done
+ $ hg log -T "{node|short}: {desc}\n"
+ *: add d10/f (glob)
+ *: add d9/f (glob)
+ *: add d8/f (glob)
+ *: add d7/f (glob)
+ *: add d6/f (glob)
+ *: add d5/f (glob)
+ *: add d4/f (glob)
+ *: add d3/f (glob)
+ *: add d2/f (glob)
+ *: add d1/f (glob)
+ *: add d0/f (glob)
+ $ cd ..
+
+Error if '.' or '..' is in the directory to track.
+ $ hg clone --narrow ssh://user@dummy/master foo --include ./asdf
+ requesting all changes
+ abort: "." and ".." are not allowed in narrowspec paths
+ [255]
+ $ hg clone --narrow ssh://user@dummy/master foo --include asdf/..
+ requesting all changes
+ abort: "." and ".." are not allowed in narrowspec paths
+ [255]
+ $ hg clone --narrow ssh://user@dummy/master foo --include a/./c
+ requesting all changes
+ abort: "." and ".." are not allowed in narrowspec paths
+ [255]
+
+Names with '.' in them are OK.
+ $ hg clone --narrow ssh://user@dummy/master should-work --include a/.b/c
+ requesting all changes
+ adding changesets
+ adding manifests
+ adding file changes
+ added 1 changesets with 0 changes to 0 files
+ new changesets * (glob)
+ updating to branch default
+ 0 files updated, 0 files merged, 0 files removed, 0 files unresolved
+
+Test repo with local changes
+ $ hg clone --narrow ssh://user@dummy/master narrow-local-changes --include d0 --include d3 --include d6
+ requesting all changes
+ adding changesets
+ adding manifests
+ adding file changes
+ added 6 changesets with 3 changes to 3 files
+ new changesets *:* (glob)
+ updating to branch default
+ 3 files updated, 0 files merged, 0 files removed, 0 files unresolved
+ $ cd narrow-local-changes
+ $ cat >> $HGRCPATH << EOF
+ > [experimental]
+ > evolution=createmarkers
+ > EOF
+ $ echo local change >> d0/f
+ $ hg ci -m 'local change to d0'
+ $ hg co '.^'
+ 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
+ $ echo local change >> d3/f
+ $ hg ci -m 'local hidden change to d3'
+ created new head
+ $ hg ci --amend -m 'local change to d3'
+ $ hg tracked --removeinclude d0
+ comparing with ssh://user@dummy/master
+ searching for changes
+ looking for local changes to affected paths
+ The following changeset(s) or their ancestors have local changes not on the remote:
+ * (glob)
+ abort: local changes found
+ (use --force-delete-local-changes to ignore)
+ [255]
+Check that nothing was removed by the failed attempts
+ $ hg tracked
+ I path:d0
+ I path:d3
+ I path:d6
+ $ hg files
+ d0/f
+ d3/f
+ d6/f
+ $ find *
+ d0
+ d0/f
+ d3
+ d3/f
+ d6
+ d6/f
+ $ hg verify -q
+Force deletion of local changes
+ $ hg log -T "{node|short}: {desc} {outsidenarrow}\n"
+ *: local change to d3 (glob)
+ *: local change to d0 (glob)
+ *: add d10/f outsidenarrow (glob)
+ *: add d6/f (glob)
+ *: add d5/f outsidenarrow (glob)
+ *: add d3/f (glob)
+ *: add d2/f outsidenarrow (glob)
+ *: add d0/f (glob)
+ $ hg tracked --removeinclude d0 --force-delete-local-changes
+ comparing with ssh://user@dummy/master
+ searching for changes
+ looking for local changes to affected paths
+ The following changeset(s) or their ancestors have local changes not on the remote:
+ * (glob)
+ saved backup bundle to $TESTTMP/narrow-local-changes/.hg/strip-backup/*-narrow.hg (glob)
+ deleting data/d0/f.i
+ deleting meta/d0/00manifest.i (tree !)
+ $ hg log -T "{node|short}: {desc} {outsidenarrow}\n"
+ *: local change to d3 (glob)
+ *: add d10/f outsidenarrow (glob)
+ *: add d6/f (glob)
+ *: add d5/f outsidenarrow (glob)
+ *: add d3/f (glob)
+ *: add d2/f outsidenarrow (glob)
+ *: add d0/f outsidenarrow (glob)
+Can restore stripped local changes after widening
+ $ hg tracked --addinclude d0 -q
+ $ hg unbundle .hg/strip-backup/*-narrow.hg -q
+ $ hg --hidden co -r 'desc("local change to d0")' -q
+ $ cat d0/f
+ 0
+ local change
+Pruned commits affecting removed paths should not prevent narrowing
+ $ hg co '.^'
+ 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
+ $ hg debugobsolete `hg log -T '{node}' -r 'desc("local change to d0")'`
+ obsoleted 1 changesets
+ $ hg tracked --removeinclude d0
+ comparing with ssh://user@dummy/master
+ searching for changes
+ looking for local changes to affected paths
+ saved backup bundle to $TESTTMP/narrow-local-changes/.hg/strip-backup/*-narrow.hg (glob)
+ deleting data/d0/f.i
+ deleting meta/d0/00manifest.i (tree !)
+Updates off of stripped commit if necessary
+ $ hg co -r 'desc("local change to d3")' -q
+ $ echo local change >> d6/f
+ $ hg ci -m 'local change to d6'
+ $ hg tracked --removeinclude d3 --force-delete-local-changes
+ comparing with ssh://user@dummy/master
+ searching for changes
+ looking for local changes to affected paths
+ The following changeset(s) or their ancestors have local changes not on the remote:
+ * (glob)
+ * (glob)
+ 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
+ saved backup bundle to $TESTTMP/narrow-local-changes/.hg/strip-backup/*-narrow.hg (glob)
+ deleting data/d3/f.i
+ deleting meta/d3/00manifest.i (tree !)
+ $ hg log -T '{desc}\n' -r .
+ add d10/f
+Updates to nullid if necessary
+ $ hg tracked --addinclude d3 -q
+ $ hg co null -q
+ $ mkdir d3
+ $ echo local change > d3/f
+ $ hg add d3/f
+ $ hg ci -m 'local change to d3'
+ created new head
+ $ hg tracked --removeinclude d3 --force-delete-local-changes
+ comparing with ssh://user@dummy/master
+ searching for changes
+ looking for local changes to affected paths
+ The following changeset(s) or their ancestors have local changes not on the remote:
+ * (glob)
+ 0 files updated, 0 files merged, 1 files removed, 0 files unresolved
+ saved backup bundle to $TESTTMP/narrow-local-changes/.hg/strip-backup/*-narrow.hg (glob)
+ deleting data/d3/f.i
+ deleting meta/d3/00manifest.i (tree !)
+ $ hg id
+ 000000000000
+ $ cd ..
+
+Can remove last include, making repo empty
+ $ hg clone --narrow ssh://user@dummy/master narrow-empty --include d0 -r 5
+ adding changesets
+ adding manifests
+ adding file changes
+ added 2 changesets with 1 changes to 1 files
+ new changesets *:* (glob)
+ updating to branch default
+ 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
+ $ cd narrow-empty
+ $ hg tracked --removeinclude d0
+ comparing with ssh://user@dummy/master
+ searching for changes
+ looking for local changes to affected paths
+ deleting data/d0/f.i
+ deleting meta/d0/00manifest.i (tree !)
+ $ hg tracked
+ $ hg files
+ [1]
+ $ test -d d0
+ [1]
+Do some work in the empty clone
+ $ hg diff --change .
+ $ hg branch foo
+ marked working directory as branch foo
+ (branches are permanent and global, did you want a bookmark?)
+ $ hg ci -m empty
+ $ hg pull -q
+Can widen the empty clone
+ $ hg tracked --addinclude d0
+ comparing with ssh://user@dummy/master
+ searching for changes
+ no changes found
+ saved backup bundle to $TESTTMP/narrow-empty/.hg/strip-backup/*-widen.hg (glob)
+ adding changesets
+ adding manifests
+ adding file changes
+ added 3 changesets with 1 changes to 1 files
+ new changesets *:* (glob)
+ $ hg tracked
+ I path:d0
+ $ hg files
+ d0/f
+ $ find *
+ d0
+ d0/f
+ $ cd ..
+
+TODO(martinvonz): test including e.g. d3/g and then removing it once
+https://bitbucket.org/Google/narrowhg/issues/6 is fixed
+
+ $ hg clone --narrow ssh://user@dummy/master narrow --include d0 --include d3 --include d6 --include d9
+ requesting all changes
+ adding changesets
+ adding manifests
+ adding file changes
+ added 8 changesets with 4 changes to 4 files
+ new changesets *:* (glob)
+ updating to branch default
+ 4 files updated, 0 files merged, 0 files removed, 0 files unresolved
+ $ cd narrow
+ $ hg tracked
+ I path:d0
+ I path:d3
+ I path:d6
+ I path:d9
+ $ hg tracked --removeinclude d6
+ comparing with ssh://user@dummy/master
+ searching for changes
+ looking for local changes to affected paths
+ deleting data/d6/f.i
+ deleting meta/d6/00manifest.i (tree !)
+ $ hg tracked
+ I path:d0
+ I path:d3
+ I path:d9
+ $ hg debugrebuildfncache
+ fncache already up to date
+ $ find *
+ d0
+ d0/f
+ d3
+ d3/f
+ d9
+ d9/f
+ $ hg verify -q
+ $ hg tracked --addexclude d3/f
+ comparing with ssh://user@dummy/master
+ searching for changes
+ looking for local changes to affected paths
+ deleting data/d3/f.i
+ $ hg tracked
+ I path:d0
+ I path:d3
+ I path:d9
+ X path:d3/f
+ $ hg debugrebuildfncache
+ fncache already up to date
+ $ find *
+ d0
+ d0/f
+ d9
+ d9/f
+ $ hg verify -q
+ $ hg tracked --addexclude d0
+ comparing with ssh://user@dummy/master
+ searching for changes
+ looking for local changes to affected paths
+ deleting data/d0/f.i
+ deleting meta/d0/00manifest.i (tree !)
+ $ hg tracked
+ I path:d3
+ I path:d9
+ X path:d0
+ X path:d3/f
+ $ hg debugrebuildfncache
+ fncache already up to date
+ $ find *
+ d9
+ d9/f
+
+Make 15 changes to d9 to test the path without --verbose
+(Note: using regexes instead of "* (glob)" because if the test fails, it
+produces more sensible diffs)
+ $ hg tracked
+ I path:d3
+ I path:d9
+ X path:d0
+ X path:d3/f
+ $ for x in `$TESTDIR/seq.py 1 15`
+ > do
+ > echo local change >> d9/f
+ > hg commit -m "change $x to d9/f"
+ > done
+ $ hg tracked --removeinclude d9
+ comparing with ssh://user@dummy/master
+ searching for changes
+ looking for local changes to affected paths
+ The following changeset(s) or their ancestors have local changes not on the remote:
+ ^[0-9a-f]{12}$ (re)
+ ^[0-9a-f]{12}$ (re)
+ ^[0-9a-f]{12}$ (re)
+ ^[0-9a-f]{12}$ (re)
+ ^[0-9a-f]{12}$ (re)
+ ^[0-9a-f]{12}$ (re)
+ ^[0-9a-f]{12}$ (re)
+ ^[0-9a-f]{12}$ (re)
+ ^[0-9a-f]{12}$ (re)
+ ^[0-9a-f]{12}$ (re)
+ ...and 5 more, use --verbose to list all
+ abort: local changes found
+ (use --force-delete-local-changes to ignore)
+ [255]
+Now test it *with* --verbose.
+ $ hg tracked --removeinclude d9 --verbose
+ comparing with ssh://user@dummy/master
+ searching for changes
+ looking for local changes to affected paths
+ The following changeset(s) or their ancestors have local changes not on the remote:
+ ^[0-9a-f]{12}$ (re)
+ ^[0-9a-f]{12}$ (re)
+ ^[0-9a-f]{12}$ (re)
+ ^[0-9a-f]{12}$ (re)
+ ^[0-9a-f]{12}$ (re)
+ ^[0-9a-f]{12}$ (re)
+ ^[0-9a-f]{12}$ (re)
+ ^[0-9a-f]{12}$ (re)
+ ^[0-9a-f]{12}$ (re)
+ ^[0-9a-f]{12}$ (re)
+ ^[0-9a-f]{12}$ (re)
+ ^[0-9a-f]{12}$ (re)
+ ^[0-9a-f]{12}$ (re)
+ ^[0-9a-f]{12}$ (re)
+ ^[0-9a-f]{12}$ (re)
+ abort: local changes found
+ (use --force-delete-local-changes to ignore)
+ [255]
--- a/tests/test-notify.t Fri Feb 23 17:57:04 2018 -0800
+++ b/tests/test-notify.t Sat Feb 24 17:49:10 2018 -0600
@@ -421,7 +421,7 @@
> test = False
> mbox = mbox
> EOF
- $ $PYTHON -c 'file("a/a", "ab").write("no" * 500 + "\xd1\x84" + "\n")'
+ $ $PYTHON -c 'open("a/a", "ab").write("no" * 500 + "\xd1\x84" + "\n")'
$ hg --cwd a commit -A -m "long line"
$ hg --traceback --cwd b pull ../a
pulling from ../a
--- a/tests/test-obsolete-divergent.t Fri Feb 23 17:57:04 2018 -0800
+++ b/tests/test-obsolete-divergent.t Sat Feb 24 17:49:10 2018 -0600
@@ -621,6 +621,34 @@
a139f71be9da
$ hg log -r 'contentdivergent()'
+#if serve
+
+ $ hg serve -n test -p $HGPORT -d --pid-file=hg.pid --config web.view=all \
+ > -A access.log -E errors.log
+ $ cat hg.pid >> $DAEMON_PIDS
+
+check an obsolete changeset that was rewritten and also split
+
+ $ get-with-headers.py localhost:$HGPORT 'rev/e442cfc57690?style=paper' | egrep 'rewritten|split'
+ <td>rewritten as <a href="/rev/bed64f5d2f5a?style=paper">bed64f5d2f5a</a> by test <span class="age">Thu, 01 Jan 1970 00:00:00 +0000</span><br>
+ split as <a href="/rev/7ae126973a96?style=paper">7ae126973a96</a> <a href="/rev/14608b260df8?style=paper">14608b260df8</a> by test <span class="age">Thu, 01 Jan 1970 00:00:00 +0000</span></td>
+ $ get-with-headers.py localhost:$HGPORT 'rev/e442cfc57690?style=coal' | egrep 'rewritten|split'
+ <td>rewritten as <a href="/rev/bed64f5d2f5a?style=coal">bed64f5d2f5a</a> by test <span class="age">Thu, 01 Jan 1970 00:00:00 +0000</span><br>
+ split as <a href="/rev/7ae126973a96?style=coal">7ae126973a96</a> <a href="/rev/14608b260df8?style=coal">14608b260df8</a> by test <span class="age">Thu, 01 Jan 1970 00:00:00 +0000</span></td>
+ $ get-with-headers.py localhost:$HGPORT 'rev/e442cfc57690?style=gitweb' | egrep 'rewritten|split'
+ <td>rewritten as <a class="list" href="/rev/bed64f5d2f5a?style=gitweb">bed64f5d2f5a</a> by test <span class="age">Thu, 01 Jan 1970 00:00:00 +0000</span></td>
+ <td>split as <a class="list" href="/rev/7ae126973a96?style=gitweb">7ae126973a96</a> <a class="list" href="/rev/14608b260df8?style=gitweb">14608b260df8</a> by test <span class="age">Thu, 01 Jan 1970 00:00:00 +0000</span></td>
+ $ get-with-headers.py localhost:$HGPORT 'rev/e442cfc57690?style=monoblue' | egrep 'rewritten|split'
+ <dd>rewritten as <a href="/rev/bed64f5d2f5a?style=monoblue">bed64f5d2f5a</a> by test <span class="age">Thu, 01 Jan 1970 00:00:00 +0000</span></dd>
+ <dd>split as <a href="/rev/7ae126973a96?style=monoblue">7ae126973a96</a> <a href="/rev/14608b260df8?style=monoblue">14608b260df8</a> by test <span class="age">Thu, 01 Jan 1970 00:00:00 +0000</span></dd>
+ $ get-with-headers.py localhost:$HGPORT 'rev/e442cfc57690?style=spartan' | egrep 'rewritten|split'
+ <td class="obsolete">rewritten as <a href="/rev/bed64f5d2f5a?style=spartan">bed64f5d2f5a</a> by test <span class="age">Thu, 01 Jan 1970 00:00:00 +0000</span></td>
+ <td class="obsolete">split as <a href="/rev/7ae126973a96?style=spartan">7ae126973a96</a> <a href="/rev/14608b260df8?style=spartan">14608b260df8</a> by test <span class="age">Thu, 01 Jan 1970 00:00:00 +0000</span></td>
+
+ $ killdaemons.py
+
+#endif
+
$ cd ..
--- a/tests/test-obsolete.t Fri Feb 23 17:57:04 2018 -0800
+++ b/tests/test-obsolete.t Sat Feb 24 17:49:10 2018 -0600
@@ -1049,20 +1049,8 @@
$ get-with-headers.py localhost:$HGPORT 'log?rev=first(obsolete())&style=monoblue' | grep '<span class="logtags">'
<span class="logtags"><span class="phasetag" title="draft">draft</span> <span class="obsoletetag" title="obsolete">obsolete</span> </span>
$ get-with-headers.py localhost:$HGPORT 'log?rev=first(obsolete())&style=spartan' | grep 'class="obsolete"'
- <th class="obsolete">obsolete:</th>
- <td class="obsolete">pruned</td>
-
-check an obsolete changeset that has been rewritten
- $ get-with-headers.py localhost:$HGPORT 'rev/cda648ca50f5?style=paper' | grep rewritten
- <td>rewritten as <a href="/rev/3de5eca88c00?style=paper">3de5eca88c00</a> </td>
- $ get-with-headers.py localhost:$HGPORT 'rev/cda648ca50f5?style=coal' | grep rewritten
- <td>rewritten as <a href="/rev/3de5eca88c00?style=coal">3de5eca88c00</a> </td>
- $ get-with-headers.py localhost:$HGPORT 'rev/cda648ca50f5?style=gitweb' | grep rewritten
- <tr><td>obsolete</td><td>rewritten as <a class="list" href="/rev/3de5eca88c00?style=gitweb">3de5eca88c00</a> </td></tr>
- $ get-with-headers.py localhost:$HGPORT 'rev/cda648ca50f5?style=monoblue' | grep rewritten
- <dt>obsolete</dt><dd>rewritten as <a href="/rev/3de5eca88c00?style=monoblue">3de5eca88c00</a> </dd>
- $ get-with-headers.py localhost:$HGPORT 'rev/cda648ca50f5?style=spartan' | grep rewritten
- <td class="obsolete">rewritten as <a href="/rev/3de5eca88c00?style=spartan">3de5eca88c00</a> </td>
+ <th class="obsolete">obsolete:</th>
+ <td class="obsolete">pruned by test <span class="age">Thu, 01 Jan 1970 00:00:00 +0000</span></td>
check changeset with instabilities
--- a/tests/test-parseindex.t Fri Feb 23 17:57:04 2018 -0800
+++ b/tests/test-parseindex.t Sat Feb 24 17:49:10 2018 -0600
@@ -41,10 +41,17 @@
> def __getattr__(self, key):
> return getattr(self.real, key)
>
+ > def __enter__(self):
+ > self.real.__enter__()
+ > return self
+ >
+ > def __exit__(self, *args, **kwargs):
+ > return self.real.__exit__(*args, **kwargs)
+ >
> def opener(*args):
> o = vfs.vfs(*args)
- > def wrapper(*a):
- > f = o(*a)
+ > def wrapper(*a, **kwargs):
+ > f = o(*a, **kwargs)
> return singlebyteread(f)
> return wrapper
>
--- a/tests/test-patch-offset.t Fri Feb 23 17:57:04 2018 -0800
+++ b/tests/test-patch-offset.t Sat Feb 24 17:49:10 2018 -0600
@@ -5,7 +5,7 @@
> path = sys.argv[1]
> patterns = sys.argv[2:]
>
- > fp = file(path, 'wb')
+ > fp = open(path, 'wb')
> for pattern in patterns:
> count = int(pattern[0:-1])
> char = pattern[-1] + '\n'
--- a/tests/test-pathencode.py Fri Feb 23 17:57:04 2018 -0800
+++ b/tests/test-pathencode.py Sat Feb 24 17:49:10 2018 -0600
@@ -64,7 +64,7 @@
counts.pop(c, None)
t = sum(counts.itervalues()) / 100.0
fp.write('probtable = (')
- for i, (k, v) in enumerate(sorted(counts.iteritems(), key=lambda x: x[1],
+ for i, (k, v) in enumerate(sorted(counts.items(), key=lambda x: x[1],
reverse=True)):
if (i % 5) == 0:
fp.write('\n ')
--- a/tests/test-pending.t Fri Feb 23 17:57:04 2018 -0800
+++ b/tests/test-pending.t Sat Feb 24 17:49:10 2018 -0600
@@ -44,7 +44,7 @@
> import os, time
> from mercurial import ui, localrepo
> def rejecthook(ui, repo, hooktype, node, **opts):
- > ui.write('hook %s\\n' % repo['tip'].hex())
+ > ui.write(b'hook %s\\n' % repo[b'tip'].hex())
> # create the notify file so caller knows we're running
> fpath = os.path.join('$d', 'notify')
> f = open(fpath, 'w')
--- a/tests/test-pull.t Fri Feb 23 17:57:04 2018 -0800
+++ b/tests/test-pull.t Sat Feb 24 17:49:10 2018 -0600
@@ -1,5 +1,15 @@
#require serve
+#testcases sshv1 sshv2
+
+#if sshv2
+ $ cat >> $HGRCPATH << EOF
+ > [experimental]
+ > sshpeer.advertise-v2 = true
+ > sshserver.support-v2 = true
+ > EOF
+#endif
+
$ hg init test
$ cd test
--- a/tests/test-pushvars.t Fri Feb 23 17:57:04 2018 -0800
+++ b/tests/test-pushvars.t Sat Feb 24 17:49:10 2018 -0600
@@ -11,8 +11,6 @@
$ cat >> $HGRCPATH << EOF
> [hooks]
> pretxnchangegroup = sh $TESTTMP/pretxnchangegroup.sh
- > [experimental]
- > bundle2-exp = true
> EOF
$ hg init repo
--- a/tests/test-rebase-dest.t Fri Feb 23 17:57:04 2018 -0800
+++ b/tests/test-rebase-dest.t Sat Feb 24 17:49:10 2018 -0600
@@ -85,20 +85,20 @@
> from mercurial import registrar, revset, revsetlang, smartset
> revsetpredicate = registrar.revsetpredicate()
> cache = {}
- > @revsetpredicate('map')
+ > @revsetpredicate(b'map')
> def map(repo, subset, x):
> """(set, mapping)"""
- > setarg, maparg = revsetlang.getargs(x, 2, 2, '')
+ > setarg, maparg = revsetlang.getargs(x, 2, 2, b'')
> rset = revset.getset(repo, smartset.fullreposet(repo), setarg)
- > mapstr = revsetlang.getstring(maparg, '')
- > map = dict(a.split(':') for a in mapstr.split(','))
+ > mapstr = revsetlang.getstring(maparg, b'')
+ > map = dict(a.split(b':') for a in mapstr.split(b','))
> rev = rset.first()
> desc = repo[rev].description()
> newdesc = map.get(desc)
- > if newdesc == 'null':
+ > if newdesc == b'null':
> revs = [-1]
> else:
- > query = revsetlang.formatspec('desc(%s)', newdesc)
+ > query = revsetlang.formatspec(b'desc(%s)', newdesc)
> revs = repo.revs(query)
> return smartset.baseset(revs)
> EOF
--- a/tests/test-rebase-obsolete.t Fri Feb 23 17:57:04 2018 -0800
+++ b/tests/test-rebase-obsolete.t Sat Feb 24 17:49:10 2018 -0600
@@ -1218,6 +1218,46 @@
o 0:b173517d0057 a
+issue5782
+ $ hg strip -r 0:
+ $ hg debugdrawdag <<EOF
+ > d
+ > |
+ > c1 c # replace: c -> c1
+ > \ /
+ > b
+ > |
+ > a
+ > EOF
+ 1 new orphan changesets
+ $ hg debugobsolete `hg log -T "{node}" --hidden -r 'desc("c1")'`
+ obsoleted 1 changesets
+ $ hg log -G -r 'a': --hidden
+ * 4:76be324c128b d
+ |
+ | x 3:ef8a456de8fa c1 (pruned)
+ | |
+ x | 2:a82ac2b38757 c (rewritten using replace as 3:ef8a456de8fa)
+ |/
+ o 1:488e1b7e7341 b
+ |
+ o 0:b173517d0057 a
+
+ $ hg rebase -d 0 -r 2
+ rebasing 2:a82ac2b38757 "c" (c)
+ $ hg log -G -r 'a': --hidden
+ o 5:69ad416a4a26 c
+ |
+ | * 4:76be324c128b d
+ | |
+ | | x 3:ef8a456de8fa c1 (pruned)
+ | | |
+ | x | 2:a82ac2b38757 c (rewritten using replace as 3:ef8a456de8fa rewritten using rebase as 5:69ad416a4a26)
+ | |/
+ | o 1:488e1b7e7341 b
+ |/
+ o 0:b173517d0057 a
+
$ cd ..
Rebase merge where successor of one parent is equal to destination (issue5198)
--- a/tests/test-relink.t Fri Feb 23 17:57:04 2018 -0800
+++ b/tests/test-relink.t Sat Feb 24 17:49:10 2018 -0600
@@ -49,7 +49,7 @@
Test files are read in binary mode
- $ $PYTHON -c "file('.hg/store/data/dummy.i', 'wb').write('a\r\nb\n')"
+ $ $PYTHON -c "open('.hg/store/data/dummy.i', 'wb').write(b'a\r\nb\n')"
$ cd ..
@@ -68,7 +68,7 @@
$ echo b >> b
$ hg ci -m changeb
created new head
- $ $PYTHON -c "file('.hg/store/data/dummy.i', 'wb').write('a\nb\r\n')"
+ $ $PYTHON -c "open('.hg/store/data/dummy.i', 'wb').write(b'a\nb\r\n')"
relink
--- a/tests/test-revert-interactive.t Fri Feb 23 17:57:04 2018 -0800
+++ b/tests/test-revert-interactive.t Sat Feb 24 17:49:10 2018 -0600
@@ -420,4 +420,13 @@
$ cat a
0
+When the specified pattern does not exist, we should exit early (issue5789).
+
+ $ hg files
+ a
+ $ hg rev b
+ b: no such file in rev b40d1912accf
+ $ hg rev -i b
+ b: no such file in rev b40d1912accf
+
$ cd ..
--- a/tests/test-revset.t Fri Feb 23 17:57:04 2018 -0800
+++ b/tests/test-revset.t Sat Feb 24 17:49:10 2018 -0600
@@ -16,7 +16,7 @@
> return baseset()
> return baseset([3,3,2,2])
>
- > mercurial.revset.symbols['r3232'] = r3232
+ > mercurial.revset.symbols[b'r3232'] = r3232
> EOF
$ cat >> $HGRCPATH << EOF
> [extensions]
@@ -47,8 +47,8 @@
> cmdtable = {}
> command = registrar.command(cmdtable)
> @command(b'debugrevlistspec',
- > [('', 'optimize', None, 'print parsed tree after optimizing'),
- > ('', 'bin', None, 'unhexlify arguments')])
+ > [(b'', b'optimize', None, b'print parsed tree after optimizing'),
+ > (b'', b'bin', None, b'unhexlify arguments')])
> def debugrevlistspec(ui, repo, fmt, *args, **opts):
> if opts['bin']:
> args = map(nodemod.bin, args)
@@ -58,14 +58,14 @@
> ui.note(revsetlang.prettyformat(tree), "\n")
> if opts["optimize"]:
> opttree = revsetlang.optimize(revsetlang.analyze(tree))
- > ui.note("* optimized:\n", revsetlang.prettyformat(opttree),
- > "\n")
+ > ui.note(b"* optimized:\n", revsetlang.prettyformat(opttree),
+ > b"\n")
> func = revset.match(ui, expr, repo)
> revs = func(repo)
> if ui.verbose:
- > ui.note("* set:\n", smartset.prettyformat(revs), "\n")
+ > ui.note(b"* set:\n", smartset.prettyformat(revs), b"\n")
> for c in revs:
- > ui.write("%s\n" % c)
+ > ui.write(b"%s\n" % c)
> EOF
$ cat <<EOF >> $HGRCPATH
> [extensions]
--- a/tests/test-revset2.t Fri Feb 23 17:57:04 2018 -0800
+++ b/tests/test-revset2.t Sat Feb 24 17:49:10 2018 -0600
@@ -420,7 +420,7 @@
test that repeated `-r` options never eat up stack (issue4565)
(uses `-r 0::1` to avoid possible optimization at old-style parser)
- $ hg log -T '{rev}\n' `$PYTHON -c "for i in xrange(500): print '-r 0::1 ',"`
+ $ hg log -T '{rev}\n' `$PYTHON -c "for i in range(500): print '-r 0::1 ',"`
0
1
--- a/tests/test-run-tests.t Fri Feb 23 17:57:04 2018 -0800
+++ b/tests/test-run-tests.t Sat Feb 24 17:49:10 2018 -0600
@@ -374,6 +374,7 @@
</testsuite>
$ cat .testtimes
+ test-empty.t * (glob)
test-failure-unicode.t * (glob)
test-failure.t * (glob)
test-success.t * (glob)
--- a/tests/test-sparse.t Fri Feb 23 17:57:04 2018 -0800
+++ b/tests/test-sparse.t Sat Feb 24 17:49:10 2018 -0600
@@ -129,6 +129,10 @@
(include file with `hg debugsparse --include <pattern>` or use `hg add -s <file>` to include file directory while adding)
[255]
+But adding a truly excluded file shouldn't count
+
+ $ hg add hide3 -X hide3
+
Verify deleting sparseness while a file has changes fails
$ hg debugsparse --delete 'show*'
--- a/tests/test-ssh-bundle1.t Fri Feb 23 17:57:04 2018 -0800
+++ b/tests/test-ssh-bundle1.t Sat Feb 24 17:49:10 2018 -0600
@@ -1,6 +1,16 @@
This test is a duplicate of 'test-http.t' feel free to factor out
parts that are not bundle1/bundle2 specific.
+#testcases sshv1 sshv2
+
+#if sshv2
+ $ cat >> $HGRCPATH << EOF
+ > [experimental]
+ > sshpeer.advertise-v2 = true
+ > sshserver.support-v2 = true
+ > EOF
+#endif
+
$ cat << EOF >> $HGRCPATH
> [devel]
> # This test is dedicated to interaction through old bundle
@@ -465,11 +475,13 @@
$ hg pull --debug ssh://user@dummy/remote
pulling from ssh://user@dummy/remote
running .* ".*/dummyssh" ['"]user@dummy['"] ('|")hg -R remote serve --stdio('|") (re)
+ sending upgrade request: * proto=exp-ssh-v2-0001 (glob) (sshv2 !)
sending hello command
sending between command
- remote: 384
+ protocol upgraded to exp-ssh-v2-0001 (sshv2 !)
+ remote: 384 (sshv1 !)
remote: capabilities: lookup changegroupsubset branchmap pushkey known getbundle unbundlehash batch streamreqs=generaldelta,revlogv1 $USUAL_BUNDLE2_CAPS_SERVER$ unbundle=HG10GZ,HG10BZ,HG10UN
- remote: 1
+ remote: 1 (sshv1 !)
preparing listkeys for "bookmarks"
sending listkeys command
received listkey for "bookmarks": 45 bytes
--- a/tests/test-ssh-clone-r.t Fri Feb 23 17:57:04 2018 -0800
+++ b/tests/test-ssh-clone-r.t Sat Feb 24 17:49:10 2018 -0600
@@ -1,5 +1,15 @@
This test tries to exercise the ssh functionality with a dummy script
+#testcases sshv1 sshv2
+
+#if sshv2
+ $ cat >> $HGRCPATH << EOF
+ > [experimental]
+ > sshpeer.advertise-v2 = true
+ > sshserver.support-v2 = true
+ > EOF
+#endif
+
creating 'remote' repo
$ hg init remote
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/tests/test-ssh-proto.t Sat Feb 24 17:49:10 2018 -0600
@@ -0,0 +1,635 @@
+ $ cat >> $HGRCPATH << EOF
+ > [ui]
+ > ssh = $PYTHON "$TESTDIR/dummyssh"
+ > [devel]
+ > debug.peer-request = true
+ > [extensions]
+ > sshprotoext = $TESTDIR/sshprotoext.py
+ > EOF
+
+ $ hg init server
+ $ cd server
+ $ echo 0 > foo
+ $ hg -q add foo
+ $ hg commit -m initial
+ $ cd ..
+
+Test a normally behaving server, for sanity
+
+ $ hg --debug debugpeer ssh://user@dummy/server
+ running * "*/tests/dummyssh" 'user@dummy' 'hg -R server serve --stdio' (glob) (no-windows !)
+ running * "*\tests/dummyssh" "user@dummy" "hg -R server serve --stdio" (glob) (windows !)
+ devel-peer-request: hello
+ sending hello command
+ devel-peer-request: between
+ devel-peer-request: pairs: 81 bytes
+ sending between command
+ remote: 384
+ remote: capabilities: lookup changegroupsubset branchmap pushkey known getbundle unbundlehash batch streamreqs=generaldelta,revlogv1 $USUAL_BUNDLE2_CAPS_SERVER$ unbundle=HG10GZ,HG10BZ,HG10UN
+ remote: 1
+ url: ssh://user@dummy/server
+ local: no
+ pushable: yes
+
+Server should answer the "hello" command in isolation
+
+ $ hg -R server serve --stdio << EOF
+ > hello
+ > EOF
+ 384
+ capabilities: lookup changegroupsubset branchmap pushkey known getbundle unbundlehash batch streamreqs=generaldelta,revlogv1 $USUAL_BUNDLE2_CAPS_SERVER$ unbundle=HG10GZ,HG10BZ,HG10UN
+
+>=0.9.1 clients send a "hello" + "between" for the null range as part of the
+handshake. The server should reply with its capabilities and send "1\n\n"
+(a successful reply with an empty response) to the "between".
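+
+As an aside, the 81 in "pairs 81" is just the length of the value that follows:
+two 40-character hex nodes joined by "-". A quick sanity sketch (illustration
+only, not part of the exchange):
+
+ >>> nullrange = '-'.join(['0' * 40, '0' * 40])
+ >>> assert len(nullrange) == 81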
+
+ $ hg -R server serve --stdio << EOF
+ > hello
+ > between
+ > pairs 81
+ > 0000000000000000000000000000000000000000-0000000000000000000000000000000000000000
+ > EOF
+ 384
+ capabilities: lookup changegroupsubset branchmap pushkey known getbundle unbundlehash batch streamreqs=generaldelta,revlogv1 $USUAL_BUNDLE2_CAPS_SERVER$ unbundle=HG10GZ,HG10BZ,HG10UN
+ 1
+
+
+SSH banner is not printed by default, ignored by clients
+
+ $ SSHSERVERMODE=banner hg debugpeer ssh://user@dummy/server
+ url: ssh://user@dummy/server
+ local: no
+ pushable: yes
+
+--debug will print the banner
+
+ $ SSHSERVERMODE=banner hg --debug debugpeer ssh://user@dummy/server
+ running * "*/tests/dummyssh" 'user@dummy' 'hg -R server serve --stdio' (glob) (no-windows !)
+ running * "*\tests/dummyssh" "user@dummy" "hg -R server serve --stdio" (glob) (windows !)
+ devel-peer-request: hello
+ sending hello command
+ devel-peer-request: between
+ devel-peer-request: pairs: 81 bytes
+ sending between command
+ remote: banner: line 0
+ remote: banner: line 1
+ remote: banner: line 2
+ remote: banner: line 3
+ remote: banner: line 4
+ remote: banner: line 5
+ remote: banner: line 6
+ remote: banner: line 7
+ remote: banner: line 8
+ remote: banner: line 9
+ remote: 384
+ remote: capabilities: lookup changegroupsubset branchmap pushkey known getbundle unbundlehash batch streamreqs=generaldelta,revlogv1 $USUAL_BUNDLE2_CAPS_SERVER$ unbundle=HG10GZ,HG10BZ,HG10UN
+ remote: 1
+ url: ssh://user@dummy/server
+ local: no
+ pushable: yes
+
+And test the banner with the raw protocol
+
+ $ SSHSERVERMODE=banner hg -R server serve --stdio << EOF
+ > hello
+ > between
+ > pairs 81
+ > 0000000000000000000000000000000000000000-0000000000000000000000000000000000000000
+ > EOF
+ banner: line 0
+ banner: line 1
+ banner: line 2
+ banner: line 3
+ banner: line 4
+ banner: line 5
+ banner: line 6
+ banner: line 7
+ banner: line 8
+ banner: line 9
+ 384
+ capabilities: lookup changegroupsubset branchmap pushkey known getbundle unbundlehash batch streamreqs=generaldelta,revlogv1 $USUAL_BUNDLE2_CAPS_SERVER$ unbundle=HG10GZ,HG10BZ,HG10UN
+ 1
+
+
+Connecting to a <0.9.1 server that doesn't support the hello command.
+The client should refuse, as we dropped support for connecting to such
+servers.
+
+ $ SSHSERVERMODE=no-hello hg --debug debugpeer ssh://user@dummy/server
+ running * "*/tests/dummyssh" 'user@dummy' 'hg -R server serve --stdio' (glob) (no-windows !)
+ running * "*\tests/dummyssh" "user@dummy" "hg -R server serve --stdio" (glob) (windows !)
+ devel-peer-request: hello
+ sending hello command
+ devel-peer-request: between
+ devel-peer-request: pairs: 81 bytes
+ sending between command
+ remote: 0
+ remote: 1
+ abort: no suitable response from remote hg!
+ [255]
+
+Sending an unknown command to the server results in an empty response to that command
+
+ $ hg -R server serve --stdio << EOF
+ > pre-hello
+ > hello
+ > between
+ > pairs 81
+ > 0000000000000000000000000000000000000000-0000000000000000000000000000000000000000
+ > EOF
+ 0
+ 384
+ capabilities: lookup changegroupsubset branchmap pushkey known getbundle unbundlehash batch streamreqs=generaldelta,revlogv1 $USUAL_BUNDLE2_CAPS_SERVER$ unbundle=HG10GZ,HG10BZ,HG10UN
+ 1
+
+
+ $ hg --config sshpeer.mode=extra-handshake-commands --config sshpeer.handshake-mode=pre-no-args --debug debugpeer ssh://user@dummy/server
+ running * "*/tests/dummyssh" 'user@dummy' 'hg -R server serve --stdio' (glob) (no-windows !)
+ running * "*\tests/dummyssh" "user@dummy" "hg -R server serve --stdio" (glob) (windows !)
+ sending no-args command
+ devel-peer-request: hello
+ sending hello command
+ devel-peer-request: between
+ devel-peer-request: pairs: 81 bytes
+ sending between command
+ remote: 0
+ remote: 384
+ remote: capabilities: lookup changegroupsubset branchmap pushkey known getbundle unbundlehash batch streamreqs=generaldelta,revlogv1 $USUAL_BUNDLE2_CAPS_SERVER$ unbundle=HG10GZ,HG10BZ,HG10UN
+ remote: 1
+ url: ssh://user@dummy/server
+ local: no
+ pushable: yes
+
+Send multiple unknown commands before hello
+
+ $ hg -R server serve --stdio << EOF
+ > unknown1
+ > unknown2
+ > unknown3
+ > hello
+ > between
+ > pairs 81
+ > 0000000000000000000000000000000000000000-0000000000000000000000000000000000000000
+ > EOF
+ 0
+ 0
+ 0
+ 384
+ capabilities: lookup changegroupsubset branchmap pushkey known getbundle unbundlehash batch streamreqs=generaldelta,revlogv1 $USUAL_BUNDLE2_CAPS_SERVER$ unbundle=HG10GZ,HG10BZ,HG10UN
+ 1
+
+
+ $ hg --config sshpeer.mode=extra-handshake-commands --config sshpeer.handshake-mode=pre-multiple-no-args --debug debugpeer ssh://user@dummy/server
+ running * "*/tests/dummyssh" 'user@dummy' 'hg -R server serve --stdio' (glob) (no-windows !)
+ running * "*\tests/dummyssh" "user@dummy" "hg -R server serve --stdio" (glob) (windows !)
+ sending unknown1 command
+ sending unknown2 command
+ sending unknown3 command
+ devel-peer-request: hello
+ sending hello command
+ devel-peer-request: between
+ devel-peer-request: pairs: 81 bytes
+ sending between command
+ remote: 0
+ remote: 0
+ remote: 0
+ remote: 384
+ remote: capabilities: lookup changegroupsubset branchmap pushkey known getbundle unbundlehash batch streamreqs=generaldelta,revlogv1 $USUAL_BUNDLE2_CAPS_SERVER$ unbundle=HG10GZ,HG10BZ,HG10UN
+ remote: 1
+ url: ssh://user@dummy/server
+ local: no
+ pushable: yes
+
+Send an unknown command before hello that has arguments
+
+ $ hg -R server serve --stdio << EOF
+ > with-args
+ > foo 13
+ > value for foo
+ > bar 13
+ > value for bar
+ > hello
+ > between
+ > pairs 81
+ > 0000000000000000000000000000000000000000-0000000000000000000000000000000000000000
+ > EOF
+ 0
+ 0
+ 0
+ 0
+ 0
+ 384
+ capabilities: lookup changegroupsubset branchmap pushkey known getbundle unbundlehash batch streamreqs=generaldelta,revlogv1 $USUAL_BUNDLE2_CAPS_SERVER$ unbundle=HG10GZ,HG10BZ,HG10UN
+ 1
+
+
+Send an unknown command having an argument that looks numeric
+
+ $ hg -R server serve --stdio << EOF
+ > unknown
+ > foo 1
+ > 0
+ > hello
+ > between
+ > pairs 81
+ > 0000000000000000000000000000000000000000-0000000000000000000000000000000000000000
+ > EOF
+ 0
+ 0
+ 0
+ 384
+ capabilities: lookup changegroupsubset branchmap pushkey known getbundle unbundlehash batch streamreqs=generaldelta,revlogv1 $USUAL_BUNDLE2_CAPS_SERVER$ unbundle=HG10GZ,HG10BZ,HG10UN
+ 1
+
+
+ $ hg -R server serve --stdio << EOF
+ > unknown
+ > foo 1
+ > 1
+ > hello
+ > between
+ > pairs 81
+ > 0000000000000000000000000000000000000000-0000000000000000000000000000000000000000
+ > EOF
+ 0
+ 0
+ 0
+ 384
+ capabilities: lookup changegroupsubset branchmap pushkey known getbundle unbundlehash batch streamreqs=generaldelta,revlogv1 $USUAL_BUNDLE2_CAPS_SERVER$ unbundle=HG10GZ,HG10BZ,HG10UN
+ 1
+
+
+When sending a dict argument value, it is serialized to
+"<arg> <item count>" followed by "<key> <len>\n<value>" for each item
+in the dict.
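+
+A rough sketch of that framing for the three-item dict sent in the next test
+(illustration only, built by hand here rather than taken from the real peer
+code):
+
+ >>> items = [('key1', 'foo'), ('key2', 'bar'), ('key3', 'baz')]
+ >>> frame = ['dict %d' % len(items)]
+ >>> for k, v in items:
+ ...     frame += ['%s %d' % (k, len(v)), v]
+ >>> assert '\n'.join(frame) == 'dict 3\nkey1 3\nfoo\nkey2 3\nbar\nkey3 3\nbaz'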
+
+Dictionary value for unknown command
+
+ $ hg -R server serve --stdio << EOF
+ > unknown
+ > dict 3
+ > key1 3
+ > foo
+ > key2 3
+ > bar
+ > key3 3
+ > baz
+ > hello
+ > EOF
+ 0
+ 0
+ 0
+ 0
+ 0
+ 0
+ 0
+ 0
+ 384
+ capabilities: lookup changegroupsubset branchmap pushkey known getbundle unbundlehash batch streamreqs=generaldelta,revlogv1 $USUAL_BUNDLE2_CAPS_SERVER$ unbundle=HG10GZ,HG10BZ,HG10UN
+
+Incomplete dictionary send
+
+ $ hg -R server serve --stdio << EOF
+ > unknown
+ > dict 3
+ > key1 3
+ > foo
+ > EOF
+ 0
+ 0
+ 0
+ 0
+
+Incomplete value send
+
+ $ hg -R server serve --stdio << EOF
+ > unknown
+ > dict 3
+ > key1 3
+ > fo
+ > EOF
+ 0
+ 0
+ 0
+ 0
+
+Send a command line with spaces
+
+ $ hg -R server serve --stdio << EOF
+ > unknown withspace
+ > hello
+ > between
+ > pairs 81
+ > 0000000000000000000000000000000000000000-0000000000000000000000000000000000000000
+ > EOF
+ 0
+ 384
+ capabilities: lookup changegroupsubset branchmap pushkey known getbundle unbundlehash batch streamreqs=generaldelta,revlogv1 $USUAL_BUNDLE2_CAPS_SERVER$ unbundle=HG10GZ,HG10BZ,HG10UN
+ 1
+
+
+ $ hg -R server serve --stdio << EOF
+ > unknown with multiple spaces
+ > hello
+ > between
+ > pairs 81
+ > 0000000000000000000000000000000000000000-0000000000000000000000000000000000000000
+ > EOF
+ 0
+ 384
+ capabilities: lookup changegroupsubset branchmap pushkey known getbundle unbundlehash batch streamreqs=generaldelta,revlogv1 $USUAL_BUNDLE2_CAPS_SERVER$ unbundle=HG10GZ,HG10BZ,HG10UN
+ 1
+
+
+ $ hg -R server serve --stdio << EOF
+ > unknown with spaces
+ > key 10
+ > some value
+ > hello
+ > between
+ > pairs 81
+ > 0000000000000000000000000000000000000000-0000000000000000000000000000000000000000
+ > EOF
+ 0
+ 0
+ 0
+ 384
+ capabilities: lookup changegroupsubset branchmap pushkey known getbundle unbundlehash batch streamreqs=generaldelta,revlogv1 $USUAL_BUNDLE2_CAPS_SERVER$ unbundle=HG10GZ,HG10BZ,HG10UN
+ 1
+
+
+Send an unknown command after the "between"
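+
+Note that the "unknown" is glued onto the end of the 81-byte pairs value: the
+server reads exactly the advertised 81 bytes, so the trailing text is picked up
+as the next command and, as the output below shows, answered with the empty "0"
+reply. A small sketch of that split (illustration only):
+
+ >>> line = '0' * 40 + '-' + '0' * 40 + 'unknown'
+ >>> value, rest = line[:81], line[81:]
+ >>> assert (len(value), rest) == (81, 'unknown')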
+
+ $ hg -R server serve --stdio << EOF
+ > hello
+ > between
+ > pairs 81
+ > 0000000000000000000000000000000000000000-0000000000000000000000000000000000000000unknown
+ > EOF
+ 384
+ capabilities: lookup changegroupsubset branchmap pushkey known getbundle unbundlehash batch streamreqs=generaldelta,revlogv1 $USUAL_BUNDLE2_CAPS_SERVER$ unbundle=HG10GZ,HG10BZ,HG10UN
+ 1
+
+ 0
+
+And one with arguments
+
+ $ hg -R server serve --stdio << EOF
+ > hello
+ > between
+ > pairs 81
+ > 0000000000000000000000000000000000000000-0000000000000000000000000000000000000000unknown
+ > foo 5
+ > value
+ > bar 3
+ > baz
+ > EOF
+ 384
+ capabilities: lookup changegroupsubset branchmap pushkey known getbundle unbundlehash batch streamreqs=generaldelta,revlogv1 $USUAL_BUNDLE2_CAPS_SERVER$ unbundle=HG10GZ,HG10BZ,HG10UN
+ 1
+
+ 0
+ 0
+ 0
+ 0
+ 0
+
+Send a valid command before the handshake
+
+ $ hg -R server serve --stdio << EOF
+ > heads
+ > hello
+ > between
+ > pairs 81
+ > 0000000000000000000000000000000000000000-0000000000000000000000000000000000000000
+ > EOF
+ 41
+ 68986213bd4485ea51533535e3fc9e78007a711f
+ 384
+ capabilities: lookup changegroupsubset branchmap pushkey known getbundle unbundlehash batch streamreqs=generaldelta,revlogv1 $USUAL_BUNDLE2_CAPS_SERVER$ unbundle=HG10GZ,HG10BZ,HG10UN
+ 1
+
+
+And a variation that doesn't send the between command
+
+ $ hg -R server serve --stdio << EOF
+ > heads
+ > hello
+ > EOF
+ 41
+ 68986213bd4485ea51533535e3fc9e78007a711f
+ 384
+ capabilities: lookup changegroupsubset branchmap pushkey known getbundle unbundlehash batch streamreqs=generaldelta,revlogv1 $USUAL_BUNDLE2_CAPS_SERVER$ unbundle=HG10GZ,HG10BZ,HG10UN
+
+Send an upgrade request to a server that doesn't support that command
+
+ $ hg -R server serve --stdio << EOF
+ > upgrade 2e82ab3f-9ce3-4b4e-8f8c-6fd1c0e9e23a proto=irrelevant1%2Cirrelevant2
+ > hello
+ > between
+ > pairs 81
+ > 0000000000000000000000000000000000000000-0000000000000000000000000000000000000000
+ > EOF
+ 0
+ 384
+ capabilities: lookup changegroupsubset branchmap pushkey known getbundle unbundlehash batch streamreqs=generaldelta,revlogv1 $USUAL_BUNDLE2_CAPS_SERVER$ unbundle=HG10GZ,HG10BZ,HG10UN
+ 1
+
+
+ $ hg --config experimental.sshpeer.advertise-v2=true --debug debugpeer ssh://user@dummy/server
+ running * "*/tests/dummyssh" 'user@dummy' 'hg -R server serve --stdio' (glob) (no-windows !)
+ running * "*\tests/dummyssh" "user@dummy" "hg -R server serve --stdio" (glob) (windows !)
+ sending upgrade request: * proto=exp-ssh-v2-0001 (glob)
+ devel-peer-request: hello
+ sending hello command
+ devel-peer-request: between
+ devel-peer-request: pairs: 81 bytes
+ sending between command
+ remote: 0
+ remote: 384
+ remote: capabilities: lookup changegroupsubset branchmap pushkey known getbundle unbundlehash batch streamreqs=generaldelta,revlogv1 $USUAL_BUNDLE2_CAPS_SERVER$ unbundle=HG10GZ,HG10BZ,HG10UN
+ remote: 1
+ url: ssh://user@dummy/server
+ local: no
+ pushable: yes
+
+Enable version 2 support on server. We need to do this in hgrc because we can't
+use --config with `hg serve --stdio`.
+
+ $ cat >> server/.hg/hgrc << EOF
+ > [experimental]
+ > sshserver.support-v2 = true
+ > EOF
+
+Send an upgrade request to a server that supports upgrade
+
+ >>> with open('payload', 'wb') as fh:
+ ... fh.write(b'upgrade this-is-some-token proto=exp-ssh-v2-0001\n')
+ ... fh.write(b'hello\n')
+ ... fh.write(b'between\n')
+ ... fh.write(b'pairs 81\n')
+ ... fh.write(b'0000000000000000000000000000000000000000-0000000000000000000000000000000000000000')
+
+ $ hg -R server serve --stdio < payload
+ upgraded this-is-some-token exp-ssh-v2-0001
+ 383
+ capabilities: lookup changegroupsubset branchmap pushkey known getbundle unbundlehash batch streamreqs=generaldelta,revlogv1 $USUAL_BUNDLE2_CAPS_SERVER$ unbundle=HG10GZ,HG10BZ,HG10UN
+
+ $ hg --config experimental.sshpeer.advertise-v2=true --debug debugpeer ssh://user@dummy/server
+ running * "*/tests/dummyssh" 'user@dummy' 'hg -R server serve --stdio' (glob) (no-windows !)
+ running * "*\tests/dummyssh" "user@dummy" "hg -R server serve --stdio" (glob) (windows !)
+ sending upgrade request: * proto=exp-ssh-v2-0001 (glob)
+ devel-peer-request: hello
+ sending hello command
+ devel-peer-request: between
+ devel-peer-request: pairs: 81 bytes
+ sending between command
+ protocol upgraded to exp-ssh-v2-0001
+ remote: capabilities: lookup changegroupsubset branchmap pushkey known getbundle unbundlehash batch streamreqs=generaldelta,revlogv1 $USUAL_BUNDLE2_CAPS_SERVER$ unbundle=HG10GZ,HG10BZ,HG10UN
+ url: ssh://user@dummy/server
+ local: no
+ pushable: yes
+
+Verify the peer has capabilities
+
+ $ hg --config experimental.sshpeer.advertise-v2=true --debug debugcapabilities ssh://user@dummy/server
+ running * "*/tests/dummyssh" 'user@dummy' 'hg -R server serve --stdio' (glob) (no-windows !)
+ running * "*\tests/dummyssh" "user@dummy" "hg -R server serve --stdio" (glob) (windows !)
+ sending upgrade request: * proto=exp-ssh-v2-0001 (glob)
+ devel-peer-request: hello
+ sending hello command
+ devel-peer-request: between
+ devel-peer-request: pairs: 81 bytes
+ sending between command
+ protocol upgraded to exp-ssh-v2-0001
+ remote: capabilities: lookup changegroupsubset branchmap pushkey known getbundle unbundlehash batch streamreqs=generaldelta,revlogv1 $USUAL_BUNDLE2_CAPS_SERVER$ unbundle=HG10GZ,HG10BZ,HG10UN
+ Main capabilities:
+ batch
+ branchmap
+ $USUAL_BUNDLE2_CAPS_SERVER$
+ changegroupsubset
+ getbundle
+ known
+ lookup
+ pushkey
+ streamreqs=generaldelta,revlogv1
+ unbundle=HG10GZ,HG10BZ,HG10UN
+ unbundlehash
+ Bundle2 capabilities:
+ HG20
+ bookmarks
+ changegroup
+ 01
+ 02
+ digests
+ md5
+ sha1
+ sha512
+ error
+ abort
+ unsupportedcontent
+ pushraced
+ pushkey
+ hgtagsfnodes
+ listkeys
+ phases
+ heads
+ pushkey
+ remote-changegroup
+ http
+ https
+
+Command after upgrade to version 2 is processed
+
+ >>> with open('payload', 'wb') as fh:
+ ... fh.write(b'upgrade this-is-some-token proto=exp-ssh-v2-0001\n')
+ ... fh.write(b'hello\n')
+ ... fh.write(b'between\n')
+ ... fh.write(b'pairs 81\n')
+ ... fh.write(b'0000000000000000000000000000000000000000-0000000000000000000000000000000000000000')
+ ... fh.write(b'hello\n')
+ $ hg -R server serve --stdio < payload
+ upgraded this-is-some-token exp-ssh-v2-0001
+ 383
+ capabilities: lookup changegroupsubset branchmap pushkey known getbundle unbundlehash batch streamreqs=generaldelta,revlogv1 $USUAL_BUNDLE2_CAPS_SERVER$ unbundle=HG10GZ,HG10BZ,HG10UN
+ 384
+ capabilities: lookup changegroupsubset branchmap pushkey known getbundle unbundlehash batch streamreqs=generaldelta,revlogv1 $USUAL_BUNDLE2_CAPS_SERVER$ unbundle=HG10GZ,HG10BZ,HG10UN
+
+Multiple upgrades are not allowed
+
+ >>> with open('payload', 'wb') as fh:
+ ... fh.write(b'upgrade this-is-some-token proto=exp-ssh-v2-0001\n')
+ ... fh.write(b'hello\n')
+ ... fh.write(b'between\n')
+ ... fh.write(b'pairs 81\n')
+ ... fh.write(b'0000000000000000000000000000000000000000-0000000000000000000000000000000000000000')
+ ... fh.write(b'upgrade another-token proto=irrelevant\n')
+ ... fh.write(b'hello\n')
+ $ hg -R server serve --stdio < payload
+ upgraded this-is-some-token exp-ssh-v2-0001
+ 383
+ capabilities: lookup changegroupsubset branchmap pushkey known getbundle unbundlehash batch streamreqs=generaldelta,revlogv1 $USUAL_BUNDLE2_CAPS_SERVER$ unbundle=HG10GZ,HG10BZ,HG10UN
+ cannot upgrade protocols multiple times
+ -
+
+
+Malformed upgrade request line (not exactly 3 space-delimited tokens)
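+
+A well-formed request line, like "upgrade token proto=exp-ssh-v2-0001" used
+later, splits into exactly three space-delimited tokens; the lines below do
+not, so each of them just gets the empty "0" reply. A minimal shape check
+(illustration only):
+
+ >>> good = 'upgrade token proto=exp-ssh-v2-0001'
+ >>> bad = ['upgrade', 'upgrade token', 'upgrade token foo=bar extra-token']
+ >>> assert len(good.split(' ')) == 3
+ >>> assert [len(x.split(' ')) for x in bad] == [1, 2, 4]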
+
+ $ hg -R server serve --stdio << EOF
+ > upgrade
+ > EOF
+ 0
+
+ $ hg -R server serve --stdio << EOF
+ > upgrade token
+ > EOF
+ 0
+
+ $ hg -R server serve --stdio << EOF
+ > upgrade token foo=bar extra-token
+ > EOF
+ 0
+
+Upgrade request to unsupported protocol is ignored
+
+ $ hg -R server serve --stdio << EOF
+ > upgrade this-is-some-token proto=unknown1,unknown2
+ > hello
+ > between
+ > pairs 81
+ > 0000000000000000000000000000000000000000-0000000000000000000000000000000000000000
+ > EOF
+ 0
+ 384
+ capabilities: lookup changegroupsubset branchmap pushkey known getbundle unbundlehash batch streamreqs=generaldelta,revlogv1 $USUAL_BUNDLE2_CAPS_SERVER$ unbundle=HG10GZ,HG10BZ,HG10UN
+ 1
+
+
+Upgrade request must be followed by hello + between
+
+ $ hg -R server serve --stdio << EOF
+ > upgrade token proto=exp-ssh-v2-0001
+ > invalid
+ > EOF
+ malformed handshake protocol: missing hello
+ -
+
+
+ $ hg -R server serve --stdio << EOF
+ > upgrade token proto=exp-ssh-v2-0001
+ > hello
+ > invalid
+ > EOF
+ malformed handshake protocol: missing between
+ -
+
+
+ $ hg -R server serve --stdio << EOF
+ > upgrade token proto=exp-ssh-v2-0001
+ > hello
+ > between
+ > invalid
+ > EOF
+ malformed handshake protocol: missing pairs 81
+ -
+
--- a/tests/test-ssh.t Fri Feb 23 17:57:04 2018 -0800
+++ b/tests/test-ssh.t Sat Feb 24 17:49:10 2018 -0600
@@ -1,3 +1,12 @@
+#testcases sshv1 sshv2
+
+#if sshv2
+ $ cat >> $HGRCPATH << EOF
+ > [experimental]
+ > sshpeer.advertise-v2 = true
+ > sshserver.support-v2 = true
+ > EOF
+#endif
This test tries to exercise the ssh functionality with a dummy script
@@ -481,14 +490,16 @@
$ hg pull --debug ssh://user@dummy/remote --config devel.debug.peer-request=yes
pulling from ssh://user@dummy/remote
running .* ".*/dummyssh" ['"]user@dummy['"] ('|")hg -R remote serve --stdio('|") (re)
+ sending upgrade request: * proto=exp-ssh-v2-0001 (glob) (sshv2 !)
devel-peer-request: hello
sending hello command
devel-peer-request: between
devel-peer-request: pairs: 81 bytes
sending between command
- remote: 384
+ remote: 384 (sshv1 !)
+ protocol upgraded to exp-ssh-v2-0001 (sshv2 !)
remote: capabilities: lookup changegroupsubset branchmap pushkey known getbundle unbundlehash batch streamreqs=generaldelta,revlogv1 $USUAL_BUNDLE2_CAPS_SERVER$ unbundle=HG10GZ,HG10BZ,HG10UN
- remote: 1
+ remote: 1 (sshv1 !)
query 1; heads
devel-peer-request: batch
devel-peer-request: cmds: 141 bytes
--- a/tests/test-sshserver.py Fri Feb 23 17:57:04 2018 -0800
+++ b/tests/test-sshserver.py Sat Feb 24 17:49:10 2018 -0600
@@ -6,30 +6,33 @@
import silenttestrunner
from mercurial import (
- sshserver,
util,
wireproto,
+ wireprotoserver,
)
class SSHServerGetArgsTests(unittest.TestCase):
def testparseknown(self):
tests = [
- ('* 0\nnodes 0\n', ['', {}]),
- ('* 0\nnodes 40\n1111111111111111111111111111111111111111\n',
- ['1111111111111111111111111111111111111111', {}]),
+ (b'* 0\nnodes 0\n', [b'', {}]),
+ (b'* 0\nnodes 40\n1111111111111111111111111111111111111111\n',
+ [b'1111111111111111111111111111111111111111', {}]),
]
for input, expected in tests:
- self.assertparse('known', input, expected)
+ self.assertparse(b'known', input, expected)
def assertparse(self, cmd, input, expected):
server = mockserver(input)
+ proto = wireprotoserver.sshv1protocolhandler(server._ui,
+ server._fin,
+ server._fout)
_func, spec = wireproto.commands[cmd]
- self.assertEqual(server.getargs(spec), expected)
+ self.assertEqual(proto.getargs(spec), expected)
def mockserver(inbytes):
ui = mockui(inbytes)
repo = mockrepo(ui)
- return sshserver.sshserver(ui, repo)
+ return wireprotoserver.sshserver(ui, repo)
class mockrepo(object):
def __init__(self, ui):
--- a/tests/test-status.t Fri Feb 23 17:57:04 2018 -0800
+++ b/tests/test-status.t Sat Feb 24 17:49:10 2018 -0600
@@ -465,12 +465,12 @@
$ hg init repo5
$ cd repo5
- >>> open("010a", "wb").write("\1\nfoo")
+ >>> open("010a", r"wb").write(b"\1\nfoo")
$ hg ci -q -A -m 'initial checkin'
$ hg status -A
C 010a
- >>> open("010a", "wb").write("\1\nbar")
+ >>> open("010a", r"wb").write(b"\1\nbar")
$ hg status -A
M 010a
$ hg ci -q -m 'modify 010a'
--- a/tests/test-strip.t Fri Feb 23 17:57:04 2018 -0800
+++ b/tests/test-strip.t Sat Feb 24 17:49:10 2018 -0600
@@ -893,17 +893,17 @@
> def test(transaction):
> # observe cache inconsistency
> try:
- > [repo.changelog.node(r) for r in repo.revs("not public()")]
+ > [repo.changelog.node(r) for r in repo.revs(b"not public()")]
> except IndexError:
- > repo.ui.status("Index error!\n")
+ > repo.ui.status(b"Index error!\n")
> transaction = orig(repo, desc, *args, **kwargs)
> # warm up the phase cache
- > list(repo.revs("not public()"))
- > if desc != 'strip':
- > transaction.addpostclose("phase invalidation test", test)
+ > list(repo.revs(b"not public()"))
+ > if desc != b'strip':
+ > transaction.addpostclose(b"phase invalidation test", test)
> return transaction
> def extsetup(ui):
- > extensions.wrapfunction(localrepo.localrepository, "transaction",
+ > extensions.wrapfunction(localrepo.localrepository, b"transaction",
> transactioncallback)
> EOF
$ hg up -C 2
@@ -930,9 +930,9 @@
> class crashstriprepo(repo.__class__):
> def transaction(self, desc, *args, **kwargs):
> tr = super(crashstriprepo, self).transaction(desc, *args, **kwargs)
- > if desc == 'strip':
- > def crash(tra): raise error.Abort('boom')
- > tr.addpostclose('crash', crash)
+ > if desc == b'strip':
+ > def crash(tra): raise error.Abort(b'boom')
+ > tr.addpostclose(b'crash', crash)
> return tr
> repo.__class__ = crashstriprepo
> EOF
@@ -1175,16 +1175,16 @@
> from mercurial import commands, registrar, repair
> cmdtable = {}
> command = registrar.command(cmdtable)
- > @command('testdelayedstrip')
+ > @command(b'testdelayedstrip')
> def testdelayedstrip(ui, repo):
> def getnodes(expr):
> return [repo.changelog.node(r) for r in repo.revs(expr)]
> with repo.wlock():
> with repo.lock():
- > with repo.transaction('delayedstrip'):
- > repair.delayedstrip(ui, repo, getnodes('B+I+Z+D+E'), 'J')
- > repair.delayedstrip(ui, repo, getnodes('G+H+Z'), 'I')
- > commands.commit(ui, repo, message='J', date='0 0')
+ > with repo.transaction(b'delayedstrip'):
+ > repair.delayedstrip(ui, repo, getnodes(b'B+I+Z+D+E'), b'J')
+ > repair.delayedstrip(ui, repo, getnodes(b'G+H+Z'), b'I')
+ > commands.commit(ui, repo, message=b'J', date=b'0 0')
> EOF
$ hg testdelayedstrip --config extensions.t=$TESTTMP/delayedstrip.py
warning: orphaned descendants detected, not stripping 08ebfeb61bac, 112478962961, 7fb047a69f22
@@ -1225,7 +1225,7 @@
> from mercurial import registrar, scmutil
> cmdtable = {}
> command = registrar.command(cmdtable)
- > @command('testnodescleanup')
+ > @command(b'testnodescleanup')
> def testnodescleanup(ui, repo):
> def nodes(expr):
> return [repo.changelog.node(r) for r in repo.revs(expr)]
@@ -1233,12 +1233,13 @@
> return nodes(expr)[0]
> with repo.wlock():
> with repo.lock():
- > with repo.transaction('delayedstrip'):
- > mapping = {node('F'): [node('F2')],
- > node('D'): [node('D2')],
- > node('G'): [node('G2')]}
- > scmutil.cleanupnodes(repo, mapping, 'replace')
- > scmutil.cleanupnodes(repo, nodes('((B::)+I+Z)-D2'), 'replace')
+ > with repo.transaction(b'delayedstrip'):
+ > mapping = {node(b'F'): [node(b'F2')],
+ > node(b'D'): [node(b'D2')],
+ > node(b'G'): [node(b'G2')]}
+ > scmutil.cleanupnodes(repo, mapping, b'replace')
+ > scmutil.cleanupnodes(repo, nodes(b'((B::)+I+Z)-D2'),
+ > b'replace')
> EOF
$ hg testnodescleanup --config extensions.t=$TESTTMP/scmutilcleanup.py
warning: orphaned descendants detected, not stripping 112478962961, 1fc8102cda62, 26805aba1e60
--- a/tests/test-subrepo-missing.t Fri Feb 23 17:57:04 2018 -0800
+++ b/tests/test-subrepo-missing.t Sat Feb 24 17:49:10 2018 -0600
@@ -14,7 +14,7 @@
ignore blanklines in .hgsubstate
- >>> file('.hgsubstate', 'wb').write('\n\n \t \n \n')
+ >>> open('.hgsubstate', 'wb').write(b'\n\n \t \n \n')
$ hg st --subrepos
M .hgsubstate
$ hg revert -qC .hgsubstate
@@ -22,7 +22,7 @@
abort more gracefully on .hgsubstate parsing error
$ cp .hgsubstate .hgsubstate.old
- >>> file('.hgsubstate', 'wb').write('\ninvalid')
+ >>> open('.hgsubstate', 'wb').write(b'\ninvalid')
$ hg st --subrepos --cwd $TESTTMP -R $TESTTMP/repo
abort: invalid subrepository revision specifier in 'repo/.hgsubstate' line 2
[255]
--- a/tests/test-tag.t Fri Feb 23 17:57:04 2018 -0800
+++ b/tests/test-tag.t Sat Feb 24 17:49:10 2018 -0600
@@ -231,8 +231,8 @@
doesn't end with EOL
$ $PYTHON << EOF
- > f = file('.hg/localtags'); last = f.readlines()[-1][:-1]; f.close()
- > f = file('.hg/localtags', 'w'); f.write(last); f.close()
+ > f = open('.hg/localtags'); last = f.readlines()[-1][:-1]; f.close()
+ > f = open('.hg/localtags', 'w'); f.write(last); f.close()
> EOF
$ cat .hg/localtags; echo
acb14030fe0a21b60322c440ad2d20cf7685a376 localblah
@@ -243,8 +243,8 @@
$ $PYTHON << EOF
- > f = file('.hgtags'); last = f.readlines()[-1][:-1]; f.close()
- > f = file('.hgtags', 'w'); f.write(last); f.close()
+ > f = open('.hgtags'); last = f.readlines()[-1][:-1]; f.close()
+ > f = open('.hgtags', 'w'); f.write(last); f.close()
> EOF
$ hg ci -m'broken manual edit of .hgtags'
$ cat .hgtags; echo
--- a/tests/test-template-engine.t Fri Feb 23 17:57:04 2018 -0800
+++ b/tests/test-template-engine.t Sat Feb 24 17:49:10 2018 -0600
@@ -13,7 +13,7 @@
> tmpl = self.loader(t)
> props = self._defaults.copy()
> props.update(map)
- > for k, v in props.iteritems():
+ > for k, v in props.items():
> if k in ('templ', 'ctx', 'repo', 'revcache', 'cache', 'troubles'):
> continue
> if hasattr(v, '__call__'):
--- a/tests/test-transplant.t Fri Feb 23 17:57:04 2018 -0800
+++ b/tests/test-transplant.t Sat Feb 24 17:49:10 2018 -0600
@@ -760,7 +760,7 @@
$ cd twin2
$ echo '[patch]' >> .hg/hgrc
$ echo 'eol = crlf' >> .hg/hgrc
- $ $PYTHON -c "file('b', 'wb').write('b\r\nb\r\n')"
+ $ $PYTHON -c "open('b', 'wb').write(b'b\r\nb\r\n')"
$ hg ci -Am addb
adding b
$ hg transplant -s ../twin1 tip
@@ -838,9 +838,9 @@
$ cd binarysource
$ echo a > a
$ hg ci -Am adda a
- >>> file('b', 'wb').write('\0b1')
+ >>> open('b', 'wb').write(b'\0b1')
$ hg ci -Am addb b
- >>> file('b', 'wb').write('\0b2')
+ >>> open('b', 'wb').write(b'\0b2')
$ hg ci -m changeb b
$ cd ..
@@ -891,14 +891,14 @@
> # emulate that patch.patch() is aborted at patching on "abort" file
> from mercurial import error, extensions, patch as patchmod
> def patch(orig, ui, repo, patchname,
- > strip=1, prefix='', files=None,
- > eolmode='strict', similarity=0):
+ > strip=1, prefix=b'', files=None,
+ > eolmode=b'strict', similarity=0):
> if files is None:
> files = set()
> r = orig(ui, repo, patchname,
> strip=strip, prefix=prefix, files=files,
> eolmode=eolmode, similarity=similarity)
- > if 'abort' in files:
+ > if b'abort' in files:
> raise error.PatchError('intentional error while patching')
> return r
> def extsetup(ui):
--- a/tests/test-ui-color.py Fri Feb 23 17:57:04 2018 -0800
+++ b/tests/test-ui-color.py Sat Feb 24 17:49:10 2018 -0600
@@ -9,27 +9,27 @@
# ensure errors aren't buffered
testui = uimod.ui()
testui.pushbuffer()
-testui.write(('buffered\n'))
-testui.warn(('warning\n'))
-testui.write_err('error\n')
+testui.write((b'buffered\n'))
+testui.warn((b'warning\n'))
+testui.write_err(b'error\n')
print(repr(testui.popbuffer()))
# test dispatch.dispatch with the same ui object
-hgrc = open(os.environ["HGRCPATH"], 'w')
-hgrc.write('[extensions]\n')
-hgrc.write('color=\n')
+hgrc = open(os.environ["HGRCPATH"], 'wb')
+hgrc.write(b'[extensions]\n')
+hgrc.write(b'color=\n')
hgrc.close()
ui_ = uimod.ui.load()
-ui_.setconfig('ui', 'formatted', 'True')
+ui_.setconfig(b'ui', b'formatted', b'True')
# we're not interested in the output, so write that to devnull
-ui_.fout = open(os.devnull, 'w')
+ui_.fout = open(os.devnull, 'wb')
# call some arbitrary command just so we go through
# color's wrapped _runcommand twice.
def runcmd():
- dispatch.dispatch(dispatch.request(['version', '-q'], ui_))
+ dispatch.dispatch(dispatch.request([b'version', b'-q'], ui_))
runcmd()
print("colored? %s" % (ui_._colormode is not None))
--- a/tests/test-ui-verbosity.py Fri Feb 23 17:57:04 2018 -0800
+++ b/tests/test-ui-verbosity.py Sat Feb 24 17:49:10 2018 -0600
@@ -2,9 +2,13 @@
import os
from mercurial import (
+ pycompat,
ui as uimod,
)
+if pycompat.ispy3:
+ xrange = range
+
hgrc = os.environ['HGRCPATH']
f = open(hgrc)
basehgrc = f.read()
--- a/tests/test-upgrade-repo.t Fri Feb 23 17:57:04 2018 -0800
+++ b/tests/test-upgrade-repo.t Sat Feb 24 17:49:10 2018 -0600
@@ -31,23 +31,18 @@
abort: cannot upgrade repository; unsupported source requirement: shared
[255]
-Do not yet support upgrading manifestv2 and treemanifest repos
-
- $ hg --config experimental.manifestv2=true init manifestv2
- $ hg -R manifestv2 debugupgraderepo
- abort: cannot upgrade repository; unsupported source requirement: manifestv2
- [255]
+Do not yet support upgrading treemanifest repos
$ hg --config experimental.treemanifest=true init treemanifest
$ hg -R treemanifest debugupgraderepo
abort: cannot upgrade repository; unsupported source requirement: treemanifest
[255]
-Cannot add manifestv2 or treemanifest requirement during upgrade
+Cannot add treemanifest requirement during upgrade
$ hg init disallowaddedreq
- $ hg -R disallowaddedreq --config experimental.manifestv2=true --config experimental.treemanifest=true debugupgraderepo
- abort: cannot upgrade repository; do not support adding requirement: manifestv2, treemanifest
+ $ hg -R disallowaddedreq --config experimental.treemanifest=true debugupgraderepo
+ abort: cannot upgrade repository; do not support adding requirement: treemanifest
[255]
An upgrade of a repository created with recommended settings only suggests optimizations
--- a/tests/test-walk.t Fri Feb 23 17:57:04 2018 -0800
+++ b/tests/test-walk.t Sat Feb 24 17:49:10 2018 -0600
@@ -304,12 +304,10 @@
f beans/turtle beans/turtle
$ hg debugwalk -Xbeans/black beans/black
matcher: <differencematcher m1=<patternmatcher patterns='(?:beans\\/black(?:/|$))'>, m2=<includematcher includes='(?:beans\\/black(?:/|$))'>>
- f beans/black beans/black exact
$ hg debugwalk -Xbeans/black -Ibeans/black
matcher: <differencematcher m1=<includematcher includes='(?:beans\\/black(?:/|$))'>, m2=<includematcher includes='(?:beans\\/black(?:/|$))'>>
$ hg debugwalk -Xbeans beans/black
matcher: <differencematcher m1=<patternmatcher patterns='(?:beans\\/black(?:/|$))'>, m2=<includematcher includes='(?:beans(?:/|$))'>>
- f beans/black beans/black exact
$ hg debugwalk -Xbeans -Ibeans/black
matcher: <differencematcher m1=<includematcher includes='(?:beans\\/black(?:/|$))'>, m2=<includematcher includes='(?:beans(?:/|$))'>>
$ hg debugwalk 'glob:mammals/../beans/b*'
@@ -345,17 +343,13 @@
[255]
Test explicit paths and excludes:
-(BROKEN: nothing should be included, but wctx.walk() does)
$ hg debugwalk fennel -X fennel
matcher: <differencematcher m1=<patternmatcher patterns='(?:fennel(?:/|$))'>, m2=<includematcher includes='(?:fennel(?:/|$))'>>
- f fennel fennel exact
$ hg debugwalk fennel -X 'f*'
matcher: <differencematcher m1=<patternmatcher patterns='(?:fennel(?:/|$))'>, m2=<includematcher includes='(?:f[^/]*(?:/|$))'>>
- f fennel fennel exact
$ hg debugwalk beans/black -X 'path:beans'
matcher: <differencematcher m1=<patternmatcher patterns='(?:beans\\/black(?:/|$))'>, m2=<includematcher includes='(?:beans(?:/|$))'>>
- f beans/black beans/black exact
$ hg debugwalk -I 'path:beans/black' -X 'path:beans'
matcher: <differencematcher m1=<includematcher includes='(?:beans\\/black(?:/|$))'>, m2=<includematcher includes='(?:beans(?:/|$))'>>
@@ -494,12 +488,12 @@
Test listfile and listfile0
- $ $PYTHON -c "file('listfile0', 'wb').write('fenugreek\0new\0')"
+ $ $PYTHON -c "open('listfile0', 'wb').write(b'fenugreek\0new\0')"
$ hg debugwalk -I 'listfile0:listfile0'
matcher: <includematcher includes='(?:fenugreek(?:/|$)|new(?:/|$))'>
f fenugreek fenugreek
f new new
- $ $PYTHON -c "file('listfile', 'wb').write('fenugreek\nnew\r\nmammals/skunk\n')"
+ $ $PYTHON -c "open('listfile', 'wb').write(b'fenugreek\nnew\r\nmammals/skunk\n')"
$ hg debugwalk -I 'listfile:listfile'
matcher: <includematcher includes='(?:fenugreek(?:/|$)|new(?:/|$)|mammals\\/skunk(?:/|$))'>
f fenugreek fenugreek
@@ -525,7 +519,7 @@
$ cd t
$ echo fennel > overflow.list
- $ $PYTHON -c "for i in xrange(20000 / 100): print 'x' * 100" >> overflow.list
+ $ $PYTHON -c "for i in range(20000 // 100): print('x' * 100)" >> overflow.list
$ echo fenugreek >> overflow.list
$ hg debugwalk 'listfile:overflow.list' 2>&1 | egrep -v '(^matcher: |^xxx)'
f fennel fennel exact
--- a/tests/test-win32text.t Fri Feb 23 17:57:04 2018 -0800
+++ b/tests/test-win32text.t Sat Feb 24 17:49:10 2018 -0600
@@ -5,9 +5,9 @@
> import sys
>
> for path in sys.argv[1:]:
- > data = file(path, 'rb').read()
- > data = data.replace('\n', '\r\n')
- > file(path, 'wb').write(data)
+ > data = open(path, 'rb').read()
+ > data = data.replace(b'\n', b'\r\n')
+ > open(path, 'wb').write(data)
> EOF
$ echo '[hooks]' >> .hg/hgrc
$ echo 'pretxncommit.crlf = python:hgext.win32text.forbidcrlf' >> .hg/hgrc
@@ -118,7 +118,7 @@
$ hg rem f
$ hg ci -m 4
- $ $PYTHON -c 'file("bin", "wb").write("hello\x00\x0D\x0A")'
+ $ $PYTHON -c 'open("bin", "wb").write(b"hello\x00\x0D\x0A")'
$ hg add bin
$ hg ci -m 5
$ hg log -v
@@ -342,7 +342,7 @@
$ rm .hg/hgrc
$ (echo some; echo text) > f3
- $ $PYTHON -c 'file("f4.bat", "wb").write("rem empty\x0D\x0A")'
+ $ $PYTHON -c 'open("f4.bat", "wb").write(b"rem empty\x0D\x0A")'
$ hg add f3 f4.bat
$ hg ci -m 6
$ cat bin
@@ -395,7 +395,7 @@
$ cat f4.bat
rem empty\r (esc)
- $ $PYTHON -c 'file("f5.sh", "wb").write("# empty\x0D\x0A")'
+ $ $PYTHON -c 'open("f5.sh", "wb").write(b"# empty\x0D\x0A")'
$ hg add f5.sh
$ hg ci -m 7
$ cat f5.sh
--- a/tests/test-wireproto.py Fri Feb 23 17:57:04 2018 -0800
+++ b/tests/test-wireproto.py Sat Feb 24 17:49:10 2018 -0600
@@ -1,8 +1,10 @@
from __future__ import absolute_import, print_function
from mercurial import (
+ error,
util,
wireproto,
+ wireprototypes,
)
stringio = util.stringio
@@ -42,7 +44,13 @@
return ['batch']
def _call(self, cmd, **args):
- return wireproto.dispatch(self.serverrepo, proto(args), cmd)
+ res = wireproto.dispatch(self.serverrepo, proto(args), cmd)
+ if isinstance(res, wireprototypes.bytesresponse):
+ return res.data
+ elif isinstance(res, bytes):
+ return res
+ else:
+ raise error.Abort('dummy client does not support response type')
def _callstream(self, cmd, **args):
return stringio(self._call(cmd, **args))
--- a/tests/test-worker.t Fri Feb 23 17:57:04 2018 -0800
+++ b/tests/test-worker.t Sat Feb 24 17:49:10 2018 -0600
@@ -12,7 +12,7 @@
> def abort(ui, args):
> if args[0] == 0:
> # by first worker for test stability
- > raise error.Abort('known exception')
+ > raise error.Abort(b'known exception')
> return runme(ui, [])
> def exc(ui, args):
> if args[0] == 0:
@@ -21,25 +21,25 @@
> return runme(ui, [])
> def runme(ui, args):
> for arg in args:
- > ui.status('run\n')
+ > ui.status(b'run\n')
> yield 1, arg
> time.sleep(0.1) # easier to trigger killworkers code path
> functable = {
- > 'abort': abort,
- > 'exc': exc,
- > 'runme': runme,
+ > b'abort': abort,
+ > b'exc': exc,
+ > b'runme': runme,
> }
> cmdtable = {}
> command = registrar.command(cmdtable)
- > @command(b'test', [], 'hg test [COST] [FUNC]')
- > def t(ui, repo, cost=1.0, func='runme'):
+ > @command(b'test', [], b'hg test [COST] [FUNC]')
+ > def t(ui, repo, cost=1.0, func=b'runme'):
> cost = float(cost)
> func = functable[func]
- > ui.status('start\n')
+ > ui.status(b'start\n')
> runs = worker.worker(ui, cost, func, (ui,), range(8))
> for n, i in runs:
> pass
- > ui.status('done\n')
+ > ui.status(b'done\n')
> EOF
$ abspath=`pwd`/t.py
$ hg init
--- a/tests/testlib/ext-phase-report.py Fri Feb 23 17:57:04 2018 -0800
+++ b/tests/testlib/ext-phase-report.py Sat Feb 24 17:49:10 2018 -0600
@@ -5,18 +5,18 @@
def reposetup(ui, repo):
def reportphasemove(tr):
- for rev, move in sorted(tr.changes['phases'].iteritems()):
+ for rev, move in sorted(tr.changes[b'phases'].items()):
if move[0] is None:
- ui.write(('test-debug-phase: new rev %d: x -> %d\n'
+ ui.write((b'test-debug-phase: new rev %d: x -> %d\n'
% (rev, move[1])))
else:
- ui.write(('test-debug-phase: move rev %d: %s -> %d\n'
+ ui.write((b'test-debug-phase: move rev %d: %d -> %d\n'
% (rev, move[0], move[1])))
class reportphaserepo(repo.__class__):
def transaction(self, *args, **kwargs):
tr = super(reportphaserepo, self).transaction(*args, **kwargs)
- tr.addpostclose('report-phase', reportphasemove)
+ tr.addpostclose(b'report-phase', reportphasemove)
return tr
repo.__class__ = reportphaserepo