Mercurial > hg
changeset 28532:ed75909c4c67
merge with stable
author | Matt Mackall <mpm@selenic.com> |
---|---|
date | Tue, 15 Mar 2016 14:10:46 -0700 |
parents | fe79a5821e5a (diff) aa440c3d7c5d (current diff) |
children | dfd5a6830ea7 |
files | mercurial/streamclone.py |
diffstat | 293 files changed, 14971 insertions(+), 4564 deletions(-) [+] |
line wrap: on
line diff
--- a/.hgignore Sun Mar 13 02:29:11 2016 +0100 +++ b/.hgignore Tue Mar 15 14:10:46 2016 -0700 @@ -21,10 +21,13 @@ .\#* tests/.coverage* tests/.testtimes* +tests/.hypothesis +tests/hypothesis-generated tests/annotated tests/*.err tests/htmlcov build +contrib/chg/chg contrib/hgsh/hgsh contrib/vagrant/.vagrant dist @@ -37,6 +40,7 @@ MANIFEST MANIFEST.in patches +mercurial/__modulepolicy__.py mercurial/__version__.py mercurial/hgpythonlib.h mercurial.egg-info
--- a/Makefile Sun Mar 13 02:29:11 2016 +0100 +++ b/Makefile Tue Mar 15 14:10:46 2016 -0700 @@ -63,6 +63,7 @@ \( -name '*.py[cdo]' -o -name '*.so' \) -exec rm -f '{}' ';' rm -f $(addprefix mercurial/,$(notdir $(wildcard mercurial/pure/[a-z]*.py))) rm -f MANIFEST MANIFEST.in hgext/__index__.py tests/*.err + rm -f mercurial/__modulepolicy__.py if test -d .hg; then rm -f mercurial/__version__.py; fi rm -rf build mercurial/locale $(MAKE) -C doc clean @@ -167,6 +168,10 @@ mkdir -p packages/debian-jessie contrib/dockerdeb debian jessie +docker-ubuntu-trusty: + mkdir -p packages/ubuntu-trusty + contrib/dockerdeb ubuntu trusty + fedora20: mkdir -p packages/fedora20 contrib/buildrpm
--- a/contrib/casesmash.py Sun Mar 13 02:29:11 2016 +0100 +++ b/contrib/casesmash.py Tue Mar 15 14:10:46 2016 -0700 @@ -1,5 +1,9 @@ -import os, __builtin__ -from mercurial import util +from __future__ import absolute_import +import __builtin__ +import os +from mercurial import ( + util, +) def lowerwrap(scope, funcname): f = getattr(scope, funcname)
--- a/contrib/check-code.py Sun Mar 13 02:29:11 2016 +0100 +++ b/contrib/check-code.py Tue Mar 15 14:10:46 2016 -0700 @@ -19,9 +19,13 @@ * ONLY use no--check-code for skipping entire files from external sources """ -import re, glob, os, sys +from __future__ import absolute_import, print_function +import glob import keyword import optparse +import os +import re +import sys try: import re2 except ImportError: @@ -90,7 +94,7 @@ (r'pushd|popd', "don't use 'pushd' or 'popd', use 'cd'"), (r'\W\$?\(\([^\)\n]*\)\)', "don't use (()) or $(()), use 'expr'"), (r'grep.*-q', "don't use 'grep -q', redirect to /dev/null"), - (r'(?<!hg )grep.*-a', "don't use 'grep -a', use in-line python"), + (r'(?<!hg )grep.* -a', "don't use 'grep -a', use in-line python"), (r'sed.*-i', "don't use 'sed -i', use a temporary file"), (r'\becho\b.*\\n', "don't use 'echo \\n', use printf"), (r'echo -n', "don't use 'echo -n', use printf"), @@ -176,6 +180,19 @@ 'write "file:/*/$TESTTMP" + (glob) to match on windows too'), (r'^ (cat|find): .*: No such file or directory', 'use test -f to test for file existence'), + (r'^ diff -[^ -]*p', + "don't use (external) diff with -p for portability"), + (r'^ [-+][-+][-+] .* [-+]0000 \(glob\)', + "glob timezone field in diff output for portability"), + (r'^ @@ -[0-9]+ [+][0-9]+,[0-9]+ @@', + "use '@@ -N* +N,n @@ (glob)' style chunk header for portability"), + (r'^ @@ -[0-9]+,[0-9]+ [+][0-9]+ @@', + "use '@@ -N,n +N* @@ (glob)' style chunk header for portability"), + (r'^ @@ -[0-9]+ [+][0-9]+ @@', + "use '@@ -N* +N* @@ (glob)' style chunk header for portability"), + (uprefix + r'hg( +-[^ ]+( +[^ ]+)?)* +extdiff' + r'( +(-[^ po-]+|--(?!program|option)[^ ]+|[^-][^ ]*))*$', + "use $RUNTESTDIR/pdiff via extdiff (or -o/-p for false-positives)"), ], # warnings [ @@ -205,9 +222,6 @@ "tuple parameter unpacking not available in Python 3+"), (r'lambda\s*\(.*,.*\)', "tuple parameter unpacking not available in Python 3+"), - (r'import (.+,[^.]+\.[^.]+|[^.]+\.[^.]+,)', - '2to3 
can\'t always rewrite "import qux, foo.bar", ' - 'use "import foo.bar" on its own line instead.'), (r'(?<!def)\s+(cmp)\(', "cmp is not available in Python 3+"), (r'\breduce\s*\(.*', "reduce is not available in Python 3+"), (r'dict\(.*=', 'dict() is different in Py2 and 3 and is slower than {}', @@ -232,9 +246,11 @@ "don't use camelcase in identifiers"), (r'^\s*(if|while|def|class|except|try)\s[^[\n]*:\s*[^\\n]#\s]+', "linebreak after :"), - (r'class\s[^( \n]+:', "old-style class, use class foo(object)"), + (r'class\s[^( \n]+:', "old-style class, use class foo(object)", + r'#.*old-style'), (r'class\s[^( \n]+\(\):', - "class foo() creates old style object, use class foo(object)"), + "class foo() creates old style object, use class foo(object)", + r'#.*old-style'), (r'\b(%s)\(' % '|'.join(k for k in keyword.kwlist if k not in ('print', 'exec')), "Python keyword is not a function"), @@ -431,12 +447,12 @@ msgid = fname, lineno, line if msgid != self._lastseen: if blame: - print "%s:%d (%s):" % (fname, lineno, blame) + print("%s:%d (%s):" % (fname, lineno, blame)) else: - print "%s:%d:" % (fname, lineno) - print " > %s" % line + print("%s:%d:" % (fname, lineno)) + print(" > %s" % line) self._lastseen = msgid - print " " + msg + print(" " + msg) _defaultlogger = norepeatlogger() @@ -466,19 +482,19 @@ try: fp = open(f) except IOError as e: - print "Skipping %s, %s" % (f, str(e).split(':', 1)[0]) + print("Skipping %s, %s" % (f, str(e).split(':', 1)[0])) return result pre = post = fp.read() fp.close() for name, match, magic, filters, pats in checks: if debug: - print name, f + print(name, f) fc = 0 - if not (re.match(match, f) or (magic and re.search(magic, f))): + if not (re.match(match, f) or (magic and re.search(magic, pre))): if debug: - print "Skipping %s for %s it doesn't match %s" % ( - name, match, f) + print("Skipping %s for %s it doesn't match %s" % ( + name, match, f)) continue if "no-" "check-code" in pre: # If you're looking at this line, it's because a file 
has: @@ -487,7 +503,7 @@ # tests easier. So, instead of writing it with a normal # spelling, we write it with the expected spelling from # tests/test-check-code.t - print "Skipping %s it has no-che?k-code (glob)" % f + print("Skipping %s it has no-che?k-code (glob)" % f) return "Skip" # skip checking this file for p, r in filters: post = re.sub(p, r, post) @@ -499,7 +515,7 @@ # print post # uncomment to show filtered version if debug: - print "Checking %s for %s" % (name, f) + print("Checking %s for %s" % (name, f)) prelines = None errors = [] @@ -530,8 +546,8 @@ if ignore and re.search(ignore, l, re.MULTILINE): if debug: - print "Skipping %s for %s:%s (ignore pattern)" % ( - name, f, n) + print("Skipping %s for %s:%s (ignore pattern)" % ( + name, f, n)) continue bd = "" if blame: @@ -551,7 +567,7 @@ logfunc(*e) fc += 1 if maxerr and fc >= maxerr: - print " (too many errors, giving up)" + print(" (too many errors, giving up)") break return result
--- a/contrib/check-commit Sun Mar 13 02:29:11 2016 +0100 +++ b/contrib/check-commit Tue Mar 15 14:10:46 2016 -0700 @@ -23,7 +23,8 @@ errors = [ (beforepatch + r".*[(]bc[)]", "(BC) needs to be uppercase"), - (beforepatch + r".*[(]issue \d\d\d", "no space allowed between issue and number"), + (beforepatch + r".*[(]issue \d\d\d", + "no space allowed between issue and number"), (beforepatch + r".*[(]bug(\d|\s)", "use (issueDDDD) instead of bug"), (commitheader + r"# User [^@\n]+\n", "username is not an email address"), (commitheader + r"(?!merge with )[^#]\S+[^:] ", @@ -34,7 +35,7 @@ "summary keyword should be most user-relevant one-word command or topic"), (afterheader + r".*\.\s*\n", "don't add trailing period on summary line"), (afterheader + r".{79,}", "summary line too long (limit is 78)"), - (r"\n\+\n \n", "adds double empty line"), + (r"\n\+\n( |\+)\n", "adds double empty line"), (r"\n \n\+\n", "adds double empty line"), (r"\n\+[ \t]+def [a-z]+_[a-z]", "adds a function with foo_bar naming"), ] @@ -45,13 +46,12 @@ return first return second -def checkcommit(commit, node = None): +def checkcommit(commit, node=None): exitcode = 0 printed = node is None hits = [] for exp, msg in errors: - m = re.search(exp, commit) - if m: + for m in re.finditer(exp, commit): end = m.end() trailing = re.search(r'(\\n)+$', exp) if trailing:
--- a/contrib/check-config.py Sun Mar 13 02:29:11 2016 +0100 +++ b/contrib/check-config.py Tue Mar 15 14:10:46 2016 -0700 @@ -7,6 +7,7 @@ # This software may be used and distributed according to the terms of the # GNU General Public License version 2 or any later version. +from __future__ import absolute_import, print_function import re import sys @@ -82,9 +83,9 @@ if re.match('[a-z.]+$', default): default = '<variable>' if name in foundopts and (ctype, default) != foundopts[name]: - print l - print "conflict on %s: %r != %r" % (name, (ctype, default), - foundopts[name]) + print(l) + print("conflict on %s: %r != %r" % (name, (ctype, default), + foundopts[name])) foundopts[name] = (ctype, default) carryover = '' else: @@ -102,7 +103,10 @@ ctype, default = foundopts[name] if default: default = ' [%s]' % default - print "undocumented: %s (%s)%s" % (name, ctype, default) + print("undocumented: %s (%s)%s" % (name, ctype, default)) if __name__ == "__main__": - sys.exit(main(sys.argv[1:])) + if len(sys.argv) > 1: + sys.exit(main(sys.argv[1:])) + else: + sys.exit(main([l.rstrip() for l in sys.stdin]))
--- a/contrib/check-py3-compat.py Sun Mar 13 02:29:11 2016 +0100 +++ b/contrib/check-py3-compat.py Tue Mar 15 14:10:46 2016 -0700 @@ -16,12 +16,12 @@ """Check Python 3 compatibility for a file.""" with open(f, 'rb') as fh: content = fh.read() + root = ast.parse(content) # Ignore empty files. - if not content.strip(): + if not root.body: return - root = ast.parse(content) futures = set() haveprint = False for node in ast.walk(root):
--- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/contrib/chg/Makefile Tue Mar 15 14:10:46 2016 -0700 @@ -0,0 +1,49 @@ +HG = $(CURDIR)/../../hg + +TARGET = chg +SRCS = chg.c hgclient.c util.c +OBJS = $(SRCS:.c=.o) + +CFLAGS ?= -O2 -Wall -Wextra -pedantic -g +CPPFLAGS ?= -D_FORTIFY_SOURCE=2 +override CFLAGS += -std=gnu99 + +DESTDIR = +PREFIX = /usr/local +MANDIR = $(PREFIX)/share/man/man1 + +CHGSOCKDIR = /tmp/chg$(shell id -u) +CHGSOCKNAME = $(CHGSOCKDIR)/server + +.PHONY: all +all: $(TARGET) + +$(TARGET): $(OBJS) + $(CC) $(LDFLAGS) -o $@ $(OBJS) + +chg.o: hgclient.h util.h +hgclient.o: hgclient.h util.h +util.o: util.h + +.PHONY: install +install: $(TARGET) + install -d $(DESTDIR)$(PREFIX)/bin + install -m 755 $(TARGET) $(DESTDIR)$(PREFIX)/bin + install -d $(DESTDIR)$(MANDIR) + install -m 644 chg.1 $(DESTDIR)$(MANDIR) + +.PHONY: serve +serve: + [ -d $(CHGSOCKDIR) ] || ( umask 077; mkdir $(CHGSOCKDIR) ) + $(HG) serve --cwd / --cmdserver chgunix \ + --address $(CHGSOCKNAME) \ + --config extensions.chgserver= \ + --config cmdserver.log=/dev/stderr + +.PHONY: clean +clean: + $(RM) $(OBJS) + +.PHONY: distclean +distclean: + $(RM) $(OBJS) $(TARGET)
--- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/contrib/chg/README Tue Mar 15 14:10:46 2016 -0700 @@ -0,0 +1,30 @@ +cHg +=== + +A fast client for Mercurial command server running on Unix. + +Install: + + $ make + $ make install + +Usage: + + $ chg help # show help of Mercurial + $ alias hg=chg # replace hg command + $ chg --kill-chg-daemon # terminate background server + +Environment variables: + +Although cHg tries to update environment variables, some of them cannot be +changed after spawning the server. The following variables are specially +handled: + + * configuration files are reloaded automatically by default. + * CHGHG or HG specifies the path to the hg executable spawned as the + background command server. + +The following variables are available for testing: + + * CHGDEBUG enables debug messages. + * CHGSOCKNAME specifies the socket path of the background cmdserver.
--- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/contrib/chg/chg.1 Tue Mar 15 14:10:46 2016 -0700 @@ -0,0 +1,41 @@ +.\" Hey, EMACS: -*- nroff -*- +.\" First parameter, NAME, should be all caps +.\" Second parameter, SECTION, should be 1-8, maybe w/ subsection +.\" other parameters are allowed: see man(7), man(1) +.TH CHG 1 "March 3, 2013" +.\" Please adjust this date whenever revising the manpage. +.\" +.\" Some roff macros, for reference: +.\" .nh disable hyphenation +.\" .hy enable hyphenation +.\" .ad l left justify +.\" .ad b justify to both left and right margins +.\" .nf disable filling +.\" .fi enable filling +.\" .br insert line break +.\" .sp <n> insert n+1 empty lines +.\" for manpage-specific macros, see man(7) +.SH NAME +chg \- a fast client for Mercurial command server +.SH SYNOPSIS +.B chg +.IR command " [" options "] [" arguments "]..." +.br +.SH DESCRIPTION +The +.B chg +command is the wrapper for +.B hg +command. +It uses the Mercurial command server to reduce start-up overhead. +.SH OPTIONS +This program accepts the same command line syntax as the +.B hg +command. Additionally it accepts the following options. +.TP +.B \-\-kill\-chg\-daemon +Terminate the background command servers. +.SH SEE ALSO +.BR hg (1), +.SH AUTHOR +Written by Yuya Nishihara <yuya@tcha.org>.
--- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/contrib/chg/chg.c Tue Mar 15 14:10:46 2016 -0700 @@ -0,0 +1,565 @@ +/* + * A fast client for Mercurial command server + * + * Copyright (c) 2011 Yuya Nishihara <yuya@tcha.org> + * + * This software may be used and distributed according to the terms of the + * GNU General Public License version 2 or any later version. + */ + +#include <assert.h> +#include <errno.h> +#include <fcntl.h> +#include <signal.h> +#include <stdio.h> +#include <stdlib.h> +#include <string.h> +#include <sys/file.h> +#include <sys/stat.h> +#include <sys/types.h> +#include <sys/un.h> +#include <sys/wait.h> +#include <time.h> +#include <unistd.h> + +#include "hgclient.h" +#include "util.h" + +#ifndef UNIX_PATH_MAX +#define UNIX_PATH_MAX (sizeof(((struct sockaddr_un *)NULL)->sun_path)) +#endif + +struct cmdserveropts { + char sockname[UNIX_PATH_MAX]; + char redirectsockname[UNIX_PATH_MAX]; + char lockfile[UNIX_PATH_MAX]; + size_t argsize; + const char **args; + int lockfd; +}; + +static void initcmdserveropts(struct cmdserveropts *opts) { + memset(opts, 0, sizeof(struct cmdserveropts)); + opts->lockfd = -1; +} + +static void freecmdserveropts(struct cmdserveropts *opts) { + free(opts->args); + opts->args = NULL; + opts->argsize = 0; +} + +/* + * Test if an argument is a sensitive flag that should be passed to the server. + * Return 0 if not, otherwise the number of arguments starting from the current + * one that should be passed to the server. 
+ */ +static size_t testsensitiveflag(const char *arg) +{ + static const struct { + const char *name; + size_t narg; + } flags[] = { + {"--config", 1}, + {"--cwd", 1}, + {"--repo", 1}, + {"--repository", 1}, + {"--traceback", 0}, + {"-R", 1}, + }; + size_t i; + for (i = 0; i < sizeof(flags) / sizeof(flags[0]); ++i) { + size_t len = strlen(flags[i].name); + size_t narg = flags[i].narg; + if (memcmp(arg, flags[i].name, len) == 0) { + if (arg[len] == '\0') { /* --flag (value) */ + return narg + 1; + } else if (arg[len] == '=' && narg > 0) { /* --flag=value */ + return 1; + } else if (flags[i].name[1] != '-') { /* short flag */ + return 1; + } + } + } + return 0; +} + +/* + * Parse argv[] and put sensitive flags to opts->args + */ +static void setcmdserverargs(struct cmdserveropts *opts, + int argc, const char *argv[]) +{ + size_t i, step; + opts->argsize = 0; + for (i = 0, step = 1; i < (size_t)argc; i += step, step = 1) { + if (!argv[i]) + continue; /* pass clang-analyse */ + if (strcmp(argv[i], "--") == 0) + break; + size_t n = testsensitiveflag(argv[i]); + if (n == 0 || i + n > (size_t)argc) + continue; + opts->args = reallocx(opts->args, + (n + opts->argsize) * sizeof(char *)); + memcpy(opts->args + opts->argsize, argv + i, + sizeof(char *) * n); + opts->argsize += n; + step = n; + } +} + +static void preparesockdir(const char *sockdir) +{ + int r; + r = mkdir(sockdir, 0700); + if (r < 0 && errno != EEXIST) + abortmsg("cannot create sockdir %s (errno = %d)", + sockdir, errno); + + struct stat st; + r = lstat(sockdir, &st); + if (r < 0) + abortmsg("cannot stat %s (errno = %d)", sockdir, errno); + if (!S_ISDIR(st.st_mode)) + abortmsg("cannot create sockdir %s (file exists)", sockdir); + if (st.st_uid != geteuid() || st.st_mode & 0077) + abortmsg("insecure sockdir %s", sockdir); +} + +static void setcmdserveropts(struct cmdserveropts *opts) +{ + int r; + char sockdir[UNIX_PATH_MAX]; + const char *envsockname = getenv("CHGSOCKNAME"); + if (!envsockname) { + /* by 
default, put socket file in secure directory + * (permission of socket file may be ignored on some Unices) */ + const char *tmpdir = getenv("TMPDIR"); + if (!tmpdir) + tmpdir = "/tmp"; + r = snprintf(sockdir, sizeof(sockdir), "%s/chg%d", + tmpdir, geteuid()); + if (r < 0 || (size_t)r >= sizeof(sockdir)) + abortmsg("too long TMPDIR (r = %d)", r); + preparesockdir(sockdir); + } + + const char *basename = (envsockname) ? envsockname : sockdir; + const char *sockfmt = (envsockname) ? "%s" : "%s/server"; + const char *lockfmt = (envsockname) ? "%s.lock" : "%s/lock"; + r = snprintf(opts->sockname, sizeof(opts->sockname), sockfmt, basename); + if (r < 0 || (size_t)r >= sizeof(opts->sockname)) + abortmsg("too long TMPDIR or CHGSOCKNAME (r = %d)", r); + r = snprintf(opts->lockfile, sizeof(opts->lockfile), lockfmt, basename); + if (r < 0 || (size_t)r >= sizeof(opts->lockfile)) + abortmsg("too long TMPDIR or CHGSOCKNAME (r = %d)", r); +} + +/* + * Acquire a file lock that indicates a client is trying to start and connect + * to a server, before executing a command. The lock is released upon exit or + * explicit unlock. Will block if the lock is held by another process. + */ +static void lockcmdserver(struct cmdserveropts *opts) +{ + if (opts->lockfd == -1) { + opts->lockfd = open(opts->lockfile, O_RDWR | O_CREAT | O_NOFOLLOW, 0600); + if (opts->lockfd == -1) + abortmsg("cannot create lock file %s", opts->lockfile); + } + int r = flock(opts->lockfd, LOCK_EX); + if (r == -1) + abortmsg("cannot acquire lock"); +} + +/* + * Release the file lock held by calling lockcmdserver. Will do nothing if + * lockcmdserver is not called. 
+ */ +static void unlockcmdserver(struct cmdserveropts *opts) +{ + if (opts->lockfd == -1) + return; + flock(opts->lockfd, LOCK_UN); + close(opts->lockfd); + opts->lockfd = -1; +} + +static const char *gethgcmd(void) +{ + static const char *hgcmd = NULL; + if (!hgcmd) { + hgcmd = getenv("CHGHG"); + if (!hgcmd || hgcmd[0] == '\0') + hgcmd = getenv("HG"); + if (!hgcmd || hgcmd[0] == '\0') + hgcmd = "hg"; + } + return hgcmd; +} + +static void execcmdserver(const struct cmdserveropts *opts) +{ + const char *hgcmd = gethgcmd(); + + const char *baseargv[] = { + hgcmd, + "serve", + "--cmdserver", "chgunix", + "--address", opts->sockname, + "--daemon-postexec", "chdir:/", + "--config", "extensions.chgserver=", + }; + size_t baseargvsize = sizeof(baseargv) / sizeof(baseargv[0]); + size_t argsize = baseargvsize + opts->argsize + 1; + + const char **argv = mallocx(sizeof(char *) * argsize); + memcpy(argv, baseargv, sizeof(baseargv)); + memcpy(argv + baseargvsize, opts->args, sizeof(char *) * opts->argsize); + argv[argsize - 1] = NULL; + + if (putenv("CHGINTERNALMARK=") != 0) + abortmsg("failed to putenv (errno = %d)", errno); + if (execvp(hgcmd, (char **)argv) < 0) + abortmsg("failed to exec cmdserver (errno = %d)", errno); + free(argv); +} + +/* Retry until we can connect to the server. Give up after some time. 
*/ +static hgclient_t *retryconnectcmdserver(struct cmdserveropts *opts, pid_t pid) +{ + static const struct timespec sleepreq = {0, 10 * 1000000}; + int pst = 0; + + for (unsigned int i = 0; i < 10 * 100; i++) { + hgclient_t *hgc = hgc_open(opts->sockname); + if (hgc) + return hgc; + + if (pid > 0) { + /* collect zombie if child process fails to start */ + int r = waitpid(pid, &pst, WNOHANG); + if (r != 0) + goto cleanup; + } + + nanosleep(&sleepreq, NULL); + } + + abortmsg("timed out waiting for cmdserver %s", opts->sockname); + return NULL; + +cleanup: + if (WIFEXITED(pst)) { + debugmsg("cmdserver exited with status %d", WEXITSTATUS(pst)); + exit(WEXITSTATUS(pst)); + } else if (WIFSIGNALED(pst)) { + abortmsg("cmdserver killed by signal %d", WTERMSIG(pst)); + } else { + abortmsg("error white waiting cmdserver"); + } + return NULL; +} + +/* Connect to a cmdserver. Will start a new server on demand. */ +static hgclient_t *connectcmdserver(struct cmdserveropts *opts) +{ + const char *sockname = opts->redirectsockname[0] ? + opts->redirectsockname : opts->sockname; + hgclient_t *hgc = hgc_open(sockname); + if (hgc) + return hgc; + + lockcmdserver(opts); + hgc = hgc_open(sockname); + if (hgc) { + unlockcmdserver(opts); + debugmsg("cmdserver is started by another process"); + return hgc; + } + + /* prevent us from being connected to an outdated server: we were + * told by a server to redirect to opts->redirectsockname and that + * address does not work. we do not want to connect to the server + * again because it will probably tell us the same thing. 
*/ + if (sockname == opts->redirectsockname) + unlink(opts->sockname); + + debugmsg("start cmdserver at %s", opts->sockname); + + pid_t pid = fork(); + if (pid < 0) + abortmsg("failed to fork cmdserver process"); + if (pid == 0) { + /* do not leak lockfd to hg */ + close(opts->lockfd); + /* bypass uisetup() of pager extension */ + int nullfd = open("/dev/null", O_WRONLY); + if (nullfd >= 0) { + dup2(nullfd, fileno(stdout)); + close(nullfd); + } + execcmdserver(opts); + } else { + hgc = retryconnectcmdserver(opts, pid); + } + + unlockcmdserver(opts); + return hgc; +} + +static void killcmdserver(const struct cmdserveropts *opts) +{ + /* resolve config hash */ + char *resolvedpath = realpath(opts->sockname, NULL); + if (resolvedpath) { + unlink(resolvedpath); + free(resolvedpath); + } +} + +static pid_t peerpid = 0; + +static void forwardsignal(int sig) +{ + assert(peerpid > 0); + if (kill(peerpid, sig) < 0) + abortmsg("cannot kill %d (errno = %d)", peerpid, errno); + debugmsg("forward signal %d", sig); +} + +static void handlestopsignal(int sig) +{ + sigset_t unblockset, oldset; + struct sigaction sa, oldsa; + if (sigemptyset(&unblockset) < 0) + goto error; + if (sigaddset(&unblockset, sig) < 0) + goto error; + memset(&sa, 0, sizeof(sa)); + sa.sa_handler = SIG_DFL; + sa.sa_flags = SA_RESTART; + if (sigemptyset(&sa.sa_mask) < 0) + goto error; + + forwardsignal(sig); + if (raise(sig) < 0) /* resend to self */ + goto error; + if (sigaction(sig, &sa, &oldsa) < 0) + goto error; + if (sigprocmask(SIG_UNBLOCK, &unblockset, &oldset) < 0) + goto error; + /* resent signal will be handled before sigprocmask() returns */ + if (sigprocmask(SIG_SETMASK, &oldset, NULL) < 0) + goto error; + if (sigaction(sig, &oldsa, NULL) < 0) + goto error; + return; + +error: + abortmsg("failed to handle stop signal (errno = %d)", errno); +} + +static void setupsignalhandler(pid_t pid) +{ + if (pid <= 0) + return; + peerpid = pid; + + struct sigaction sa; + memset(&sa, 0, sizeof(sa)); + 
sa.sa_handler = forwardsignal; + sa.sa_flags = SA_RESTART; + if (sigemptyset(&sa.sa_mask) < 0) + goto error; + + if (sigaction(SIGHUP, &sa, NULL) < 0) + goto error; + if (sigaction(SIGINT, &sa, NULL) < 0) + goto error; + + /* terminate frontend by double SIGTERM in case of server freeze */ + sa.sa_flags |= SA_RESETHAND; + if (sigaction(SIGTERM, &sa, NULL) < 0) + goto error; + + /* propagate job control requests to worker */ + sa.sa_handler = forwardsignal; + sa.sa_flags = SA_RESTART; + if (sigaction(SIGCONT, &sa, NULL) < 0) + goto error; + sa.sa_handler = handlestopsignal; + sa.sa_flags = SA_RESTART; + if (sigaction(SIGTSTP, &sa, NULL) < 0) + goto error; + + return; + +error: + abortmsg("failed to set up signal handlers (errno = %d)", errno); +} + +/* This implementation is based on hgext/pager.py (pre 369741ef7253) */ +static void setuppager(hgclient_t *hgc, const char *const args[], + size_t argsize) +{ + const char *pagercmd = hgc_getpager(hgc, args, argsize); + if (!pagercmd) + return; + + int pipefds[2]; + if (pipe(pipefds) < 0) + return; + pid_t pid = fork(); + if (pid < 0) + goto error; + if (pid == 0) { + close(pipefds[0]); + if (dup2(pipefds[1], fileno(stdout)) < 0) + goto error; + if (isatty(fileno(stderr))) { + if (dup2(pipefds[1], fileno(stderr)) < 0) + goto error; + } + close(pipefds[1]); + hgc_attachio(hgc); /* reattach to pager */ + return; + } else { + dup2(pipefds[0], fileno(stdin)); + close(pipefds[0]); + close(pipefds[1]); + + int r = execlp("/bin/sh", "/bin/sh", "-c", pagercmd, NULL); + if (r < 0) { + abortmsg("cannot start pager '%s' (errno = %d)", + pagercmd, errno); + } + return; + } + +error: + close(pipefds[0]); + close(pipefds[1]); + abortmsg("failed to prepare pager (errno = %d)", errno); +} + +/* Run instructions sent from the server like unlink and set redirect path */ +static void runinstructions(struct cmdserveropts *opts, const char **insts) +{ + assert(insts); + opts->redirectsockname[0] = '\0'; + const char **pinst; + for (pinst = 
insts; *pinst; pinst++) { + debugmsg("instruction: %s", *pinst); + if (strncmp(*pinst, "unlink ", 7) == 0) { + unlink(*pinst + 7); + } else if (strncmp(*pinst, "redirect ", 9) == 0) { + int r = snprintf(opts->redirectsockname, + sizeof(opts->redirectsockname), + "%s", *pinst + 9); + if (r < 0 || r >= (int)sizeof(opts->redirectsockname)) + abortmsg("redirect path is too long (%d)", r); + } else if (strncmp(*pinst, "exit ", 5) == 0) { + int n = 0; + if (sscanf(*pinst + 5, "%d", &n) != 1) + abortmsg("cannot read the exit code"); + exit(n); + } else { + abortmsg("unknown instruction: %s", *pinst); + } + } +} + +/* + * Test whether the command is unsupported or not. This is not designed to + * cover all cases. But it's fast, does not depend on the server and does + * not return false positives. + */ +static int isunsupported(int argc, const char *argv[]) +{ + enum { + SERVE = 1, + DAEMON = 2, + SERVEDAEMON = SERVE | DAEMON, + TIME = 4, + }; + unsigned int state = 0; + int i; + for (i = 0; i < argc; ++i) { + if (strcmp(argv[i], "--") == 0) + break; + if (i == 0 && strcmp("serve", argv[i]) == 0) + state |= SERVE; + else if (strcmp("-d", argv[i]) == 0 || + strcmp("--daemon", argv[i]) == 0) + state |= DAEMON; + else if (strcmp("--time", argv[i]) == 0) + state |= TIME; + } + return (state & TIME) == TIME || + (state & SERVEDAEMON) == SERVEDAEMON; +} + +static void execoriginalhg(const char *argv[]) +{ + debugmsg("execute original hg"); + if (execvp(gethgcmd(), (char **)argv) < 0) + abortmsg("failed to exec original hg (errno = %d)", errno); +} + +int main(int argc, const char *argv[], const char *envp[]) +{ + if (getenv("CHGDEBUG")) + enabledebugmsg(); + + if (getenv("CHGINTERNALMARK")) + abortmsg("chg started by chg detected.\n" + "Please make sure ${HG:-hg} is not a symlink or " + "wrapper to chg. 
Alternatively, set $CHGHG to the " + "path of real hg."); + + if (isunsupported(argc - 1, argv + 1)) + execoriginalhg(argv); + + struct cmdserveropts opts; + initcmdserveropts(&opts); + setcmdserveropts(&opts); + setcmdserverargs(&opts, argc, argv); + + if (argc == 2) { + if (strcmp(argv[1], "--kill-chg-daemon") == 0) { + killcmdserver(&opts); + return 0; + } + } + + hgclient_t *hgc; + size_t retry = 0; + while (1) { + hgc = connectcmdserver(&opts); + if (!hgc) + abortmsg("cannot open hg client"); + hgc_setenv(hgc, envp); + const char **insts = hgc_validate(hgc, argv + 1, argc - 1); + if (insts == NULL) + break; + runinstructions(&opts, insts); + free(insts); + hgc_close(hgc); + if (++retry > 10) + abortmsg("too many redirections.\n" + "Please make sure %s is not a wrapper which " + "changes sensitive environment variables " + "before executing hg. If you have to use a " + "wrapper, wrap chg instead of hg.", + gethgcmd()); + } + + setupsignalhandler(hgc_peerpid(hgc)); + setuppager(hgc, argv + 1, argc - 1); + int exitcode = hgc_runcommand(hgc, argv + 1, argc - 1); + hgc_close(hgc); + freecmdserveropts(&opts); + return exitcode; +}
--- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/contrib/chg/hgclient.c Tue Mar 15 14:10:46 2016 -0700 @@ -0,0 +1,578 @@ +/* + * A command server client that uses Unix domain socket + * + * Copyright (c) 2011 Yuya Nishihara <yuya@tcha.org> + * + * This software may be used and distributed according to the terms of the + * GNU General Public License version 2 or any later version. + */ + +#include <arpa/inet.h> /* for ntohl(), htonl() */ +#include <assert.h> +#include <ctype.h> +#include <errno.h> +#include <fcntl.h> +#include <signal.h> +#include <stdint.h> +#include <stdio.h> +#include <stdlib.h> +#include <string.h> +#include <sys/socket.h> +#include <sys/stat.h> +#include <sys/un.h> +#include <unistd.h> + +#include "hgclient.h" +#include "util.h" + +enum { + CAP_GETENCODING = 0x0001, + CAP_RUNCOMMAND = 0x0002, + /* cHg extension: */ + CAP_ATTACHIO = 0x0100, + CAP_CHDIR = 0x0200, + CAP_GETPAGER = 0x0400, + CAP_SETENV = 0x0800, + CAP_SETUMASK = 0x1000, + CAP_VALIDATE = 0x2000, +}; + +typedef struct { + const char *name; + unsigned int flag; +} cappair_t; + +static const cappair_t captable[] = { + {"getencoding", CAP_GETENCODING}, + {"runcommand", CAP_RUNCOMMAND}, + {"attachio", CAP_ATTACHIO}, + {"chdir", CAP_CHDIR}, + {"getpager", CAP_GETPAGER}, + {"setenv", CAP_SETENV}, + {"setumask", CAP_SETUMASK}, + {"validate", CAP_VALIDATE}, + {NULL, 0}, /* terminator */ +}; + +typedef struct { + char ch; + char *data; + size_t maxdatasize; + size_t datasize; +} context_t; + +struct hgclient_tag_ { + int sockfd; + pid_t pid; + context_t ctx; + unsigned int capflags; +}; + +static const size_t defaultdatasize = 4096; + +static void initcontext(context_t *ctx) +{ + ctx->ch = '\0'; + ctx->data = malloc(defaultdatasize); + ctx->maxdatasize = (ctx->data) ? 
defaultdatasize : 0; + ctx->datasize = 0; + debugmsg("initialize context buffer with size %zu", ctx->maxdatasize); +} + +static void enlargecontext(context_t *ctx, size_t newsize) +{ + if (newsize <= ctx->maxdatasize) + return; + + newsize = defaultdatasize + * ((newsize + defaultdatasize - 1) / defaultdatasize); + ctx->data = reallocx(ctx->data, newsize); + ctx->maxdatasize = newsize; + debugmsg("enlarge context buffer to %zu", ctx->maxdatasize); +} + +static void freecontext(context_t *ctx) +{ + debugmsg("free context buffer"); + free(ctx->data); + ctx->data = NULL; + ctx->maxdatasize = 0; + ctx->datasize = 0; +} + +/* Read channeled response from cmdserver */ +static void readchannel(hgclient_t *hgc) +{ + assert(hgc); + + ssize_t rsize = recv(hgc->sockfd, &hgc->ctx.ch, sizeof(hgc->ctx.ch), 0); + if (rsize != sizeof(hgc->ctx.ch)) + abortmsg("failed to read channel"); + + uint32_t datasize_n; + rsize = recv(hgc->sockfd, &datasize_n, sizeof(datasize_n), 0); + if (rsize != sizeof(datasize_n)) + abortmsg("failed to read data size"); + + /* datasize denotes the maximum size to write if input request */ + hgc->ctx.datasize = ntohl(datasize_n); + enlargecontext(&hgc->ctx, hgc->ctx.datasize); + + if (isupper(hgc->ctx.ch) && hgc->ctx.ch != 'S') + return; /* assumes input request */ + + size_t cursize = 0; + while (cursize < hgc->ctx.datasize) { + rsize = recv(hgc->sockfd, hgc->ctx.data + cursize, + hgc->ctx.datasize - cursize, 0); + if (rsize < 0) + abortmsg("failed to read data block"); + cursize += rsize; + } +} + +static void sendall(int sockfd, const void *data, size_t datasize) +{ + const char *p = data; + const char *const endp = p + datasize; + while (p < endp) { + ssize_t r = send(sockfd, p, endp - p, 0); + if (r < 0) + abortmsg("cannot communicate (errno = %d)", errno); + p += r; + } +} + +/* Write lengh-data block to cmdserver */ +static void writeblock(const hgclient_t *hgc) +{ + assert(hgc); + + const uint32_t datasize_n = htonl(hgc->ctx.datasize); + 
sendall(hgc->sockfd, &datasize_n, sizeof(datasize_n)); + + sendall(hgc->sockfd, hgc->ctx.data, hgc->ctx.datasize); +} + +static void writeblockrequest(const hgclient_t *hgc, const char *chcmd) +{ + debugmsg("request %s, block size %zu", chcmd, hgc->ctx.datasize); + + char buf[strlen(chcmd) + 1]; + memcpy(buf, chcmd, sizeof(buf) - 1); + buf[sizeof(buf) - 1] = '\n'; + sendall(hgc->sockfd, buf, sizeof(buf)); + + writeblock(hgc); +} + +/* Build '\0'-separated list of args. argsize < 0 denotes that args are + * terminated by NULL. */ +static void packcmdargs(context_t *ctx, const char *const args[], + ssize_t argsize) +{ + ctx->datasize = 0; + const char *const *const end = (argsize >= 0) ? args + argsize : NULL; + for (const char *const *it = args; it != end && *it; ++it) { + const size_t n = strlen(*it) + 1; /* include '\0' */ + enlargecontext(ctx, ctx->datasize + n); + memcpy(ctx->data + ctx->datasize, *it, n); + ctx->datasize += n; + } + + if (ctx->datasize > 0) + --ctx->datasize; /* strip last '\0' */ +} + +/* Extract '\0'-separated list of args to new buffer, terminated by NULL */ +static const char **unpackcmdargsnul(const context_t *ctx) +{ + const char **args = NULL; + size_t nargs = 0, maxnargs = 0; + const char *s = ctx->data; + const char *e = ctx->data + ctx->datasize; + for (;;) { + if (nargs + 1 >= maxnargs) { /* including last NULL */ + maxnargs += 256; + args = reallocx(args, maxnargs * sizeof(args[0])); + } + args[nargs] = s; + nargs++; + s = memchr(s, '\0', e - s); + if (!s) + break; + s++; + } + args[nargs] = NULL; + return args; +} + +static void handlereadrequest(hgclient_t *hgc) +{ + context_t *ctx = &hgc->ctx; + size_t r = fread(ctx->data, sizeof(ctx->data[0]), ctx->datasize, stdin); + ctx->datasize = r; + writeblock(hgc); +} + +/* Read single-line */ +static void handlereadlinerequest(hgclient_t *hgc) +{ + context_t *ctx = &hgc->ctx; + if (!fgets(ctx->data, ctx->datasize, stdin)) + ctx->data[0] = '\0'; + ctx->datasize = strlen(ctx->data); + 
writeblock(hgc); +} + +/* Execute the requested command and write exit code */ +static void handlesystemrequest(hgclient_t *hgc) +{ + context_t *ctx = &hgc->ctx; + enlargecontext(ctx, ctx->datasize + 1); + ctx->data[ctx->datasize] = '\0'; /* terminate last string */ + + const char **args = unpackcmdargsnul(ctx); + if (!args[0] || !args[1]) + abortmsg("missing command or cwd in system request"); + debugmsg("run '%s' at '%s'", args[0], args[1]); + int32_t r = runshellcmd(args[0], args + 2, args[1]); + free(args); + + uint32_t r_n = htonl(r); + memcpy(ctx->data, &r_n, sizeof(r_n)); + ctx->datasize = sizeof(r_n); + writeblock(hgc); +} + +/* Read response of command execution until receiving 'r'-esult */ +static void handleresponse(hgclient_t *hgc) +{ + for (;;) { + readchannel(hgc); + context_t *ctx = &hgc->ctx; + debugmsg("response read from channel %c, size %zu", + ctx->ch, ctx->datasize); + switch (ctx->ch) { + case 'o': + fwrite(ctx->data, sizeof(ctx->data[0]), ctx->datasize, + stdout); + break; + case 'e': + fwrite(ctx->data, sizeof(ctx->data[0]), ctx->datasize, + stderr); + break; + case 'd': + /* assumes last char is '\n' */ + ctx->data[ctx->datasize - 1] = '\0'; + debugmsg("server: %s", ctx->data); + break; + case 'r': + return; + case 'I': + handlereadrequest(hgc); + break; + case 'L': + handlereadlinerequest(hgc); + break; + case 'S': + handlesystemrequest(hgc); + break; + default: + if (isupper(ctx->ch)) + abortmsg("cannot handle response (ch = %c)", + ctx->ch); + } + } +} + +static unsigned int parsecapabilities(const char *s, const char *e) +{ + unsigned int flags = 0; + while (s < e) { + const char *t = strchr(s, ' '); + if (!t || t > e) + t = e; + const cappair_t *cap; + for (cap = captable; cap->flag; ++cap) { + size_t n = t - s; + if (strncmp(s, cap->name, n) == 0 && + strlen(cap->name) == n) { + flags |= cap->flag; + break; + } + } + s = t + 1; + } + return flags; +} + +static void readhello(hgclient_t *hgc) +{ + readchannel(hgc); + context_t *ctx = 
&hgc->ctx; + if (ctx->ch != 'o') { + char ch = ctx->ch; + if (ch == 'e') { + /* write early error and will exit */ + fwrite(ctx->data, sizeof(ctx->data[0]), ctx->datasize, + stderr); + handleresponse(hgc); + } + abortmsg("unexpected channel of hello message (ch = %c)", ch); + } + enlargecontext(ctx, ctx->datasize + 1); + ctx->data[ctx->datasize] = '\0'; + debugmsg("hello received: %s (size = %zu)", ctx->data, ctx->datasize); + + const char *s = ctx->data; + const char *const dataend = ctx->data + ctx->datasize; + while (s < dataend) { + const char *t = strchr(s, ':'); + if (!t || t[1] != ' ') + break; + const char *u = strchr(t + 2, '\n'); + if (!u) + u = dataend; + if (strncmp(s, "capabilities:", t - s + 1) == 0) { + hgc->capflags = parsecapabilities(t + 2, u); + } else if (strncmp(s, "pid:", t - s + 1) == 0) { + hgc->pid = strtol(t + 2, NULL, 10); + } + s = u + 1; + } + debugmsg("capflags=0x%04x, pid=%d", hgc->capflags, hgc->pid); +} + +static void attachio(hgclient_t *hgc) +{ + debugmsg("request attachio"); + static const char chcmd[] = "attachio\n"; + sendall(hgc->sockfd, chcmd, sizeof(chcmd) - 1); + readchannel(hgc); + context_t *ctx = &hgc->ctx; + if (ctx->ch != 'I') + abortmsg("unexpected response for attachio (ch = %c)", ctx->ch); + + static const int fds[3] = {STDIN_FILENO, STDOUT_FILENO, STDERR_FILENO}; + struct msghdr msgh; + memset(&msgh, 0, sizeof(msgh)); + struct iovec iov = {ctx->data, ctx->datasize}; /* dummy payload */ + msgh.msg_iov = &iov; + msgh.msg_iovlen = 1; + char fdbuf[CMSG_SPACE(sizeof(fds))]; + msgh.msg_control = fdbuf; + msgh.msg_controllen = sizeof(fdbuf); + struct cmsghdr *cmsg = CMSG_FIRSTHDR(&msgh); + cmsg->cmsg_level = SOL_SOCKET; + cmsg->cmsg_type = SCM_RIGHTS; + cmsg->cmsg_len = CMSG_LEN(sizeof(fds)); + memcpy(CMSG_DATA(cmsg), fds, sizeof(fds)); + msgh.msg_controllen = cmsg->cmsg_len; + ssize_t r = sendmsg(hgc->sockfd, &msgh, 0); + if (r < 0) + abortmsg("sendmsg failed (errno = %d)", errno); + + handleresponse(hgc); + int32_t n; + 
if (ctx->datasize != sizeof(n)) + abortmsg("unexpected size of attachio result"); + memcpy(&n, ctx->data, sizeof(n)); + n = ntohl(n); + if (n != sizeof(fds) / sizeof(fds[0])) + abortmsg("failed to send fds (n = %d)", n); +} + +static void chdirtocwd(hgclient_t *hgc) +{ + if (!getcwd(hgc->ctx.data, hgc->ctx.maxdatasize)) + abortmsg("failed to getcwd (errno = %d)", errno); + hgc->ctx.datasize = strlen(hgc->ctx.data); + writeblockrequest(hgc, "chdir"); +} + +static void forwardumask(hgclient_t *hgc) +{ + mode_t mask = umask(0); + umask(mask); + + static const char command[] = "setumask\n"; + sendall(hgc->sockfd, command, sizeof(command) - 1); + uint32_t data = htonl(mask); + sendall(hgc->sockfd, &data, sizeof(data)); +} + +/*! + * Open connection to per-user cmdserver + * + * If no background server running, returns NULL. + */ +hgclient_t *hgc_open(const char *sockname) +{ + int fd = socket(AF_UNIX, SOCK_STREAM, 0); + if (fd < 0) + abortmsg("cannot create socket (errno = %d)", errno); + + /* don't keep fd on fork(), so that it can be closed when the parent + * process get terminated. 
*/ + int flags = fcntl(fd, F_GETFD); + if (flags < 0) + abortmsg("cannot get flags of socket (errno = %d)", errno); + if (fcntl(fd, F_SETFD, flags | FD_CLOEXEC) < 0) + abortmsg("cannot set flags of socket (errno = %d)", errno); + + struct sockaddr_un addr; + addr.sun_family = AF_UNIX; + strncpy(addr.sun_path, sockname, sizeof(addr.sun_path)); + addr.sun_path[sizeof(addr.sun_path) - 1] = '\0'; + + debugmsg("connect to %s", addr.sun_path); + int r = connect(fd, (struct sockaddr *)&addr, sizeof(addr)); + if (r < 0) { + close(fd); + if (errno == ENOENT || errno == ECONNREFUSED) + return NULL; + abortmsg("cannot connect to %s (errno = %d)", + addr.sun_path, errno); + } + + hgclient_t *hgc = mallocx(sizeof(hgclient_t)); + memset(hgc, 0, sizeof(*hgc)); + hgc->sockfd = fd; + initcontext(&hgc->ctx); + + readhello(hgc); + if (!(hgc->capflags & CAP_RUNCOMMAND)) + abortmsg("insufficient capability: runcommand"); + if (hgc->capflags & CAP_ATTACHIO) + attachio(hgc); + if (hgc->capflags & CAP_CHDIR) + chdirtocwd(hgc); + if (hgc->capflags & CAP_SETUMASK) + forwardumask(hgc); + + return hgc; +} + +/*! + * Close connection and free allocated memory + */ +void hgc_close(hgclient_t *hgc) +{ + assert(hgc); + freecontext(&hgc->ctx); + close(hgc->sockfd); + free(hgc); +} + +pid_t hgc_peerpid(const hgclient_t *hgc) +{ + assert(hgc); + return hgc->pid; +} + +/*! + * Send command line arguments to let the server load the repo config and check + * whether it can process our request directly or not. + * Make sure hgc_setenv is called before calling this. + * + * @return - NULL, the server believes it can handle our request, or does not + * support "validate" command. + * - a list of strings, the server cannot handle our request and it + * sent instructions telling us how to fix the issue. See + * chgserver.py for possible instruction formats. + * the list should be freed by the caller. + * the last string is guaranteed to be NULL. 
+ */ +const char **hgc_validate(hgclient_t *hgc, const char *const args[], + size_t argsize) +{ + assert(hgc); + if (!(hgc->capflags & CAP_VALIDATE)) + return NULL; + + packcmdargs(&hgc->ctx, args, argsize); + writeblockrequest(hgc, "validate"); + handleresponse(hgc); + + /* the server returns '\0' if it can handle our request */ + if (hgc->ctx.datasize <= 1) + return NULL; + + /* make sure the buffer is '\0' terminated */ + enlargecontext(&hgc->ctx, hgc->ctx.datasize + 1); + hgc->ctx.data[hgc->ctx.datasize] = '\0'; + return unpackcmdargsnul(&hgc->ctx); +} + +/*! + * Execute the specified Mercurial command + * + * @return result code + */ +int hgc_runcommand(hgclient_t *hgc, const char *const args[], size_t argsize) +{ + assert(hgc); + + packcmdargs(&hgc->ctx, args, argsize); + writeblockrequest(hgc, "runcommand"); + handleresponse(hgc); + + int32_t exitcode_n; + if (hgc->ctx.datasize != sizeof(exitcode_n)) { + abortmsg("unexpected size of exitcode"); + } + memcpy(&exitcode_n, hgc->ctx.data, sizeof(exitcode_n)); + return ntohl(exitcode_n); +} + +/*! + * (Re-)send client's stdio channels so that the server can access to tty + */ +void hgc_attachio(hgclient_t *hgc) +{ + assert(hgc); + if (!(hgc->capflags & CAP_ATTACHIO)) + return; + attachio(hgc); +} + +/*! + * Get pager command for the given Mercurial command args + * + * If no pager enabled, returns NULL. The return value becomes invalid + * once you run another request to hgc. + */ +const char *hgc_getpager(hgclient_t *hgc, const char *const args[], + size_t argsize) +{ + assert(hgc); + + if (!(hgc->capflags & CAP_GETPAGER)) + return NULL; + + packcmdargs(&hgc->ctx, args, argsize); + writeblockrequest(hgc, "getpager"); + handleresponse(hgc); + + if (hgc->ctx.datasize < 1 || hgc->ctx.data[0] == '\0') + return NULL; + enlargecontext(&hgc->ctx, hgc->ctx.datasize + 1); + hgc->ctx.data[hgc->ctx.datasize] = '\0'; + return hgc->ctx.data; +} + +/*! 
+ * Update server's environment variables + * + * @param envp list of environment variables in "NAME=VALUE" format, + * terminated by NULL. + */ +void hgc_setenv(hgclient_t *hgc, const char *const envp[]) +{ + assert(hgc && envp); + if (!(hgc->capflags & CAP_SETENV)) + return; + packcmdargs(&hgc->ctx, envp, /*argsize*/ -1); + writeblockrequest(hgc, "setenv"); +}
/* contrib/chg/hgclient.h */
/*
 * A command server client that uses Unix domain socket
 *
 * Copyright (c) 2011 Yuya Nishihara <yuya@tcha.org>
 *
 * This software may be used and distributed according to the terms of the
 * GNU General Public License version 2 or any later version.
 */

#ifndef HGCLIENT_H_
#define HGCLIENT_H_

#include <sys/types.h>

/* opaque handle for a cmdserver connection */
struct hgclient_tag_;
typedef struct hgclient_tag_ hgclient_t;

/* connection lifecycle */
hgclient_t *hgc_open(const char *sockname);
void hgc_close(hgclient_t *hgc);

pid_t hgc_peerpid(const hgclient_t *hgc);

/* request helpers; see hgclient.c for the per-function contracts */
const char **hgc_validate(hgclient_t *hgc, const char *const args[],
			  size_t argsize);
int hgc_runcommand(hgclient_t *hgc, const char *const args[], size_t argsize);
void hgc_attachio(hgclient_t *hgc);
const char *hgc_getpager(hgclient_t *hgc, const char *const args[],
			 size_t argsize);
void hgc_setenv(hgclient_t *hgc, const char *const envp[]);

#endif  /* HGCLIENT_H_ */
--- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/contrib/chg/util.c Tue Mar 15 14:10:46 2016 -0700 @@ -0,0 +1,139 @@ +/* + * Utility functions + * + * Copyright (c) 2011 Yuya Nishihara <yuya@tcha.org> + * + * This software may be used and distributed according to the terms of the + * GNU General Public License version 2 or any later version. + */ + +#include <signal.h> +#include <stdarg.h> +#include <stdio.h> +#include <stdlib.h> +#include <string.h> +#include <sys/types.h> +#include <sys/wait.h> +#include <unistd.h> + +#include "util.h" + +void abortmsg(const char *fmt, ...) +{ + va_list args; + va_start(args, fmt); + fputs("\033[1;31mchg: abort: ", stderr); + vfprintf(stderr, fmt, args); + fputs("\033[m\n", stderr); + va_end(args); + + exit(255); +} + +static int debugmsgenabled = 0; + +void enabledebugmsg(void) +{ + debugmsgenabled = 1; +} + +void debugmsg(const char *fmt, ...) +{ + if (!debugmsgenabled) + return; + + va_list args; + va_start(args, fmt); + fputs("\033[1;30mchg: debug: ", stderr); + vfprintf(stderr, fmt, args); + fputs("\033[m\n", stderr); + va_end(args); +} + +void *mallocx(size_t size) +{ + void *result = malloc(size); + if (!result) + abortmsg("failed to malloc"); + return result; +} + +void *reallocx(void *ptr, size_t size) +{ + void *result = realloc(ptr, size); + if (!result) + abortmsg("failed to realloc"); + return result; +} + +/* + * Execute a shell command in mostly the same manner as system(), with the + * give environment variables, after chdir to the given cwd. Returns a status + * code compatible with the Python subprocess module. 
+ */ +int runshellcmd(const char *cmd, const char *envp[], const char *cwd) +{ + enum { F_SIGINT = 1, F_SIGQUIT = 2, F_SIGMASK = 4, F_WAITPID = 8 }; + unsigned int doneflags = 0; + int status = 0; + struct sigaction newsa, oldsaint, oldsaquit; + sigset_t oldmask; + + /* block or mask signals just as system() does */ + memset(&newsa, 0, sizeof(newsa)); + newsa.sa_handler = SIG_IGN; + newsa.sa_flags = 0; + if (sigemptyset(&newsa.sa_mask) < 0) + goto done; + if (sigaction(SIGINT, &newsa, &oldsaint) < 0) + goto done; + doneflags |= F_SIGINT; + if (sigaction(SIGQUIT, &newsa, &oldsaquit) < 0) + goto done; + doneflags |= F_SIGQUIT; + + if (sigaddset(&newsa.sa_mask, SIGCHLD) < 0) + goto done; + if (sigprocmask(SIG_BLOCK, &newsa.sa_mask, &oldmask) < 0) + goto done; + doneflags |= F_SIGMASK; + + pid_t pid = fork(); + if (pid < 0) + goto done; + if (pid == 0) { + sigaction(SIGINT, &oldsaint, NULL); + sigaction(SIGQUIT, &oldsaquit, NULL); + sigprocmask(SIG_SETMASK, &oldmask, NULL); + if (cwd && chdir(cwd) < 0) + _exit(127); + const char *argv[] = {"sh", "-c", cmd, NULL}; + if (envp) { + execve("/bin/sh", (char **)argv, (char **)envp); + } else { + execv("/bin/sh", (char **)argv); + } + _exit(127); + } else { + if (waitpid(pid, &status, 0) < 0) + goto done; + doneflags |= F_WAITPID; + } + +done: + if (doneflags & F_SIGINT) + sigaction(SIGINT, &oldsaint, NULL); + if (doneflags & F_SIGQUIT) + sigaction(SIGQUIT, &oldsaquit, NULL); + if (doneflags & F_SIGMASK) + sigprocmask(SIG_SETMASK, &oldmask, NULL); + + /* no way to report other errors, use 127 (= shell termination) */ + if (!(doneflags & F_WAITPID)) + return 127; + if (WIFEXITED(status)) + return WEXITSTATUS(status); + if (WIFSIGNALED(status)) + return -WTERMSIG(status); + return 127; +}
/* contrib/chg/util.h */
/*
 * Utility functions
 *
 * Copyright (c) 2011 Yuya Nishihara <yuya@tcha.org>
 *
 * This software may be used and distributed according to the terms of the
 * GNU General Public License version 2 or any later version.
 */

#ifndef UTIL_H_
#define UTIL_H_

/* Let GCC/Clang check printf-style arguments of abortmsg()/debugmsg().
 * On other compilers the macro must still expand (to nothing), otherwise
 * the prototypes below would not compile at all. */
#ifdef __GNUC__
#define PRINTF_FORMAT_ __attribute__((format(printf, 1, 2)))
#else
#define PRINTF_FORMAT_
#endif

/* print "chg: abort: ..." to stderr and exit(255); never returns */
void abortmsg(const char *fmt, ...) PRINTF_FORMAT_;

void enabledebugmsg(void);
/* print "chg: debug: ..." to stderr if enabledebugmsg() was called */
void debugmsg(const char *fmt, ...) PRINTF_FORMAT_;

/* malloc/realloc wrappers that abort on allocation failure */
void *mallocx(size_t size);
void *reallocx(void *ptr, size_t size);

/* run cmd via /bin/sh -c in cwd with envp; returns subprocess-style status */
int runshellcmd(const char *cmd, const char *envp[], const char *cwd);

#endif  /* UTIL_H_ */
--- a/contrib/debugcmdserver.py Sun Mar 13 02:29:11 2016 +0100 +++ b/contrib/debugcmdserver.py Tue Mar 15 14:10:46 2016 -0700 @@ -7,10 +7,12 @@ # $ ./hg serve --cmds pipe | ./contrib/debugcmdserver.py - # o, 52 -> 'capabilities: getencoding runcommand\nencoding: UTF-8' -import sys, struct +from __future__ import absolute_import, print_function +import struct +import sys if len(sys.argv) != 2: - print 'usage: debugcmdserver.py FILE' + print('usage: debugcmdserver.py FILE') sys.exit(1) outputfmt = '>cI'
--- a/contrib/debugshell.py Sun Mar 13 02:29:11 2016 +0100 +++ b/contrib/debugshell.py Tue Mar 15 14:10:46 2016 -0700 @@ -1,9 +1,10 @@ # debugshell extension """a python shell with repo, changelog & manifest objects""" -import sys +from __future__ import absolute_import +import code import mercurial -import code +import sys from mercurial import ( cmdutil, demandimport,
--- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/contrib/docker/ubuntu-trusty Tue Mar 15 14:10:46 2016 -0700 @@ -0,0 +1,11 @@ +FROM ubuntu:trusty +RUN apt-get update && apt-get install -y \ + build-essential \ + debhelper \ + dh-python \ + devscripts \ + python \ + python-all-dev \ + python-docutils \ + zip \ + unzip
--- a/contrib/fixpax.py Sun Mar 13 02:29:11 2016 +0100 +++ b/contrib/fixpax.py Tue Mar 15 14:10:46 2016 -0700 @@ -11,7 +11,10 @@ *.mpkg/Contents/Packages/*.pkg/Contents/Archive.pax.gz """ -import sys, os, gzip +from __future__ import absolute_import, print_function +import gzip +import os +import sys def fixpax(iname, oname): i = gzip.GzipFile(iname) @@ -55,7 +58,7 @@ if __name__ == '__main__': for iname in sys.argv[1:]: - print 'fixing file ownership in %s' % iname + print('fixing file ownership in %s' % iname) oname = sys.argv[1] + '.tmp' fixpax(iname, oname) os.rename(oname, iname)
--- a/contrib/hg-ssh Sun Mar 13 02:29:11 2016 +0100 +++ b/contrib/hg-ssh Tue Mar 15 14:10:46 2016 -0700 @@ -52,7 +52,7 @@ orig_cmd = os.getenv('SSH_ORIGINAL_COMMAND', '?') try: cmdargv = shlex.split(orig_cmd) - except ValueError, e: + except ValueError as e: sys.stderr.write('Illegal command "%s": %s\n' % (orig_cmd, e)) sys.exit(255) @@ -77,7 +77,7 @@ sys.exit(255) def rejectpush(ui, **kwargs): - ui.warn("Permission denied\n") + ui.warn(("Permission denied\n")) # mercurial hooks use unix process conventions for hook return values # so a truthy return means failure return True
--- a/contrib/hgclient.py Sun Mar 13 02:29:11 2016 +0100 +++ b/contrib/hgclient.py Tue Mar 15 14:10:46 2016 -0700 @@ -1,6 +1,14 @@ # A minimal client for Mercurial's command server -import os, sys, signal, struct, socket, subprocess, time, cStringIO +from __future__ import absolute_import, print_function +import cStringIO +import os +import signal +import socket +import struct +import subprocess +import sys +import time def connectpipe(path=None): cmdline = ['hg', 'serve', '--cmdserver', 'pipe'] @@ -69,7 +77,7 @@ def runcommand(server, args, output=sys.stdout, error=sys.stderr, input=None, outfilter=lambda x: x): - print '*** runcommand', ' '.join(args) + print('*** runcommand', ' '.join(args)) sys.stdout.flush() server.stdin.write('runcommand\n') writeblock(server, '\0'.join(args)) @@ -92,10 +100,10 @@ elif ch == 'r': ret, = struct.unpack('>i', data) if ret != 0: - print ' [%d]' % ret + print(' [%d]' % ret) return ret else: - print "unexpected channel %c: %r" % (ch, data) + print("unexpected channel %c: %r" % (ch, data)) if ch.isupper(): return
--- a/contrib/hgfixes/fix_bytes.py Sun Mar 13 02:29:11 2016 +0100 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 @@ -1,98 +0,0 @@ -"""Fixer that changes plain strings to bytes strings.""" - -import re - -from lib2to3 import fixer_base -from lib2to3.pgen2 import token -from lib2to3.fixer_util import Name -from lib2to3.pygram import python_symbols as syms - -_re = re.compile(r'[rR]?[\'\"]') - -# XXX: Implementing a blacklist in 2to3 turned out to be more troublesome than -# blacklisting some modules inside the fixers. So, this is what I came with. - -blacklist = ('mercurial/demandimport.py', - 'mercurial/py3kcompat.py', # valid python 3 already - 'mercurial/i18n.py', - ) - -def isdocstring(node): - def isclassorfunction(ancestor): - symbols = (syms.funcdef, syms.classdef) - # if the current node is a child of a function definition, a class - # definition or a file, then it is a docstring - if ancestor.type == syms.simple_stmt: - try: - while True: - if ancestor.type in symbols: - return True - ancestor = ancestor.parent - except AttributeError: - return False - return False - - def ismodule(ancestor): - # Our child is a docstring if we are a simple statement, and our - # ancestor is file_input. In other words, our child is a lone string in - # the source file. 
- try: - if (ancestor.type == syms.simple_stmt and - ancestor.parent.type == syms.file_input): - return True - except AttributeError: - return False - - def isdocassignment(ancestor): - # Assigning to __doc__, definitely a string - try: - while True: - if (ancestor.type == syms.expr_stmt and - Name('__doc__') in ancestor.children): - return True - ancestor = ancestor.parent - except AttributeError: - return False - - if ismodule(node.parent) or \ - isdocassignment(node.parent) or \ - isclassorfunction(node.parent): - return True - return False - -def shouldtransform(node): - specialnames = ['__main__'] - - if node.value in specialnames: - return False - - ggparent = node.parent.parent.parent - sggparent = str(ggparent) - - if 'getattr' in sggparent or \ - 'hasattr' in sggparent or \ - 'setattr' in sggparent or \ - 'encode' in sggparent or \ - 'decode' in sggparent: - return False - - return True - -class FixBytes(fixer_base.BaseFix): - - PATTERN = 'STRING' - - def transform(self, node, results): - # The filename may be prefixed with a build directory. - if self.filename.endswith(blacklist): - return - if node.type == token.STRING: - if _re.match(node.value): - if isdocstring(node): - return - if not shouldtransform(node): - return - new = node.clone() - new.value = 'b' + new.value - return new -
--- a/contrib/hgfixes/fix_bytesmod.py Sun Mar 13 02:29:11 2016 +0100 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 @@ -1,63 +0,0 @@ -"""Fixer that changes bytes % whatever to a function that actually formats -it.""" - -from lib2to3 import fixer_base -from lib2to3.fixer_util import is_tuple, Call, Comma, Name, touch_import - -# XXX: Implementing a blacklist in 2to3 turned out to be more troublesome than -# blacklisting some modules inside the fixers. So, this is what I came with. - -blacklist = ['mercurial/demandimport.py', - 'mercurial/py3kcompat.py', - 'mercurial/i18n.py', - ] - -def isnumberremainder(formatstr, data): - try: - if data.value.isdigit(): - return True - except AttributeError: - return False - -class FixBytesmod(fixer_base.BaseFix): - # XXX: There's one case (I suppose) I can't handle: when a remainder - # operation like foo % bar is performed, I can't really know what the - # contents of foo and bar are. I believe the best approach is to "correct" - # the to-be-converted code and let bytesformatter handle that case in - # runtime. - PATTERN = ''' - term< formatstr=STRING '%' data=STRING > | - term< formatstr=STRING '%' data=atom > | - term< formatstr=NAME '%' data=any > | - term< formatstr=any '%' data=any > - ''' - - def transform(self, node, results): - for bfn in blacklist: - if self.filename.endswith(bfn): - return - if not self.filename.endswith('mercurial/py3kcompat.py'): - touch_import('mercurial', 'py3kcompat', node=node) - - formatstr = results['formatstr'].clone() - data = results['data'].clone() - formatstr.prefix = '' # remove spaces from start - - if isnumberremainder(formatstr, data): - return - - # We have two possibilities: - # 1- An identifier or name is passed, it is going to be a leaf, thus, we - # just need to copy its value as an argument to the formatter; - # 2- A tuple is explicitly passed. In this case, we're gonna explode it - # to pass to the formatter - # TODO: Check for normal strings. 
They don't need to be translated - - if is_tuple(data): - args = [formatstr, Comma().clone()] + \ - [c.clone() for c in data.children[:]] - else: - args = [formatstr, Comma().clone(), data] - - call = Call(Name('bytesformatter', prefix=' '), args) - return call
--- a/contrib/hgfixes/fix_leftover_imports.py Sun Mar 13 02:29:11 2016 +0100 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 @@ -1,107 +0,0 @@ -"Fixer that translates some APIs ignored by the default 2to3 fixers." - -# FIXME: This fixer has some ugly hacks. Its main design is based on that of -# fix_imports, from lib2to3. Unfortunately, the fix_imports framework only -# changes module names "without dots", meaning it won't work for some changes -# in the email module/package. Thus this fixer was born. I believe that with a -# bit more thinking, a more generic fixer can be implemented, but I'll leave -# that as future work. - -from lib2to3.fixer_util import Name -from lib2to3.fixes import fix_imports - -# This maps the old names to the new names. Note that a drawback of the current -# design is that the dictionary keys MUST have EXACTLY one dot (.) in them, -# otherwise things will break. (If you don't need a module hierarchy, you're -# better of just inherit from fix_imports and overriding the MAPPING dict.) - -MAPPING = {'email.Utils': 'email.utils', - 'email.Errors': 'email.errors', - 'email.Header': 'email.header', - 'email.Parser': 'email.parser', - 'email.Encoders': 'email.encoders', - 'email.MIMEText': 'email.mime.text', - 'email.MIMEBase': 'email.mime.base', - 'email.Generator': 'email.generator', - 'email.MIMEMultipart': 'email.mime.multipart', -} - -def alternates(members): - return "(" + "|".join(map(repr, members)) + ")" - -def build_pattern(mapping=MAPPING): - packages = {} - for key in mapping: - # What we are doing here is the following: with dotted names, we'll - # have something like package_name <trailer '.' module>. Then, we are - # making a dictionary to copy this structure. For example, if - # mapping={'A.B': 'a.b', 'A.C': 'a.c'}, it will generate the dictionary - # {'A': ['b', 'c']} to, then, generate something like "A <trailer '.' - # ('b' | 'c')". 
- name = key.split('.') - prefix = name[0] - if prefix in packages: - packages[prefix].append(name[1:][0]) - else: - packages[prefix] = name[1:] - - mod_list = ' | '.join(["'%s' '.' ('%s')" % - (key, "' | '".join(packages[key])) for key in packages]) - mod_list = '(' + mod_list + ' )' - - yield """name_import=import_name< 'import' module_name=dotted_name< %s > > - """ % mod_list - - yield """name_import=import_name< 'import' - multiple_imports=dotted_as_names< any* - module_name=dotted_name< %s > - any* > - >""" % mod_list - - packs = ' | '.join(["'%s' trailer<'.' ('%s')>" % (key, - "' | '".join(packages[key])) for key in packages]) - - yield "power< package=(%s) trailer<'.' any > any* >" % packs - -class FixLeftoverImports(fix_imports.FixImports): - # We want to run this fixer after fix_import has run (this shouldn't matter - # for hg, though, as setup3k prefers to run the default fixers first) - mapping = MAPPING - - def build_pattern(self): - return "|".join(build_pattern(self.mapping)) - - def transform(self, node, results): - # Mostly copied from fix_imports.py - import_mod = results.get("module_name") - if import_mod: - try: - mod_name = import_mod.value - except AttributeError: - # XXX: A hack to remove whitespace prefixes and suffixes - mod_name = str(import_mod).strip() - new_name = self.mapping[mod_name] - import_mod.replace(Name(new_name, prefix=import_mod.prefix)) - if "name_import" in results: - # If it's not a "from x import x, y" or "import x as y" import, - # marked its usage to be replaced. - self.replace[mod_name] = new_name - if "multiple_imports" in results: - # This is a nasty hack to fix multiple imports on a line (e.g., - # "import StringIO, urlparse"). The problem is that I can't - # figure out an easy way to make a pattern recognize the keys of - # MAPPING randomly sprinkled in an import statement. - results = self.match(node) - if results: - self.transform(node, results) - else: - # Replace usage of the module. 
- # Now this is, mostly, a hack - bare_name = results["package"][0] - bare_name_text = ''.join(map(str, results['package'])).strip() - new_name = self.replace.get(bare_name_text) - prefix = results['package'][0].prefix - if new_name: - bare_name.replace(Name(new_name, prefix=prefix)) - results["package"][1].replace(Name('')) -
--- a/contrib/import-checker.py Sun Mar 13 02:29:11 2016 +0100 +++ b/contrib/import-checker.py Tue Mar 15 14:10:46 2016 -0700 @@ -366,7 +366,7 @@ fromlocal = fromlocalfunc(module, localmods) # Whether a local/non-stdlib import has been performed. - seenlocal = False + seenlocal = None # Whether a relative, non-symbol import has been seen. seennonsymbolrelative = False # The last name to be imported (for sorting). @@ -403,10 +403,11 @@ # stdlib imports should be before local imports. stdlib = name in stdlib_modules if stdlib and seenlocal and node.col_offset == root_col_offset: - yield msg('stdlib import follows local import: %s', name) + yield msg('stdlib import "%s" follows local import: %s', + name, seenlocal) if not stdlib: - seenlocal = True + seenlocal = name # Import of sibling modules should use relative imports. topname = name.split('.')[0] @@ -437,7 +438,7 @@ if not fullname or fullname in stdlib_modules: yield msg('relative import of stdlib module') else: - seenlocal = True + seenlocal = fullname # Direct symbol import is only allowed from certain modules and # must occur before non-symbol imports. @@ -494,10 +495,6 @@ """Given some python source, verify that stdlib imports are done in separate statements from relative local module imports. - Observing this limitation is important as it works around an - annoying lib2to3 bug in relative import rewrites: - http://bugs.python.org/issue19510. - >>> list(verify_stdlib_on_own_line(ast.parse('import sys, foo'))) [('mixed imports\\n stdlib: sys\\n relative: foo', 1)] >>> list(verify_stdlib_on_own_line(ast.parse('import sys, os')))
--- a/contrib/memory.py Sun Mar 13 02:29:11 2016 +0100 +++ b/contrib/memory.py Tue Mar 15 14:10:46 2016 -0700 @@ -11,6 +11,7 @@ prints it to ``stderr`` on exit. ''' +from __future__ import absolute_import import atexit def memusage(ui):
--- a/contrib/revsetbenchmarks.py Sun Mar 13 02:29:11 2016 +0100 +++ b/contrib/revsetbenchmarks.py Tue Mar 15 14:10:46 2016 -0700 @@ -63,7 +63,7 @@ return parseoutput(output) except CalledProcessError as exc: print >> sys.stderr, 'abort: cannot run revset benchmark: %s' % exc.cmd - if exc.output is None: + if getattr(exc, 'output', None) is None: # no output before 2.7 print >> sys.stderr, '(no output)' else: print >> sys.stderr, exc.output
--- a/contrib/showstack.py Sun Mar 13 02:29:11 2016 +0100 +++ b/contrib/showstack.py Tue Mar 15 14:10:46 2016 -0700 @@ -2,7 +2,10 @@ # # binds to both SIGQUIT (Ctrl-\) and SIGINFO (Ctrl-T on BSDs) -import sys, signal, traceback +from __future__ import absolute_import +import signal +import sys +import traceback def sigshow(*args): sys.stderr.write("\n")
--- a/contrib/simplemerge Sun Mar 13 02:29:11 2016 +0100 +++ b/contrib/simplemerge Tue Mar 15 14:10:46 2016 -0700 @@ -47,7 +47,7 @@ opts = {} try: args = fancyopts.fancyopts(sys.argv[1:], options, opts) - except fancyopts.getopt.GetoptError, e: + except fancyopts.getopt.GetoptError as e: raise ParseError(e) if opts['help']: showhelp() @@ -55,11 +55,11 @@ if len(args) != 3: raise ParseError(_('wrong number of arguments')) sys.exit(simplemerge.simplemerge(ui.ui(), *args, **opts)) -except ParseError, e: +except ParseError as e: sys.stdout.write("%s: %s\n" % (sys.argv[0], e)) showhelp() sys.exit(1) -except error.Abort, e: +except error.Abort as e: sys.stderr.write("abort: %s\n" % e) sys.exit(255) except KeyboardInterrupt:
--- a/contrib/win32/hgwebdir_wsgi.py Sun Mar 13 02:29:11 2016 +0100 +++ b/contrib/win32/hgwebdir_wsgi.py Tue Mar 15 14:10:46 2016 -0700 @@ -1,66 +1,104 @@ # An example WSGI script for IIS/isapi-wsgi to export multiple hgweb repos -# Copyright 2010 Sune Foldager <cryo@cyanite.org> +# Copyright 2010-2016 Sune Foldager <cyano@me.com> # # This software may be used and distributed according to the terms of the # GNU General Public License version 2 or any later version. # # Requirements: -# - Python 2.6 -# - PyWin32 build 214 or newer -# - Mercurial installed from source (python setup.py install) -# - IIS 7 -# -# Earlier versions will in general work as well, but the PyWin32 version is -# necessary for win32traceutil to work correctly. +# - Python 2.7, preferably 64 bit +# - PyWin32 for Python 2.7 (32 or 64 bit) +# - Mercurial installed from source (python setup.py install) or download the +# python module installer from https://www.mercurial-scm.org/wiki/Download +# - IIS 7 or newer # # # Installation and use: # -# - Download the isapi-wsgi source and run python setup.py install: -# http://code.google.com/p/isapi-wsgi/ +# - Download or clone the isapi-wsgi source and run python setup.py install. +# https://github.com/hexdump42/isapi-wsgi +# +# - Create a directory to hold the shim dll, config files etc. This can reside +# inside the standard IIS directory, C:\inetpub, or anywhere else. Copy this +# script there. # # - Run this script (i.e. python hgwebdir_wsgi.py) to get a shim dll. The # shim is identical for all scripts, so you can just copy and rename one -# from an earlier run, if you wish. +# from an earlier run, if you wish. The shim needs to reside in the same +# directory as this script. +# +# - Start IIS manager and create a new app pool: +# .NET CLR Version: No Managed Code +# Advanced Settings: Enable 32 Bit Applications, if using 32 bit Python. +# You can adjust the identity and maximum worker processes if you wish. 
This +# setup works fine with multiple worker processes. # -# - Setup an IIS application where your hgwebdir is to be served from. -# On 64-bit systems, make sure it's assigned a 32-bit app pool. +# - Create an IIS application where your hgwebdir is to be served from. +# Assign it the app pool you just created and point its physical path to the +# directory you created. +# +# - In the application, remove all handler mappings and setup a wildcard script +# handler mapping of type IsapiModule with the shim dll as its executable. +# This file MUST reside in the same directory as the shim. The easiest way +# to do all this is to close IIS manager, place a web.config file in your +# directory and start IIS manager again. The file should contain: # -# - In the application, setup a wildcard script handler mapping of type -# IsapiModule with the shim dll as its executable. This file MUST reside -# in the same directory as the shim. Remove all other handlers, if you wish. +# <?xml version="1.0" encoding="UTF-8"?> +# <configuration> +# <system.webServer> +# <handlers accessPolicy="Read, Script"> +# <clear /> +# <add name="hgwebdir" path="*" verb="*" modules="IsapiModule" +# scriptProcessor="C:\your\directory\_hgwebdir_wsgi.dll" +# resourceType="Unspecified" requireAccess="None" +# preCondition="bitness64" /> +# </handlers> +# </system.webServer> +# </configuration> +# +# Where "bitness64" should be replaced with "bitness32" for 32 bit Python. +# +# - Edit ISAPI And CGI Restrictions on the web server (global setting). Add a +# restriction pointing to your shim dll and allow it to run. # -# - Make sure the ISAPI and CGI restrictions (configured globally on the -# web server) includes the shim dll, to allow it to run. +# - Create a configuration file in your directory and adjust the configuration +# variables below to match your needs. 
Example configuration: +# +# [web] +# style = gitweb +# push_ssl = false +# allow_push = * +# encoding = utf8 # -# - Adjust the configuration variables below to match your needs. +# [server] +# validate = true +# +# [paths] +# repo1 = c:\your\directory\repo1 +# repo2 = c:\your\directory\repo2 +# +# - Restart the web server and see if things are running. # # Configuration file location -hgweb_config = r'c:\src\iis\hg\hgweb.config' +hgweb_config = r'c:\your\directory\wsgi.config' # Global settings for IIS path translation path_strip = 0 # Strip this many path elements off (when using url rewrite) path_prefix = 1 # This many path elements are prefixes (depends on the # virtual path of the IIS application). +from __future__ import absolute_import import sys # Adjust python path if this is not a system-wide install -#sys.path.insert(0, r'c:\path\to\python\lib') +#sys.path.insert(0, r'C:\your\custom\hg\build\lib.win32-2.7') # Enable tracing. Run 'python -m win32traceutil' to debug if getattr(sys, 'isapidllhandle', None) is not None: import win32traceutil win32traceutil.SetupForPrint # silence unused import warning -# To serve pages in local charset instead of UTF-8, remove the two lines below -import os -os.environ['HGENCODING'] = 'UTF-8' - - import isapi_wsgi -from mercurial import demandimport; demandimport.enable() from mercurial.hgweb.hgwebdir_mod import hgwebdir # Example tweak: Replace isapi_wsgi's handler to provide better error message
--- a/contrib/wix/help.wxs Sun Mar 13 02:29:11 2016 +0100 +++ b/contrib/wix/help.wxs Tue Mar 15 14:10:46 2016 -0700 @@ -40,6 +40,7 @@ <Component Id="help.internals" Guid="$(var.help.internals.guid)" Win64='$(var.IsX64)'> <File Id="internals.bundles.txt" Name="bundles.txt" KeyPath="yes" /> <File Id="internals.changegroups.txt" Name="changegroups.txt" /> + <File Id="internals.requirements.txt" Name="requirements.txt" /> <File Id="internals.revlogs.txt" Name="revlogs.txt" /> </Component> </Directory>
--- a/doc/docchecker Sun Mar 13 02:29:11 2016 +0100 +++ b/doc/docchecker Tue Mar 15 14:10:46 2016 -0700 @@ -14,41 +14,41 @@ hg_cramped = re.compile(r'\w:hg:`') def check(line): - if hg_backtick.search(line): - print(line) - print("""warning: please avoid nesting ' in :hg:`...`""") - if hg_cramped.search(line): - print(line) - print('warning: please have a space before :hg:') + if hg_backtick.search(line): + print(line) + print("""warning: please avoid nesting ' in :hg:`...`""") + if hg_cramped.search(line): + print(line) + print('warning: please have a space before :hg:') def work(file): - (llead, lline) = ('', '') + (llead, lline) = ('', '') - for line in file: - # this section unwraps lines - match = leadingline.match(line) - if not match: - check(lline) - (llead, lline) = ('', '') - continue + for line in file: + # this section unwraps lines + match = leadingline.match(line) + if not match: + check(lline) + (llead, lline) = ('', '') + continue - lead, line = match.group(1), match.group(2) - if (lead == llead): - if (lline != ''): - lline += ' ' + line - else: - lline = line - else: - check(lline) - (llead, lline) = (lead, line) - check(lline) + lead, line = match.group(1), match.group(2) + if (lead == llead): + if (lline != ''): + lline += ' ' + line + else: + lline = line + else: + check(lline) + (llead, lline) = (lead, line) + check(lline) def main(): - for f in sys.argv[1:]: - try: - with open(f) as file: - work(file) - except: - print("failed to process %s" % f) + for f in sys.argv[1:]: + try: + with open(f) as file: + work(file) + except BaseException as e: + print("failed to process %s: %s" % (f, e)) main()
--- a/doc/runrst Sun Mar 13 02:29:11 2016 +0100 +++ b/doc/runrst Tue Mar 15 14:10:46 2016 -0700 @@ -30,10 +30,22 @@ linktext = nodes.literal(rawtext, text) parts = text.split() cmd, args = parts[1], parts[2:] + refuri = "hg.1.html#%s" % cmd if cmd == 'help' and args: - cmd = args[0] # link to 'dates' for 'hg help dates' + if args[0] == 'config': + # :hg:`help config` + refuri = "hgrc.5.html" + elif args[0].startswith('config.'): + # :hg:`help config.SECTION...` + refuri = "hgrc.5.html#%s" % args[0].split('.', 2)[1] + elif len(args) >= 2 and args[0] == '-c': + # :hg:`help -c COMMAND ...` is equivalent to :hg:`COMMAND` + # (mainly for :hg:`help -c config`) + refuri = "hg.1.html#%s" % args[1] + else: + refuri = "hg.1.html#%s" % args[0] node = nodes.reference(rawtext, '', linktext, - refuri="hg.1.html#%s" % cmd) + refuri=refuri) return [node], [] roles.register_local_role("hg", role_hg)
--- a/hgext/__init__.py Sun Mar 13 02:29:11 2016 +0100 +++ b/hgext/__init__.py Tue Mar 15 14:10:46 2016 -0700 @@ -1,1 +1,3 @@ -# placeholder +from __future__ import absolute_import +import pkgutil +__path__ = pkgutil.extend_path(__path__, __name__)
--- a/hgext/acl.py Sun Mar 13 02:29:11 2016 +0100 +++ b/hgext/acl.py Tue Mar 15 14:10:46 2016 -0700 @@ -191,9 +191,17 @@ ''' +from __future__ import absolute_import + +import getpass +import urllib + from mercurial.i18n import _ -from mercurial import util, match, error -import getpass, urllib +from mercurial import ( + error, + match, + util, +) # Note for extension authors: ONLY specify testedwith = 'internal' for # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
--- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/hgext/automv.py Tue Mar 15 14:10:46 2016 -0700 @@ -0,0 +1,100 @@ +# automv.py +# +# Copyright 2013-2016 Facebook, Inc. +# +# This software may be used and distributed according to the terms of the +# GNU General Public License version 2 or any later version. +"""Check for unrecorded moves at commit time (EXPERIMENTAL) + +This extension checks at commit/amend time if any of the committed files +comes from an unrecorded mv. + +The threshold at which a file is considered a move can be set with the +``automv.similarity`` config option. This option takes a percentage between 0 +(disabled) and 100 (files must be identical), the default is 95. + +""" + +# Using 95 as a default similarity is based on an analysis of the mercurial +# repositories of the cpython, mozilla-central & mercurial repositories, as +# well as 2 very large facebook repositories. At 95 50% of all potential +# missed moves would be caught, as well as correspond with 87% of all +# explicitly marked moves. Together, 80% of moved files are 95% similar or +# more. +# +# See http://markmail.org/thread/5pxnljesvufvom57 for context. 
+ +from __future__ import absolute_import + +from mercurial import ( + commands, + copies, + error, + extensions, + scmutil, + similar +) +from mercurial.i18n import _ + +def extsetup(ui): + entry = extensions.wrapcommand( + commands.table, 'commit', mvcheck) + entry[1].append( + ('', 'no-automv', None, + _('disable automatic file move detection'))) + +def mvcheck(orig, ui, repo, *pats, **opts): + """Hook to check for moves at commit time""" + renames = None + disabled = opts.pop('no_automv', False) + if not disabled: + threshold = ui.configint('automv', 'similarity', 95) + if not 0 <= threshold <= 100: + raise error.Abort(_('automv.similarity must be between 0 and 100')) + if threshold > 0: + match = scmutil.match(repo[None], pats, opts) + added, removed = _interestingfiles(repo, match) + renames = _findrenames(repo, match, added, removed, + threshold / 100.0) + + with repo.wlock(): + if renames is not None: + scmutil._markchanges(repo, (), (), renames) + return orig(ui, repo, *pats, **opts) + +def _interestingfiles(repo, matcher): + """Find what files were added or removed in this commit. + + Returns a tuple of two lists: (added, removed). Only files not *already* + marked as moved are included in the added list. + + """ + stat = repo.status(match=matcher) + added = stat[1] + removed = stat[2] + + copy = copies._forwardcopies(repo['.'], repo[None], matcher) + # remove the copy files for which we already have copy info + added = [f for f in added if f not in copy] + + return added, removed + +def _findrenames(repo, matcher, added, removed, similarity): + """Find what files in added are really moved files. + + Any file named in removed that is at least similarity% similar to a file + in added is seen as a rename. 
+ + """ + renames = {} + if similarity > 0: + for src, dst, score in similar.findrenames( + repo, added, removed, similarity): + if repo.ui.verbose: + repo.ui.status( + _('detected move of %s as %s (%d%% similar)\n') % ( + matcher.rel(src), matcher.rel(dst), score * 100)) + renames[dst] = src + if renames: + repo.ui.status(_('detected move of %d files\n') % len(renames)) + return renames
--- a/hgext/blackbox.py Sun Mar 13 02:29:11 2016 +0100 +++ b/hgext/blackbox.py Tue Mar 15 14:10:46 2016 -0700 @@ -10,10 +10,16 @@ Logs event information to .hg/blackbox.log to help debug and diagnose problems. The events that get logged can be configured via the blackbox.track config key. + Examples:: [blackbox] track = * + # dirty is *EXPENSIVE* (slow); + # each log entry indicates `+` if the repository is dirty, like :hg:`id`. + dirty = True + # record the source of log messages + logsource = True [blackbox] track = command, commandfinish, commandexception, exthook, pythonhook @@ -29,9 +35,19 @@ """ -from mercurial import util, cmdutil +from __future__ import absolute_import + +import errno +import re + from mercurial.i18n import _ -import errno, os, re +from mercurial.node import hex + +from mercurial import ( + cmdutil, + ui as uimod, + util, +) cmdtable = {} command = cmdutil.command(cmdtable) @@ -40,10 +56,47 @@ # be specifying the version(s) of Mercurial they are tested with, or # leave the attribute unspecified. 
testedwith = 'internal' -lastblackbox = None +lastui = None + +filehandles = {} + +def _openlog(vfs): + path = vfs.join('blackbox.log') + if path in filehandles: + return filehandles[path] + filehandles[path] = fp = vfs('blackbox.log', 'a') + return fp + +def _closelog(vfs): + path = vfs.join('blackbox.log') + fp = filehandles[path] + del filehandles[path] + fp.close() def wrapui(ui): class blackboxui(ui.__class__): + def __init__(self, src=None): + super(blackboxui, self).__init__(src) + if src is None: + self._partialinit() + else: + self._bbfp = src._bbfp + self._bbinlog = False + self._bbrepo = src._bbrepo + self._bbvfs = src._bbvfs + + def _partialinit(self): + if util.safehasattr(self, '_bbvfs'): + return + self._bbfp = None + self._bbinlog = False + self._bbrepo = None + self._bbvfs = None + + def copy(self): + self._partialinit() + return self.__class__(self) + @util.propertycache def track(self): return self.configlist('blackbox', 'track', ['*']) @@ -51,76 +104,109 @@ def _openlogfile(self): def rotate(oldpath, newpath): try: - os.unlink(newpath) + self._bbvfs.unlink(newpath) except OSError as err: if err.errno != errno.ENOENT: self.debug("warning: cannot remove '%s': %s\n" % (newpath, err.strerror)) try: if newpath: - os.rename(oldpath, newpath) + self._bbvfs.rename(oldpath, newpath) except OSError as err: if err.errno != errno.ENOENT: self.debug("warning: cannot rename '%s' to '%s': %s\n" % (newpath, oldpath, err.strerror)) - fp = self._bbopener('blackbox.log', 'a') + fp = _openlog(self._bbvfs) maxsize = self.configbytes('blackbox', 'maxsize', 1048576) if maxsize > 0: - st = os.fstat(fp.fileno()) + st = self._bbvfs.fstat(fp) if st.st_size >= maxsize: path = fp.name - fp.close() + _closelog(self._bbvfs) maxfiles = self.configint('blackbox', 'maxfiles', 7) for i in xrange(maxfiles - 1, 1, -1): rotate(oldpath='%s.%d' % (path, i - 1), newpath='%s.%d' % (path, i)) rotate(oldpath=path, newpath=maxfiles > 0 and path + '.1') - fp = self._bbopener('blackbox.log', 
'a') + fp = _openlog(self._bbvfs) return fp + def _bbwrite(self, fmt, *args): + self._bbfp.write(fmt % args) + self._bbfp.flush() + def log(self, event, *msg, **opts): - global lastblackbox + global lastui super(blackboxui, self).log(event, *msg, **opts) + self._partialinit() if not '*' in self.track and not event in self.track: return - if util.safehasattr(self, '_blackbox'): - blackbox = self._blackbox - elif util.safehasattr(self, '_bbopener'): + if self._bbfp: + ui = self + elif self._bbvfs: try: - self._blackbox = self._openlogfile() + self._bbfp = self._openlogfile() except (IOError, OSError) as err: self.debug('warning: cannot write to blackbox.log: %s\n' % err.strerror) - del self._bbopener - self._blackbox = None - blackbox = self._blackbox + del self._bbvfs + self._bbfp = None + ui = self else: # certain ui instances exist outside the context of # a repo, so just default to the last blackbox that # was seen. - blackbox = lastblackbox + ui = lastui - if blackbox: + if not ui or not ui._bbfp: + return + if not lastui or ui._bbrepo: + lastui = ui + if ui._bbinlog: + # recursion guard + return + try: + ui._bbinlog = True date = util.datestr(None, '%Y/%m/%d %H:%M:%S') user = util.getuser() - pid = str(os.getpid()) + pid = str(util.getpid()) formattedmsg = msg[0] % msg[1:] + rev = '(unknown)' + changed = '' + if ui._bbrepo: + ctx = ui._bbrepo[None] + parents = ctx.parents() + rev = ('+'.join([hex(p.node()) for p in parents])) + if (ui.configbool('blackbox', 'dirty', False) and ( + any(ui._bbrepo.status()) or + any(ctx.sub(s).dirty() for s in ctx.substate) + )): + changed = '+' + if ui.configbool('blackbox', 'logsource', False): + src = ' [%s]' % event + else: + src = '' try: - blackbox.write('%s %s (%s)> %s' % - (date, user, pid, formattedmsg)) + ui._bbwrite('%s %s @%s%s (%s)%s> %s', + date, user, rev, changed, pid, src, formattedmsg) except IOError as err: self.debug('warning: cannot write to blackbox.log: %s\n' % err.strerror) - lastblackbox = blackbox + 
finally: + ui._bbinlog = False def setrepo(self, repo): - self._bbopener = repo.vfs + self._bbfp = None + self._bbinlog = False + self._bbrepo = repo + self._bbvfs = repo.vfs ui.__class__ = blackboxui + uimod.ui = blackboxui def uisetup(ui): wrapui(ui) @@ -143,12 +229,12 @@ '''view the recent repository events ''' - if not os.path.exists(repo.join('blackbox.log')): + if not repo.vfs.exists('blackbox.log'): return limit = opts.get('limit') - blackbox = repo.vfs('blackbox.log', 'r') - lines = blackbox.read().split('\n') + fp = repo.vfs('blackbox.log', 'r') + lines = fp.read().split('\n') count = 0 output = []
--- a/hgext/bugzilla.py Sun Mar 13 02:29:11 2016 +0100 +++ b/hgext/bugzilla.py Tue Mar 15 14:10:46 2016 -0700 @@ -277,10 +277,21 @@ Changeset commit comment. Bug 1234. ''' +from __future__ import absolute_import + +import re +import time +import urlparse +import xmlrpclib + from mercurial.i18n import _ from mercurial.node import short -from mercurial import cmdutil, mail, util, error -import re, time, urlparse, xmlrpclib +from mercurial import ( + cmdutil, + error, + mail, + util, +) # Note for extension authors: ONLY specify testedwith = 'internal' for # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
--- a/hgext/censor.py Sun Mar 13 02:29:11 2016 +0100 +++ b/hgext/censor.py Tue Mar 15 14:10:46 2016 -0700 @@ -25,10 +25,20 @@ revisions if they are allowed by the "censor.policy=ignore" config option. """ +from __future__ import absolute_import + +from mercurial.i18n import _ from mercurial.node import short -from mercurial import cmdutil, error, filelog, revlog, scmutil, util -from mercurial.i18n import _ -from mercurial import lock as lockmod + +from mercurial import ( + cmdutil, + error, + filelog, + lock as lockmod, + revlog, + scmutil, + util, +) cmdtable = {} command = cmdutil.command(cmdtable)
--- a/hgext/chgserver.py Sun Mar 13 02:29:11 2016 +0100 +++ b/hgext/chgserver.py Tue Mar 15 14:10:46 2016 -0700 @@ -22,18 +22,33 @@ 'setenv' command replace os.environ completely -'SIGHUP' signal - reload configuration files +'setumask' command + set umask + +'validate' command + reload the config and check if the server is up to date + +Config +------ + +:: + + [chgserver] + idletimeout = 3600 # seconds, after which an idle server will exit + skiphash = False # whether to skip config or env change checks """ from __future__ import absolute_import import SocketServer import errno +import inspect import os import re -import signal import struct +import sys +import threading +import time import traceback from mercurial.i18n import _ @@ -44,6 +59,7 @@ commandserver, dispatch, error, + extensions, osutil, util, ) @@ -56,6 +72,108 @@ _log = commandserver.log +def _hashlist(items): + """return sha1 hexdigest for a list""" + return util.sha1(str(items)).hexdigest() + +# sensitive config sections affecting confighash +_configsections = [ + 'extdiff', # uisetup will register new commands + 'extensions', +] + +# sensitive environment variables affecting confighash +_envre = re.compile(r'''\A(?: + CHGHG + |HG.* + |LANG(?:UAGE)? + |LC_.* + |LD_.* + |PATH + |PYTHON.* + |TERM(?:INFO)? + |TZ + )\Z''', re.X) + +def _confighash(ui): + """return a quick hash for detecting config/env changes + + confighash is the hash of sensitive config items and environment variables. + + for chgserver, it is designed that once confighash changes, the server is + not qualified to serve its client and should redirect the client to a new + server. different from mtimehash, confighash change will not mark the + server outdated and exit since the user can have different configs at the + same time. 
+ """ + sectionitems = [] + for section in _configsections: + sectionitems.append(ui.configitems(section)) + sectionhash = _hashlist(sectionitems) + envitems = [(k, v) for k, v in os.environ.iteritems() if _envre.match(k)] + envhash = _hashlist(sorted(envitems)) + return sectionhash[:6] + envhash[:6] + +def _getmtimepaths(ui): + """get a list of paths that should be checked to detect change + + The list will include: + - extensions (will not cover all files for complex extensions) + - mercurial/__version__.py + - python binary + """ + modules = [m for n, m in extensions.extensions(ui)] + try: + from mercurial import __version__ + modules.append(__version__) + except ImportError: + pass + files = [sys.executable] + for m in modules: + try: + files.append(inspect.getabsfile(m)) + except TypeError: + pass + return sorted(set(files)) + +def _mtimehash(paths): + """return a quick hash for detecting file changes + + mtimehash calls stat on given paths and calculate a hash based on size and + mtime of each file. mtimehash does not read file content because reading is + expensive. therefore it's not 100% reliable for detecting content changes. + it's possible to return different hashes for same file contents. + it's also possible to return a same hash for different file contents for + some carefully crafted situation. + + for chgserver, it is designed that once mtimehash changes, the server is + considered outdated immediately and should no longer provide service. + """ + def trystat(path): + try: + st = os.stat(path) + return (st.st_mtime, st.st_size) + except OSError: + # could be ENOENT, EPERM etc. 
not fatal in any case + pass + return _hashlist(map(trystat, paths))[:12] + +class hashstate(object): + """a structure storing confighash, mtimehash, paths used for mtimehash""" + def __init__(self, confighash, mtimehash, mtimepaths): + self.confighash = confighash + self.mtimehash = mtimehash + self.mtimepaths = mtimepaths + + @staticmethod + def fromui(ui, mtimepaths=None): + if mtimepaths is None: + mtimepaths = _getmtimepaths(ui) + confighash = _confighash(ui) + mtimehash = _mtimehash(mtimepaths) + _log('confighash = %s mtimehash = %s\n' % (confighash, mtimehash)) + return hashstate(confighash, mtimehash, mtimepaths) + # copied from hgext/pager.py:uisetup() def _setuppagercmd(ui, options, cmd): if not ui.formatted(): @@ -139,17 +257,35 @@ return chgui(srcui) -def _renewui(srcui): +def _renewui(srcui, args=None): + if not args: + args = [] + newui = srcui.__class__() for a in ['fin', 'fout', 'ferr', 'environ']: setattr(newui, a, getattr(srcui, a)) if util.safehasattr(srcui, '_csystem'): newui._csystem = srcui._csystem + + # load wd and repo config, copied from dispatch.py + cwds = dispatch._earlygetopt(['--cwd'], args) + cwd = cwds and os.path.realpath(cwds[-1]) or None + rpath = dispatch._earlygetopt(["-R", "--repository", "--repo"], args) + path, newui = dispatch._getlocal(newui, rpath, wd=cwd) + + # internal config: extensions.chgserver + # copy it. it can only be overrided from command line. 
+ newui.setconfig('extensions', 'chgserver', + srcui.config('extensions', 'chgserver'), '--config') + + # command line args + dispatch._parseconfig(newui, dispatch._earlygetopt(['--config'], args)) + # stolen from tortoisehg.util.copydynamicconfig() for section, name, value in srcui.walkconfig(): source = srcui.configsource(section, name) - if ':' in source: - # path:line + if ':' in source or source == '--config': + # path:line or command line continue if source == 'none': # ui.configsource returns 'none' by default @@ -178,7 +314,7 @@ self.channel = channel def __call__(self, cmd, environ, cwd): - args = [util.quotecommand(cmd), cwd or '.'] + args = [util.quotecommand(cmd), os.path.abspath(cwd or '.')] args.extend('%s=%s' % (k, v) for k, v in environ.iteritems()) data = '\0'.join(args) self.out.write(struct.pack('>cI', self.channel, len(data))) @@ -200,11 +336,16 @@ ] class chgcmdserver(commandserver.server): - def __init__(self, ui, repo, fin, fout, sock): + def __init__(self, ui, repo, fin, fout, sock, hashstate, baseaddress): super(chgcmdserver, self).__init__( _newchgui(ui, channeledsystem(fin, fout, 'S')), repo, fin, fout) self.clientsock = sock self._oldios = [] # original (self.ch, ui.fp, fd) before "attachio" + self.hashstate = hashstate + self.baseaddress = baseaddress + if hashstate is not None: + self.capabilities = self.capabilities.copy() + self.capabilities['validate'] = chgcmdserver.validate def cleanup(self): # dispatch._runcatch() does not flush outputs if exception is not @@ -277,30 +418,64 @@ setattr(ui, fn, fp) del self._oldios[:] + def validate(self): + """Reload the config and check if the server is up to date + + Read a list of '\0' separated arguments. + Write a non-empty list of '\0' separated instruction strings or '\0' + if the list is empty. + An instruction string could be either: + - "unlink $path", the client should unlink the path to stop the + outdated server. 
+ - "redirect $path", the client should try to connect to another + server instead. + - "exit $n", the client should exit directly with code n. + This may happen if we cannot parse the config. + """ + args = self._readlist() + try: + self.ui = _renewui(self.ui, args) + except error.ParseError as inst: + dispatch._formatparse(self.ui.warn, inst) + self.ui.flush() + self.cresult.write('exit 255') + return + newhash = hashstate.fromui(self.ui, self.hashstate.mtimepaths) + insts = [] + if newhash.mtimehash != self.hashstate.mtimehash: + addr = _hashaddress(self.baseaddress, self.hashstate.confighash) + insts.append('unlink %s' % addr) + if newhash.confighash != self.hashstate.confighash: + addr = _hashaddress(self.baseaddress, newhash.confighash) + insts.append('redirect %s' % addr) + _log('validate: %s\n' % insts) + self.cresult.write('\0'.join(insts) or '\0') + def chdir(self): """Change current directory Note that the behavior of --cwd option is bit different from this. It does not affect --config parameter. """ - length = struct.unpack('>I', self._read(4))[0] - if not length: + path = self._readstr() + if not path: return - path = self._read(length) _log('chdir to %r\n' % path) os.chdir(path) + def setumask(self): + """Change umask""" + mask = struct.unpack('>I', self._read(4))[0] + _log('setumask %r\n' % mask) + os.umask(mask) + def getpager(self): """Read cmdargs and write pager command to r-channel if enabled If pager isn't enabled, this writes '\0' because channeledoutput does not allow to write empty data. """ - length = struct.unpack('>I', self._read(4))[0] - if not length: - args = [] - else: - args = self._read(length).split('\0') + args = self._readlist() try: cmd, _func, args, options, _cmdoptions = dispatch._parse(self.ui, args) @@ -323,12 +498,9 @@ Note that not all variables can make an effect on the running process. 
""" - length = struct.unpack('>I', self._read(4))[0] - if not length: - return - s = self._read(length) + l = self._readlist() try: - newenv = dict(l.split('=', 1) for l in s.split('\0')) + newenv = dict(s.split('=', 1) for s in l) except ValueError: raise ValueError('unexpected value in setenv request') @@ -349,15 +521,22 @@ capabilities.update({'attachio': attachio, 'chdir': chdir, 'getpager': getpager, - 'setenv': setenv}) + 'setenv': setenv, + 'setumask': setumask}) # copied from mercurial/commandserver.py class _requesthandler(SocketServer.StreamRequestHandler): def handle(self): + # use a different process group from the master process, making this + # process pass kernel "is_current_pgrp_orphaned" check so signals like + # SIGTSTP, SIGTTIN, SIGTTOU are not ignored. + os.setpgid(0, 0) ui = self.server.ui repo = self.server.repo - sv = chgcmdserver(ui, repo, self.rfile, self.wfile, self.connection) + sv = None try: + sv = chgcmdserver(ui, repo, self.rfile, self.wfile, self.connection, + self.server.hashstate, self.server.baseaddress) try: sv.serve() # handle exceptions that may be raised by command server. most of @@ -374,23 +553,119 @@ except: # re-raises # also write traceback to error channel. otherwise client cannot # see it because it is written to server's stderr by default. - traceback.print_exc(file=sv.cerr) + if sv: + cerr = sv.cerr + else: + cerr = commandserver.channeledoutput(self.wfile, 'e') + traceback.print_exc(file=cerr) raise +def _tempaddress(address): + return '%s.%d.tmp' % (address, os.getpid()) + +def _hashaddress(address, hashstr): + return '%s-%s' % (address, hashstr) + +class AutoExitMixIn: # use old-style to comply with SocketServer design + lastactive = time.time() + idletimeout = 3600 # default 1 hour + + def startautoexitthread(self): + # note: the auto-exit check here is cheap enough to not use a thread, + # be done in serve_forever. 
however SocketServer is hook-unfriendly, + # you simply cannot hook serve_forever without copying a lot of code. + # besides, serve_forever's docstring suggests using thread. + thread = threading.Thread(target=self._autoexitloop) + thread.daemon = True + thread.start() + + def _autoexitloop(self, interval=1): + while True: + time.sleep(interval) + if not self.issocketowner(): + _log('%s is not owned, exiting.\n' % self.server_address) + break + if time.time() - self.lastactive > self.idletimeout: + _log('being idle too long. exiting.\n') + break + self.shutdown() + + def process_request(self, request, address): + self.lastactive = time.time() + return SocketServer.ForkingMixIn.process_request( + self, request, address) + + def server_bind(self): + # use a unique temp address so we can stat the file and do ownership + # check later + tempaddress = _tempaddress(self.server_address) + self.socket.bind(tempaddress) + self._socketstat = os.stat(tempaddress) + # rename will replace the old socket file if exists atomically. the + # old server will detect ownership change and exit. + util.rename(tempaddress, self.server_address) + + def issocketowner(self): + try: + stat = os.stat(self.server_address) + return (stat.st_ino == self._socketstat.st_ino and + stat.st_mtime == self._socketstat.st_mtime) + except OSError: + return False + + def unlinksocketfile(self): + if not self.issocketowner(): + return + # it is possible to have a race condition here that we may + # remove another server's socket file. but that's okay + # since that server will detect and exit automatically and + # the client will start a new server on demand. 
+ try: + os.unlink(self.server_address) + except OSError as exc: + if exc.errno != errno.ENOENT: + raise + class chgunixservice(commandserver.unixservice): def init(self): - # drop options set for "hg serve --cmdserver" command - self.ui.setconfig('progress', 'assume-tty', None) - signal.signal(signal.SIGHUP, self._reloadconfig) - class cls(SocketServer.ForkingMixIn, SocketServer.UnixStreamServer): + self._inithashstate() + class cls(AutoExitMixIn, SocketServer.ForkingMixIn, + SocketServer.UnixStreamServer): ui = self.ui repo = self.repo + hashstate = self.hashstate + baseaddress = self.baseaddress self.server = cls(self.address, _requesthandler) - # avoid writing "listening at" message to stdout before attachio - # request, which calls setvbuf() + self.server.idletimeout = self.ui.configint( + 'chgserver', 'idletimeout', self.server.idletimeout) + self.server.startautoexitthread() + self._createsymlink() + + def _inithashstate(self): + self.baseaddress = self.address + if self.ui.configbool('chgserver', 'skiphash', False): + self.hashstate = None + return + self.hashstate = hashstate.fromui(self.ui) + self.address = _hashaddress(self.address, self.hashstate.confighash) - def _reloadconfig(self, signum, frame): - self.ui = self.server.ui = _renewui(self.ui) + def _createsymlink(self): + if self.baseaddress == self.address: + return + tempaddress = _tempaddress(self.baseaddress) + os.symlink(os.path.basename(self.address), tempaddress) + util.rename(tempaddress, self.baseaddress) + + def run(self): + try: + self.server.serve_forever() + finally: + self.server.unlinksocketfile() def uisetup(ui): commandserver._servicemap['chgunix'] = chgunixservice + + # CHGINTERNALMARK is temporarily set by chg client to detect if chg will + # start another chg. drop it to avoid possible side effects. + if 'CHGINTERNALMARK' in os.environ: + del os.environ['CHGINTERNALMARK']
--- a/hgext/children.py Sun Mar 13 02:29:11 2016 +0100 +++ b/hgext/children.py Tue Mar 15 14:10:46 2016 -0700 @@ -14,9 +14,15 @@ "children(REV)"` instead. ''' -from mercurial import cmdutil -from mercurial.commands import templateopts +from __future__ import absolute_import + from mercurial.i18n import _ +from mercurial import ( + cmdutil, + commands, +) + +templateopts = commands.templateopts cmdtable = {} command = cmdutil.command(cmdtable)
--- a/hgext/churn.py Sun Mar 13 02:29:11 2016 +0100 +++ b/hgext/churn.py Tue Mar 15 14:10:46 2016 -0700 @@ -8,11 +8,22 @@ '''command to display statistics about repository history''' +from __future__ import absolute_import + +import datetime +import os +import time + from mercurial.i18n import _ -from mercurial import patch, cmdutil, scmutil, util, commands, error -from mercurial import encoding -import os -import time, datetime +from mercurial import ( + cmdutil, + commands, + encoding, + error, + patch, + scmutil, + util, +) cmdtable = {} command = cmdutil.command(cmdtable) @@ -83,7 +94,8 @@ rate[key] = [r + l for r, l in zip(rate.get(key, (0, 0)), lines)] state['count'] += 1 - ui.progress(_('analyzing'), state['count'], total=len(repo)) + ui.progress(_('analyzing'), state['count'], total=len(repo), + unit=_('revisions')) for ctx in cmdutil.walkchangerevs(repo, m, opts, prep): continue
--- a/hgext/clonebundles.py Sun Mar 13 02:29:11 2016 +0100 +++ b/hgext/clonebundles.py Tue Mar 15 14:10:46 2016 -0700 @@ -162,6 +162,8 @@ Mercurial server when the bundle hosting service fails. """ +from __future__ import absolute_import + from mercurial import ( extensions, wireproto,
--- a/hgext/color.py Sun Mar 13 02:29:11 2016 +0100 +++ b/hgext/color.py Tue Mar 15 14:10:46 2016 -0700 @@ -157,7 +157,6 @@ from mercurial import cmdutil, commands, dispatch, extensions, subrepo, util from mercurial import ui as uimod -from mercurial import templater, error from mercurial.i18n import _ cmdtable = {} @@ -480,29 +479,6 @@ for s in msg.split('\n')]) return msg -def templatelabel(context, mapping, args): - if len(args) != 2: - # i18n: "label" is a keyword - raise error.ParseError(_("label expects two arguments")) - - # add known effects to the mapping so symbols like 'red', 'bold', - # etc. don't need to be quoted - mapping.update(dict([(k, k) for k in _effects])) - - thing = args[1][0](context, mapping, args[1][1]) - - # apparently, repo could be a string that is the favicon? - repo = mapping.get('repo', '') - if isinstance(repo, str): - return thing - - label = args[0][0](context, mapping, args[0][1]) - - thing = templater.stringify(thing) - label = templater.stringify(label) - - return repo.ui.label(thing, label) - def uisetup(ui): if ui.plain(): return @@ -524,8 +500,6 @@ return orig(gitsub, commands, env, stream, cwd) extensions.wrapfunction(dispatch, '_runcommand', colorcmd) extensions.wrapfunction(subrepo.gitsubrepo, '_gitnodir', colorgit) - templatelabel.__doc__ = templater.funcs['label'].__doc__ - templater.funcs['label'] = templatelabel def extsetup(ui): commands.globalopts.append(
--- a/hgext/convert/__init__.py Sun Mar 13 02:29:11 2016 +0100 +++ b/hgext/convert/__init__.py Tue Mar 15 14:10:46 2016 -0700 @@ -7,12 +7,20 @@ '''import revisions from foreign VCS repositories into Mercurial''' -import convcmd -import cvsps -import subversion -from mercurial import cmdutil, templatekw +from __future__ import absolute_import + +from mercurial import ( + cmdutil, + templatekw, +) from mercurial.i18n import _ +from . import ( + convcmd, + cvsps, + subversion, +) + cmdtable = {} command = cmdutil.command(cmdtable) # Note for extension authors: ONLY specify testedwith = 'internal' for
--- a/hgext/convert/bzr.py Sun Mar 13 02:29:11 2016 +0100 +++ b/hgext/convert/bzr.py Tue Mar 15 14:10:46 2016 -0700 @@ -7,9 +7,16 @@ # This module is for handling 'bzr', that was formerly known as Bazaar-NG; # it cannot access 'bar' repositories, but they were never used very much +from __future__ import absolute_import import os -from mercurial import demandimport, error +from mercurial import ( + demandimport, + error +) +from mercurial.i18n import _ +from . import common + # these do not work with demandimport, blacklist demandimport.ignore.extend([ 'bzrlib.transactions', @@ -17,49 +24,50 @@ 'ElementPath', ]) -from mercurial.i18n import _ -from mercurial import error -from common import NoRepo, commit, converter_source - try: # bazaar imports - from bzrlib import bzrdir, revision, errors - from bzrlib.revisionspec import RevisionSpec + import bzrlib.bzrdir + import bzrlib.errors + import bzrlib.revision + import bzrlib.revisionspec.RevisionSpec + bzrdir = bzrlib.bzrdir + errors = bzrlib.errors + revision = bzrlib.revision + revisionspec = bzrlib.revisionspec except ImportError: pass supportedkinds = ('file', 'symlink') -class bzr_source(converter_source): +class bzr_source(common.converter_source): """Reads Bazaar repositories by using the Bazaar Python libraries""" def __init__(self, ui, path, revs=None): super(bzr_source, self).__init__(ui, path, revs=revs) if not os.path.exists(os.path.join(path, '.bzr')): - raise NoRepo(_('%s does not look like a Bazaar repository') - % path) + raise common.NoRepo(_('%s does not look like a Bazaar repository') + % path) try: # access bzrlib stuff bzrdir except NameError: - raise NoRepo(_('Bazaar modules could not be loaded')) + raise common.NoRepo(_('Bazaar modules could not be loaded')) path = os.path.abspath(path) self._checkrepotype(path) try: self.sourcerepo = bzrdir.BzrDir.open(path).open_repository() except errors.NoRepositoryPresent: - raise NoRepo(_('%s does not look like a Bazaar repository') - % path) + raise 
common.NoRepo(_('%s does not look like a Bazaar repository') + % path) self._parentids = {} def _checkrepotype(self, path): # Lightweight checkouts detection is informational but probably # fragile at API level. It should not terminate the conversion. try: - from bzrlib import bzrdir dir = bzrdir.BzrDir.open_containing(path)[0] try: tree = dir.open_workingtree(recommend_upgrade=False) @@ -102,7 +110,7 @@ revid = None for branch in self._bzrbranches(): try: - r = RevisionSpec.from_string(self.revs[0]) + r = revisionspec.RevisionSpec.from_string(self.revs[0]) info = r.in_history(branch) except errors.BzrError: pass @@ -160,7 +168,7 @@ branch = self.recode(rev.properties.get('branch-nick', u'default')) if branch == 'trunk': branch = 'default' - return commit(parents=parents, + return common.commit(parents=parents, date='%d %d' % (rev.timestamp, -rev.timezone), author=self.recode(rev.committer), desc=self.recode(rev.message),
--- a/hgext/convert/common.py Sun Mar 13 02:29:11 2016 +0100 +++ b/hgext/convert/common.py Tue Mar 15 14:10:46 2016 -0700 @@ -4,10 +4,21 @@ # # This software may be used and distributed according to the terms of the # GNU General Public License version 2 or any later version. +from __future__ import absolute_import -import base64, errno, subprocess, os, datetime, re +import base64 import cPickle as pickle -from mercurial import phases, util, error +import datetime +import errno +import os +import re +import subprocess + +from mercurial import ( + error, + phases, + util, +) from mercurial.i18n import _ propertycache = util.propertycache
--- a/hgext/convert/convcmd.py Sun Mar 13 02:29:11 2016 +0100 +++ b/hgext/convert/convcmd.py Tue Mar 15 14:10:46 2016 -0700 @@ -4,22 +4,50 @@ # # This software may be used and distributed according to the terms of the # GNU General Public License version 2 or any later version. +from __future__ import absolute_import -from common import NoRepo, MissingTool, SKIPREV, mapfile -from cvs import convert_cvs -from darcs import darcs_source -from git import convert_git -from hg import mercurial_source, mercurial_sink -from subversion import svn_source, svn_sink -from monotone import monotone_source -from gnuarch import gnuarch_source -from bzr import bzr_source -from p4 import p4_source -import filemap +import os +import shlex +import shutil + +from mercurial import ( + encoding, + error, + hg, + util, +) +from mercurial.i18n import _ -import os, shutil, shlex -from mercurial import hg, util, encoding, error -from mercurial.i18n import _ +from . import ( + bzr, + common, + cvs, + darcs, + filemap, + git, + gnuarch, + hg as hgconvert, + monotone, + p4, + subversion, +) + +mapfile = common.mapfile +MissingTool = common.MissingTool +NoRepo = common.NoRepo +SKIPREV = common.SKIPREV + +bzr_source = bzr.bzr_source +convert_cvs = cvs.convert_cvs +convert_git = git.convert_git +darcs_source = darcs.darcs_source +gnuarch_source = gnuarch.gnuarch_source +mercurial_sink = hgconvert.mercurial_sink +mercurial_source = hgconvert.mercurial_source +monotone_source = monotone.monotone_source +p4_source = p4.p4_source +svn_sink = subversion.svn_sink +svn_source = subversion.svn_source orig_encoding = 'ascii' @@ -117,7 +145,7 @@ def getfile(self, file, rev): self.retrieved += 1 self.ui.progress(_('getting files'), self.retrieved, - item=file, total=self.filecount) + item=file, total=self.filecount, unit=_('files')) return self.source.getfile(file, rev) def targetfilebelongstosource(self, targetfilename):
--- a/hgext/convert/cvs.py Sun Mar 13 02:29:11 2016 +0100 +++ b/hgext/convert/cvs.py Tue Mar 15 14:10:46 2016 -0700 @@ -4,15 +4,32 @@ # # This software may be used and distributed according to the terms of the # GNU General Public License version 2 or any later version. +from __future__ import absolute_import -import os, re, socket, errno -from cStringIO import StringIO -from mercurial import encoding, util, error +import cStringIO +import errno +import os +import re +import socket + +from mercurial import ( + encoding, + error, + util, +) from mercurial.i18n import _ -from common import NoRepo, commit, converter_source, checktool -from common import makedatetimestamp -import cvsps +from . import ( + common, + cvsps, +) + +StringIO = cStringIO.StringIO +checktool = common.checktool +commit = common.commit +converter_source = common.converter_source +makedatetimestamp = common.makedatetimestamp +NoRepo = common.NoRepo class convert_cvs(converter_source): def __init__(self, ui, path, revs=None):
--- a/hgext/convert/cvsps.py Sun Mar 13 02:29:11 2016 +0100 +++ b/hgext/convert/cvsps.py Tue Mar 15 14:10:46 2016 -0700 @@ -4,13 +4,17 @@ # # This software may be used and distributed according to the terms of the # GNU General Public License version 2 or any later version. +from __future__ import absolute_import +import cPickle as pickle import os import re -import cPickle as pickle + +from mercurial import ( + hook, + util, +) from mercurial.i18n import _ -from mercurial import hook -from mercurial import util class logentry(object): '''Class logentry has the following attributes:
--- a/hgext/convert/darcs.py Sun Mar 13 02:29:11 2016 +0100 +++ b/hgext/convert/darcs.py Tue Mar 15 14:10:46 2016 -0700 @@ -4,39 +4,52 @@ # # This software may be used and distributed according to the terms of the # GNU General Public License version 2 or any later version. +from __future__ import absolute_import -from common import NoRepo, checktool, commandline, commit, converter_source +import errno +import os +import re +import shutil +import tempfile from mercurial.i18n import _ -from mercurial import util, error -import os, shutil, tempfile, re, errno +from mercurial import ( + error, + util, +) +from . import common +NoRepo = common.NoRepo # The naming drift of ElementTree is fun! try: - from xml.etree.cElementTree import ElementTree, XMLParser + import xml.etree.cElementTree.ElementTree as ElementTree + import xml.etree.cElementTree.XMLParser as XMLParser except ImportError: try: - from xml.etree.ElementTree import ElementTree, XMLParser + import xml.etree.ElementTree.ElementTree as ElementTree + import xml.etree.ElementTree.XMLParser as XMLParser except ImportError: try: - from elementtree.cElementTree import ElementTree, XMLParser + import elementtree.cElementTree.ElementTree as ElementTree + import elementtree.cElementTree.XMLParser as XMLParser except ImportError: try: - from elementtree.ElementTree import ElementTree, XMLParser + import elementtree.ElementTree.ElementTree as ElementTree + import elementtree.ElementTree.XMLParser as XMLParser except ImportError: pass -class darcs_source(converter_source, commandline): +class darcs_source(common.converter_source, common.commandline): def __init__(self, ui, path, revs=None): - converter_source.__init__(self, ui, path, revs=revs) - commandline.__init__(self, ui, 'darcs') + common.converter_source.__init__(self, ui, path, revs=revs) + common.commandline.__init__(self, ui, 'darcs') # check for _darcs, ElementTree so that we can easily skip # test-convert-darcs if ElementTree is not around if not 
os.path.exists(os.path.join(path, '_darcs')): raise NoRepo(_("%s does not look like a darcs repository") % path) - checktool('darcs') + common.checktool('darcs') version = self.run0('--version').splitlines()[0].strip() if version < '2.1': raise error.Abort(_('darcs version 2.1 or newer needed (found %r)') @@ -139,10 +152,10 @@ desc = elt.findtext('name') + '\n' + elt.findtext('comment', '') # etree can return unicode objects for name, comment, and author, # so recode() is used to ensure str objects are emitted. - return commit(author=self.recode(elt.get('author')), - date=util.datestr(date, '%Y-%m-%d %H:%M:%S %1%2'), - desc=self.recode(desc).strip(), - parents=self.parents[rev]) + return common.commit(author=self.recode(elt.get('author')), + date=util.datestr(date, '%Y-%m-%d %H:%M:%S %1%2'), + desc=self.recode(desc).strip(), + parents=self.parents[rev]) def pull(self, rev): output, status = self.run('pull', self.path, all=True,
--- a/hgext/convert/filemap.py Sun Mar 13 02:29:11 2016 +0100 +++ b/hgext/convert/filemap.py Tue Mar 15 14:10:46 2016 -0700 @@ -3,12 +3,16 @@ # # This software may be used and distributed according to the terms of the # GNU General Public License version 2 or any later version. +from __future__ import absolute_import import posixpath import shlex +from mercurial import ( + error, +) from mercurial.i18n import _ -from mercurial import error -from common import SKIPREV, converter_source +from . import common +SKIPREV = common.SKIPREV def rpairs(path): '''Yield tuples with path split at '/', starting with the full path. @@ -164,7 +168,7 @@ # touch files we're interested in, but also merges that merge two # or more interesting revisions. -class filemap_source(converter_source): +class filemap_source(common.converter_source): def __init__(self, ui, baseconverter, filemap): super(filemap_source, self).__init__(ui) self.base = baseconverter
--- a/hgext/convert/git.py Sun Mar 13 02:29:11 2016 +0100 +++ b/hgext/convert/git.py Tue Mar 15 14:10:46 2016 -0700 @@ -4,14 +4,21 @@ # # This software may be used and distributed according to the terms of the # GNU General Public License version 2 or any later version. +from __future__ import absolute_import import os import subprocess -from mercurial import util, config, error -from mercurial.node import hex, nullid +from mercurial import ( + config, + error, + node as nodemod, + util, +) from mercurial.i18n import _ -from common import NoRepo, commit, converter_source, checktool +from . import ( + common, +) class submodule(object): def __init__(self, path, node, url): @@ -25,7 +32,7 @@ def hgsubstate(self): return "%s %s" % (self.node, self.path) -class convert_git(converter_source): +class convert_git(common.converter_source): # Windows does not support GIT_DIR= construct while other systems # cannot remove environment variable. Just assume none have # both issues. @@ -92,7 +99,8 @@ if os.path.isdir(path + "/.git"): path += "/.git" if not os.path.exists(path + "/objects"): - raise NoRepo(_("%s does not look like a Git repository") % path) + raise common.NoRepo(_("%s does not look like a Git repository") % + path) # The default value (50) is based on the default for 'git diff'. 
similarity = ui.configint('convert', 'git.similarity', default=50) @@ -107,7 +115,7 @@ else: self.simopt = '' - checktool('git', 'git') + common.checktool('git', 'git') self.path = path self.submodules = [] @@ -134,7 +142,7 @@ return heads def catfile(self, rev, type): - if rev == hex(nullid): + if rev == nodemod.nullhex: raise IOError self.catfilepipe[0].write(rev+'\n') self.catfilepipe[0].flush() @@ -151,7 +159,7 @@ return data def getfile(self, name, rev): - if rev == hex(nullid): + if rev == nodemod.nullhex: return None, None if name == '.hgsub': data = '\n'.join([m.hgsub() for m in self.submoditer()]) @@ -165,7 +173,7 @@ return data, mode def submoditer(self): - null = hex(nullid) + null = nodemod.nullhex for m in sorted(self.submodules, key=lambda p: p.path): if m.node != null: yield m @@ -240,7 +248,7 @@ subexists[0] = True if entry[4] == 'D' or renamesource: subdeleted[0] = True - changes.append(('.hgsub', hex(nullid))) + changes.append(('.hgsub', nodemod.nullhex)) else: changes.append(('.hgsub', '')) elif entry[1] == '160000' or entry[0] == ':160000': @@ -248,7 +256,7 @@ subexists[0] = True else: if renamesource: - h = hex(nullid) + h = nodemod.nullhex self.modecache[(f, h)] = (p and "x") or (s and "l") or "" changes.append((f, h)) @@ -287,7 +295,7 @@ if subexists[0]: if subdeleted[0]: - changes.append(('.hgsubstate', hex(nullid))) + changes.append(('.hgsubstate', nodemod.nullhex)) else: self.retrievegitmodules(version) changes.append(('.hgsubstate', '')) @@ -324,8 +332,9 @@ tz = -int(tzs) * (int(tzh) * 3600 + int(tzm)) date = tm + " " + str(tz) - c = commit(parents=parents, date=date, author=author, desc=message, - rev=version) + c = common.commit(parents=parents, date=date, author=author, + desc=message, + rev=version) return c def numcommits(self):
--- a/hgext/convert/gnuarch.py Sun Mar 13 02:29:11 2016 +0100 +++ b/hgext/convert/gnuarch.py Tue Mar 15 14:10:46 2016 -0700 @@ -5,14 +5,22 @@ # # This software may be used and distributed according to the terms of the # GNU General Public License version 2 or any later version. +from __future__ import absolute_import -from common import NoRepo, commandline, commit, converter_source +import email +import os +import shutil +import stat +import tempfile +from mercurial import ( + encoding, + error, + util, +) from mercurial.i18n import _ -from mercurial import encoding, util, error -import os, shutil, tempfile, stat -from email.Parser import Parser +from . import common -class gnuarch_source(converter_source, commandline): +class gnuarch_source(common.converter_source, common.commandline): class gnuarch_rev(object): def __init__(self, rev): @@ -31,7 +39,7 @@ super(gnuarch_source, self).__init__(ui, path, revs=revs) if not os.path.exists(os.path.join(path, '{arch}')): - raise NoRepo(_("%s does not look like a GNU Arch repository") + raise common.NoRepo(_("%s does not look like a GNU Arch repository") % path) # Could use checktool, but we want to check for baz or tla. @@ -44,7 +52,7 @@ else: raise error.Abort(_('cannot find a GNU Arch tool')) - commandline.__init__(self, ui, self.execmd) + common.commandline.__init__(self, ui, self.execmd) self.path = os.path.realpath(path) self.tmppath = None @@ -54,7 +62,7 @@ self.changes = {} self.parents = {} self.tags = {} - self.catlogparser = Parser() + self.catlogparser = email.Parser.Parser() self.encoding = encoding.encoding self.archives = [] @@ -175,8 +183,9 @@ def getcommit(self, rev): changes = self.changes[rev] - return commit(author=changes.author, date=changes.date, - desc=changes.summary, parents=self.parents[rev], rev=rev) + return common.commit(author=changes.author, date=changes.date, + desc=changes.summary, parents=self.parents[rev], + rev=rev) def gettags(self): return self.tags
--- a/hgext/convert/hg.py Sun Mar 13 02:29:11 2016 +0100 +++ b/hgext/convert/hg.py Tue Mar 15 14:10:46 2016 -0700 @@ -16,24 +16,36 @@ # identifier to be stored in the converted revision. This will cause # the converted revision to have a different identity than the # source. +from __future__ import absolute_import +import cStringIO +import os +import re +import time -import os, time, cStringIO +from mercurial import ( + bookmarks, + context, + error, + exchange, + hg, + lock as lockmod, + merge as mergemod, + node as nodemod, + phases, + scmutil, + util, +) from mercurial.i18n import _ -from mercurial.node import bin, hex, nullid -from mercurial import hg, util, context, bookmarks, error, scmutil, exchange -from mercurial import phases -from mercurial import lock as lockmod -from mercurial import merge as mergemod +from . import common +mapfile = common.mapfile +NoRepo = common.NoRepo -from common import NoRepo, commit, converter_source, converter_sink, mapfile - -import re sha1re = re.compile(r'\b[0-9a-f]{12,40}\b') -class mercurial_sink(converter_sink): +class mercurial_sink(common.converter_sink): def __init__(self, ui, path): - converter_sink.__init__(self, ui, path) + common.converter_sink.__init__(self, ui, path) self.branchnames = ui.configbool('convert', 'hg.usebranchnames', True) self.clonebranches = ui.configbool('convert', 'hg.clonebranches', False) self.tagsbranch = ui.config('convert', 'hg.tagsbranch', 'default') @@ -132,7 +144,7 @@ continue revid = revmap.get(source.lookuprev(s[0])) if not revid: - if s[0] == hex(nullid): + if s[0] == nodemod.nullhex: revid = s[0] else: continue @@ -148,7 +160,7 @@ revid = s[0] subpath = s[1] - if revid != hex(nullid): + if revid != nodemod.nullhex: revmap = self.subrevmaps.get(subpath) if revmap is None: revmap = mapfile(self.ui, @@ -250,13 +262,13 @@ parents = pl nparents = len(parents) if self.filemapmode and nparents == 1: - m1node = self.repo.changelog.read(bin(parents[0]))[0] + m1node = 
self.repo.changelog.read(nodemod.bin(parents[0]))[0] parent = parents[0] if len(parents) < 2: - parents.append(nullid) + parents.append(nodemod.nullid) if len(parents) < 2: - parents.append(nullid) + parents.append(nodemod.nullid) p2 = parents.pop(0) text = commit.desc @@ -283,12 +295,12 @@ # Only transplant stores its reference in binary if label == 'transplant_source': - node = hex(node) + node = nodemod.hex(node) newrev = revmap.get(node) if newrev is not None: if label == 'transplant_source': - newrev = bin(newrev) + newrev = nodemod.bin(newrev) extra[label] = newrev @@ -302,7 +314,7 @@ p2 = parents.pop(0) p1ctx = self.repo[p1] p2ctx = None - if p2 != nullid: + if p2 != nodemod.nullid: p2ctx = self.repo[p2] fileset = set(files) if full: @@ -324,7 +336,7 @@ phases.phasenames[commit.phase], 'convert') with self.repo.transaction("convert") as tr: - node = hex(self.repo.commitctx(ctx)) + node = nodemod.hex(self.repo.commitctx(ctx)) # If the node value has changed, but the phase is lower than # draft, set it back to draft since it hasn't been exposed @@ -340,7 +352,7 @@ if self.filemapmode and nparents == 1: man = self.repo.manifest - mnode = self.repo.changelog.read(bin(p2))[0] + mnode = self.repo.changelog.read(nodemod.bin(p2))[0] closed = 'close' in commit.extra if not closed and not man.cmp(m1node, man.revision(mnode)): self.ui.status(_("filtering out empty revision\n")) @@ -354,7 +366,7 @@ tagparent = parentctx.node() except error.RepoError: parentctx = None - tagparent = nullid + tagparent = nodemod.nullid oldlines = set() for branch, heads in self.repo.branchmap().iteritems(): @@ -397,7 +409,7 @@ [".hgtags"], getfilectx, "convert-repo", date, extra) node = self.repo.commitctx(ctx) - return hex(node), hex(tagparent) + return nodemod.hex(node), nodemod.hex(tagparent) def setfilemapmode(self, active): self.filemapmode = active @@ -413,7 +425,7 @@ self.ui.status(_("updating bookmarks\n")) destmarks = self.repo._bookmarks for bookmark in updatedbookmark: - 
destmarks[bookmark] = bin(updatedbookmark[bookmark]) + destmarks[bookmark] = nodemod.bin(updatedbookmark[bookmark]) destmarks.recordchange(tr) tr.close() finally: @@ -430,9 +442,9 @@ 'are not implemented)') % rev) return rev in self.repo -class mercurial_source(converter_source): +class mercurial_source(common.converter_source): def __init__(self, ui, path, revs=None): - converter_source.__init__(self, ui, path, revs) + common.converter_source.__init__(self, ui, path, revs) self.ignoreerrors = ui.configbool('convert', 'hg.ignoreerrors', False) self.ignored = set() self.saverev = ui.configbool('convert', 'hg.saverev', False) @@ -493,7 +505,7 @@ return [p for p in ctx.parents() if p and self.keep(p.node())] def getheads(self): - return [hex(h) for h in self._heads if self.keep(h)] + return [nodemod.hex(h) for h in self._heads if self.keep(h)] def getfile(self, name, rev): try: @@ -572,18 +584,23 @@ parents = [p.hex() for p in self._parents(ctx)] crev = rev - return commit(author=ctx.user(), - date=util.datestr(ctx.date(), '%Y-%m-%d %H:%M:%S %1%2'), - desc=ctx.description(), rev=crev, parents=parents, - branch=ctx.branch(), extra=ctx.extra(), - sortkey=ctx.rev(), saverev=self.saverev, - phase=ctx.phase()) + return common.commit(author=ctx.user(), + date=util.datestr(ctx.date(), + '%Y-%m-%d %H:%M:%S %1%2'), + desc=ctx.description(), + rev=crev, + parents=parents, + branch=ctx.branch(), + extra=ctx.extra(), + sortkey=ctx.rev(), + saverev=self.saverev, + phase=ctx.phase()) def gettags(self): # This will get written to .hgtags, filter non global tags out. 
tags = [t for t in self.repo.tagslist() if self.repo.tagtype(t[0]) == 'global'] - return dict([(name, hex(node)) for name, node in tags + return dict([(name, nodemod.hex(node)) for name, node in tags if self.keep(node)]) def getchangedfiles(self, rev, i): @@ -622,7 +639,7 @@ def lookuprev(self, rev): try: - return hex(self.repo.lookup(rev)) + return nodemod.hex(self.repo.lookup(rev)) except (error.RepoError, error.LookupError): return None
--- a/hgext/convert/monotone.py Sun Mar 13 02:29:11 2016 +0100 +++ b/hgext/convert/monotone.py Tue Mar 15 14:10:46 2016 -0700 @@ -5,28 +5,34 @@ # # This software may be used and distributed according to the terms of the # GNU General Public License version 2 or any later version. +from __future__ import absolute_import -import os, re -from mercurial import util, error -from common import NoRepo, commit, converter_source, checktool -from common import commandline +import os +import re + +from mercurial import ( + error, + util, +) from mercurial.i18n import _ -class monotone_source(converter_source, commandline): +from . import common + +class monotone_source(common.converter_source, common.commandline): def __init__(self, ui, path=None, revs=None): - converter_source.__init__(self, ui, path, revs) + common.converter_source.__init__(self, ui, path, revs) if revs and len(revs) > 1: raise error.Abort(_('monotone source does not support specifying ' 'multiple revs')) - commandline.__init__(self, ui, 'mtn') + common.commandline.__init__(self, ui, 'mtn') self.ui = ui self.path = path self.automatestdio = False self.revs = revs - norepo = NoRepo(_("%s does not look like a monotone repository") - % path) + norepo = common.NoRepo(_("%s does not look like a monotone repository") + % path) if not os.path.exists(os.path.join(path, '_MTN')): # Could be a monotone repository (SQLite db file) try: @@ -69,7 +75,7 @@ self.files = None self.dirs = None - checktool('mtn', abort=False) + common.checktool('mtn', abort=False) def mtnrun(self, *args, **kwargs): if self.automatestdio: @@ -302,7 +308,7 @@ certs = self.mtngetcerts(rev) if certs.get('suspend') == certs["branch"]: extra['close'] = 1 - return commit( + return common.commit( author=certs["author"], date=util.datestr(util.strdate(certs["date"], "%Y-%m-%dT%H:%M:%S")), desc=certs["changelog"],
--- a/hgext/convert/p4.py Sun Mar 13 02:29:11 2016 +0100 +++ b/hgext/convert/p4.py Tue Mar 15 14:10:46 2016 -0700 @@ -4,13 +4,18 @@ # # This software may be used and distributed according to the terms of the # GNU General Public License version 2 or any later version. +from __future__ import absolute_import -from mercurial import util, error +import marshal +import re + +from mercurial import ( + error, + util, +) from mercurial.i18n import _ -from common import commit, converter_source, checktool, NoRepo -import marshal -import re +from . import common def loaditer(f): "Yield the dictionary objects generated by p4" @@ -37,17 +42,18 @@ filename = filename.replace(k, v) return filename -class p4_source(converter_source): +class p4_source(common.converter_source): def __init__(self, ui, path, revs=None): # avoid import cycle - import convcmd + from . import convcmd super(p4_source, self).__init__(ui, path, revs=revs) if "/" in path and not path.startswith('//'): - raise NoRepo(_('%s does not look like a P4 repository') % path) + raise common.NoRepo(_('%s does not look like a P4 repository') % + path) - checktool('p4', abort=False) + common.checktool('p4', abort=False) self.p4changes = {} self.heads = {} @@ -142,10 +148,10 @@ parents = [] date = (int(d["time"]), 0) # timezone not set - c = commit(author=self.recode(d["user"]), - date=util.datestr(date, '%Y-%m-%d %H:%M:%S %1%2'), - parents=parents, desc=desc, branch=None, - extra={"p4": change}) + c = common.commit(author=self.recode(d["user"]), + date=util.datestr(date, '%Y-%m-%d %H:%M:%S %1%2'), + parents=parents, desc=desc, branch=None, + extra={"p4": change}) files = [] copies = {}
--- a/hgext/convert/subversion.py Sun Mar 13 02:29:11 2016 +0100 +++ b/hgext/convert/subversion.py Tue Mar 15 14:10:46 2016 -0700 @@ -1,38 +1,59 @@ # Subversion 1.4/1.5 Python API backend # # Copyright(C) 2007 Daniel Holth et al +from __future__ import absolute_import -import os, re, sys, tempfile, urllib, urllib2 +import cPickle as pickle +import cStringIO +import os +import re +import sys +import tempfile +import urllib +import urllib2 import xml.dom.minidom -import cPickle as pickle -from mercurial import strutil, scmutil, util, encoding, error +from mercurial import ( + encoding, + error, + scmutil, + strutil, + util, +) from mercurial.i18n import _ +from . import common + +StringIO = cStringIO.StringIO propertycache = util.propertycache +commandline = common.commandline +commit = common.commit +converter_sink = common.converter_sink +converter_source = common.converter_source +decodeargs = common.decodeargs +encodeargs = common.encodeargs +makedatetimestamp = common.makedatetimestamp +mapfile = common.mapfile +MissingTool = common.MissingTool +NoRepo = common.NoRepo + # Subversion stuff. Works best with very recent Python SVN bindings # e.g. SVN 1.5 or backports. Thanks to the bzr folks for enhancing # these bindings. -from cStringIO import StringIO - -from common import NoRepo, MissingTool, commit, encodeargs, decodeargs -from common import commandline, converter_source, converter_sink, mapfile -from common import makedatetimestamp - try: - from svn.core import SubversionException, Pool import svn import svn.client import svn.core import svn.ra import svn.delta - import transport + from . 
import transport import warnings warnings.filterwarnings('ignore', module='svn.core', category=DeprecationWarning) + svn.core.SubversionException # trigger import to catch error except ImportError: svn = None @@ -79,7 +100,7 @@ def geturl(path): try: return svn.client.url_from_path(svn.core.svn_path_canonicalize(path)) - except SubversionException: + except svn.core.SubversionException: # svn.client.url_from_path() fails with local repositories pass if os.path.isdir(path): @@ -316,7 +337,7 @@ self.commits = {} self.paths = {} self.uuid = svn.ra.get_uuid(self.ra) - except SubversionException: + except svn.core.SubversionException: ui.traceback() svnversion = '%d.%d.%d' % (svn.core.SVN_VER_MAJOR, svn.core.SVN_VER_MINOR, @@ -377,7 +398,7 @@ svn.client.ls(self.url.rstrip('/') + '/' + quote(path), optrev, False, self.ctx) return True - except SubversionException: + except svn.core.SubversionException: return False def getheads(self): @@ -676,7 +697,7 @@ prevmodule = self.reparent('') dirent = svn.ra.stat(self.ra, path.strip('/'), stop) self.reparent(prevmodule) - except SubversionException: + except svn.core.SubversionException: dirent = None if not dirent: raise SvnPathNotFound(_('%s not found up to revision %d') @@ -728,7 +749,7 @@ for i, (path, ent) in enumerate(paths): self.ui.progress(_('scanning paths'), i, item=path, - total=len(paths)) + total=len(paths), unit=_('paths')) entrypath = self.getrelpath(path) kind = self._checkpath(entrypath, revnum) @@ -948,7 +969,7 @@ firstcset.parents.append(latest) except SvnPathNotFound: pass - except SubversionException as xxx_todo_changeme: + except svn.core.SubversionException as xxx_todo_changeme: (inst, num) = xxx_todo_changeme.args if num == svn.core.SVN_ERR_FS_NO_SUCH_REVISION: raise error.Abort(_('svn: branch has no revision %s') @@ -975,7 +996,7 @@ info = info[-1] mode = ("svn:executable" in info) and 'x' or '' mode = ("svn:special" in info) and 'l' or mode - except SubversionException as e: + except 
svn.core.SubversionException as e: notfound = (svn.core.SVN_ERR_FS_NOT_FOUND, svn.core.SVN_ERR_RA_DAV_PATH_NOT_FOUND) if e.apr_err in notfound: # File not found @@ -990,7 +1011,7 @@ def _iterfiles(self, path, revnum): """Enumerate all files in path at revnum, recursively.""" path = path.strip('/') - pool = Pool() + pool = svn.core.Pool() rpath = '/'.join([self.baseurl, quote(path)]).strip('/') entries = svn.client.ls(rpath, optrev(revnum), True, self.ctx, pool) if path:
--- a/hgext/convert/transport.py Sun Mar 13 02:29:11 2016 +0100 +++ b/hgext/convert/transport.py Tue Mar 15 14:10:46 2016 -0700 @@ -16,12 +16,18 @@ # You should have received a copy of the GNU General Public License # along with this program; if not, see <http://www.gnu.org/licenses/>. +from __future__ import absolute_import -from mercurial import util -from svn.core import SubversionException, Pool -import svn.ra import svn.client import svn.core +import svn.ra + +Pool = svn.core.Pool +SubversionException = svn.core.SubversionException + +from mercurial import ( + util, +) # Some older versions of the Python bindings need to be # explicitly initialized. But what we want to do probably
--- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/hgext/fsmonitor/__init__.py Tue Mar 15 14:10:46 2016 -0700 @@ -0,0 +1,694 @@ +# __init__.py - fsmonitor initialization and overrides +# +# Copyright 2013-2016 Facebook, Inc. +# +# This software may be used and distributed according to the terms of the +# GNU General Public License version 2 or any later version. + +'''Faster status operations with the Watchman file monitor (EXPERIMENTAL) + +Integrates the file-watching program Watchman with Mercurial to produce faster +status results. + +On a particular Linux system, for a real-world repository with over 400,000 +files hosted on ext4, vanilla `hg status` takes 1.3 seconds. On the same +system, with fsmonitor it takes about 0.3 seconds. + +fsmonitor requires no configuration -- it will tell Watchman about your +repository as necessary. You'll need to install Watchman from +https://facebook.github.io/watchman/ and make sure it is in your PATH. + +The following configuration options exist: + +:: + + [fsmonitor] + mode = {off, on, paranoid} + +When `mode = off`, fsmonitor will disable itself (similar to not loading the +extension at all). When `mode = on`, fsmonitor will be enabled (the default). +When `mode = paranoid`, fsmonitor will query both Watchman and the filesystem, +and ensure that the results are consistent. + +:: + + [fsmonitor] + timeout = (float) + +A value, in seconds, that determines how long fsmonitor will wait for Watchman +to return results. Defaults to `2.0`. + +:: + + [fsmonitor] + blacklistusers = (list of userids) + +A list of usernames for which fsmonitor will disable itself altogether. + +:: + + [fsmonitor] + walk_on_invalidate = (boolean) + +Whether or not to walk the whole repo ourselves when our cached state has been +invalidated, for example when Watchman has been restarted or .hgignore rules +have been changed. Walking the repo in that case can result in competing for +I/O with Watchman. 
For large repos it is recommended to set this value to +false. You may wish to set this to true if you have a very fast filesystem +that can outpace the IPC overhead of getting the result data for the full repo +from Watchman. Defaults to false. + +fsmonitor is incompatible with the largefiles and eol extensions, and +will disable itself if any of those are active. + +''' + +# Platforms Supported +# =================== +# +# **Linux:** *Stable*. Watchman and fsmonitor are both known to work reliably, +# even under severe loads. +# +# **Mac OS X:** *Stable*. The Mercurial test suite passes with fsmonitor +# turned on, on case-insensitive HFS+. There has been a reasonable amount of +# user testing under normal loads. +# +# **Solaris, BSD:** *Alpha*. watchman and fsmonitor are believed to work, but +# very little testing has been done. +# +# **Windows:** *Alpha*. Not in a release version of watchman or fsmonitor yet. +# +# Known Issues +# ============ +# +# * fsmonitor will disable itself if any of the following extensions are +# enabled: largefiles, inotify, eol; or if the repository has subrepos. +# * fsmonitor will produce incorrect results if nested repos that are not +# subrepos exist. *Workaround*: add nested repo paths to your `.hgignore`. +# +# The issues related to nested repos and subrepos are probably not fundamental +# ones. Patches to fix them are welcome. + +from __future__ import absolute_import + +import os +import stat +import sys + +from mercurial import ( + context, + extensions, + localrepo, + merge, + pathutil, + scmutil, + util, +) +from mercurial import match as matchmod +from mercurial.i18n import _ + +from . import ( + state, + watchmanclient, +) + +# Note for extension authors: ONLY specify testedwith = 'internal' for +# extensions which SHIP WITH MERCURIAL. Non-mainline extensions should +# be specifying the version(s) of Mercurial they are tested with, or +# leave the attribute unspecified. 
testedwith = 'internal'

# This extension is incompatible with the following blacklisted extensions
# and will disable itself when encountering one of these:
_blacklist = ['largefiles', 'eol']

def _handleunavailable(ui, state, ex):
    """Exception handler for Watchman interaction exceptions"""
    if isinstance(ex, watchmanclient.Unavailable):
        if ex.warn:
            ui.warn(str(ex) + '\n')
        if ex.invalidate:
            state.invalidate()
        ui.log('fsmonitor', 'Watchman unavailable: %s\n', ex.msg)
    else:
        ui.log('fsmonitor', 'Watchman exception: %s\n', ex)

def _hashignore(ignore):
    """Calculate hash for ignore patterns and filenames

    If this information changes between Mercurial invocations, we can't
    rely on Watchman information anymore and have to re-scan the working
    copy.

    """
    sha1 = util.sha1()
    if util.safehasattr(ignore, 'includepat'):
        sha1.update(ignore.includepat)
    sha1.update('\0\0')
    if util.safehasattr(ignore, 'excludepat'):
        sha1.update(ignore.excludepat)
    sha1.update('\0\0')
    if util.safehasattr(ignore, 'patternspat'):
        sha1.update(ignore.patternspat)
    sha1.update('\0\0')
    if util.safehasattr(ignore, '_files'):
        for f in ignore._files:
            sha1.update(f)
    sha1.update('\0')
    return sha1.hexdigest()

def overridewalk(orig, self, match, subrepos, unknown, ignored, full=True):
    '''Replacement for dirstate.walk, hooking into Watchman.

    Whenever full is False, ignored is False, and the Watchman client is
    available, use Watchman combined with saved state to possibly return only a
    subset of files.'''
    def bail():
        return orig(match, subrepos, unknown, ignored, full=True)

    if full or ignored or not self._watchmanclient.available():
        return bail()
    state = self._fsmonitorstate
    clock, ignorehash, notefiles = state.get()
    if not clock:
        if state.walk_on_invalidate:
            return bail()
        # Initial NULL clock value, see
        # https://facebook.github.io/watchman/docs/clockspec.html
        clock = 'c:0:0'
        notefiles = []

    def fwarn(f, msg):
        self._ui.warn('%s: %s\n' % (self.pathto(f), msg))
        return False

    def badtype(mode):
        kind = _('unknown')
        if stat.S_ISCHR(mode):
            kind = _('character device')
        elif stat.S_ISBLK(mode):
            kind = _('block device')
        elif stat.S_ISFIFO(mode):
            kind = _('fifo')
        elif stat.S_ISSOCK(mode):
            kind = _('socket')
        elif stat.S_ISDIR(mode):
            kind = _('directory')
        return _('unsupported file type (type is %s)') % kind

    ignore = self._ignore
    dirignore = self._dirignore
    if unknown:
        if _hashignore(ignore) != ignorehash and clock != 'c:0:0':
            # ignore list changed -- can't rely on Watchman state any more
            if state.walk_on_invalidate:
                return bail()
            notefiles = []
            clock = 'c:0:0'
    else:
        # always ignore
        ignore = util.always
        dirignore = util.always

    matchfn = match.matchfn
    matchalways = match.always()
    dmap = self._map
    nonnormalset = getattr(self, '_nonnormalset', None)

    copymap = self._copymap
    getkind = stat.S_IFMT
    dirkind = stat.S_IFDIR
    regkind = stat.S_IFREG
    lnkkind = stat.S_IFLNK
    join = self._join
    normcase = util.normcase
    fresh_instance = False

    exact = skipstep3 = False
    if matchfn == match.exact:  # match.exact
        exact = True
        dirignore = util.always  # skip step 2
    elif match.files() and not match.anypats():  # match.match, no patterns
        skipstep3 = True

    if not exact and self._checkcase:
        # note that even though we could receive directory entries, we're only
        # interested in checking if a file with the same name exists. So only
        # normalize files if possible.
        normalize = self._normalizefile
        skipstep3 = False
    else:
        normalize = None

    # step 1: find all explicit files
    results, work, dirsnotfound = self._walkexplicit(match, subrepos)

    skipstep3 = skipstep3 and not (work or dirsnotfound)
    work = [d for d in work if not dirignore(d[0])]

    if not work and (exact or skipstep3):
        for s in subrepos:
            del results[s]
        del results['.hg']
        return results

    # step 2: query Watchman
    try:
        # Use the user-configured timeout for the query.
        # Add a little slack over the top of the user query to allow for
        # overheads while transferring the data
        self._watchmanclient.settimeout(state.timeout + 0.1)
        result = self._watchmanclient.command('query', {
            'fields': ['mode', 'mtime', 'size', 'exists', 'name'],
            'since': clock,
            'expression': [
                'not', [
                    'anyof', ['dirname', '.hg'],
                    ['name', '.hg', 'wholename']
                ]
            ],
            'sync_timeout': int(state.timeout * 1000),
            'empty_on_fresh_instance': state.walk_on_invalidate,
        })
    except Exception as ex:
        _handleunavailable(self._ui, state, ex)
        self._watchmanclient.clearconnection()
        return bail()
    else:
        # We need to propagate the last observed clock up so that we
        # can use it for our next query
        state.setlastclock(result['clock'])
        if result['is_fresh_instance']:
            if state.walk_on_invalidate:
                state.invalidate()
                return bail()
            fresh_instance = True
            # Ignore any prior noteable files from the state info
            notefiles = []

    # for file paths which require normalization and we encounter a case
    # collision, we store our own foldmap
    if normalize:
        foldmap = dict((normcase(k), k) for k in results)

    switch_slashes = os.sep == '\\'
    # The order of the results is, strictly speaking, undefined.
    # For case changes on a case insensitive filesystem we may receive
    # two entries, one with exists=True and another with exists=False.
    # The exists=True entries in the same response should be interpreted
    # as being happens-after the exists=False entries due to the way that
    # Watchman tracks files.  We use this property to reconcile deletes
    # for name case changes.
    for entry in result['files']:
        fname = entry['name']
        if switch_slashes:
            fname = fname.replace('\\', '/')
        if normalize:
            normed = normcase(fname)
            fname = normalize(fname, True, True)
            foldmap[normed] = fname
        fmode = entry['mode']
        fexists = entry['exists']
        kind = getkind(fmode)

        if not fexists:
            # if marked as deleted and we don't already have a change
            # record, mark it as deleted.  If we already have an entry
            # for fname then it was either part of walkexplicit or was
            # an earlier result that was a case change
            if fname not in results and fname in dmap and (
                    matchalways or matchfn(fname)):
                results[fname] = None
        elif kind == dirkind:
            if fname in dmap and (matchalways or matchfn(fname)):
                results[fname] = None
        elif kind == regkind or kind == lnkkind:
            if fname in dmap:
                if matchalways or matchfn(fname):
                    results[fname] = entry
            elif (matchalways or matchfn(fname)) and not ignore(fname):
                results[fname] = entry
        elif fname in dmap and (matchalways or matchfn(fname)):
            results[fname] = None

    # step 3: query notable files we don't already know about
    # XXX try not to iterate over the entire dmap
    if normalize:
        # any notable files that have changed case will already be handled
        # above, so just check membership in the foldmap
        notefiles = set((normalize(f, True, True) for f in notefiles
                         if normcase(f) not in foldmap))
    visit = set((f for f in notefiles if (f not in results and matchfn(f)
                 and (f in dmap or not ignore(f)))))

    if nonnormalset is not None and not fresh_instance:
        if matchalways:
            visit.update(f for f in nonnormalset if f not in results)
            visit.update(f for f in copymap if f not in results)
        else:
            visit.update(f for f in nonnormalset
                         if f not in results and matchfn(f))
            visit.update(f for f in copymap
                         if f not in results and matchfn(f))
    else:
        if matchalways:
            visit.update(f for f, st in dmap.iteritems()
                         if (f not in results and
                             (st[2] < 0 or st[0] != 'n' or fresh_instance)))
            visit.update(f for f in copymap if f not in results)
        else:
            visit.update(f for f, st in dmap.iteritems()
                         if (f not in results and
                             (st[2] < 0 or st[0] != 'n' or fresh_instance)
                             and matchfn(f)))
            visit.update(f for f in copymap
                         if f not in results and matchfn(f))

    # paths that fail the audit (e.g. escape the repo root) are reported as
    # deleted rather than statted
    audit = pathutil.pathauditor(self._root).check
    auditpass = [f for f in visit if audit(f)]
    auditpass.sort()
    auditfail = visit.difference(auditpass)
    for f in auditfail:
        results[f] = None

    nf = iter(auditpass).next
    for st in util.statfiles([join(f) for f in auditpass]):
        f = nf()
        if st or f in dmap:
            results[f] = st

    for s in subrepos:
        del results[s]
    del results['.hg']
    return results

def overridestatus(
        orig, self, node1='.', node2=None, match=None, ignored=False,
        clean=False, unknown=False, listsubrepos=False):
    """Replacement for localrepository.status.

    Piggy-backs on a status run to record Watchman's clock and the set of
    notable files in the fsmonitor state; in 'paranoid' mode also re-runs the
    plain walk and compares the two results.
    """
    listignored = ignored
    listclean = clean
    listunknown = unknown

    def _cmpsets(l1, l2):
        # log any discrepancy between the Watchman-backed and plain results
        try:
            if 'FSMONITOR_LOG_FILE' in os.environ:
                fn = os.environ['FSMONITOR_LOG_FILE']
                f = open(fn, 'wb')
            else:
                fn = 'fsmonitorfail.log'
                f = self.opener(fn, 'wb')
        except (IOError, OSError):
            self.ui.warn(_('warning: unable to write to %s\n') % fn)
            return

        try:
            for i, (s1, s2) in enumerate(zip(l1, l2)):
                if set(s1) != set(s2):
                    f.write('sets at position %d are unequal\n' % i)
                    f.write('watchman returned: %s\n' % s1)
                    f.write('stat returned: %s\n' % s2)
        finally:
            f.close()

    if isinstance(node1, context.changectx):
        ctx1 = node1
    else:
        ctx1 = self[node1]
    if isinstance(node2, context.changectx):
        ctx2 = node2
    else:
        ctx2 = self[node2]

    working = ctx2.rev() is None
    parentworking = working and ctx1 == self['.']
    match = match or matchmod.always(self.root, self.getcwd())

    # Maybe we can use this opportunity to update Watchman's state.
    # Mercurial uses workingcommitctx and/or memctx to represent the part of
    # the workingctx that is to be committed. So don't update the state in
    # that case.
    # HG_PENDING is set in the environment when the dirstate is being updated
    # in the middle of a transaction; we must not update our state in that
    # case, or we risk forgetting about changes in the working copy.
    updatestate = (parentworking and match.always() and
                   not isinstance(ctx2, (context.workingcommitctx,
                                         context.memctx)) and
                   'HG_PENDING' not in os.environ)

    try:
        if self._fsmonitorstate.walk_on_invalidate:
            # Use a short timeout to query the current clock.  If that
            # takes too long then we assume that the service will be slow
            # to answer our query.
            # walk_on_invalidate indicates that we prefer to walk the
            # tree ourselves because we can ignore portions that Watchman
            # cannot and we tend to be faster in the warmer buffer cache
            # cases.
            self._watchmanclient.settimeout(0.1)
        else:
            # Give Watchman more time to potentially complete its walk
            # and return the initial clock.  In this mode we assume that
            # the filesystem will be slower than parsing a potentially
            # very large Watchman result set.
            self._watchmanclient.settimeout(
                self._fsmonitorstate.timeout + 0.1)
        startclock = self._watchmanclient.getcurrentclock()
    except Exception as ex:
        self._watchmanclient.clearconnection()
        _handleunavailable(self.ui, self._fsmonitorstate, ex)
        # boo, Watchman failed. bail
        return orig(node1, node2, match, listignored, listclean,
                    listunknown, listsubrepos)

    if updatestate:
        # We need info about unknown files. This may make things slower the
        # first time, but whatever.
        stateunknown = True
    else:
        stateunknown = listunknown

    r = orig(node1, node2, match, listignored, listclean, stateunknown,
             listsubrepos)
    modified, added, removed, deleted, unknown, ignored, clean = r

    if updatestate:
        notefiles = modified + added + removed + deleted + unknown
        self._fsmonitorstate.set(
            self._fsmonitorstate.getlastclock() or startclock,
            _hashignore(self.dirstate._ignore),
            notefiles)

    if not listunknown:
        unknown = []

    # don't do paranoid checks if we're not going to query Watchman anyway
    full = listclean or match.traversedir is not None
    if self._fsmonitorstate.mode == 'paranoid' and not full:
        # run status again and fall back to the old walk this time
        self.dirstate._fsmonitordisable = True

        # shut the UI up
        quiet = self.ui.quiet
        self.ui.quiet = True
        fout, ferr = self.ui.fout, self.ui.ferr
        self.ui.fout = self.ui.ferr = open(os.devnull, 'wb')

        try:
            rv2 = orig(
                node1, node2, match, listignored, listclean, listunknown,
                listsubrepos)
        finally:
            self.dirstate._fsmonitordisable = False
            self.ui.quiet = quiet
            self.ui.fout, self.ui.ferr = fout, ferr

        # clean isn't tested since it's set to True above
        _cmpsets([modified, added, removed, deleted, unknown, ignored, clean],
                 rv2)
        modified, added, removed, deleted, unknown, ignored, clean = rv2

    return scmutil.status(
        modified, added, removed, deleted, unknown, ignored, clean)

def makedirstate(cls):
    """Build a dirstate subclass of cls that routes walks through Watchman."""
    class fsmonitordirstate(cls):
        def _fsmonitorinit(self, fsmonitorstate, watchmanclient):
            # _fsmonitordisable is used in paranoid mode
            self._fsmonitordisable = False
            self._fsmonitorstate = fsmonitorstate
            self._watchmanclient = watchmanclient

        def walk(self, *args, **kwargs):
            orig = super(fsmonitordirstate, self).walk
            if self._fsmonitordisable:
                return orig(*args, **kwargs)
            return overridewalk(orig, self, *args, **kwargs)

        def rebuild(self, *args, **kwargs):
            self._fsmonitorstate.invalidate()
            return super(fsmonitordirstate, self).rebuild(*args, **kwargs)

        def invalidate(self, *args, **kwargs):
            self._fsmonitorstate.invalidate()
            return super(fsmonitordirstate, self).invalidate(*args, **kwargs)

    return fsmonitordirstate

def wrapdirstate(orig, self):
    """Filecache wrapper: upgrade a freshly created dirstate to fsmonitor."""
    ds = orig(self)
    # only override the dirstate when Watchman is available for the repo
    if util.safehasattr(self, '_fsmonitorstate'):
        ds.__class__ = makedirstate(ds.__class__)
        ds._fsmonitorinit(self._fsmonitorstate, self._watchmanclient)
    return ds

def extsetup(ui):
    """Install the dirstate, symlink (OS X only) and merge.update wrappers."""
    wrapfilecache(localrepo.localrepository, 'dirstate', wrapdirstate)
    if sys.platform == 'darwin':
        # An assist for avoiding the dangling-symlink fsevents bug
        extensions.wrapfunction(os, 'symlink', wrapsymlink)

    extensions.wrapfunction(merge, 'update', wrapupdate)

def wrapsymlink(orig, source, link_name):
    ''' if we create a dangling symlink, also touch the parent dir
    to encourage fsevents notifications to work more correctly '''
    try:
        return orig(source, link_name)
    finally:
        try:
            os.utime(os.path.dirname(link_name), None)
        except OSError:
            pass

class state_update(object):
    ''' This context manager is responsible for dispatching the state-enter
        and state-leave signals to the watchman service '''

    def __init__(self, repo, node, distance, partial):
        self.repo = repo
        self.node = node
        self.distance = distance
        self.partial = partial

    def __enter__(self):
        self._state('state-enter')
        return self

    def __exit__(self, type_, value, tb):
        status = 'ok' if type_ is None else 'failed'
        self._state('state-leave', status=status)

    def _state(self, cmd, status='ok'):
        if not util.safehasattr(self.repo, '_watchmanclient'):
            return
        try:
            commithash = self.repo[self.node].hex()
            self.repo._watchmanclient.command(cmd, {
                'name': 'hg.update',
                'metadata': {
                    # the target revision
                    'rev': commithash,
                    # approximate number of commits between current and target
                    'distance': self.distance,
                    # success/failure (only really meaningful for state-leave)
                    'status': status,
                    # whether the working copy parent is changing
                    'partial': self.partial,
                }})
        except Exception as e:
            # Swallow any errors; fire and forget
            self.repo.ui.log(
                'watchman', 'Exception %s while running %s\n', e, cmd)

# Bracket working copy updates with calls to the watchman state-enter
# and state-leave commands.  This allows clients to perform more intelligent
# settling during bulk file change scenarios
# https://facebook.github.io/watchman/docs/cmd/subscribe.html#advanced-settling
def wrapupdate(orig, repo, node, branchmerge, force, ancestor=None,
               mergeancestor=False, labels=None, matcher=None, **kwargs):
    """Wrapper for merge.update that notifies Watchman around the update."""
    distance = 0
    partial = True
    if matcher is None or matcher.always():
        partial = False
        wc = repo[None]
        parents = wc.parents()
        if len(parents) == 2:
            anc = repo.changelog.ancestor(parents[0].node(), parents[1].node())
            ancrev = repo[anc].rev()
            distance = abs(repo[node].rev() - ancrev)
        elif len(parents) == 1:
            distance = abs(repo[node].rev() - parents[0].rev())

    with state_update(repo, node, distance, partial):
        # BUG FIX: the original forwarded '*kwargs', which unpacks the dict's
        # keys as positional arguments; '**kwargs' forwards them as keywords.
        return orig(
            repo, node, branchmerge, force, ancestor, mergeancestor,
            labels, matcher, **kwargs)

def reposetup(ui, repo):
    """Attach fsmonitor state and a status-overriding class to repo."""
    # We don't work with largefiles or inotify
    exts = extensions.enabled()
    for ext in _blacklist:
        if ext in exts:
            ui.warn(_('The fsmonitor extension is incompatible with the %s '
                      'extension and has been disabled.\n') % ext)
            return

    if util.safehasattr(repo, 'dirstate'):
        # We don't work with subrepos either. Note that we can get passed in
        # e.g. a statichttprepo, which throws on trying to access the substate.
        # XXX This sucks.
        try:
            # if repo[None].substate can cause a dirstate parse, which is too
            # slow. Instead, look for a file called hgsubstate,
            if repo.wvfs.exists('.hgsubstate') or repo.wvfs.exists('.hgsub'):
                return
        except AttributeError:
            return

        fsmonitorstate = state.state(repo)
        if fsmonitorstate.mode == 'off':
            return

        try:
            client = watchmanclient.client(repo)
        except Exception as ex:
            _handleunavailable(ui, fsmonitorstate, ex)
            return

        repo._fsmonitorstate = fsmonitorstate
        repo._watchmanclient = client

        # at this point since fsmonitorstate wasn't present, repo.dirstate is
        # not a fsmonitordirstate
        repo.dirstate.__class__ = makedirstate(repo.dirstate.__class__)
        # nuke the dirstate so that _fsmonitorinit and subsequent configuration
        # changes take effect on it
        del repo._filecache['dirstate']
        delattr(repo.unfiltered(), 'dirstate')

        class fsmonitorrepo(repo.__class__):
            def status(self, *args, **kwargs):
                orig = super(fsmonitorrepo, self).status
                return overridestatus(orig, self, *args, **kwargs)

        repo.__class__ = fsmonitorrepo

def wrapfilecache(cls, propname, wrapper):
    """Wraps a filecache property. These can't be wrapped using the normal
    wrapfunction. This should eventually go into upstream Mercurial.
    """
    assert callable(wrapper)
    for currcls in cls.__mro__:
        if propname in currcls.__dict__:
            origfn = currcls.__dict__[propname].func
            assert callable(origfn)
            def wrap(*args, **kwargs):
                return wrapper(origfn, *args, **kwargs)
            currcls.__dict__[propname].func = wrap
            break

    if currcls is object:
        raise AttributeError(
            _("type '%s' has no property '%s'") % (cls, propname))
--- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/hgext/fsmonitor/pywatchman/__init__.py Tue Mar 15 14:10:46 2016 -0700 @@ -0,0 +1,779 @@ +# Copyright 2014-present Facebook, Inc. +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# * Redistributions of source code must retain the above copyright notice, +# this list of conditions and the following disclaimer. +# +# * Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# +# * Neither the name Facebook nor the names of its contributors may be used to +# endorse or promote products derived from this software without specific +# prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +import os +import errno +import math +import socket +import subprocess +import time + +# Sometimes it's really hard to get Python extensions to compile, +# so fall back to a pure Python implementation. 
+try: + import bser +except ImportError: + import pybser as bser + +import capabilities + +if os.name == 'nt': + import ctypes + import ctypes.wintypes + + wintypes = ctypes.wintypes + GENERIC_READ = 0x80000000 + GENERIC_WRITE = 0x40000000 + FILE_FLAG_OVERLAPPED = 0x40000000 + OPEN_EXISTING = 3 + INVALID_HANDLE_VALUE = -1 + FORMAT_MESSAGE_FROM_SYSTEM = 0x00001000 + FORMAT_MESSAGE_ALLOCATE_BUFFER = 0x00000100 + FORMAT_MESSAGE_IGNORE_INSERTS = 0x00000200 + WAIT_TIMEOUT = 0x00000102 + WAIT_OBJECT_0 = 0x00000000 + ERROR_IO_PENDING = 997 + + class OVERLAPPED(ctypes.Structure): + _fields_ = [ + ("Internal", wintypes.ULONG), ("InternalHigh", wintypes.ULONG), + ("Offset", wintypes.DWORD), ("OffsetHigh", wintypes.DWORD), + ("hEvent", wintypes.HANDLE) + ] + + def __init__(self): + self.Offset = 0 + self.OffsetHigh = 0 + self.hEvent = 0 + + LPDWORD = ctypes.POINTER(wintypes.DWORD) + + CreateFile = ctypes.windll.kernel32.CreateFileA + CreateFile.argtypes = [wintypes.LPSTR, wintypes.DWORD, wintypes.DWORD, + wintypes.LPVOID, wintypes.DWORD, wintypes.DWORD, + wintypes.HANDLE] + CreateFile.restype = wintypes.HANDLE + + CloseHandle = ctypes.windll.kernel32.CloseHandle + CloseHandle.argtypes = [wintypes.HANDLE] + CloseHandle.restype = wintypes.BOOL + + ReadFile = ctypes.windll.kernel32.ReadFile + ReadFile.argtypes = [wintypes.HANDLE, wintypes.LPVOID, wintypes.DWORD, + LPDWORD, ctypes.POINTER(OVERLAPPED)] + ReadFile.restype = wintypes.BOOL + + WriteFile = ctypes.windll.kernel32.WriteFile + WriteFile.argtypes = [wintypes.HANDLE, wintypes.LPVOID, wintypes.DWORD, + LPDWORD, ctypes.POINTER(OVERLAPPED)] + WriteFile.restype = wintypes.BOOL + + GetLastError = ctypes.windll.kernel32.GetLastError + GetLastError.argtypes = [] + GetLastError.restype = wintypes.DWORD + + FormatMessage = ctypes.windll.kernel32.FormatMessageA + FormatMessage.argtypes = [wintypes.DWORD, wintypes.LPVOID, wintypes.DWORD, + wintypes.DWORD, ctypes.POINTER(wintypes.LPSTR), + wintypes.DWORD, wintypes.LPVOID] + 
FormatMessage.restype = wintypes.DWORD + + LocalFree = ctypes.windll.kernel32.LocalFree + + GetOverlappedResultEx = ctypes.windll.kernel32.GetOverlappedResultEx + GetOverlappedResultEx.argtypes = [wintypes.HANDLE, + ctypes.POINTER(OVERLAPPED), LPDWORD, + wintypes.DWORD, wintypes.BOOL] + GetOverlappedResultEx.restype = wintypes.BOOL + + CancelIoEx = ctypes.windll.kernel32.CancelIoEx + CancelIoEx.argtypes = [wintypes.HANDLE, ctypes.POINTER(OVERLAPPED)] + CancelIoEx.restype = wintypes.BOOL + +# 2 bytes marker, 1 byte int size, 8 bytes int64 value +sniff_len = 13 + +# This is a helper for debugging the client. +_debugging = False +if _debugging: + + def log(fmt, *args): + print('[%s] %s' % + (time.strftime("%a, %d %b %Y %H:%M:%S", time.gmtime()), + fmt % args[:])) +else: + + def log(fmt, *args): + pass + + +class WatchmanError(Exception): + pass + + +class SocketTimeout(WatchmanError): + """A specialized exception raised for socket timeouts during communication to/from watchman. + This makes it easier to implement non-blocking loops as callers can easily distinguish + between a routine timeout and an actual error condition. + + Note that catching WatchmanError will also catch this as it is a super-class, so backwards + compatibility in exception handling is preserved. + """ + + +class CommandError(WatchmanError): + """error returned by watchman + + self.msg is the message returned by watchman. 
+ """ + + def __init__(self, msg, cmd=None): + self.msg = msg + self.cmd = cmd + super(CommandError, self).__init__('watchman command error: %s' % msg) + + def setCommand(self, cmd): + self.cmd = cmd + + def __str__(self): + if self.cmd: + return '%s, while executing %s' % (self.msg, self.cmd) + return self.msg + + +class Transport(object): + """ communication transport to the watchman server """ + buf = None + + def close(self): + """ tear it down """ + raise NotImplementedError() + + def readBytes(self, size): + """ read size bytes """ + raise NotImplementedError() + + def write(self, buf): + """ write some data """ + raise NotImplementedError() + + def setTimeout(self, value): + pass + + def readLine(self): + """ read a line + Maintains its own buffer, callers of the transport should not mix + calls to readBytes and readLine. + """ + if self.buf is None: + self.buf = [] + + # Buffer may already have a line if we've received unilateral + # response(s) from the server + if len(self.buf) == 1 and "\n" in self.buf[0]: + (line, b) = self.buf[0].split("\n", 1) + self.buf = [b] + return line + + while True: + b = self.readBytes(4096) + if "\n" in b: + result = ''.join(self.buf) + (line, b) = b.split("\n", 1) + self.buf = [b] + return result + line + self.buf.append(b) + + +class Codec(object): + """ communication encoding for the watchman server """ + transport = None + + def __init__(self, transport): + self.transport = transport + + def receive(self): + raise NotImplementedError() + + def send(self, *args): + raise NotImplementedError() + + def setTimeout(self, value): + self.transport.setTimeout(value) + + +class UnixSocketTransport(Transport): + """ local unix domain socket transport """ + sock = None + + def __init__(self, sockpath, timeout): + self.sockpath = sockpath + self.timeout = timeout + + sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) + try: + sock.settimeout(self.timeout) + sock.connect(self.sockpath) + self.sock = sock + except socket.error as 
e: + raise WatchmanError('unable to connect to %s: %s' % + (self.sockpath, e)) + + def close(self): + self.sock.close() + self.sock = None + + def setTimeout(self, value): + self.timeout = value + self.sock.settimeout(self.timeout) + + def readBytes(self, size): + try: + buf = [self.sock.recv(size)] + if not buf[0]: + raise WatchmanError('empty watchman response') + return buf[0] + except socket.timeout: + raise SocketTimeout('timed out waiting for response') + + def write(self, data): + try: + self.sock.sendall(data) + except socket.timeout: + raise SocketTimeout('timed out sending query command') + + +class WindowsNamedPipeTransport(Transport): + """ connect to a named pipe """ + + def __init__(self, sockpath, timeout): + self.sockpath = sockpath + self.timeout = int(math.ceil(timeout * 1000)) + self._iobuf = None + + self.pipe = CreateFile(sockpath, GENERIC_READ | GENERIC_WRITE, 0, None, + OPEN_EXISTING, FILE_FLAG_OVERLAPPED, None) + + if self.pipe == INVALID_HANDLE_VALUE: + self.pipe = None + self._raise_win_err('failed to open pipe %s' % sockpath, + GetLastError()) + + def _win32_strerror(self, err): + """ expand a win32 error code into a human readable message """ + + # FormatMessage will allocate memory and assign it here + buf = ctypes.c_char_p() + FormatMessage( + FORMAT_MESSAGE_FROM_SYSTEM | FORMAT_MESSAGE_ALLOCATE_BUFFER + | FORMAT_MESSAGE_IGNORE_INSERTS, None, err, 0, buf, 0, None) + try: + return buf.value + finally: + LocalFree(buf) + + def _raise_win_err(self, msg, err): + raise IOError('%s win32 error code: %d %s' % + (msg, err, self._win32_strerror(err))) + + def close(self): + if self.pipe: + CloseHandle(self.pipe) + self.pipe = None + + def readBytes(self, size): + """ A read can block for an unbounded amount of time, even if the + kernel reports that the pipe handle is signalled, so we need to + always perform our reads asynchronously + """ + + # try to satisfy the read from any buffered data + if self._iobuf: + if size >= len(self._iobuf): + 
res = self._iobuf + self.buf = None + return res + res = self._iobuf[:size] + self._iobuf = self._iobuf[size:] + return res + + # We need to initiate a read + buf = ctypes.create_string_buffer(size) + olap = OVERLAPPED() + + log('made read buff of size %d', size) + + # ReadFile docs warn against sending in the nread parameter for async + # operations, so we always collect it via GetOverlappedResultEx + immediate = ReadFile(self.pipe, buf, size, None, olap) + + if not immediate: + err = GetLastError() + if err != ERROR_IO_PENDING: + self._raise_win_err('failed to read %d bytes' % size, + GetLastError()) + + nread = wintypes.DWORD() + if not GetOverlappedResultEx(self.pipe, olap, nread, + 0 if immediate else self.timeout, True): + err = GetLastError() + CancelIoEx(self.pipe, olap) + + if err == WAIT_TIMEOUT: + log('GetOverlappedResultEx timedout') + raise SocketTimeout('timed out after waiting %dms for read' % + self.timeout) + + log('GetOverlappedResultEx reports error %d', err) + self._raise_win_err('error while waiting for read', err) + + nread = nread.value + if nread == 0: + # Docs say that named pipes return 0 byte when the other end did + # a zero byte write. 
Since we don't ever do that, the only + # other way this shows up is if the client has gotten in a weird + # state, so let's bail out + CancelIoEx(self.pipe, olap) + raise IOError('Async read yielded 0 bytes; unpossible!') + + # Holds precisely the bytes that we read from the prior request + buf = buf[:nread] + + returned_size = min(nread, size) + if returned_size == nread: + return buf + + # keep any left-overs around for a later read to consume + self._iobuf = buf[returned_size:] + return buf[:returned_size] + + def write(self, data): + olap = OVERLAPPED() + immediate = WriteFile(self.pipe, ctypes.c_char_p(data), len(data), + None, olap) + + if not immediate: + err = GetLastError() + if err != ERROR_IO_PENDING: + self._raise_win_err('failed to write %d bytes' % len(data), + GetLastError()) + + # Obtain results, waiting if needed + nwrote = wintypes.DWORD() + if GetOverlappedResultEx(self.pipe, olap, nwrote, 0 if immediate else + self.timeout, True): + return nwrote.value + + err = GetLastError() + + # It's potentially unsafe to allow the write to continue after + # we unwind, so let's make a best effort to avoid that happening + CancelIoEx(self.pipe, olap) + + if err == WAIT_TIMEOUT: + raise SocketTimeout('timed out after waiting %dms for write' % + self.timeout) + self._raise_win_err('error while waiting for write of %d bytes' % + len(data), err) + + +class CLIProcessTransport(Transport): + """ open a pipe to the cli to talk to the service + This intended to be used only in the test harness! + + The CLI is an oddball because we only support JSON input + and cannot send multiple commands through the same instance, + so we spawn a new process for each command. + + We disable server spawning for this implementation, again, because + it is intended to be used only in our test harness. You really + should not need to use the CLI transport for anything real. 
+ + While the CLI can output in BSER, our Transport interface doesn't + support telling this instance that it should do so. That effectively + limits this implementation to JSON input and output only at this time. + + It is the responsibility of the caller to set the send and + receive codecs appropriately. + """ + proc = None + closed = True + + def __init__(self, sockpath, timeout): + self.sockpath = sockpath + self.timeout = timeout + + def close(self): + if self.proc: + self.proc.kill() + self.proc = None + + def _connect(self): + if self.proc: + return self.proc + args = [ + 'watchman', + '--sockname={}'.format(self.sockpath), + '--logfile=/BOGUS', + '--statefile=/BOGUS', + '--no-spawn', + '--no-local', + '--no-pretty', + '-j', + ] + self.proc = subprocess.Popen(args, + stdin=subprocess.PIPE, + stdout=subprocess.PIPE) + return self.proc + + def readBytes(self, size): + self._connect() + res = self.proc.stdout.read(size) + if res == '': + raise WatchmanError('EOF on CLI process transport') + return res + + def write(self, data): + if self.closed: + self.closed = False + self.proc = None + self._connect() + res = self.proc.stdin.write(data) + self.proc.stdin.close() + self.closed = True + return res + + +class BserCodec(Codec): + """ use the BSER encoding. 
This is the default, preferred codec """ + + def _loads(self, response): + return bser.loads(response) + + def receive(self): + buf = [self.transport.readBytes(sniff_len)] + if not buf[0]: + raise WatchmanError('empty watchman response') + + elen = bser.pdu_len(buf[0]) + + rlen = len(buf[0]) + while elen > rlen: + buf.append(self.transport.readBytes(elen - rlen)) + rlen += len(buf[-1]) + + response = ''.join(buf) + try: + res = self._loads(response) + return res + except ValueError as e: + raise WatchmanError('watchman response decode error: %s' % e) + + def send(self, *args): + cmd = bser.dumps(*args) + self.transport.write(cmd) + + +class ImmutableBserCodec(BserCodec): + """ use the BSER encoding, decoding values using the newer + immutable object support """ + + def _loads(self, response): + return bser.loads(response, False) + + +class JsonCodec(Codec): + """ Use json codec. This is here primarily for testing purposes """ + json = None + + def __init__(self, transport): + super(JsonCodec, self).__init__(transport) + # optional dep on json, only if JsonCodec is used + import json + self.json = json + + def receive(self): + line = self.transport.readLine() + try: + return self.json.loads(line) + except Exception as e: + print(e, line) + raise + + def send(self, *args): + cmd = self.json.dumps(*args) + self.transport.write(cmd + "\n") + + +class client(object): + """ Handles the communication with the watchman service """ + sockpath = None + transport = None + sendCodec = None + recvCodec = None + sendConn = None + recvConn = None + subs = {} # Keyed by subscription name + sub_by_root = {} # Keyed by root, then by subscription name + logs = [] # When log level is raised + unilateral = ['log', 'subscription'] + tport = None + useImmutableBser = None + + def __init__(self, + sockpath=None, + timeout=1.0, + transport=None, + sendEncoding=None, + recvEncoding=None, + useImmutableBser=False): + self.sockpath = sockpath + self.timeout = timeout + self.useImmutableBser = 
useImmutableBser + + transport = transport or os.getenv('WATCHMAN_TRANSPORT') or 'local' + if transport == 'local' and os.name == 'nt': + self.transport = WindowsNamedPipeTransport + elif transport == 'local': + self.transport = UnixSocketTransport + elif transport == 'cli': + self.transport = CLIProcessTransport + if sendEncoding is None: + sendEncoding = 'json' + if recvEncoding is None: + recvEncoding = sendEncoding + else: + raise WatchmanError('invalid transport %s' % transport) + + sendEncoding = sendEncoding or os.getenv('WATCHMAN_ENCODING') or 'bser' + recvEncoding = recvEncoding or os.getenv('WATCHMAN_ENCODING') or 'bser' + + self.recvCodec = self._parseEncoding(recvEncoding) + self.sendCodec = self._parseEncoding(sendEncoding) + + def _parseEncoding(self, enc): + if enc == 'bser': + if self.useImmutableBser: + return ImmutableBserCodec + return BserCodec + elif enc == 'json': + return JsonCodec + else: + raise WatchmanError('invalid encoding %s' % enc) + + def _hasprop(self, result, name): + if self.useImmutableBser: + return hasattr(result, name) + return name in result + + def _resolvesockname(self): + # if invoked via a trigger, watchman will set this env var; we + # should use it unless explicitly set otherwise + path = os.getenv('WATCHMAN_SOCK') + if path: + return path + + cmd = ['watchman', '--output-encoding=bser', 'get-sockname'] + try: + p = subprocess.Popen(cmd, + stdout=subprocess.PIPE, + stderr=subprocess.PIPE, + close_fds=os.name != 'nt') + except OSError as e: + raise WatchmanError('"watchman" executable not in PATH (%s)', e) + + stdout, stderr = p.communicate() + exitcode = p.poll() + + if exitcode: + raise WatchmanError("watchman exited with code %d" % exitcode) + + result = bser.loads(stdout) + if 'error' in result: + raise WatchmanError('get-sockname error: %s' % result['error']) + + return result['sockname'] + + def _connect(self): + """ establish transport connection """ + + if self.recvConn: + return + + if self.sockpath is None: + 
self.sockpath = self._resolvesockname() + + self.tport = self.transport(self.sockpath, self.timeout) + self.sendConn = self.sendCodec(self.tport) + self.recvConn = self.recvCodec(self.tport) + + def __del__(self): + self.close() + + def close(self): + if self.tport: + self.tport.close() + self.tport = None + self.recvConn = None + self.sendConn = None + + def receive(self): + """ receive the next PDU from the watchman service + + If the client has activated subscriptions or logs then + this PDU may be a unilateral PDU sent by the service to + inform the client of a log event or subscription change. + + It may also simply be the response portion of a request + initiated by query. + + There are clients in production that subscribe and call + this in a loop to retrieve all subscription responses, + so care should be taken when making changes here. + """ + + self._connect() + result = self.recvConn.receive() + if self._hasprop(result, 'error'): + raise CommandError(result['error']) + + if self._hasprop(result, 'log'): + self.logs.append(result['log']) + + if self._hasprop(result, 'subscription'): + sub = result['subscription'] + if not (sub in self.subs): + self.subs[sub] = [] + self.subs[sub].append(result) + + # also accumulate in {root,sub} keyed store + root = os.path.normcase(result['root']) + if not root in self.sub_by_root: + self.sub_by_root[root] = {} + if not sub in self.sub_by_root[root]: + self.sub_by_root[root][sub] = [] + self.sub_by_root[root][sub].append(result) + + return result + + def isUnilateralResponse(self, res): + for k in self.unilateral: + if k in res: + return True + return False + + def getLog(self, remove=True): + """ Retrieve buffered log data + + If remove is true the data will be removed from the buffer. 
+ Otherwise it will be left in the buffer + """ + res = self.logs + if remove: + self.logs = [] + return res + + def getSubscription(self, name, remove=True, root=None): + """ Retrieve the data associated with a named subscription + + If remove is True (the default), the subscription data is removed + from the buffer. Otherwise the data is returned but left in + the buffer. + + Returns None if there is no data associated with `name` + + If root is not None, then only return the subscription + data that matches both root and name. When used in this way, + remove processing impacts both the unscoped and scoped stores + for the subscription data. + """ + + if root is not None: + if not root in self.sub_by_root: + return None + if not name in self.sub_by_root[root]: + return None + sub = self.sub_by_root[root][name] + if remove: + del self.sub_by_root[root][name] + # don't let this grow unbounded + if name in self.subs: + del self.subs[name] + return sub + + if not (name in self.subs): + return None + sub = self.subs[name] + if remove: + del self.subs[name] + return sub + + def query(self, *args): + """ Send a query to the watchman service and return the response + + This call will block until the response is returned. + If any unilateral responses are sent by the service in between + the request-response they will be buffered up in the client object + and NOT returned via this method. 
+ """ + + log('calling client.query') + self._connect() + try: + self.sendConn.send(args) + + res = self.receive() + while self.isUnilateralResponse(res): + res = self.receive() + + return res + except CommandError as ex: + ex.setCommand(args) + raise ex + + def capabilityCheck(self, optional=None, required=None): + """ Perform a server capability check """ + res = self.query('version', { + 'optional': optional or [], + 'required': required or [] + }) + + if not self._hasprop(res, 'capabilities'): + # Server doesn't support capabilities, so we need to + # synthesize the results based on the version + capabilities.synthesize(res, optional) + if 'error' in res: + raise CommandError(res['error']) + + return res + + def setTimeout(self, value): + self.recvConn.setTimeout(value) + self.sendConn.setTimeout(value) + +# no-check-code -- this is a 3rd party library
--- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/hgext/fsmonitor/pywatchman/bser.c Tue Mar 15 14:10:46 2016 -0700 @@ -0,0 +1,950 @@ +/* +Copyright (c) 2013-2015, Facebook, Inc. +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + + * Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + * Neither the name Facebook nor the names of its contributors may be used to + endorse or promote products derived from this software without specific + prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + +#include <Python.h> +#ifdef _MSC_VER +#define inline __inline +#include "msc_stdint.h" +#endif + +/* Return the smallest size int that can store the value */ +#define INT_SIZE(x) (((x) == ((int8_t)x)) ? 1 : \ + ((x) == ((int16_t)x)) ? 2 : \ + ((x) == ((int32_t)x)) ? 
4 : 8) + +#define BSER_ARRAY 0x00 +#define BSER_OBJECT 0x01 +#define BSER_STRING 0x02 +#define BSER_INT8 0x03 +#define BSER_INT16 0x04 +#define BSER_INT32 0x05 +#define BSER_INT64 0x06 +#define BSER_REAL 0x07 +#define BSER_TRUE 0x08 +#define BSER_FALSE 0x09 +#define BSER_NULL 0x0a +#define BSER_TEMPLATE 0x0b +#define BSER_SKIP 0x0c + +// An immutable object representation of BSER_OBJECT. +// Rather than build a hash table, key -> value are obtained +// by walking the list of keys to determine the offset into +// the values array. The assumption is that the number of +// array elements will be typically small (~6 for the top +// level query result and typically 3-5 for the file entries) +// so that the time overhead for this is small compared to +// using a proper hash table. Even with this simplistic +// approach, this is still faster for the mercurial use case +// as it helps to eliminate creating N other objects to +// represent the stat information in the hgwatchman extension +typedef struct { + PyObject_HEAD + PyObject *keys; // tuple of field names + PyObject *values; // tuple of values +} bserObject; + +static Py_ssize_t bserobj_tuple_length(PyObject *o) { + bserObject *obj = (bserObject*)o; + + return PySequence_Length(obj->keys); +} + +static PyObject *bserobj_tuple_item(PyObject *o, Py_ssize_t i) { + bserObject *obj = (bserObject*)o; + + return PySequence_GetItem(obj->values, i); +} + +static PySequenceMethods bserobj_sq = { + bserobj_tuple_length, /* sq_length */ + 0, /* sq_concat */ + 0, /* sq_repeat */ + bserobj_tuple_item, /* sq_item */ + 0, /* sq_ass_item */ + 0, /* sq_contains */ + 0, /* sq_inplace_concat */ + 0 /* sq_inplace_repeat */ +}; + +static void bserobj_dealloc(PyObject *o) { + bserObject *obj = (bserObject*)o; + + Py_CLEAR(obj->keys); + Py_CLEAR(obj->values); + PyObject_Del(o); +} + +static PyObject *bserobj_getattrro(PyObject *o, PyObject *name) { + bserObject *obj = (bserObject*)o; + Py_ssize_t i, n; + const char *namestr; + + if 
(PyIndex_Check(name)) { + i = PyNumber_AsSsize_t(name, PyExc_IndexError); + if (i == -1 && PyErr_Occurred()) { + return NULL; + } + return PySequence_GetItem(obj->values, i); + } + + // hack^Wfeature to allow mercurial to use "st_size" to reference "size" + namestr = PyString_AsString(name); + if (!strncmp(namestr, "st_", 3)) { + namestr += 3; + } + + n = PyTuple_GET_SIZE(obj->keys); + for (i = 0; i < n; i++) { + const char *item_name = NULL; + PyObject *key = PyTuple_GET_ITEM(obj->keys, i); + + item_name = PyString_AsString(key); + if (!strcmp(item_name, namestr)) { + return PySequence_GetItem(obj->values, i); + } + } + PyErr_Format(PyExc_AttributeError, + "bserobject has no attribute '%.400s'", namestr); + return NULL; +} + +static PyMappingMethods bserobj_map = { + bserobj_tuple_length, /* mp_length */ + bserobj_getattrro, /* mp_subscript */ + 0 /* mp_ass_subscript */ +}; + +PyTypeObject bserObjectType = { + PyVarObject_HEAD_INIT(NULL, 0) + "bserobj_tuple", /* tp_name */ + sizeof(bserObject), /* tp_basicsize */ + 0, /* tp_itemsize */ + bserobj_dealloc, /* tp_dealloc */ + 0, /* tp_print */ + 0, /* tp_getattr */ + 0, /* tp_setattr */ + 0, /* tp_compare */ + 0, /* tp_repr */ + 0, /* tp_as_number */ + &bserobj_sq, /* tp_as_sequence */ + &bserobj_map, /* tp_as_mapping */ + 0, /* tp_hash */ + 0, /* tp_call */ + 0, /* tp_str */ + bserobj_getattrro, /* tp_getattro */ + 0, /* tp_setattro */ + 0, /* tp_as_buffer */ + Py_TPFLAGS_DEFAULT, /* tp_flags */ + "bserobj tuple", /* tp_doc */ + 0, /* tp_traverse */ + 0, /* tp_clear */ + 0, /* tp_richcompare */ + 0, /* tp_weaklistoffset */ + 0, /* tp_iter */ + 0, /* tp_iternext */ + 0, /* tp_methods */ + 0, /* tp_members */ + 0, /* tp_getset */ + 0, /* tp_base */ + 0, /* tp_dict */ + 0, /* tp_descr_get */ + 0, /* tp_descr_set */ + 0, /* tp_dictoffset */ + 0, /* tp_init */ + 0, /* tp_alloc */ + 0, /* tp_new */ +}; + + +static PyObject *bser_loads_recursive(const char **ptr, const char *end, + int mutable); + +static const char 
bser_true = BSER_TRUE; +static const char bser_false = BSER_FALSE; +static const char bser_null = BSER_NULL; +static const char bser_string_hdr = BSER_STRING; +static const char bser_array_hdr = BSER_ARRAY; +static const char bser_object_hdr = BSER_OBJECT; + +static inline uint32_t next_power_2(uint32_t n) +{ + n |= (n >> 16); + n |= (n >> 8); + n |= (n >> 4); + n |= (n >> 2); + n |= (n >> 1); + return n + 1; +} + +// A buffer we use for building up the serialized result +struct bser_buffer { + char *buf; + int wpos, allocd; +}; +typedef struct bser_buffer bser_t; + +static int bser_append(bser_t *bser, const char *data, uint32_t len) +{ + int newlen = next_power_2(bser->wpos + len); + if (newlen > bser->allocd) { + char *nbuf = realloc(bser->buf, newlen); + if (!nbuf) { + return 0; + } + + bser->buf = nbuf; + bser->allocd = newlen; + } + + memcpy(bser->buf + bser->wpos, data, len); + bser->wpos += len; + return 1; +} + +static int bser_init(bser_t *bser) +{ + bser->allocd = 8192; + bser->wpos = 0; + bser->buf = malloc(bser->allocd); + + if (!bser->buf) { + return 0; + } + + // Leave room for the serialization header, which includes + // our overall length. 
To make things simpler, we'll use an + // int32 for the header +#define EMPTY_HEADER "\x00\x01\x05\x00\x00\x00\x00" + bser_append(bser, EMPTY_HEADER, sizeof(EMPTY_HEADER)-1); + + return 1; +} + +static void bser_dtor(bser_t *bser) +{ + free(bser->buf); + bser->buf = NULL; +} + +static int bser_long(bser_t *bser, int64_t val) +{ + int8_t i8; + int16_t i16; + int32_t i32; + int64_t i64; + char sz; + int size = INT_SIZE(val); + char *iptr; + + switch (size) { + case 1: + sz = BSER_INT8; + i8 = (int8_t)val; + iptr = (char*)&i8; + break; + case 2: + sz = BSER_INT16; + i16 = (int16_t)val; + iptr = (char*)&i16; + break; + case 4: + sz = BSER_INT32; + i32 = (int32_t)val; + iptr = (char*)&i32; + break; + case 8: + sz = BSER_INT64; + i64 = (int64_t)val; + iptr = (char*)&i64; + break; + default: + PyErr_SetString(PyExc_RuntimeError, + "Cannot represent this long value!?"); + return 0; + } + + if (!bser_append(bser, &sz, sizeof(sz))) { + return 0; + } + + return bser_append(bser, iptr, size); +} + +static int bser_string(bser_t *bser, PyObject *sval) +{ + char *buf = NULL; + Py_ssize_t len; + int res; + PyObject *utf = NULL; + + if (PyUnicode_Check(sval)) { + utf = PyUnicode_AsEncodedString(sval, "utf-8", "ignore"); + sval = utf; + } + + res = PyString_AsStringAndSize(sval, &buf, &len); + if (res == -1) { + res = 0; + goto out; + } + + if (!bser_append(bser, &bser_string_hdr, sizeof(bser_string_hdr))) { + res = 0; + goto out; + } + + if (!bser_long(bser, len)) { + res = 0; + goto out; + } + + if (len > UINT32_MAX) { + PyErr_Format(PyExc_ValueError, "string too big"); + res = 0; + goto out; + } + + res = bser_append(bser, buf, (uint32_t)len); + +out: + if (utf) { + Py_DECREF(utf); + } + + return res; +} + +static int bser_recursive(bser_t *bser, PyObject *val) +{ + if (PyBool_Check(val)) { + if (val == Py_True) { + return bser_append(bser, &bser_true, sizeof(bser_true)); + } + return bser_append(bser, &bser_false, sizeof(bser_false)); + } + + if (val == Py_None) { + return 
bser_append(bser, &bser_null, sizeof(bser_null)); + } + + if (PyInt_Check(val)) { + return bser_long(bser, PyInt_AS_LONG(val)); + } + + if (PyLong_Check(val)) { + return bser_long(bser, PyLong_AsLongLong(val)); + } + + if (PyString_Check(val) || PyUnicode_Check(val)) { + return bser_string(bser, val); + } + + + if (PyFloat_Check(val)) { + double dval = PyFloat_AS_DOUBLE(val); + char sz = BSER_REAL; + + if (!bser_append(bser, &sz, sizeof(sz))) { + return 0; + } + + return bser_append(bser, (char*)&dval, sizeof(dval)); + } + + if (PyList_Check(val)) { + Py_ssize_t i, len = PyList_GET_SIZE(val); + + if (!bser_append(bser, &bser_array_hdr, sizeof(bser_array_hdr))) { + return 0; + } + + if (!bser_long(bser, len)) { + return 0; + } + + for (i = 0; i < len; i++) { + PyObject *ele = PyList_GET_ITEM(val, i); + + if (!bser_recursive(bser, ele)) { + return 0; + } + } + + return 1; + } + + if (PyTuple_Check(val)) { + Py_ssize_t i, len = PyTuple_GET_SIZE(val); + + if (!bser_append(bser, &bser_array_hdr, sizeof(bser_array_hdr))) { + return 0; + } + + if (!bser_long(bser, len)) { + return 0; + } + + for (i = 0; i < len; i++) { + PyObject *ele = PyTuple_GET_ITEM(val, i); + + if (!bser_recursive(bser, ele)) { + return 0; + } + } + + return 1; + } + + if (PyMapping_Check(val)) { + Py_ssize_t len = PyMapping_Length(val); + Py_ssize_t pos = 0; + PyObject *key, *ele; + + if (!bser_append(bser, &bser_object_hdr, sizeof(bser_object_hdr))) { + return 0; + } + + if (!bser_long(bser, len)) { + return 0; + } + + while (PyDict_Next(val, &pos, &key, &ele)) { + if (!bser_string(bser, key)) { + return 0; + } + if (!bser_recursive(bser, ele)) { + return 0; + } + } + + return 1; + } + + PyErr_SetString(PyExc_ValueError, "Unsupported value type"); + return 0; +} + +static PyObject *bser_dumps(PyObject *self, PyObject *args) +{ + PyObject *val = NULL, *res; + bser_t bser; + uint32_t len; + + if (!PyArg_ParseTuple(args, "O", &val)) { + return NULL; + } + + if (!bser_init(&bser)) { + return 
PyErr_NoMemory(); + } + + if (!bser_recursive(&bser, val)) { + bser_dtor(&bser); + if (errno == ENOMEM) { + return PyErr_NoMemory(); + } + // otherwise, we've already set the error to something reasonable + return NULL; + } + + // Now fill in the overall length + len = bser.wpos - (sizeof(EMPTY_HEADER) - 1); + memcpy(bser.buf + 3, &len, sizeof(len)); + + res = PyString_FromStringAndSize(bser.buf, bser.wpos); + bser_dtor(&bser); + + return res; +} + +int bunser_int(const char **ptr, const char *end, int64_t *val) +{ + int needed; + const char *buf = *ptr; + int8_t i8; + int16_t i16; + int32_t i32; + int64_t i64; + + switch (buf[0]) { + case BSER_INT8: + needed = 2; + break; + case BSER_INT16: + needed = 3; + break; + case BSER_INT32: + needed = 5; + break; + case BSER_INT64: + needed = 9; + break; + default: + PyErr_Format(PyExc_ValueError, + "invalid bser int encoding 0x%02x", buf[0]); + return 0; + } + if (end - buf < needed) { + PyErr_SetString(PyExc_ValueError, "input buffer to small for int encoding"); + return 0; + } + *ptr = buf + needed; + switch (buf[0]) { + case BSER_INT8: + memcpy(&i8, buf + 1, sizeof(i8)); + *val = i8; + return 1; + case BSER_INT16: + memcpy(&i16, buf + 1, sizeof(i16)); + *val = i16; + return 1; + case BSER_INT32: + memcpy(&i32, buf + 1, sizeof(i32)); + *val = i32; + return 1; + case BSER_INT64: + memcpy(&i64, buf + 1, sizeof(i64)); + *val = i64; + return 1; + default: + return 0; + } +} + +static int bunser_string(const char **ptr, const char *end, + const char **start, int64_t *len) +{ + const char *buf = *ptr; + + // skip string marker + buf++; + if (!bunser_int(&buf, end, len)) { + return 0; + } + + if (buf + *len > end) { + PyErr_Format(PyExc_ValueError, "invalid string length in bser data"); + return 0; + } + + *ptr = buf + *len; + *start = buf; + return 1; +} + +static PyObject *bunser_array(const char **ptr, const char *end, int mutable) +{ + const char *buf = *ptr; + int64_t nitems, i; + PyObject *res; + + // skip array header + 
buf++; + if (!bunser_int(&buf, end, &nitems)) { + return 0; + } + *ptr = buf; + + if (nitems > LONG_MAX) { + PyErr_Format(PyExc_ValueError, "too many items for python array"); + return NULL; + } + + if (mutable) { + res = PyList_New((Py_ssize_t)nitems); + } else { + res = PyTuple_New((Py_ssize_t)nitems); + } + + for (i = 0; i < nitems; i++) { + PyObject *ele = bser_loads_recursive(ptr, end, mutable); + + if (!ele) { + Py_DECREF(res); + return NULL; + } + + if (mutable) { + PyList_SET_ITEM(res, i, ele); + } else { + PyTuple_SET_ITEM(res, i, ele); + } + // DECREF(ele) not required as SET_ITEM steals the ref + } + + return res; +} + +static PyObject *bunser_object(const char **ptr, const char *end, + int mutable) +{ + const char *buf = *ptr; + int64_t nitems, i; + PyObject *res; + bserObject *obj; + + // skip array header + buf++; + if (!bunser_int(&buf, end, &nitems)) { + return 0; + } + *ptr = buf; + + if (mutable) { + res = PyDict_New(); + } else { + obj = PyObject_New(bserObject, &bserObjectType); + obj->keys = PyTuple_New((Py_ssize_t)nitems); + obj->values = PyTuple_New((Py_ssize_t)nitems); + res = (PyObject*)obj; + } + + for (i = 0; i < nitems; i++) { + const char *keystr; + int64_t keylen; + PyObject *key; + PyObject *ele; + + if (!bunser_string(ptr, end, &keystr, &keylen)) { + Py_DECREF(res); + return NULL; + } + + if (keylen > LONG_MAX) { + PyErr_Format(PyExc_ValueError, "string too big for python"); + Py_DECREF(res); + return NULL; + } + + key = PyString_FromStringAndSize(keystr, (Py_ssize_t)keylen); + if (!key) { + Py_DECREF(res); + return NULL; + } + + ele = bser_loads_recursive(ptr, end, mutable); + + if (!ele) { + Py_DECREF(key); + Py_DECREF(res); + return NULL; + } + + if (mutable) { + PyDict_SetItem(res, key, ele); + Py_DECREF(key); + Py_DECREF(ele); + } else { + /* PyTuple_SET_ITEM steals ele, key */ + PyTuple_SET_ITEM(obj->values, i, ele); + PyTuple_SET_ITEM(obj->keys, i, key); + } + } + + return res; +} + +static PyObject *bunser_template(const char 
**ptr, const char *end, + int mutable) +{ + const char *buf = *ptr; + int64_t nitems, i; + PyObject *arrval; + PyObject *keys; + Py_ssize_t numkeys, keyidx; + + if (buf[1] != BSER_ARRAY) { + PyErr_Format(PyExc_ValueError, "Expect ARRAY to follow TEMPLATE"); + return NULL; + } + + // skip header + buf++; + *ptr = buf; + + // Load template keys + keys = bunser_array(ptr, end, mutable); + if (!keys) { + return NULL; + } + + numkeys = PySequence_Length(keys); + + // Load number of array elements + if (!bunser_int(ptr, end, &nitems)) { + Py_DECREF(keys); + return 0; + } + + if (nitems > LONG_MAX) { + PyErr_Format(PyExc_ValueError, "Too many items for python"); + Py_DECREF(keys); + return NULL; + } + + arrval = PyList_New((Py_ssize_t)nitems); + if (!arrval) { + Py_DECREF(keys); + return NULL; + } + + for (i = 0; i < nitems; i++) { + PyObject *dict = NULL; + bserObject *obj = NULL; + + if (mutable) { + dict = PyDict_New(); + } else { + obj = PyObject_New(bserObject, &bserObjectType); + if (obj) { + obj->keys = keys; + Py_INCREF(obj->keys); + obj->values = PyTuple_New(numkeys); + } + dict = (PyObject*)obj; + } + if (!dict) { +fail: + Py_DECREF(keys); + Py_DECREF(arrval); + return NULL; + } + + for (keyidx = 0; keyidx < numkeys; keyidx++) { + PyObject *key; + PyObject *ele; + + if (**ptr == BSER_SKIP) { + *ptr = *ptr + 1; + ele = Py_None; + Py_INCREF(ele); + } else { + ele = bser_loads_recursive(ptr, end, mutable); + } + + if (!ele) { + goto fail; + } + + if (mutable) { + key = PyList_GET_ITEM(keys, keyidx); + PyDict_SetItem(dict, key, ele); + Py_DECREF(ele); + } else { + PyTuple_SET_ITEM(obj->values, keyidx, ele); + // DECREF(ele) not required as SET_ITEM steals the ref + } + } + + PyList_SET_ITEM(arrval, i, dict); + // DECREF(obj) not required as SET_ITEM steals the ref + } + + Py_DECREF(keys); + + return arrval; +} + +static PyObject *bser_loads_recursive(const char **ptr, const char *end, + int mutable) +{ + const char *buf = *ptr; + + switch (buf[0]) { + case 
BSER_INT8: + case BSER_INT16: + case BSER_INT32: + case BSER_INT64: + { + int64_t ival; + if (!bunser_int(ptr, end, &ival)) { + return NULL; + } + if (ival < LONG_MIN || ival > LONG_MAX) { + return PyLong_FromLongLong(ival); + } + return PyInt_FromSsize_t(Py_SAFE_DOWNCAST(ival, int64_t, Py_ssize_t)); + } + + case BSER_REAL: + { + double dval; + memcpy(&dval, buf + 1, sizeof(dval)); + *ptr = buf + 1 + sizeof(double); + return PyFloat_FromDouble(dval); + } + + case BSER_TRUE: + *ptr = buf + 1; + Py_INCREF(Py_True); + return Py_True; + + case BSER_FALSE: + *ptr = buf + 1; + Py_INCREF(Py_False); + return Py_False; + + case BSER_NULL: + *ptr = buf + 1; + Py_INCREF(Py_None); + return Py_None; + + case BSER_STRING: + { + const char *start; + int64_t len; + + if (!bunser_string(ptr, end, &start, &len)) { + return NULL; + } + + if (len > LONG_MAX) { + PyErr_Format(PyExc_ValueError, "string too long for python"); + return NULL; + } + + return PyString_FromStringAndSize(start, (long)len); + } + + case BSER_ARRAY: + return bunser_array(ptr, end, mutable); + + case BSER_OBJECT: + return bunser_object(ptr, end, mutable); + + case BSER_TEMPLATE: + return bunser_template(ptr, end, mutable); + + default: + PyErr_Format(PyExc_ValueError, "unhandled bser opcode 0x%02x", buf[0]); + } + + return NULL; +} + +// Expected use case is to read a packet from the socket and +// then call bser.pdu_len on the packet. It returns the total +// length of the entire response that the peer is sending, +// including the bytes already received. 
// This allows the client
// to compute the data size it needs to read before it can
// decode the data
//
// NOTE: this module uses the Python 2 C API (PyInt_FromLong,
// Py_InitModule); small results come back as PyInt, values wider
// than a C long are promoted to PyLong.
static PyObject *bser_pdu_len(PyObject *self, PyObject *args)
{
  const char *start = NULL;
  const char *data = NULL;
  int datalen = 0;
  const char *end;
  int64_t expected_len, total_len;

  if (!PyArg_ParseTuple(args, "s#", &start, &datalen)) {
    return NULL;
  }
  data = start;
  end = data + datalen;

  // Validate the header and length
  if (memcmp(data, EMPTY_HEADER, 2) != 0) {
    PyErr_SetString(PyExc_ValueError, "invalid bser header");
    return NULL;
  }

  data += 2;

  // Expect an integer telling us how big the rest of the data
  // should be
  if (!bunser_int(&data, end, &expected_len)) {
    return NULL;
  }

  // Total PDU size = payload length + header bytes consumed so far
  total_len = expected_len + (data - start);
  if (total_len > LONG_MAX) {
    return PyLong_FromLongLong(total_len);
  }
  return PyInt_FromLong((long)total_len);
}

// Deserialize one complete BSER PDU into Python values.
// Optional second argument selects mutable (dict/list, the default)
// vs immutable (bserObject/tuple) result containers.
static PyObject *bser_loads(PyObject *self, PyObject *args)
{
  const char *data = NULL;
  int datalen = 0;
  const char *end;
  int64_t expected_len;
  int mutable = 1;
  PyObject *mutable_obj = NULL;

  if (!PyArg_ParseTuple(args, "s#|O:loads", &data, &datalen, &mutable_obj)) {
    return NULL;
  }
  if (mutable_obj) {
    mutable = PyObject_IsTrue(mutable_obj) > 0 ? 1 : 0;
  }

  end = data + datalen;

  // Validate the header and length
  if (memcmp(data, EMPTY_HEADER, 2) != 0) {
    PyErr_SetString(PyExc_ValueError, "invalid bser header");
    return NULL;
  }

  data += 2;

  // Expect an integer telling us how big the rest of the data
  // should be
  if (!bunser_int(&data, end, &expected_len)) {
    return NULL;
  }

  // Verify the declared payload length matches the buffer we were given
  if (expected_len + data != end) {
    PyErr_SetString(PyExc_ValueError, "bser data len != header len");
    return NULL;
  }

  return bser_loads_recursive(&data, end, mutable);
}

static PyMethodDef bser_methods[] = {
  {"loads", bser_loads, METH_VARARGS, "Deserialize string."},
  {"pdu_len", bser_pdu_len, METH_VARARGS, "Extract PDU length."},
  {"dumps", bser_dumps, METH_VARARGS, "Serialize string."},
  {NULL, NULL, 0, NULL}
};

// Python 2 module init entry point.
// NOTE(review): the return value of PyType_Ready is ignored here; a
// failure would leave bserObjectType unusable — confirm upstream intent.
PyMODINIT_FUNC initbser(void)
{
  (void)Py_InitModule("bser", bser_methods);
  PyType_Ready(&bserObjectType);
}

/* vim:ts=2:sw=2:et:
 */

// no-check-code -- this is a 3rd party library
# Copyright 2015 Facebook, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
#  * Redistributions of source code must retain the above copyright notice,
#    this list of conditions and the following disclaimer.
#
#  * Redistributions in binary form must reproduce the above copyright notice,
#    this list of conditions and the following disclaimer in the documentation
#    and/or other materials provided with the distribution.
#
#  * Neither the name Facebook nor the names of its contributors may be used to
#    endorse or promote products derived from this software without specific
#    prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

import re

def parse_version(vstr):
    """Parse a watchman version string like '3.1.1' into a sortable int.

    Each dot-separated component occupies three decimal digits, so
    '3.1.1' -> 3001001.

    NOTE(review): a 2-component string ('3.1' -> 3001) parses far smaller
    than its 3-component form ('3.1.0' -> 3001000), so comparisons are
    only reliable between strings with the same number of components —
    confirm against the version strings the server actually reports.
    """
    res = 0
    for n in vstr.split('.'):
        res = res * 1000
        res = res + int(n)
    return res

# Minimum server version string that introduced each named capability.
cap_versions = {
    "cmd-watch-del-all": "3.1.1",
    "cmd-watch-project": "3.1",
    "relative_root": "3.3",
    "term-dirname": "3.1",
    "term-idirname": "3.1",
    "wildmatch": "3.7",
}

def check(version, name):
    """Return True if a server at parsed `version` supports capability
    `name`.  Unknown capability names are reported as unsupported."""
    if name in cap_versions:
        return version >= parse_version(cap_versions[name])
    return False

def synthesize(vers, opts):
    """ Synthesize a capability enabled version response
    This is a very limited emulation for relatively recent feature sets

    Mutates and returns `vers`: adds a 'capabilities' dict keyed by the
    names in opts['optional'] and opts['required'], and sets an 'error'
    message when any required capability is missing.
    """
    parsed_version = parse_version(vers['version'])
    vers['capabilities'] = {}
    for name in opts['optional']:
        vers['capabilities'][name] = check(parsed_version, name)
    # (dead `failed = False` flag from upstream removed: it was never
    # read or written again)
    for name in opts['required']:
        have = check(parsed_version, name)
        vers['capabilities'][name] = have
        if not have:
            vers['error'] = 'client required capability `' + name + \
                            '` is not supported by this server'
    return vers

# no-check-code -- this is a 3rd party library
--- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/hgext/fsmonitor/pywatchman/msc_stdint.h Tue Mar 15 14:10:46 2016 -0700 @@ -0,0 +1,260 @@ +// no-check-code +// ISO C9x compliant stdint.h for Microsoft Visual Studio +// Based on ISO/IEC 9899:TC2 Committee draft (May 6, 2005) WG14/N1124 +// +// Copyright (c) 2006-2013 Alexander Chemeris +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// +// 1. Redistributions of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// 2. Redistributions in binary form must reproduce the above copyright +// notice, this list of conditions and the following disclaimer in the +// documentation and/or other materials provided with the distribution. +// +// 3. Neither the name of the product nor the names of its contributors may +// be used to endorse or promote products derived from this software +// without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED +// WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF +// MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO +// EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, +// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; +// OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, +// WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR +// OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF +// ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// +/////////////////////////////////////////////////////////////////////////////// + +#ifndef _MSC_VER // [ +#error "Use this header only with Microsoft Visual C++ compilers!" 
+#endif // _MSC_VER ] + +#ifndef _MSC_STDINT_H_ // [ +#define _MSC_STDINT_H_ + +#if _MSC_VER > 1000 +#pragma once +#endif + +#if _MSC_VER >= 1600 // [ +#include <stdint.h> +#else // ] _MSC_VER >= 1600 [ + +#include <limits.h> + +// For Visual Studio 6 in C++ mode and for many Visual Studio versions when +// compiling for ARM we should wrap <wchar.h> include with 'extern "C++" {}' +// or compiler give many errors like this: +// error C2733: second C linkage of overloaded function 'wmemchr' not allowed +#ifdef __cplusplus +extern "C" { +#endif +# include <wchar.h> +#ifdef __cplusplus +} +#endif + +// Define _W64 macros to mark types changing their size, like intptr_t. +#ifndef _W64 +# if !defined(__midl) && (defined(_X86_) || defined(_M_IX86)) && _MSC_VER >= 1300 +# define _W64 __w64 +# else +# define _W64 +# endif +#endif + + +// 7.18.1 Integer types + +// 7.18.1.1 Exact-width integer types + +// Visual Studio 6 and Embedded Visual C++ 4 doesn't +// realize that, e.g. char has the same size as __int8 +// so we give up on __intX for them. 
+#if (_MSC_VER < 1300) + typedef signed char int8_t; + typedef signed short int16_t; + typedef signed int int32_t; + typedef unsigned char uint8_t; + typedef unsigned short uint16_t; + typedef unsigned int uint32_t; +#else + typedef signed __int8 int8_t; + typedef signed __int16 int16_t; + typedef signed __int32 int32_t; + typedef unsigned __int8 uint8_t; + typedef unsigned __int16 uint16_t; + typedef unsigned __int32 uint32_t; +#endif +typedef signed __int64 int64_t; +typedef unsigned __int64 uint64_t; + + +// 7.18.1.2 Minimum-width integer types +typedef int8_t int_least8_t; +typedef int16_t int_least16_t; +typedef int32_t int_least32_t; +typedef int64_t int_least64_t; +typedef uint8_t uint_least8_t; +typedef uint16_t uint_least16_t; +typedef uint32_t uint_least32_t; +typedef uint64_t uint_least64_t; + +// 7.18.1.3 Fastest minimum-width integer types +typedef int8_t int_fast8_t; +typedef int16_t int_fast16_t; +typedef int32_t int_fast32_t; +typedef int64_t int_fast64_t; +typedef uint8_t uint_fast8_t; +typedef uint16_t uint_fast16_t; +typedef uint32_t uint_fast32_t; +typedef uint64_t uint_fast64_t; + +// 7.18.1.4 Integer types capable of holding object pointers +#ifdef _WIN64 // [ + typedef signed __int64 intptr_t; + typedef unsigned __int64 uintptr_t; +#else // _WIN64 ][ + typedef _W64 signed int intptr_t; + typedef _W64 unsigned int uintptr_t; +#endif // _WIN64 ] + +// 7.18.1.5 Greatest-width integer types +typedef int64_t intmax_t; +typedef uint64_t uintmax_t; + + +// 7.18.2 Limits of specified-width integer types + +#if !defined(__cplusplus) || defined(__STDC_LIMIT_MACROS) // [ See footnote 220 at page 257 and footnote 221 at page 259 + +// 7.18.2.1 Limits of exact-width integer types +#define INT8_MIN ((int8_t)_I8_MIN) +#define INT8_MAX _I8_MAX +#define INT16_MIN ((int16_t)_I16_MIN) +#define INT16_MAX _I16_MAX +#define INT32_MIN ((int32_t)_I32_MIN) +#define INT32_MAX _I32_MAX +#define INT64_MIN ((int64_t)_I64_MIN) +#define INT64_MAX _I64_MAX +#define 
UINT8_MAX _UI8_MAX +#define UINT16_MAX _UI16_MAX +#define UINT32_MAX _UI32_MAX +#define UINT64_MAX _UI64_MAX + +// 7.18.2.2 Limits of minimum-width integer types +#define INT_LEAST8_MIN INT8_MIN +#define INT_LEAST8_MAX INT8_MAX +#define INT_LEAST16_MIN INT16_MIN +#define INT_LEAST16_MAX INT16_MAX +#define INT_LEAST32_MIN INT32_MIN +#define INT_LEAST32_MAX INT32_MAX +#define INT_LEAST64_MIN INT64_MIN +#define INT_LEAST64_MAX INT64_MAX +#define UINT_LEAST8_MAX UINT8_MAX +#define UINT_LEAST16_MAX UINT16_MAX +#define UINT_LEAST32_MAX UINT32_MAX +#define UINT_LEAST64_MAX UINT64_MAX + +// 7.18.2.3 Limits of fastest minimum-width integer types +#define INT_FAST8_MIN INT8_MIN +#define INT_FAST8_MAX INT8_MAX +#define INT_FAST16_MIN INT16_MIN +#define INT_FAST16_MAX INT16_MAX +#define INT_FAST32_MIN INT32_MIN +#define INT_FAST32_MAX INT32_MAX +#define INT_FAST64_MIN INT64_MIN +#define INT_FAST64_MAX INT64_MAX +#define UINT_FAST8_MAX UINT8_MAX +#define UINT_FAST16_MAX UINT16_MAX +#define UINT_FAST32_MAX UINT32_MAX +#define UINT_FAST64_MAX UINT64_MAX + +// 7.18.2.4 Limits of integer types capable of holding object pointers +#ifdef _WIN64 // [ +# define INTPTR_MIN INT64_MIN +# define INTPTR_MAX INT64_MAX +# define UINTPTR_MAX UINT64_MAX +#else // _WIN64 ][ +# define INTPTR_MIN INT32_MIN +# define INTPTR_MAX INT32_MAX +# define UINTPTR_MAX UINT32_MAX +#endif // _WIN64 ] + +// 7.18.2.5 Limits of greatest-width integer types +#define INTMAX_MIN INT64_MIN +#define INTMAX_MAX INT64_MAX +#define UINTMAX_MAX UINT64_MAX + +// 7.18.3 Limits of other integer types + +#ifdef _WIN64 // [ +# define PTRDIFF_MIN _I64_MIN +# define PTRDIFF_MAX _I64_MAX +#else // _WIN64 ][ +# define PTRDIFF_MIN _I32_MIN +# define PTRDIFF_MAX _I32_MAX +#endif // _WIN64 ] + +#define SIG_ATOMIC_MIN INT_MIN +#define SIG_ATOMIC_MAX INT_MAX + +#ifndef SIZE_MAX // [ +# ifdef _WIN64 // [ +# define SIZE_MAX _UI64_MAX +# else // _WIN64 ][ +# define SIZE_MAX _UI32_MAX +# endif // _WIN64 ] +#endif // SIZE_MAX ] + +// 
WCHAR_MIN and WCHAR_MAX are also defined in <wchar.h> +#ifndef WCHAR_MIN // [ +# define WCHAR_MIN 0 +#endif // WCHAR_MIN ] +#ifndef WCHAR_MAX // [ +# define WCHAR_MAX _UI16_MAX +#endif // WCHAR_MAX ] + +#define WINT_MIN 0 +#define WINT_MAX _UI16_MAX + +#endif // __STDC_LIMIT_MACROS ] + + +// 7.18.4 Limits of other integer types + +#if !defined(__cplusplus) || defined(__STDC_CONSTANT_MACROS) // [ See footnote 224 at page 260 + +// 7.18.4.1 Macros for minimum-width integer constants + +#define INT8_C(val) val##i8 +#define INT16_C(val) val##i16 +#define INT32_C(val) val##i32 +#define INT64_C(val) val##i64 + +#define UINT8_C(val) val##ui8 +#define UINT16_C(val) val##ui16 +#define UINT32_C(val) val##ui32 +#define UINT64_C(val) val##ui64 + +// 7.18.4.2 Macros for greatest-width integer constants +// These #ifndef's are needed to prevent collisions with <boost/cstdint.hpp>. +// Check out Issue 9 for the details. +#ifndef INTMAX_C // [ +# define INTMAX_C INT64_C +#endif // INTMAX_C ] +#ifndef UINTMAX_C // [ +# define UINTMAX_C UINT64_C +#endif // UINTMAX_C ] + +#endif // __STDC_CONSTANT_MACROS ] + +#endif // _MSC_VER >= 1600 ] + +#endif // _MSC_STDINT_H_ ]
# Copyright 2015 Facebook, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
#  * Redistributions of source code must retain the above copyright notice,
#    this list of conditions and the following disclaimer.
#
#  * Redistributions in binary form must reproduce the above copyright notice,
#    this list of conditions and the following disclaimer in the documentation
#    and/or other materials provided with the distribution.
#
#  * Neither the name Facebook nor the names of its contributors may be used to
#    endorse or promote products derived from this software without specific
#    prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

# Pure-python implementation of the BSER (binary serialization) wire
# format spoken by watchman.  This module targets Python 2: it references
# the `unicode` builtin and dict.iteritems(), and indexes buffers as
# one-character strings.

import collections
import ctypes
import struct
import sys

# One-byte BSER type tags (wire opcodes)
BSER_ARRAY = '\x00'
BSER_OBJECT = '\x01'
BSER_STRING = '\x02'
BSER_INT8 = '\x03'
BSER_INT16 = '\x04'
BSER_INT32 = '\x05'
BSER_INT64 = '\x06'
BSER_REAL = '\x07'
BSER_TRUE = '\x08'
BSER_FALSE = '\x09'
BSER_NULL = '\x0a'
BSER_TEMPLATE = '\x0b'
BSER_SKIP = '\x0c'

# Leave room for the serialization header, which includes
# our overall length. To make things simpler, we'll use an
# int32 for the header
EMPTY_HEADER = "\x00\x01\x05\x00\x00\x00\x00"

# Python 3 conditional for supporting Python 2's int/long types
if sys.version_info > (3,):
    long = int

def _int_size(x):
    """Return the smallest size int that can store the value"""
    if -0x80 <= x <= 0x7F:
        return 1
    elif -0x8000 <= x <= 0x7FFF:
        return 2
    elif -0x80000000 <= x <= 0x7FFFFFFF:
        return 4
    elif long(-0x8000000000000000) <= x <= long(0x7FFFFFFFFFFFFFFF):
        return 8
    else:
        raise RuntimeError('Cannot represent value: ' + str(x))


class _bser_buffer(object):
    """Growable serialization buffer.

    `buf` is a ctypes string buffer that doubles on demand; `wpos`
    tracks the write cursor.  The PDU header is written up front and
    its length field is back-filled by dumps().
    """

    def __init__(self):
        self.buf = ctypes.create_string_buffer(8192)
        struct.pack_into(str(len(EMPTY_HEADER)) + 's', self.buf, 0, EMPTY_HEADER)
        self.wpos = len(EMPTY_HEADER)

    def ensure_size(self, size):
        # Double the buffer until `size` more bytes fit past wpos.
        while ctypes.sizeof(self.buf) - self.wpos < size:
            ctypes.resize(self.buf, ctypes.sizeof(self.buf) * 2)

    def append_long(self, val):
        # Encode an integer with the smallest BSER int representation.
        size = _int_size(val)
        to_write = size + 1
        self.ensure_size(to_write)
        if size == 1:
            struct.pack_into('=cb', self.buf, self.wpos, BSER_INT8, val)
        elif size == 2:
            struct.pack_into('=ch', self.buf, self.wpos, BSER_INT16, val)
        elif size == 4:
            struct.pack_into('=ci', self.buf, self.wpos, BSER_INT32, val)
        elif size == 8:
            struct.pack_into('=cq', self.buf, self.wpos, BSER_INT64, val)
        else:
            raise RuntimeError('Cannot represent this long value')
        self.wpos += to_write


    def append_string(self, s):
        # STRING tag + int length + raw bytes (unicode is utf-8 encoded).
        if isinstance(s, unicode):
            s = s.encode('utf-8')
        s_len = len(s)
        size = _int_size(s_len)
        to_write = 2 + size + s_len
        self.ensure_size(to_write)
        if size == 1:
            struct.pack_into('=ccb' + str(s_len) + 's', self.buf, self.wpos, BSER_STRING, BSER_INT8, s_len, s)
        elif size == 2:
            struct.pack_into('=cch' + str(s_len) + 's', self.buf, self.wpos, BSER_STRING, BSER_INT16, s_len, s)
        elif size == 4:
            struct.pack_into('=cci' + str(s_len) + 's', self.buf, self.wpos, BSER_STRING, BSER_INT32, s_len, s)
        elif size == 8:
            struct.pack_into('=ccq' + str(s_len) + 's', self.buf, self.wpos, BSER_STRING, BSER_INT64, s_len, s)
        else:
            raise RuntimeError('Cannot represent this string value')
        self.wpos += to_write


    def append_recursive(self, val):
        # Serialize an arbitrary python value, dispatching on its type.
        # bool is tested before int on purpose: bool is an int subclass.
        if isinstance(val, bool):
            needed = 1
            self.ensure_size(needed)
            if val:
                to_encode = BSER_TRUE
            else:
                to_encode = BSER_FALSE
            struct.pack_into('=c', self.buf, self.wpos, to_encode)
            self.wpos += needed
        elif val is None:
            needed = 1
            self.ensure_size(needed)
            struct.pack_into('=c', self.buf, self.wpos, BSER_NULL)
            self.wpos += needed
        elif isinstance(val, (int, long)):
            self.append_long(val)
        elif isinstance(val, (str, unicode)):
            self.append_string(val)
        elif isinstance(val, float):
            needed = 9
            self.ensure_size(needed)
            struct.pack_into('=cd', self.buf, self.wpos, BSER_REAL, val)
            self.wpos += needed
        elif isinstance(val, collections.Mapping) and isinstance(val, collections.Sized):
            # OBJECT tag + count, then alternating key string / value pairs
            val_len = len(val)
            size = _int_size(val_len)
            needed = 2 + size
            self.ensure_size(needed)
            if size == 1:
                struct.pack_into('=ccb', self.buf, self.wpos, BSER_OBJECT, BSER_INT8, val_len)
            elif size == 2:
                struct.pack_into('=cch', self.buf, self.wpos, BSER_OBJECT, BSER_INT16, val_len)
            elif size == 4:
                struct.pack_into('=cci', self.buf, self.wpos, BSER_OBJECT, BSER_INT32, val_len)
            elif size == 8:
                struct.pack_into('=ccq', self.buf, self.wpos, BSER_OBJECT, BSER_INT64, val_len)
            else:
                raise RuntimeError('Cannot represent this mapping value')
            self.wpos += needed
            for k, v in val.iteritems():
                self.append_string(k)
                self.append_recursive(v)
        elif isinstance(val, collections.Iterable) and isinstance(val, collections.Sized):
            # ARRAY tag + count, then each element in order
            val_len = len(val)
            size = _int_size(val_len)
            needed = 2 + size
            self.ensure_size(needed)
            if size == 1:
                struct.pack_into('=ccb', self.buf, self.wpos, BSER_ARRAY, BSER_INT8, val_len)
            elif size == 2:
                struct.pack_into('=cch', self.buf, self.wpos, BSER_ARRAY, BSER_INT16, val_len)
            elif size == 4:
                struct.pack_into('=cci', self.buf, self.wpos, BSER_ARRAY, BSER_INT32, val_len)
            elif size == 8:
                struct.pack_into('=ccq', self.buf, self.wpos, BSER_ARRAY, BSER_INT64, val_len)
            else:
                raise RuntimeError('Cannot represent this sequence value')
            self.wpos += needed
            for v in val:
                self.append_recursive(v)
        else:
            raise RuntimeError('Cannot represent unknown value type')


def dumps(obj):
    """Serialize `obj` into a complete BSER PDU (header + payload)."""
    bser_buf = _bser_buffer()
    bser_buf.append_recursive(obj)
    # Now fill in the overall length
    obj_len = bser_buf.wpos - len(EMPTY_HEADER)
    struct.pack_into('=i', bser_buf.buf, 3, obj_len)
    return bser_buf.buf.raw[:bser_buf.wpos]


def _bunser_int(buf, pos):
    """Decode one int whose tag byte is at `pos`; returns (value, new_pos)."""
    try:
        int_type = buf[pos]
    except IndexError:
        raise ValueError('Invalid bser int encoding, pos out of range')
    if int_type == BSER_INT8:
        needed = 2
        fmt = '=b'
    elif int_type == BSER_INT16:
        needed = 3
        fmt = '=h'
    elif int_type == BSER_INT32:
        needed = 5
        fmt = '=i'
    elif int_type == BSER_INT64:
        needed = 9
        fmt = '=q'
    else:
        # NOTE(review): int() of a one-char string like '\x0d' raises
        # ValueError itself; ord(int_type) was probably intended here.
        raise ValueError('Invalid bser int encoding 0x%02x' % int(int_type))
    int_val = struct.unpack_from(fmt, buf, pos + 1)[0]
    return (int_val, pos + needed)


def _bunser_string(buf, pos):
    # `pos` points at the BSER_STRING tag; the length int follows it.
    str_len, pos = _bunser_int(buf, pos + 1)
    str_val = struct.unpack_from(str(str_len) + 's', buf, pos)[0]
    return (str_val, pos + str_len)


def _bunser_array(buf, pos, mutable=True):
    # ARRAY = tag + count + elements; immutable mode yields a tuple.
    arr_len, pos = _bunser_int(buf, pos + 1)
    arr = []
    for i in range(arr_len):
        arr_item, pos = _bser_loads_recursive(buf, pos, mutable)
        arr.append(arr_item)

    if not mutable:
        arr = tuple(arr)

    return arr, pos


# This is a quack-alike with the bserObjectType in bser.c
# It provides by getattr accessors and getitem for both index
# and name.
class _BunserDict(object):
    __slots__ = ('_keys', '_values')

    def __init__(self, keys, values):
        self._keys = keys
        self._values = values

    def __getattr__(self, name):
        return self.__getitem__(name)

    def __getitem__(self, key):
        if isinstance(key, (int, long)):
            return self._values[key]
        elif key.startswith('st_'):
            # hack^Wfeature to allow mercurial to use "st_size" to
            # reference "size"
            key = key[3:]
        try:
            return self._values[self._keys.index(key)]
        except ValueError as ex:
            raise KeyError('_BunserDict has no key %s' % key)

    def __len__(self):
        return len(self._keys)

def _bunser_object(buf, pos, mutable=True):
    # OBJECT = tag + count + (key string, value) pairs; immutable mode
    # yields a _BunserDict instead of a plain dict.
    obj_len, pos = _bunser_int(buf, pos + 1)
    if mutable:
        obj = {}
    else:
        keys = []
        vals = []

    for i in range(obj_len):
        key, pos = _bunser_string(buf, pos)
        val, pos = _bser_loads_recursive(buf, pos, mutable)
        if mutable:
            obj[key] = val
        else:
            keys.append(key)
            vals.append(val)

    if not mutable:
        obj = _BunserDict(keys, vals)

    return obj, pos


def _bunser_template(buf, pos, mutable=True):
    # TEMPLATE = tag + key array + row count + row cells; BSER_SKIP in a
    # cell stands for "no value for this key in this row" -> None.
    if buf[pos + 1] != BSER_ARRAY:
        raise RuntimeError('Expect ARRAY to follow TEMPLATE')
    keys, pos = _bunser_array(buf, pos + 1)
    nitems, pos = _bunser_int(buf, pos)
    arr = []
    for i in range(nitems):
        if mutable:
            obj = {}
        else:
            vals = []

        for keyidx in range(len(keys)):
            if buf[pos] == BSER_SKIP:
                pos += 1
                ele = None
            else:
                ele, pos = _bser_loads_recursive(buf, pos, mutable)

            if mutable:
                key = keys[keyidx]
                obj[key] = ele
            else:
                vals.append(ele)

        if not mutable:
            obj = _BunserDict(keys, vals)

        arr.append(obj)
    return arr, pos


def _bser_loads_recursive(buf, pos, mutable=True):
    """Decode one value at `pos`; returns (value, new_pos)."""
    val_type = buf[pos]
    if (val_type == BSER_INT8 or val_type == BSER_INT16 or
        val_type == BSER_INT32 or val_type == BSER_INT64):
        return _bunser_int(buf, pos)
    elif val_type == BSER_REAL:
        val = struct.unpack_from('=d', buf, pos + 1)[0]
        return (val, pos + 9)
    elif val_type == BSER_TRUE:
        return (True, pos + 1)
    elif val_type == BSER_FALSE:
        return (False, pos + 1)
    elif val_type == BSER_NULL:
        return (None, pos + 1)
    elif val_type == BSER_STRING:
        return _bunser_string(buf, pos)
    elif val_type == BSER_ARRAY:
        return _bunser_array(buf, pos, mutable)
    elif val_type == BSER_OBJECT:
        return _bunser_object(buf, pos, mutable)
    elif val_type == BSER_TEMPLATE:
        return _bunser_template(buf, pos, mutable)
    else:
        raise RuntimeError('unhandled bser opcode 0x%02x' % (val_type,))


def pdu_len(buf):
    """Return the total PDU length declared in the header at buf[0],
    including the header bytes themselves."""
    if buf[0:2] != EMPTY_HEADER[0:2]:
        raise RuntimeError('Invalid BSER header')
    expected_len, pos = _bunser_int(buf, 2)
    return expected_len + pos


def loads(buf, mutable=True):
    """Deserialize one complete BSER PDU.  With mutable=False, arrays
    come back as tuples and objects as _BunserDict instances."""
    if buf[0:2] != EMPTY_HEADER[0:2]:
        raise RuntimeError('Invalid BSER header')
    expected_len, pos = _bunser_int(buf, 2)
    if len(buf) != expected_len + pos:
        raise RuntimeError('bser data len != header len')
    return _bser_loads_recursive(buf, pos, mutable)[0]

# no-check-code -- this is a 3rd party library
# state.py - fsmonitor persistent state
#
# Copyright 2013-2016 Facebook, Inc.
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from __future__ import absolute_import

import errno
import os
import socket
import struct

from mercurial import pathutil
from mercurial.i18n import _

# On-disk state file format version; bumping it invalidates old files.
_version = 4
_versionformat = ">I"

class state(object):
    """Persistent fsmonitor state stored in .hg/fsmonitor.state.

    File layout: a big-endian uint32 version word, then
    hostname\\0clock\\0ignorehash\\0 followed by zero or more
    \\0-terminated "note" file names.
    """
    def __init__(self, repo):
        self._opener = repo.opener
        self._ui = repo.ui
        self._rootdir = pathutil.normasprefix(repo.root)
        # clock value remembered for the duration of this process only
        self._lastclock = None

        self.mode = self._ui.config('fsmonitor', 'mode', default='on')
        self.walk_on_invalidate = self._ui.configbool(
            'fsmonitor', 'walk_on_invalidate', False)
        self.timeout = float(self._ui.config(
            'fsmonitor', 'timeout', default='2'))

    def get(self):
        """Read the state file.

        Returns (clock, ignorehash, notefiles); returns (None, None, None)
        — after nuking any stale file — when the file is absent, truncated,
        written by a different format version, or written on another host.
        """
        try:
            file = self._opener('fsmonitor.state', 'rb')
        except IOError as inst:
            if inst.errno != errno.ENOENT:
                raise
            return None, None, None

        versionbytes = file.read(4)
        if len(versionbytes) < 4:
            self._ui.log(
                'fsmonitor', 'fsmonitor: state file only has %d bytes, '
                'nuking state\n' % len(versionbytes))
            self.invalidate()
            return None, None, None
        try:
            diskversion = struct.unpack(_versionformat, versionbytes)[0]
            if diskversion != _version:
                # different version, nuke state and start over
                self._ui.log(
                    'fsmonitor', 'fsmonitor: version switch from %d to '
                    '%d, nuking state\n' % (diskversion, _version))
                self.invalidate()
                return None, None, None

            state = file.read().split('\0')
            # state = hostname\0clock\0ignorehash\0 + list of files, each
            # followed by a \0
            diskhostname = state[0]
            hostname = socket.gethostname()
            if diskhostname != hostname:
                # file got moved to a different host
                self._ui.log('fsmonitor', 'fsmonitor: stored hostname "%s" '
                             'different from current "%s", nuking state\n' %
                             (diskhostname, hostname))
                self.invalidate()
                return None, None, None

            clock = state[1]
            ignorehash = state[2]
            # discard the value after the last \0
            notefiles = state[3:-1]

        finally:
            file.close()

        return clock, ignorehash, notefiles

    def set(self, clock, ignorehash, notefiles):
        """Write the state file; a None clock invalidates instead.

        Write failures are reported as a warning, not raised: losing the
        cached state is recoverable.
        """
        if clock is None:
            self.invalidate()
            return

        try:
            file = self._opener('fsmonitor.state', 'wb')
        except (IOError, OSError):
            self._ui.warn(_("warning: unable to write out fsmonitor state\n"))
            return

        try:
            file.write(struct.pack(_versionformat, _version))
            file.write(socket.gethostname() + '\0')
            file.write(clock + '\0')
            file.write(ignorehash + '\0')
            if notefiles:
                file.write('\0'.join(notefiles))
                file.write('\0')
        finally:
            file.close()

    def invalidate(self):
        """Remove the on-disk state file; a missing file is not an error."""
        try:
            os.unlink(os.path.join(self._rootdir, '.hg', 'fsmonitor.state'))
        except OSError as inst:
            if inst.errno != errno.ENOENT:
                raise

    def setlastclock(self, clock):
        # Remember the most recent watchman clock for this process.
        self._lastclock = clock

    def getlastclock(self):
        return self._lastclock
--- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/hgext/fsmonitor/watchmanclient.py Tue Mar 15 14:10:46 2016 -0700 @@ -0,0 +1,109 @@ +# watchmanclient.py - Watchman client for the fsmonitor extension +# +# Copyright 2013-2016 Facebook, Inc. +# +# This software may be used and distributed according to the terms of the +# GNU General Public License version 2 or any later version. + +from __future__ import absolute_import + +import getpass + +from mercurial import util + +from . import pywatchman + +class Unavailable(Exception): + def __init__(self, msg, warn=True, invalidate=False): + self.msg = msg + self.warn = warn + if self.msg == 'timed out waiting for response': + self.warn = False + self.invalidate = invalidate + + def __str__(self): + if self.warn: + return 'warning: Watchman unavailable: %s' % self.msg + else: + return 'Watchman unavailable: %s' % self.msg + +class WatchmanNoRoot(Unavailable): + def __init__(self, root, msg): + self.root = root + super(WatchmanNoRoot, self).__init__(msg) + +class client(object): + def __init__(self, repo, timeout=1.0): + err = None + if not self._user: + err = "couldn't get user" + warn = True + if self._user in repo.ui.configlist('fsmonitor', 'blacklistusers'): + err = 'user %s in blacklist' % self._user + warn = False + + if err: + raise Unavailable(err, warn) + + self._timeout = timeout + self._watchmanclient = None + self._root = repo.root + self._ui = repo.ui + self._firsttime = True + + def settimeout(self, timeout): + self._timeout = timeout + if self._watchmanclient is not None: + self._watchmanclient.setTimeout(timeout) + + def getcurrentclock(self): + result = self.command('clock') + if not util.safehasattr(result, 'clock'): + raise Unavailable('clock result is missing clock value', + invalidate=True) + return result.clock + + def clearconnection(self): + self._watchmanclient = None + + def available(self): + return self._watchmanclient is not None or self._firsttime + + @util.propertycache + def _user(self): + 
try: + return getpass.getuser() + except KeyError: + # couldn't figure out our user + return None + + def _command(self, *args): + watchmanargs = (args[0], self._root) + args[1:] + try: + if self._watchmanclient is None: + self._firsttime = False + self._watchmanclient = pywatchman.client( + timeout=self._timeout, + useImmutableBser=True) + return self._watchmanclient.query(*watchmanargs) + except pywatchman.CommandError as ex: + if ex.msg.startswith('unable to resolve root'): + raise WatchmanNoRoot(self._root, ex.msg) + raise Unavailable(ex.msg) + except pywatchman.WatchmanError as ex: + raise Unavailable(str(ex)) + + def command(self, *args): + try: + try: + return self._command(*args) + except WatchmanNoRoot: + # this 'watch' command can also raise a WatchmanNoRoot if + # watchman refuses to accept this root + self._command('watch') + return self._command(*args) + except Unavailable: + # this is in an outer scope to catch Unavailable form any of the + # above _command calls + self._watchmanclient = None + raise
--- a/hgext/histedit.py Sun Mar 13 02:29:11 2016 +0100 +++ b/hgext/histedit.py Tue Mar 15 14:10:46 2016 -0700 @@ -234,6 +234,8 @@ Commits are listed from least to most recent +You can reorder changesets by reordering the lines + Commands: """) actions = [] @@ -279,7 +281,7 @@ except IOError as err: if err.errno != errno.ENOENT: raise - raise error.Abort(_('no histedit in progress')) + cmdutil.wrongtooltocontinue(self.repo, _('histedit')) if state.startswith('v1\n'): data = self._load() @@ -447,13 +449,18 @@ parentctx, but does not commit them.""" repo = self.repo rulectx = repo[self.node] + repo.ui.pushbuffer(error=True, labeled=True) hg.update(repo, self.state.parentctxnode, quietempty=True) stats = applychanges(repo.ui, repo, rulectx, {}) if stats and stats[3] > 0: + buf = repo.ui.popbuffer() + repo.ui.write(*buf) raise error.InterventionRequired( _('Fix up the change (%s %s)') % (self.verb, node.short(self.node)), hint=_('hg histedit --continue to resume')) + else: + repo.ui.popbuffer() def continuedirty(self): """Continues the action when changes have been applied to the working @@ -477,7 +484,7 @@ rulectx.""" ctx = self.repo['.'] if ctx.node() == self.state.parentctxnode: - self.repo.ui.warn(_('%s: empty changeset\n') % + self.repo.ui.warn(_('%s: skipping changeset (no changes)\n') % node.short(self.node)) return ctx, [(self.node, tuple())] if ctx.node() == self.node: @@ -733,7 +740,9 @@ def finishfold(self, ui, repo, ctx, oldctx, newnode, internalchanges): parent = ctx.parents()[0].node() + repo.ui.pushbuffer() hg.update(repo, parent) + repo.ui.popbuffer() ### prepare new commit data commitopts = {} commitopts['user'] = ctx.user() @@ -764,7 +773,9 @@ repo.ui.restoreconfig(phasebackup) if n is None: return ctx, [] + repo.ui.pushbuffer() hg.update(repo, n) + repo.ui.popbuffer() replacements = [(oldctx.node(), (newnode,)), (ctx.node(), (n,)), (newnode, (n,)), @@ -892,7 +903,7 @@ - Specify ANCESTOR directly - Use --outgoing -- it will be the first linear 
changeset not - included in destination. (See :hg:`help config.default-push`) + included in destination. (See :hg:`help config.paths.default-push`) - Otherwise, the value from the "histedit.defaultrev" config option is used as a revset to select the base revision when ANCESTOR is not @@ -973,7 +984,21 @@ finally: release(state.lock, state.wlock) -def _histedit(ui, repo, state, *freeargs, **opts): +goalcontinue = 'continue' +goalabort = 'abort' +goaleditplan = 'edit-plan' +goalnew = 'new' + +def _getgoal(opts): + if opts.get('continue'): + return goalcontinue + if opts.get('abort'): + return goalabort + if opts.get('edit_plan'): + return goaleditplan + return goalnew + +def _validateargs(ui, repo, state, freeargs, opts, goal, rules, revs): # TODO only abort if we try to histedit mq patches, not just # blanket if mq patches are applied somewhere mq = getattr(repo, 'mq', None) @@ -982,28 +1007,21 @@ # basic argument incompatibility processing outg = opts.get('outgoing') - cont = opts.get('continue') editplan = opts.get('edit_plan') abort = opts.get('abort') force = opts.get('force') - rules = opts.get('commands', '') - revs = opts.get('rev', []) - goal = 'new' # This invocation goal, in new, continue, abort if force and not outg: raise error.Abort(_('--force only allowed with --outgoing')) - if cont: + if goal == 'continue': if any((outg, abort, revs, freeargs, rules, editplan)): raise error.Abort(_('no arguments allowed with --continue')) - goal = 'continue' - elif abort: + elif goal == 'abort': if any((outg, revs, freeargs, rules, editplan)): raise error.Abort(_('no arguments allowed with --abort')) - goal = 'abort' - elif editplan: + elif goal == 'edit-plan': if any((outg, revs, freeargs)): raise error.Abort(_('only --commands argument allowed with ' '--edit-plan')) - goal = 'edit-plan' else: if os.path.exists(os.path.join(repo.path, 'histedit-state')): raise error.Abort(_('history edit already in progress, try ' @@ -1025,124 +1043,36 @@ raise error.Abort( 
_('histedit requires exactly one ancestor revision')) +def _histedit(ui, repo, state, *freeargs, **opts): + goal = _getgoal(opts) + revs = opts.get('rev', []) + rules = opts.get('commands', '') + state.keep = opts.get('keep', False) - replacements = [] - state.keep = opts.get('keep', False) - supportsmarkers = obsolete.isenabled(repo, obsolete.createmarkersopt) + _validateargs(ui, repo, state, freeargs, opts, goal, rules, revs) # rebuild state - if goal == 'continue': + if goal == goalcontinue: state.read() state = bootstrapcontinue(ui, state, opts) - elif goal == 'edit-plan': - state.read() - if not rules: - comment = geteditcomment(node.short(state.parentctxnode), - node.short(state.topmost)) - rules = ruleeditor(repo, ui, state.actions, comment) - else: - if rules == '-': - f = sys.stdin - else: - f = open(rules) - rules = f.read() - f.close() - actions = parserules(rules, state) - ctxs = [repo[act.nodetoverify()] \ - for act in state.actions if act.nodetoverify()] - warnverifyactions(ui, repo, actions, state, ctxs) - state.actions = actions - state.write() + elif goal == goaleditplan: + _edithisteditplan(ui, repo, state, rules) return - elif goal == 'abort': - try: - state.read() - tmpnodes, leafs = newnodestoabort(state) - ui.debug('restore wc to old parent %s\n' - % node.short(state.topmost)) - - # Recover our old commits if necessary - if not state.topmost in repo and state.backupfile: - backupfile = repo.join(state.backupfile) - f = hg.openpath(ui, backupfile) - gen = exchange.readbundle(ui, f, backupfile) - with repo.transaction('histedit.abort') as tr: - if not isinstance(gen, bundle2.unbundle20): - gen.apply(repo, 'histedit', 'bundle:' + backupfile) - if isinstance(gen, bundle2.unbundle20): - bundle2.applybundle(repo, gen, tr, - source='histedit', - url='bundle:' + backupfile) - - os.remove(backupfile) - - # check whether we should update away - if repo.unfiltered().revs('parents() and (%n or %ln::)', - state.parentctxnode, leafs | tmpnodes): - 
hg.clean(repo, state.topmost, show_stats=True, quietempty=True) - cleanupnode(ui, repo, 'created', tmpnodes) - cleanupnode(ui, repo, 'temp', leafs) - except Exception: - if state.inprogress(): - ui.warn(_('warning: encountered an exception during histedit ' - '--abort; the repository may not have been completely ' - 'cleaned up\n')) - raise - finally: - state.clear() + elif goal == goalabort: + _aborthistedit(ui, repo, state) return else: - cmdutil.checkunfinished(repo) - cmdutil.bailifchanged(repo) + # goal == goalnew + _newhistedit(ui, repo, state, revs, freeargs, opts) - topmost, empty = repo.dirstate.parents() - if outg: - if freeargs: - remote = freeargs[0] - else: - remote = None - root = findoutgoing(ui, repo, remote, force, opts) - else: - rr = list(repo.set('roots(%ld)', scmutil.revrange(repo, revs))) - if len(rr) != 1: - raise error.Abort(_('The specified revisions must have ' - 'exactly one common root')) - root = rr[0].node() - - revs = between(repo, root, topmost, state.keep) - if not revs: - raise error.Abort(_('%s is not an ancestor of working directory') % - node.short(root)) + _continuehistedit(ui, repo, state) + _finishhistedit(ui, repo, state) - ctxs = [repo[r] for r in revs] - if not rules: - comment = geteditcomment(node.short(root), node.short(topmost)) - actions = [pick(state, r) for r in revs] - rules = ruleeditor(repo, ui, actions, comment) - else: - if rules == '-': - f = sys.stdin - else: - f = open(rules) - rules = f.read() - f.close() - actions = parserules(rules, state) - warnverifyactions(ui, repo, actions, state, ctxs) - - parentctxnode = repo[root].parents()[0].node() - - state.parentctxnode = parentctxnode - state.actions = actions - state.topmost = topmost - state.replacements = replacements - - # Create a backup so we can always abort completely. 
- backupfile = None - if not obsolete.isenabled(repo, obsolete.createmarkersopt): - backupfile = repair._bundle(repo, [parentctxnode], [topmost], root, - 'histedit') - state.backupfile = backupfile - +def _continuehistedit(ui, repo, state): + """This function runs after either: + - bootstrapcontinue (if the goal is 'continue') + - _newhistedit (if the goal is 'new') + """ # preprocess rules so that we can hide inner folds from the user # and only show one editor actions = state.actions[:] @@ -1167,7 +1097,11 @@ state.write() ui.progress(_("editing"), None) +def _finishhistedit(ui, repo, state): + """This action runs when histedit is finishing its session""" + repo.ui.pushbuffer() hg.update(repo, state.parentctxnode, quietempty=True) + repo.ui.popbuffer() mapping, tmpnodes, created, ntm = processreplacement(state) if mapping: @@ -1182,6 +1116,7 @@ for n in succs[1:]: ui.debug(m % node.short(n)) + supportsmarkers = obsolete.isenabled(repo, obsolete.createmarkersopt) if supportsmarkers: # Only create markers if the temp nodes weren't already removed. 
obsolete.createmarkers(repo, ((repo[t],()) for t in sorted(tmpnodes) @@ -1211,6 +1146,119 @@ if repo.vfs.exists('histedit-last-edit.txt'): repo.vfs.unlink('histedit-last-edit.txt') +def _aborthistedit(ui, repo, state): + try: + state.read() + __, leafs, tmpnodes, __ = processreplacement(state) + ui.debug('restore wc to old parent %s\n' + % node.short(state.topmost)) + + # Recover our old commits if necessary + if not state.topmost in repo and state.backupfile: + backupfile = repo.join(state.backupfile) + f = hg.openpath(ui, backupfile) + gen = exchange.readbundle(ui, f, backupfile) + with repo.transaction('histedit.abort') as tr: + if not isinstance(gen, bundle2.unbundle20): + gen.apply(repo, 'histedit', 'bundle:' + backupfile) + if isinstance(gen, bundle2.unbundle20): + bundle2.applybundle(repo, gen, tr, + source='histedit', + url='bundle:' + backupfile) + + os.remove(backupfile) + + # check whether we should update away + if repo.unfiltered().revs('parents() and (%n or %ln::)', + state.parentctxnode, leafs | tmpnodes): + hg.clean(repo, state.topmost, show_stats=True, quietempty=True) + cleanupnode(ui, repo, 'created', tmpnodes) + cleanupnode(ui, repo, 'temp', leafs) + except Exception: + if state.inprogress(): + ui.warn(_('warning: encountered an exception during histedit ' + '--abort; the repository may not have been completely ' + 'cleaned up\n')) + raise + finally: + state.clear() + +def _edithisteditplan(ui, repo, state, rules): + state.read() + if not rules: + comment = geteditcomment(node.short(state.parentctxnode), + node.short(state.topmost)) + rules = ruleeditor(repo, ui, state.actions, comment) + else: + if rules == '-': + f = sys.stdin + else: + f = open(rules) + rules = f.read() + f.close() + actions = parserules(rules, state) + ctxs = [repo[act.nodetoverify()] \ + for act in state.actions if act.nodetoverify()] + warnverifyactions(ui, repo, actions, state, ctxs) + state.actions = actions + state.write() + +def _newhistedit(ui, repo, state, revs, 
freeargs, opts): + outg = opts.get('outgoing') + rules = opts.get('commands', '') + force = opts.get('force') + + cmdutil.checkunfinished(repo) + cmdutil.bailifchanged(repo) + + topmost, empty = repo.dirstate.parents() + if outg: + if freeargs: + remote = freeargs[0] + else: + remote = None + root = findoutgoing(ui, repo, remote, force, opts) + else: + rr = list(repo.set('roots(%ld)', scmutil.revrange(repo, revs))) + if len(rr) != 1: + raise error.Abort(_('The specified revisions must have ' + 'exactly one common root')) + root = rr[0].node() + + revs = between(repo, root, topmost, state.keep) + if not revs: + raise error.Abort(_('%s is not an ancestor of working directory') % + node.short(root)) + + ctxs = [repo[r] for r in revs] + if not rules: + comment = geteditcomment(node.short(root), node.short(topmost)) + actions = [pick(state, r) for r in revs] + rules = ruleeditor(repo, ui, actions, comment) + else: + if rules == '-': + f = sys.stdin + else: + f = open(rules) + rules = f.read() + f.close() + actions = parserules(rules, state) + warnverifyactions(ui, repo, actions, state, ctxs) + + parentctxnode = repo[root].parents()[0].node() + + state.parentctxnode = parentctxnode + state.actions = actions + state.topmost = topmost + state.replacements = [] + + # Create a backup so we can always abort completely. 
+ backupfile = None + if not obsolete.isenabled(repo, obsolete.createmarkersopt): + backupfile = repair._bundle(repo, [parentctxnode], [topmost], root, + 'histedit') + state.backupfile = backupfile + def bootstrapcontinue(ui, state, opts): repo = state.repo if state.actions: @@ -1236,7 +1284,8 @@ if ctxs and not keep: if (not obsolete.isenabled(repo, obsolete.allowunstableopt) and repo.revs('(%ld::) - (%ld)', ctxs, ctxs)): - raise error.Abort(_('cannot edit history that would orphan nodes')) + raise error.Abort(_('can only histedit a changeset together ' + 'with all its descendants')) if repo.revs('(%ld) and merge()', ctxs): raise error.Abort(_('cannot edit history that contains merges')) root = ctxs[0] # list is already sorted by repo.set @@ -1330,6 +1379,10 @@ missing = sorted(expected - seen) # sort to stabilize output if state.repo.ui.configbool('histedit', 'dropmissing'): + if len(actions) == 0: + raise error.ParseError(_('no rules provided'), + hint=_('use strip extension to remove commits')) + drops = [drop(state, node.bin(n)) for n in missing] # put the in the beginning so they execute immediately and # don't show in the edit-plan in the future @@ -1340,24 +1393,40 @@ hint=_('use "drop %s" to discard, see also: ' '"hg help -e histedit.config"') % missing[0][:12]) -def newnodestoabort(state): - """process the list of replacements to return +def adjustreplacementsfrommarkers(repo, oldreplacements): + """Adjust replacements from obsolescense markers - 1) the list of final node - 2) the list of temporary node + Replacements structure is originally generated based on + histedit's state and does not account for changes that are + not recorded there. This function fixes that by adding + data read from obsolescense markers""" + if not obsolete.isenabled(repo, obsolete.createmarkersopt): + return oldreplacements - This is meant to be used on abort as less data are required in this case. 
- """ - replacements = state.replacements - allsuccs = set() - replaced = set() - for rep in replacements: - allsuccs.update(rep[1]) - replaced.add(rep[0]) - newnodes = allsuccs - replaced - tmpnodes = allsuccs & replaced - return newnodes, tmpnodes + unfi = repo.unfiltered() + nm = unfi.changelog.nodemap + obsstore = repo.obsstore + newreplacements = list(oldreplacements) + oldsuccs = [r[1] for r in oldreplacements] + # successors that have already been added to succstocheck once + seensuccs = set().union(*oldsuccs) # create a set from an iterable of tuples + succstocheck = list(seensuccs) + while succstocheck: + n = succstocheck.pop() + missing = nm.get(n) is None + markers = obsstore.successors.get(n, ()) + if missing and not markers: + # dead end, mark it as such + newreplacements.append((n, ())) + for marker in markers: + nsuccs = marker[1] + newreplacements.append((n, nsuccs)) + for nsucc in nsuccs: + if nsucc not in seensuccs: + seensuccs.add(nsucc) + succstocheck.append(nsucc) + return newreplacements def processreplacement(state): """process the list of replacements to return @@ -1365,7 +1434,7 @@ 1) the final mapping between original and created nodes 2) the list of temporary node created by histedit 3) the list of new commit created by histedit""" - replacements = state.replacements + replacements = adjustreplacementsfrommarkers(state.repo, state.replacements) allsuccs = set() replaced = set() fullmapping = {}
--- a/hgext/keyword.py Sun Mar 13 02:29:11 2016 +0100 +++ b/hgext/keyword.py Tue Mar 15 14:10:46 2016 -0700 @@ -82,12 +82,32 @@ {desc}" expands to the first line of the changeset description. ''' -from mercurial import commands, context, cmdutil, dispatch, filelog, extensions -from mercurial import localrepo, match, patch, templatefilters, util, error -from mercurial import scmutil, pathutil + +from __future__ import absolute_import + +import os +import re +import tempfile + from mercurial.hgweb import webcommands from mercurial.i18n import _ -import os, re, tempfile + +from mercurial import ( + cmdutil, + commands, + context, + dispatch, + error, + extensions, + filelog, + localrepo, + match, + patch, + pathutil, + scmutil, + templatefilters, + util, +) cmdtable = {} command = cmdutil.command(cmdtable) @@ -410,10 +430,8 @@ ui.readconfig(opts.get('rcfile')) if args: # simulate hgrc parsing - rcmaps = ['[keywordmaps]\n'] + [a + '\n' for a in args] - fp = repo.vfs('hgrc', 'w') - fp.writelines(rcmaps) - fp.close() + rcmaps = '[keywordmaps]\n%s\n' % '\n'.join(args) + repo.vfs.write('hgrc', rcmaps) ui.readconfig(repo.join('hgrc')) kwmaps = dict(ui.configitems('keywordmaps')) elif opts.get('default'):
--- a/hgext/largefiles/__init__.py Sun Mar 13 02:29:11 2016 +0100 +++ b/hgext/largefiles/__init__.py Tue Mar 15 14:10:46 2016 -0700 @@ -111,6 +111,7 @@ import proto import reposetup import uisetup as uisetupmod +import overrides # Note for extension authors: ONLY specify testedwith = 'internal' for # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should @@ -130,3 +131,4 @@ uisetupmod.uisetup(ui) cmdtable = lfcommands.cmdtable +revsetpredicate = overrides.revsetpredicate
--- a/hgext/largefiles/basestore.py Sun Mar 13 02:29:11 2016 +0100 +++ b/hgext/largefiles/basestore.py Tue Mar 15 14:10:46 2016 -0700 @@ -63,7 +63,7 @@ at = 0 available = self.exists(set(hash for (_filename, hash) in files)) for filename, hash in files: - ui.progress(_('getting largefiles'), at, unit='lfile', + ui.progress(_('getting largefiles'), at, unit=_('files'), total=len(files)) at += 1 ui.note(_('getting %s:%s\n') % (filename, hash))
--- a/hgext/largefiles/lfcommands.py Sun Mar 13 02:29:11 2016 +0100 +++ b/hgext/largefiles/lfcommands.py Tue Mar 15 14:10:46 2016 -0700 @@ -99,7 +99,7 @@ lfiletohash = {} for ctx in ctxs: ui.progress(_('converting revisions'), ctx.rev(), - unit=_('revision'), total=rsrc['tip'].rev()) + unit=_('revisions'), total=rsrc['tip'].rev()) _lfconvert_addchangeset(rsrc, rdst, ctx, revmap, lfiles, normalfiles, matcher, size, lfiletohash) ui.progress(_('converting revisions'), None) @@ -346,7 +346,7 @@ ui.debug("%d largefiles need to be uploaded\n" % len(files)) for hash in files: - ui.progress(_('uploading largefiles'), at, unit='largefile', + ui.progress(_('uploading largefiles'), at, unit=_('files'), total=len(files)) source = lfutil.findfile(rsrc, hash) if not source:
--- a/hgext/largefiles/lfutil.py Sun Mar 13 02:29:11 2016 +0100 +++ b/hgext/largefiles/lfutil.py Tue Mar 15 14:10:46 2016 -0700 @@ -468,7 +468,7 @@ def getlfilestoupload(repo, missing, addfunc): for i, n in enumerate(missing): repo.ui.progress(_('finding outgoing largefiles'), i, - unit=_('revision'), total=len(missing)) + unit=_('revisions'), total=len(missing)) parents = [p for p in repo.changelog.parents(n) if p != node.nullid] oldlfstatus = repo.lfstatus
--- a/hgext/largefiles/overrides.py Sun Mar 13 02:29:11 2016 +0100 +++ b/hgext/largefiles/overrides.py Tue Mar 15 14:10:46 2016 -0700 @@ -12,7 +12,7 @@ import copy from mercurial import hg, util, cmdutil, scmutil, match as match_, \ - archival, pathutil, revset, error + archival, pathutil, registrar, revset, error from mercurial.i18n import _ import lfutil @@ -452,11 +452,10 @@ # writing the files into the working copy and lfcommands.updatelfiles # will update the largefiles. def overridecalculateupdates(origfn, repo, p1, p2, pas, branchmerge, force, - acceptremote, followcopies, matcher=None): + acceptremote, *args, **kwargs): overwrite = force and not branchmerge actions, diverge, renamedelete = origfn( - repo, p1, p2, pas, branchmerge, force, acceptremote, - followcopies, matcher=matcher) + repo, p1, p2, pas, branchmerge, force, acceptremote, *args, **kwargs) if overwrite: return actions, diverge, renamedelete @@ -802,7 +801,7 @@ ui.status(_("%d largefiles cached\n") % numcached) return result -revsetpredicate = revset.extpredicate() +revsetpredicate = registrar.revsetpredicate() @revsetpredicate('pulled()') def pulledrevsetsymbol(repo, subset, x): @@ -963,7 +962,7 @@ if subrepos: for subpath in sorted(ctx.substate): sub = ctx.workingsub(subpath) - submatch = match_.narrowmatcher(subpath, matchfn) + submatch = match_.subdirmatcher(subpath, matchfn) sub._repo.lfstatus = True sub.archive(archiver, prefix, submatch) @@ -1011,7 +1010,7 @@ for subpath in sorted(ctx.substate): sub = ctx.workingsub(subpath) - submatch = match_.narrowmatcher(subpath, match) + submatch = match_.subdirmatcher(subpath, match) sub._repo.lfstatus = True sub.archive(archiver, prefix + repo._path + '/', submatch)
--- a/hgext/largefiles/remotestore.py Sun Mar 13 02:29:11 2016 +0100 +++ b/hgext/largefiles/remotestore.py Tue Mar 15 14:10:46 2016 -0700 @@ -96,3 +96,18 @@ def batch(self): '''Support for remote batching.''' return wireproto.remotebatch(self) + + def _put(self, hash, fd): + '''Put file with the given hash in the remote store.''' + raise NotImplementedError('abstract method') + + def _get(self, hash): + '''Get file with the given hash from the remote store.''' + raise NotImplementedError('abstract method') + + def _stat(self, hashes): + '''Get information about availability of files specified by + hashes in the remote store. Return dictionary mapping hashes + to return code where 0 means that file is available, other + values if not.''' + raise NotImplementedError('abstract method')
--- a/hgext/largefiles/uisetup.py Sun Mar 13 02:29:11 2016 +0100 +++ b/hgext/largefiles/uisetup.py Tue Mar 15 14:10:46 2016 -0700 @@ -171,5 +171,3 @@ if name == 'transplant': extensions.wrapcommand(getattr(module, 'cmdtable'), 'transplant', overrides.overridetransplant) - - overrides.revsetpredicate.setup()
--- a/hgext/largefiles/wirestore.py Sun Mar 13 02:29:11 2016 +0100 +++ b/hgext/largefiles/wirestore.py Tue Mar 15 14:10:46 2016 -0700 @@ -29,12 +29,8 @@ '''For each hash, return 0 if it is available, other values if not. It is usually 2 if the largefile is missing, but might be 1 the server has a corrupted copy.''' - batch = self.remote.batch() - futures = {} + batch = self.remote.iterbatch() for hash in hashes: - futures[hash] = batch.statlfile(hash) + batch.statlfile(hash) batch.submit() - retval = {} - for hash in hashes: - retval[hash] = futures[hash].value - return retval + return dict(zip(hashes, batch.results()))
--- a/hgext/mq.py Sun Mar 13 02:29:11 2016 +0100 +++ b/hgext/mq.py Tue Mar 15 14:10:46 2016 -0700 @@ -70,6 +70,7 @@ from mercurial import patch as patchmod from mercurial import lock as lockmod from mercurial import localrepo +from mercurial import registrar from mercurial import subrepo import os, re, errno, shutil @@ -3537,7 +3538,7 @@ # i18n: column positioning for "hg summary" ui.note(_("mq: (empty queue)\n")) -revsetpredicate = revset.extpredicate() +revsetpredicate = registrar.revsetpredicate() @revsetpredicate('mq()') def revsetmq(repo, subset, x): @@ -3561,12 +3562,11 @@ entry = extensions.wrapcommand(commands.table, 'init', mqinit) entry[1].extend(mqopt) - nowrap = set(commands.norepo.split(" ")) - def dotable(cmdtable): - for cmd in cmdtable.keys(): + for cmd, entry in cmdtable.iteritems(): cmd = cmdutil.parsealiases(cmd)[0] - if cmd in nowrap: + func = entry[0] + if func.norepo: continue entry = extensions.wrapcommand(cmdtable, cmd, mqcommand) entry[1].extend(mqopt) @@ -3577,8 +3577,6 @@ if extmodule.__file__ != __file__: dotable(getattr(extmodule, 'cmdtable', {})) - revsetpredicate.setup() - colortable = {'qguard.negative': 'red', 'qguard.positive': 'yellow', 'qguard.unguarded': 'green',
--- a/hgext/notify.py Sun Mar 13 02:29:11 2016 +0100 +++ b/hgext/notify.py Tue Mar 15 14:10:46 2016 -0700 @@ -132,11 +132,21 @@ references. See also ``notify.strip``. ''' +from __future__ import absolute_import -import email, socket, time +import email +import fnmatch +import socket +import time + +from mercurial import ( + cmdutil, + error, + mail, + patch, + util, +) from mercurial.i18n import _ -from mercurial import patch, cmdutil, util, mail, error -import fnmatch # Note for extension authors: ONLY specify testedwith = 'internal' for # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
--- a/hgext/pager.py Sun Mar 13 02:29:11 2016 +0100 +++ b/hgext/pager.py Tue Mar 15 14:10:46 2016 -0700 @@ -58,9 +58,21 @@ will also work). ''' +from __future__ import absolute_import -import atexit, sys, os, signal, subprocess -from mercurial import commands, dispatch, util, extensions, cmdutil +import atexit +import os +import signal +import subprocess +import sys + +from mercurial import ( + cmdutil, + commands, + dispatch, + extensions, + util, + ) from mercurial.i18n import _ # Note for extension authors: ONLY specify testedwith = 'internal' for
--- a/hgext/patchbomb.py Sun Mar 13 02:29:11 2016 +0100 +++ b/hgext/patchbomb.py Tue Mar 15 14:10:46 2016 -0700 @@ -63,14 +63,27 @@ You can set patchbomb to always ask for confirmation by setting ``patchbomb.confirm`` to true. ''' +from __future__ import absolute_import -import os, errno, socket, tempfile, cStringIO +import cStringIO import email as emailmod +import errno +import os +import socket +import tempfile -from mercurial import cmdutil, commands, hg, mail, patch, util, error -from mercurial import scmutil +from mercurial import ( + cmdutil, + commands, + error, + hg, + mail, + node as nodemod, + patch, + scmutil, + util, +) from mercurial.i18n import _ -from mercurial.node import bin cmdtable = {} command = cmdutil.command(cmdtable) @@ -167,7 +180,7 @@ msg.attach(mail.mimeencode(ui, body, _charsets, opts.get('test'))) p = mail.mimetextpatch('\n'.join(patchlines), 'x-patch', opts.get('test')) - binnode = bin(node) + binnode = nodemod.bin(node) # if node is mq patch, it will have the patch file's name as a tag if not patchname: patchtags = [t for t in repo.nodetags(binnode) @@ -703,7 +716,8 @@ finally: ui.setconfig('smtp', 'verifycert', verifycert, 'patchbomb') ui.status(_('sending '), subj, ' ...\n') - ui.progress(_('sending'), i, item=subj, total=len(msgs)) + ui.progress(_('sending'), i, item=subj, total=len(msgs), + unit=_('emails')) if not mbox: # Exim does not remove the Bcc field del m['Bcc']
--- a/hgext/purge.py Sun Mar 13 02:29:11 2016 +0100 +++ b/hgext/purge.py Tue Mar 15 14:10:46 2016 -0700 @@ -23,10 +23,18 @@ # along with this program; if not, see <http://www.gnu.org/licenses/>. '''command to delete untracked files from the working directory''' +from __future__ import absolute_import -from mercurial import util, commands, cmdutil, scmutil, error +import os + +from mercurial import ( + cmdutil, + commands, + error, + scmutil, + util, +) from mercurial.i18n import _ -import os cmdtable = {} command = cmdutil.command(cmdtable)
--- a/hgext/rebase.py Sun Mar 13 02:29:11 2016 +0100 +++ b/hgext/rebase.py Tue Mar 15 14:10:46 2016 -0700 @@ -16,7 +16,7 @@ from mercurial import hg, util, repair, merge, cmdutil, commands, bookmarks from mercurial import extensions, patch, scmutil, phases, obsolete, error -from mercurial import copies, repoview, revset +from mercurial import copies, destutil, repoview, registrar, revset from mercurial.commands import templateopts from mercurial.node import nullrev, nullid, hex, short from mercurial.lock import release @@ -69,13 +69,14 @@ c(ctx, extra) return extrafn -def _destrebase(repo): - # Destination defaults to the latest revision in the - # current branch - branch = repo[None].branch() - return repo[branch].rev() +def _destrebase(repo, sourceset): + """small wrapper around destmerge to pass the right extra args -revsetpredicate = revset.extpredicate() + Please wrap destutil.destmerge instead.""" + return destutil.destmerge(repo, action='rebase', sourceset=sourceset, + onheadcheck=False) + +revsetpredicate = registrar.revsetpredicate() @revsetpredicate('_destrebase') def _revsetdestrebase(repo, subset, x): @@ -83,12 +84,12 @@ # default destination for rebase. # # XXX: Currently private because I expect the signature to change. - # # XXX: - taking rev as arguments, # # XXX: - bailing out in case of ambiguity vs returning all data. - # # XXX: - probably merging with the merge destination. # i18n: "_rebasedefaultdest" is a keyword - revset.getargs(x, 0, 0, _("_rebasedefaultdest takes no arguments")) - return subset & revset.baseset([_destrebase(repo)]) + sourceset = None + if x is not None: + sourceset = revset.getset(repo, revset.fullreposet(repo), x) + return subset & revset.baseset([_destrebase(repo, sourceset)]) @command('rebase', [('s', 'source', '', @@ -127,10 +128,13 @@ Published commits cannot be rebased (see :hg:`help phases`). To copy commits, see :hg:`help graft`. 
- If you don't specify a destination changeset (``-d/--dest``), - rebase uses the current branch tip as the destination. (The - destination changeset is not modified by rebasing, but new - changesets are added as its descendants.) + If you don't specify a destination changeset (``-d/--dest``), rebase + will use the same logic as :hg:`merge` to pick a destination. if + the current branch contains exactly one other head, the other head + is merged with by default. Otherwise, an explicit revision with + which to merge with must be provided. (destination changeset is not + modified by rebasing, but new changesets are added as its + descendants.) Here are the ways to select changesets: @@ -155,6 +159,11 @@ a named branch with two heads. You will need to explicitly specify source and/or destination. + If you need to use a tool to automate merge/conflict decisions, you + can specify one with ``--tool``, see :hg:`help merge-tools`. + As a caveat: the tool will not be used to mediate when a file was + deleted, there is no hook presently available for this. + If a rebase is interrupted to manually resolve a conflict, it can be continued with --continue/-c or aborted with --abort/-a. 
@@ -258,9 +267,11 @@ try: (originalwd, target, state, skipped, collapsef, keepf, keepbranchesf, external, activebookmark) = restorestatus(repo) + collapsemsg = restorecollapsemsg(repo) except error.RepoLookupError: if abortf: clearstatus(repo) + clearcollapsemsg(repo) repo.ui.warn(_('rebase aborted (no revision is removed,' ' only broken state is cleared)\n')) return 0 @@ -272,78 +283,9 @@ return abort(repo, originalwd, target, state, activebookmark=activebookmark) else: - if srcf and basef: - raise error.Abort(_('cannot specify both a ' - 'source and a base')) - if revf and basef: - raise error.Abort(_('cannot specify both a ' - 'revision and a base')) - if revf and srcf: - raise error.Abort(_('cannot specify both a ' - 'revision and a source')) - - cmdutil.checkunfinished(repo) - cmdutil.bailifchanged(repo) - - if destf: - dest = scmutil.revsingle(repo, destf) - else: - dest = repo[_destrebase(repo)] - destf = str(dest) - - if revf: - rebaseset = scmutil.revrange(repo, revf) - if not rebaseset: - ui.status(_('empty "rev" revision set - ' - 'nothing to rebase\n')) - return _nothingtorebase() - elif srcf: - src = scmutil.revrange(repo, [srcf]) - if not src: - ui.status(_('empty "source" revision set - ' - 'nothing to rebase\n')) - return _nothingtorebase() - rebaseset = repo.revs('(%ld)::', src) - assert rebaseset - else: - base = scmutil.revrange(repo, [basef or '.']) - if not base: - ui.status(_('empty "base" revision set - ' - "can't compute rebase set\n")) - return _nothingtorebase() - commonanc = repo.revs('ancestor(%ld, %d)', base, dest).first() - if commonanc is not None: - rebaseset = repo.revs('(%d::(%ld) - %d)::', - commonanc, base, commonanc) - else: - rebaseset = [] - - if not rebaseset: - # transform to list because smartsets are not comparable to - # lists. This should be improved to honor laziness of - # smartset. 
- if list(base) == [dest.rev()]: - if basef: - ui.status(_('nothing to rebase - %s is both "base"' - ' and destination\n') % dest) - else: - ui.status(_('nothing to rebase - working directory ' - 'parent is also destination\n')) - elif not repo.revs('%ld - ::%d', base, dest): - if basef: - ui.status(_('nothing to rebase - "base" %s is ' - 'already an ancestor of destination ' - '%s\n') % - ('+'.join(str(repo[r]) for r in base), - dest)) - else: - ui.status(_('nothing to rebase - working ' - 'directory parent is already an ' - 'ancestor of destination %s\n') % dest) - else: # can it happen? - ui.status(_('nothing to rebase from %s to %s\n') % - ('+'.join(str(repo[r]) for r in base), dest)) - return _nothingtorebase() + dest, rebaseset = _definesets(ui, repo, destf, srcf, basef, revf) + if dest is None: + return _nothingtorebase() allowunstable = obsolete.isenabled(repo, obsolete.allowunstableopt) if (not (keepf or allowunstable) @@ -355,7 +297,8 @@ hint=_('use --keep to keep original changesets')) obsoletenotrebased = {} - if ui.configbool('experimental', 'rebaseskipobsolete'): + if ui.configbool('experimental', 'rebaseskipobsolete', + default=True): rebasesetrevs = set(rebaseset) rebaseobsrevs = _filterobsoleterevs(repo, rebasesetrevs) obsoletenotrebased = _computeobsoletenotrebased(repo, @@ -364,15 +307,18 @@ rebaseobsskipped = set(obsoletenotrebased) # Obsolete node with successors not in dest leads to divergence - divergenceok = ui.configbool('rebase', + divergenceok = ui.configbool('experimental', 'allowdivergence') divergencebasecandidates = rebaseobsrevs - rebaseobsskipped if divergencebasecandidates and not divergenceok: - msg = _("this rebase will cause divergence") + divhashes = (str(repo[r]) + for r in divergencebasecandidates) + msg = _("this rebase will cause " + "divergences from: %s") h = _("to force the rebase please set " - "rebase.allowdivergence=True") - raise error.Abort(msg, hint=h) + "experimental.allowdivergence=True") + raise error.Abort(msg 
% (",".join(divhashes),), hint=h) # - plain prune (no successor) changesets are rebased # - split changesets are not rebased if at least one of the @@ -452,6 +398,7 @@ targetancestors) storestatus(repo, originalwd, target, state, collapsef, keepf, keepbranchesf, external, activebookmark) + storecollapsemsg(repo, collapsemsg) if len(repo[None].parents()) == 2: repo.ui.debug('resuming interrupted rebase\n') else: @@ -573,6 +520,7 @@ # active bookmark was divergent one and has been deleted activebookmark = None clearstatus(repo) + clearcollapsemsg(repo) ui.note(_("rebase completed\n")) util.unlinkpath(repo.sjoin('undo'), ignoremissing=True) @@ -586,6 +534,84 @@ finally: release(lock, wlock) +def _definesets(ui, repo, destf=None, srcf=None, basef=None, revf=[]): + """use revisions argument to define destination and rebase set + """ + if srcf and basef: + raise error.Abort(_('cannot specify both a source and a base')) + if revf and basef: + raise error.Abort(_('cannot specify both a revision and a base')) + if revf and srcf: + raise error.Abort(_('cannot specify both a revision and a source')) + + cmdutil.checkunfinished(repo) + cmdutil.bailifchanged(repo) + + if destf: + dest = scmutil.revsingle(repo, destf) + + if revf: + rebaseset = scmutil.revrange(repo, revf) + if not rebaseset: + ui.status(_('empty "rev" revision set - nothing to rebase\n')) + return None, None + elif srcf: + src = scmutil.revrange(repo, [srcf]) + if not src: + ui.status(_('empty "source" revision set - nothing to rebase\n')) + return None, None + rebaseset = repo.revs('(%ld)::', src) + assert rebaseset + else: + base = scmutil.revrange(repo, [basef or '.']) + if not base: + ui.status(_('empty "base" revision set - ' + "can't compute rebase set\n")) + return None, None + if not destf: + dest = repo[_destrebase(repo, base)] + destf = str(dest) + + commonanc = repo.revs('ancestor(%ld, %d)', base, dest).first() + if commonanc is not None: + rebaseset = repo.revs('(%d::(%ld) - %d)::', + commonanc, 
base, commonanc) + else: + rebaseset = [] + + if not rebaseset: + # transform to list because smartsets are not comparable to + # lists. This should be improved to honor laziness of + # smartset. + if list(base) == [dest.rev()]: + if basef: + ui.status(_('nothing to rebase - %s is both "base"' + ' and destination\n') % dest) + else: + ui.status(_('nothing to rebase - working directory ' + 'parent is also destination\n')) + elif not repo.revs('%ld - ::%d', base, dest): + if basef: + ui.status(_('nothing to rebase - "base" %s is ' + 'already an ancestor of destination ' + '%s\n') % + ('+'.join(str(repo[r]) for r in base), + dest)) + else: + ui.status(_('nothing to rebase - working ' + 'directory parent is already an ' + 'ancestor of destination %s\n') % dest) + else: # can it happen? + ui.status(_('nothing to rebase from %s to %s\n') % + ('+'.join(str(repo[r]) for r in base), dest)) + return None, None + + if not destf: + dest = repo[_destrebase(repo, rebaseset)] + destf = str(dest) + + return dest, rebaseset + def externalparent(repo, state, targetancestors): """Return the revision that should be used as the second parent when the revisions in state is collapsed on top of targetancestors. 
@@ -838,6 +864,29 @@ bookmarks.deletedivergent(repo, [targetnode], k) marks.recordchange(tr) +def storecollapsemsg(repo, collapsemsg): + 'Store the collapse message to allow recovery' + collapsemsg = collapsemsg or '' + f = repo.vfs("last-message.txt", "w") + f.write("%s\n" % collapsemsg) + f.close() + +def clearcollapsemsg(repo): + 'Remove collapse message file' + util.unlinkpath(repo.join("last-message.txt"), ignoremissing=True) + +def restorecollapsemsg(repo): + 'Restore previously stored collapse message' + try: + f = repo.vfs("last-message.txt") + collapsemsg = f.readline().strip() + f.close() + except IOError as err: + if err.errno != errno.ENOENT: + raise + raise error.Abort(_('no rebase in progress')) + return collapsemsg + def storestatus(repo, originalwd, target, state, collapse, keep, keepbranches, external, activebookmark): 'Store the current status to allow recovery' @@ -910,7 +959,7 @@ except IOError as err: if err.errno != errno.ENOENT: raise - raise error.Abort(_('no rebase in progress')) + cmdutil.wrongtooltocontinue(repo, _('rebase')) if keepbranches is None: raise error.Abort(_('.hg/rebasestate is incomplete')) @@ -997,6 +1046,7 @@ finally: clearstatus(repo) + clearcollapsemsg(repo) repo.ui.warn(_('rebase aborted\n')) return 0 @@ -1140,7 +1190,6 @@ ui.debug('--update and --rebase are not compatible, ignoring ' 'the update flag\n') - movemarkfrom = repo['.'].node() revsprepull = len(repo) origpostincoming = commands.postincoming def _dummy(*args, **kwargs): @@ -1160,15 +1209,18 @@ # --source. 
if 'source' in opts: del opts['source'] - rebase(ui, repo, **opts) - branch = repo[None].branch() - dest = repo[branch].rev() - if dest != repo['.'].rev(): - # there was nothing to rebase we force an update - hg.update(repo, dest) - if bookmarks.update(repo, [movemarkfrom], repo['.'].node()): - ui.status(_("updating bookmark %s\n") - % repo._activebookmark) + try: + rebase(ui, repo, **opts) + except error.NoMergeDestAbort: + # we can maybe update instead + rev, _a, _b = destutil.destupdate(repo) + if rev == repo['.'].rev(): + ui.status(_('nothing to rebase\n')) + else: + ui.status(_('nothing to rebase - updating instead\n')) + # not passing argument to get the bare update behavior + # with warning and trumpets + commands.update(ui, repo) finally: release(lock, wlock) else: @@ -1274,4 +1326,3 @@ ['rebasestate', _('hg rebase --continue')]) # ensure rebased rev are not hidden extensions.wrapfunction(repoview, '_getdynamicblockers', _rebasedvisible) - revsetpredicate.setup()
--- a/hgext/record.py Sun Mar 13 02:29:11 2016 +0100 +++ b/hgext/record.py Tue Mar 15 14:10:46 2016 -0700 @@ -6,10 +6,15 @@ # GNU General Public License version 2 or any later version. '''commands to interactively select changes for commit/qrefresh''' +from __future__ import absolute_import +from mercurial import ( + cmdutil, + commands, + error, + extensions, +) from mercurial.i18n import _ -from mercurial import cmdutil, commands, extensions -from mercurial import error cmdtable = {} command = cmdutil.command(cmdtable)
--- a/hgext/relink.py Sun Mar 13 02:29:11 2016 +0100 +++ b/hgext/relink.py Tue Mar 15 14:10:46 2016 -0700 @@ -6,10 +6,18 @@ # GNU General Public License version 2 or any later version. """recreates hardlinks between repository clones""" +from __future__ import absolute_import -from mercurial import cmdutil, hg, util, error +import os +import stat + +from mercurial import ( + cmdutil, + error, + hg, + util, +) from mercurial.i18n import _ -import os, stat cmdtable = {} command = cmdutil.command(cmdtable)
--- a/hgext/schemes.py Sun Mar 13 02:29:11 2016 +0100 +++ b/hgext/schemes.py Tue Mar 15 14:10:46 2016 -0700 @@ -39,11 +39,22 @@ You can override a predefined scheme by defining a new scheme with the same name. """ +from __future__ import absolute_import -import os, re -from mercurial import extensions, hg, templater, util, error +import os +import re +from mercurial import ( + cmdutil, + error, + extensions, + hg, + templater, + util, +) from mercurial.i18n import _ +cmdtable = {} +command = cmdutil.command(cmdtable) # Note for extension authors: ONLY specify testedwith = 'internal' for # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should # be specifying the version(s) of Mercurial they are tested with, or @@ -65,6 +76,10 @@ return '<ShortRepository: %s>' % self.scheme def instance(self, ui, url, create): + url = self.resolve(url) + return hg._peerlookup(url).instance(ui, url, create) + + def resolve(self, url): # Should this use the util.url class, or is manual parsing better? try: url = url.split('://', 1)[1] @@ -77,8 +92,7 @@ else: tail = '' context = dict((str(i + 1), v) for i, v in enumerate(parts)) - url = ''.join(self.templater.process(self.url, context)) + tail - return hg._peerlookup(url).instance(ui, url, create) + return ''.join(self.templater.process(self.url, context)) + tail def hasdriveletter(orig, path): if path: @@ -106,3 +120,12 @@ hg.schemes[scheme] = ShortRepository(url, scheme, t) extensions.wrapfunction(util, 'hasdriveletter', hasdriveletter) + +@command('debugexpandscheme', norepo=True) +def expandscheme(ui, url, **opts): + """given a repo path, provide the scheme-expanded path + """ + repo = hg._peerlookup(url) + if isinstance(repo, ShortRepository): + url = repo.resolve(url) + ui.write(url + '\n')
--- a/hgext/shelve.py Sun Mar 13 02:29:11 2016 +0100 +++ b/hgext/shelve.py Tue Mar 15 14:10:46 2016 -0700 @@ -20,17 +20,36 @@ shelved change has a distinct name. For details, see the help for "hg shelve". """ +from __future__ import absolute_import import collections +import errno import itertools +from mercurial import ( + bundle2, + bundlerepo, + changegroup, + cmdutil, + commands, + error, + exchange, + hg, + lock as lockmod, + mdiff, + merge, + node as nodemod, + patch, + phases, + repair, + scmutil, + templatefilters, + util, +) from mercurial.i18n import _ -from mercurial.node import nullid, nullrev, bin, hex -from mercurial import changegroup, cmdutil, scmutil, phases, commands -from mercurial import error, hg, mdiff, merge, patch, repair, util -from mercurial import templatefilters, exchange, bundlerepo, bundle2 -from mercurial import lock as lockmod -from hgext import rebase -import errno + +from . import ( + rebase, +) cmdtable = {} command = cmdutil.command(cmdtable) @@ -146,15 +165,15 @@ name = fp.readline().strip() wctx = fp.readline().strip() pendingctx = fp.readline().strip() - parents = [bin(h) for h in fp.readline().split()] - stripnodes = [bin(h) for h in fp.readline().split()] + parents = [nodemod.bin(h) for h in fp.readline().split()] + stripnodes = [nodemod.bin(h) for h in fp.readline().split()] finally: fp.close() obj = cls() obj.name = name - obj.wctx = repo[bin(wctx)] - obj.pendingctx = repo[bin(pendingctx)] + obj.wctx = repo[nodemod.bin(wctx)] + obj.pendingctx = repo[nodemod.bin(pendingctx)] obj.parents = parents obj.stripnodes = stripnodes @@ -165,10 +184,12 @@ fp = repo.vfs(cls._filename, 'wb') fp.write('%i\n' % cls._version) fp.write('%s\n' % name) - fp.write('%s\n' % hex(originalwctx.node())) - fp.write('%s\n' % hex(pendingctx.node())) - fp.write('%s\n' % ' '.join([hex(p) for p in repo.dirstate.parents()])) - fp.write('%s\n' % ' '.join([hex(n) for n in stripnodes])) + fp.write('%s\n' % nodemod.hex(originalwctx.node())) + fp.write('%s\n' 
% nodemod.hex(pendingctx.node())) + fp.write('%s\n' % + ' '.join([nodemod.hex(p) for p in repo.dirstate.parents()])) + fp.write('%s\n' % + ' '.join([nodemod.hex(n) for n in stripnodes])) fp.close() @classmethod @@ -233,7 +254,7 @@ """return all mutable ancestors for ctx (included) Much faster than the revset ancestors(ctx) & draft()""" - seen = set([nullrev]) + seen = set([nodemod.nullrev]) visit = collections.deque() visit.append(ctx) while visit: @@ -264,15 +285,15 @@ for i in xrange(1, 100): yield '%s-%02d' % (label, i) - if parent.node() != nullid: + if parent.node() != nodemod.nullid: desc = "changes to: %s" % parent.description().split('\n', 1)[0] else: desc = '(changes in empty repository)' - if not opts['message']: + if not opts.get('message'): opts['message'] = desc - name = opts['name'] + name = opts.get('name') lock = tr = None try: @@ -519,7 +540,7 @@ def unshelvecleanup(ui, repo, name, opts): """remove related files after an unshelve""" - if not opts['keep']: + if not opts.get('keep'): for filetype in 'hg patch'.split(): shelvedfile(repo, name, filetype).movetobackup() cleanupoldbackups(repo) @@ -609,8 +630,8 @@ return _dounshelve(ui, repo, *shelved, **opts) def _dounshelve(ui, repo, *shelved, **opts): - abortf = opts['abort'] - continuef = opts['continue'] + abortf = opts.get('abort') + continuef = opts.get('continue') if not abortf and not continuef: cmdutil.checkunfinished(repo) @@ -628,7 +649,7 @@ except IOError as err: if err.errno != errno.ENOENT: raise - raise error.Abort(_('no unshelve operation underway')) + cmdutil.wrongtooltocontinue(repo, _('unshelve')) if abortf: return unshelveabort(ui, repo, state, opts) @@ -829,7 +850,7 @@ ('stat', set(['stat', 'list'])), ] def checkopt(opt): - if opts[opt]: + if opts.get(opt): for i, allowable in allowables: if opts[i] and opt not in allowable: raise error.Abort(_("options '--%s' and '--%s' may not be "
--- a/hgext/strip.py Sun Mar 13 02:29:11 2016 +0100 +++ b/hgext/strip.py Tue Mar 15 14:10:46 2016 -0700 @@ -3,11 +3,23 @@ This extension allows you to strip changesets and all their descendants from the repository. See the command help for details. """ +from __future__ import absolute_import + +from mercurial import ( + bookmarks as bookmarksmod, + cmdutil, + error, + hg, + lock as lockmod, + merge, + node as nodemod, + repair, + scmutil, + util, +) from mercurial.i18n import _ -from mercurial.node import nullid -from mercurial.lock import release -from mercurial import cmdutil, hg, scmutil, util, error -from mercurial import repair, bookmarks as bookmarksmod , merge +nullid = nodemod.nullid +release = lockmod.release cmdtable = {} command = cmdutil.command(cmdtable)
--- a/hgext/transplant.py Sun Mar 13 02:29:11 2016 +0100 +++ b/hgext/transplant.py Tue Mar 15 14:10:46 2016 -0700 @@ -13,13 +13,28 @@ Transplanted patches are recorded in .hg/transplant/transplants, as a map from a changeset hash to its hash in the source repository. ''' +from __future__ import absolute_import +import os +import tempfile from mercurial.i18n import _ -import os, tempfile -from mercurial.node import short -from mercurial import bundlerepo, hg, merge, match -from mercurial import patch, revlog, scmutil, util, error, cmdutil -from mercurial import revset, templatekw, exchange +from mercurial import ( + bundlerepo, + cmdutil, + error, + exchange, + hg, + match, + merge, + node as nodemod, + patch, + registrar, + revlog, + revset, + scmutil, + templatekw, + util, +) class TransplantError(error.Abort): pass @@ -64,7 +79,7 @@ fp = self.opener(self.transplantfile, 'w') for list in self.transplants.itervalues(): for t in list: - l, r = map(revlog.hex, (t.lnode, t.rnode)) + l, r = map(nodemod.hex, (t.lnode, t.rnode)) fp.write(l + ':' + r + '\n') fp.close() self.dirty = False @@ -133,7 +148,7 @@ tr = repo.transaction('transplant') for rev in revs: node = revmap[rev] - revstr = '%s:%s' % (rev, short(node)) + revstr = '%s:%s' % (rev, nodemod.short(node)) if self.applied(repo, node, p1): self.ui.warn(_('skipping already applied revision %s\n') % @@ -168,13 +183,14 @@ if parents[1] != revlog.nullid: if not opts.get('parent'): self.ui.note(_('skipping merge changeset %s:%s\n') - % (rev, short(node))) + % (rev, nodemod.short(node))) skipmerge = True else: parent = source.lookup(opts['parent']) if parent not in parents: raise error.Abort(_('%s is not a parent of %s') % - (short(parent), short(node))) + (nodemod.short(parent), + nodemod.short(node))) else: parent = parents[0] @@ -204,11 +220,11 @@ raise if n and domerge: self.ui.status(_('%s merged at %s\n') % (revstr, - short(n))) + nodemod.short(n))) elif n: self.ui.status(_('%s transplanted to %s\n') - % 
(short(node), - short(n))) + % (nodemod.short(node), + nodemod.short(n))) finally: if patchfile: os.unlink(patchfile) @@ -241,7 +257,7 @@ self.ui.system('%s %s %s' % (filter, util.shellquote(headerfile), util.shellquote(patchfile)), environ={'HGUSER': changelog[1], - 'HGREVISION': revlog.hex(node), + 'HGREVISION': nodemod.hex(node), }, onerr=error.Abort, errprefix=_('filter failed')) user, date, msg = self.parselog(file(headerfile))[1:4] @@ -261,9 +277,9 @@ if log: # we don't translate messages inserted into commits - message += '\n(transplanted from %s)' % revlog.hex(node) + message += '\n(transplanted from %s)' % nodemod.hex(node) - self.ui.status(_('applying %s\n') % short(node)) + self.ui.status(_('applying %s\n') % nodemod.short(node)) self.ui.note('%s %s\n%s\n' % (user, date, message)) if not patchfile and not merge: @@ -295,7 +311,8 @@ n = repo.commit(message, user, date, extra=extra, match=m, editor=self.getcommiteditor()) if not n: - self.ui.warn(_('skipping emptied changeset %s\n') % short(node)) + self.ui.warn(_('skipping emptied changeset %s\n') % + nodemod.short(node)) return None if not merge: self.transplants.set(n, node) @@ -310,11 +327,12 @@ if os.path.exists(os.path.join(self.path, 'journal')): n, node = self.recover(repo, source, opts) if n: - self.ui.status(_('%s transplanted as %s\n') % (short(node), - short(n))) + self.ui.status(_('%s transplanted as %s\n') % + (nodemod.short(node), + nodemod.short(n))) else: self.ui.status(_('%s skipped due to empty diff\n') - % (short(node),)) + % (nodemod.short(node),)) seriespath = os.path.join(self.path, 'series') if not os.path.exists(seriespath): self.transplants.write() @@ -341,7 +359,8 @@ parent = source.lookup(opts['parent']) if parent not in parents: raise error.Abort(_('%s is not a parent of %s') % - (short(parent), short(node))) + (nodemod.short(parent), + nodemod.short(node))) else: merge = True @@ -350,7 +369,7 @@ p1, p2 = repo.dirstate.parents() if p1 != parent: raise error.Abort(_('working 
directory not at transplant ' - 'parent %s') % revlog.hex(parent)) + 'parent %s') % nodemod.hex(parent)) if merge: repo.setparents(p1, parents[1]) modified, added, removed, deleted = repo.status()[:4] @@ -391,11 +410,11 @@ os.mkdir(self.path) series = self.opener('series', 'w') for rev in sorted(revmap): - series.write(revlog.hex(revmap[rev]) + '\n') + series.write(nodemod.hex(revmap[rev]) + '\n') if merges: series.write('# Merges\n') for m in merges: - series.write(revlog.hex(m) + '\n') + series.write(nodemod.hex(m) + '\n') series.close() def parselog(self, fp): @@ -431,10 +450,10 @@ fp = self.opener('journal', 'w') fp.write('# User %s\n' % user) fp.write('# Date %s\n' % date) - fp.write('# Node ID %s\n' % revlog.hex(p2)) - fp.write('# Parent ' + revlog.hex(p1) + '\n') + fp.write('# Node ID %s\n' % nodemod.hex(p2)) + fp.write('# Parent ' + nodemod.hex(p1) + '\n') if merge: - fp.write('# Parent ' + revlog.hex(p2) + '\n') + fp.write('# Parent ' + nodemod.hex(p2) + '\n') fp.write(message.rstrip() + '\n') fp.close() @@ -694,7 +713,7 @@ if cleanupfn: cleanupfn() -revsetpredicate = revset.extpredicate() +revsetpredicate = registrar.revsetpredicate() @revsetpredicate('transplanted([set])') def revsettransplanted(repo, subset, x): @@ -711,10 +730,9 @@ """:transplanted: String. The node identifier of the transplanted changeset if any.""" n = ctx.extra().get('transplant_source') - return n and revlog.hex(n) or '' + return n and nodemod.hex(n) or '' def extsetup(ui): - revsetpredicate.setup() templatekw.keywords['transplanted'] = kwtransplanted cmdutil.unfinishedstates.append( ['transplant/journal', True, False, _('transplant in progress'),
--- a/hgext/win32mbcs.py Sun Mar 13 02:29:11 2016 +0100 +++ b/hgext/win32mbcs.py Tue Mar 15 14:10:46 2016 -0700 @@ -44,10 +44,17 @@ It is useful for the users who want to commit with UTF-8 log message. ''' +from __future__ import absolute_import -import os, sys +import os +import sys + +from mercurial import ( + encoding, + error, +) from mercurial.i18n import _ -from mercurial import error, encoding + # Note for extension authors: ONLY specify testedwith = 'internal' for # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should # be specifying the version(s) of Mercurial they are tested with, or
--- a/hgext/zeroconf/Zeroconf.py Sun Mar 13 02:29:11 2016 +0100 +++ b/hgext/zeroconf/Zeroconf.py Tue Mar 15 14:10:46 2016 -0700 @@ -1,3 +1,5 @@ +from __future__ import absolute_import, print_function + """ Multicast DNS Service Discovery for Python, v0.12 Copyright (C) 2003, Paul Scott-Murphy @@ -23,29 +25,29 @@ """ """0.12 update - allow selection of binding interface - typo fix - Thanks A. M. Kuchlingi - removed all use of word 'Rendezvous' - this is an API change""" + typo fix - Thanks A. M. Kuchlingi + removed all use of word 'Rendezvous' - this is an API change""" """0.11 update - correction to comments for addListener method support for new record types seen from OS X - - IPv6 address - - hostinfo - ignore unknown DNS record types - fixes to name decoding - works alongside other processes using port 5353 (e.g. on Mac OS X) - tested against Mac OS X 10.3.2's mDNSResponder - corrections to removal of list entries for service browser""" + - IPv6 address + - hostinfo + ignore unknown DNS record types + fixes to name decoding + works alongside other processes using port 5353 (e.g. 
Mac OS X) + tested against Mac OS X 10.3.2's mDNSResponder + corrections to removal of list entries for service browser""" """0.10 update - Jonathon Paisley contributed these corrections: always multicast replies, even when query is unicast - correct a pointer encoding problem - can now write records in any order - traceback shown on failure - better TXT record parsing - server is now separate from name - can cancel a service browser + correct a pointer encoding problem + can now write records in any order + traceback shown on failure + better TXT record parsing + server is now separate from name + can cancel a service browser - modified some unit tests to accommodate these changes""" + modified some unit tests to accommodate these changes""" """0.09 update - remove all records on service unregistration fix DOS security problem with readName""" @@ -54,36 +56,37 @@ """0.07 update - faster shutdown on engine pointer encoding of outgoing names - ServiceBrowser now works - new unit tests""" + ServiceBrowser now works + new unit tests""" """0.06 update - small improvements with unit tests added defined exception types - new style objects - fixed hostname/interface problem - fixed socket timeout problem - fixed addServiceListener() typo bug - using select() for socket reads - tested on Debian unstable with Python 2.2.2""" + new style objects + fixed hostname/interface problem + fixed socket timeout problem + fixed addServiceListener() typo bug + using select() for socket reads + tested on Debian unstable with Python 2.2.2""" """0.05 update - ensure case insensitivity on domain names support for unicast DNS queries""" """0.04 update - added some unit tests added __ne__ adjuncts where required - ensure names end in '.local.' - timeout on receiving socket for clean shutdown""" + ensure names end in '.local.' 
+ timeout on receiving socket for clean shutdown""" __author__ = "Paul Scott-Murphy" __email__ = "paul at scott dash murphy dot com" __version__ = "0.12" +import itertools +import select +import socket import string -import time import struct -import socket import threading -import select +import time import traceback __all__ = ["Zeroconf", "ServiceInfo", "ServiceBrowser"] @@ -103,9 +106,9 @@ # Some DNS constants _MDNS_ADDR = '224.0.0.251' -_MDNS_PORT = 5353; -_DNS_PORT = 53; -_DNS_TTL = 60 * 60; # one hour default TTL +_MDNS_PORT = 5353 +_DNS_PORT = 53 +_DNS_TTL = 60 * 60 # one hour default TTL _MAX_MSG_TYPICAL = 1460 # unused _MAX_MSG_ABSOLUTE = 8972 @@ -155,1426 +158,1523 @@ # Mapping constants to names _CLASSES = { _CLASS_IN : "in", - _CLASS_CS : "cs", - _CLASS_CH : "ch", - _CLASS_HS : "hs", - _CLASS_NONE : "none", - _CLASS_ANY : "any" } + _CLASS_CS : "cs", + _CLASS_CH : "ch", + _CLASS_HS : "hs", + _CLASS_NONE : "none", + _CLASS_ANY : "any" } _TYPES = { _TYPE_A : "a", - _TYPE_NS : "ns", - _TYPE_MD : "md", - _TYPE_MF : "mf", - _TYPE_CNAME : "cname", - _TYPE_SOA : "soa", - _TYPE_MB : "mb", - _TYPE_MG : "mg", - _TYPE_MR : "mr", - _TYPE_NULL : "null", - _TYPE_WKS : "wks", - _TYPE_PTR : "ptr", - _TYPE_HINFO : "hinfo", - _TYPE_MINFO : "minfo", - _TYPE_MX : "mx", - _TYPE_TXT : "txt", - _TYPE_AAAA : "quada", - _TYPE_SRV : "srv", - _TYPE_ANY : "any" } + _TYPE_NS : "ns", + _TYPE_MD : "md", + _TYPE_MF : "mf", + _TYPE_CNAME : "cname", + _TYPE_SOA : "soa", + _TYPE_MB : "mb", + _TYPE_MG : "mg", + _TYPE_MR : "mr", + _TYPE_NULL : "null", + _TYPE_WKS : "wks", + _TYPE_PTR : "ptr", + _TYPE_HINFO : "hinfo", + _TYPE_MINFO : "minfo", + _TYPE_MX : "mx", + _TYPE_TXT : "txt", + _TYPE_AAAA : "quada", + _TYPE_SRV : "srv", + _TYPE_ANY : "any" } # utility functions def currentTimeMillis(): - """Current system time in milliseconds""" - return time.time() * 1000 + """Current system time in milliseconds""" + return time.time() * 1000 # Exceptions class NonLocalNameException(Exception): - pass 
+ pass class NonUniqueNameException(Exception): - pass + pass class NamePartTooLongException(Exception): - pass + pass class AbstractMethodException(Exception): - pass + pass class BadTypeInNameException(Exception): - pass + pass class BadDomainName(Exception): - def __init__(self, pos): - Exception.__init__(self, "at position %s" % pos) + def __init__(self, pos): + Exception.__init__(self, "at position %s" % pos) class BadDomainNameCircular(BadDomainName): - pass + pass # implementation classes class DNSEntry(object): - """A DNS entry""" + """A DNS entry""" - def __init__(self, name, type, clazz): - self.key = string.lower(name) - self.name = name - self.type = type - self.clazz = clazz & _CLASS_MASK - self.unique = (clazz & _CLASS_UNIQUE) != 0 + def __init__(self, name, type, clazz): + self.key = string.lower(name) + self.name = name + self.type = type + self.clazz = clazz & _CLASS_MASK + self.unique = (clazz & _CLASS_UNIQUE) != 0 - def __eq__(self, other): - """Equality test on name, type, and class""" - if isinstance(other, DNSEntry): - return self.name == other.name and self.type == other.type and self.clazz == other.clazz - return 0 + def __eq__(self, other): + """Equality test on name, type, and class""" + if isinstance(other, DNSEntry): + return (self.name == other.name and self.type == other.type and + self.clazz == other.clazz) + return 0 - def __ne__(self, other): - """Non-equality test""" - return not self.__eq__(other) + def __ne__(self, other): + """Non-equality test""" + return not self.__eq__(other) - def getClazz(self, clazz): - """Class accessor""" - try: - return _CLASSES[clazz] - except KeyError: - return "?(%s)" % (clazz) + def getClazz(self, clazz): + """Class accessor""" + try: + return _CLASSES[clazz] + except KeyError: + return "?(%s)" % (clazz) - def getType(self, type): - """Type accessor""" - try: - return _TYPES[type] - except KeyError: - return "?(%s)" % (type) + def getType(self, type): + """Type accessor""" + try: + return 
_TYPES[type] + except KeyError: + return "?(%s)" % (type) - def toString(self, hdr, other): - """String representation with additional information""" - result = "%s[%s,%s" % (hdr, self.getType(self.type), self.getClazz(self.clazz)) - if self.unique: - result += "-unique," - else: - result += "," - result += self.name - if other is not None: - result += ",%s]" % (other) - else: - result += "]" - return result + def toString(self, hdr, other): + """String representation with additional information""" + result = ("%s[%s,%s" % + (hdr, self.getType(self.type), self.getClazz(self.clazz))) + if self.unique: + result += "-unique," + else: + result += "," + result += self.name + if other is not None: + result += ",%s]" % (other) + else: + result += "]" + return result class DNSQuestion(DNSEntry): - """A DNS question entry""" + """A DNS question entry""" - def __init__(self, name, type, clazz): - if not name.endswith(".local."): - raise NonLocalNameException(name) - DNSEntry.__init__(self, name, type, clazz) + def __init__(self, name, type, clazz): + if not name.endswith(".local."): + raise NonLocalNameException(name) + DNSEntry.__init__(self, name, type, clazz) - def answeredBy(self, rec): - """Returns true if the question is answered by the record""" - return self.clazz == rec.clazz and (self.type == rec.type or self.type == _TYPE_ANY) and self.name == rec.name + def answeredBy(self, rec): + """Returns true if the question is answered by the record""" + return (self.clazz == rec.clazz and + (self.type == rec.type or self.type == _TYPE_ANY) and + self.name == rec.name) - def __repr__(self): - """String representation""" - return DNSEntry.toString(self, "question", None) + def __repr__(self): + """String representation""" + return DNSEntry.toString(self, "question", None) class DNSRecord(DNSEntry): - """A DNS record - like a DNS entry, but has a TTL""" + """A DNS record - like a DNS entry, but has a TTL""" - def __init__(self, name, type, clazz, ttl): - 
DNSEntry.__init__(self, name, type, clazz) - self.ttl = ttl - self.created = currentTimeMillis() + def __init__(self, name, type, clazz, ttl): + DNSEntry.__init__(self, name, type, clazz) + self.ttl = ttl + self.created = currentTimeMillis() - def __eq__(self, other): - """Tests equality as per DNSRecord""" - if isinstance(other, DNSRecord): - return DNSEntry.__eq__(self, other) - return 0 + def __eq__(self, other): + """Tests equality as per DNSRecord""" + if isinstance(other, DNSRecord): + return DNSEntry.__eq__(self, other) + return 0 - def suppressedBy(self, msg): - """Returns true if any answer in a message can suffice for the - information held in this record.""" - for record in msg.answers: - if self.suppressedByAnswer(record): - return 1 - return 0 + def suppressedBy(self, msg): + """Returns true if any answer in a message can suffice for the + information held in this record.""" + for record in msg.answers: + if self.suppressedByAnswer(record): + return 1 + return 0 - def suppressedByAnswer(self, other): - """Returns true if another record has same name, type and class, - and if its TTL is at least half of this record's.""" - if self == other and other.ttl > (self.ttl / 2): - return 1 - return 0 + def suppressedByAnswer(self, other): + """Returns true if another record has same name, type and class, + and if its TTL is at least half of this record's.""" + if self == other and other.ttl > (self.ttl / 2): + return 1 + return 0 - def getExpirationTime(self, percent): - """Returns the time at which this record will have expired - by a certain percentage.""" - return self.created + (percent * self.ttl * 10) + def getExpirationTime(self, percent): + """Returns the time at which this record will have expired + by a certain percentage.""" + return self.created + (percent * self.ttl * 10) - def getRemainingTTL(self, now): - """Returns the remaining TTL in seconds.""" - return max(0, (self.getExpirationTime(100) - now) / 1000) + def getRemainingTTL(self, now): + 
"""Returns the remaining TTL in seconds.""" + return max(0, (self.getExpirationTime(100) - now) / 1000) - def isExpired(self, now): - """Returns true if this record has expired.""" - return self.getExpirationTime(100) <= now + def isExpired(self, now): + """Returns true if this record has expired.""" + return self.getExpirationTime(100) <= now - def isStale(self, now): - """Returns true if this record is at least half way expired.""" - return self.getExpirationTime(50) <= now + def isStale(self, now): + """Returns true if this record is at least half way expired.""" + return self.getExpirationTime(50) <= now + + def resetTTL(self, other): + """Sets this record's TTL and created time to that of + another record.""" + self.created = other.created + self.ttl = other.ttl - def resetTTL(self, other): - """Sets this record's TTL and created time to that of - another record.""" - self.created = other.created - self.ttl = other.ttl + def write(self, out): + """Abstract method""" + raise AbstractMethodException - def write(self, out): - """Abstract method""" - raise AbstractMethodException - - def toString(self, other): - """String representation with additional information""" - arg = "%s/%s,%s" % (self.ttl, self.getRemainingTTL(currentTimeMillis()), other) - return DNSEntry.toString(self, "record", arg) + def toString(self, other): + """String representation with additional information""" + arg = ("%s/%s,%s" % + (self.ttl, self.getRemainingTTL(currentTimeMillis()), other)) + return DNSEntry.toString(self, "record", arg) class DNSAddress(DNSRecord): - """A DNS address record""" + """A DNS address record""" - def __init__(self, name, type, clazz, ttl, address): - DNSRecord.__init__(self, name, type, clazz, ttl) - self.address = address + def __init__(self, name, type, clazz, ttl, address): + DNSRecord.__init__(self, name, type, clazz, ttl) + self.address = address - def write(self, out): - """Used in constructing an outgoing packet""" - out.writeString(self.address, 
len(self.address)) + def write(self, out): + """Used in constructing an outgoing packet""" + out.writeString(self.address, len(self.address)) - def __eq__(self, other): - """Tests equality on address""" - if isinstance(other, DNSAddress): - return self.address == other.address - return 0 + def __eq__(self, other): + """Tests equality on address""" + if isinstance(other, DNSAddress): + return self.address == other.address + return 0 - def __repr__(self): - """String representation""" - try: - return socket.inet_ntoa(self.address) - except Exception: - return self.address + def __repr__(self): + """String representation""" + try: + return socket.inet_ntoa(self.address) + except Exception: + return self.address class DNSHinfo(DNSRecord): - """A DNS host information record""" + """A DNS host information record""" - def __init__(self, name, type, clazz, ttl, cpu, os): - DNSRecord.__init__(self, name, type, clazz, ttl) - self.cpu = cpu - self.os = os + def __init__(self, name, type, clazz, ttl, cpu, os): + DNSRecord.__init__(self, name, type, clazz, ttl) + self.cpu = cpu + self.os = os - def write(self, out): - """Used in constructing an outgoing packet""" - out.writeString(self.cpu, len(self.cpu)) - out.writeString(self.os, len(self.os)) + def write(self, out): + """Used in constructing an outgoing packet""" + out.writeString(self.cpu, len(self.cpu)) + out.writeString(self.os, len(self.os)) - def __eq__(self, other): - """Tests equality on cpu and os""" - if isinstance(other, DNSHinfo): - return self.cpu == other.cpu and self.os == other.os - return 0 + def __eq__(self, other): + """Tests equality on cpu and os""" + if isinstance(other, DNSHinfo): + return self.cpu == other.cpu and self.os == other.os + return 0 - def __repr__(self): - """String representation""" - return self.cpu + " " + self.os + def __repr__(self): + """String representation""" + return self.cpu + " " + self.os class DNSPointer(DNSRecord): - """A DNS pointer record""" + """A DNS pointer record""" - 
def __init__(self, name, type, clazz, ttl, alias): - DNSRecord.__init__(self, name, type, clazz, ttl) - self.alias = alias + def __init__(self, name, type, clazz, ttl, alias): + DNSRecord.__init__(self, name, type, clazz, ttl) + self.alias = alias - def write(self, out): - """Used in constructing an outgoing packet""" - out.writeName(self.alias) + def write(self, out): + """Used in constructing an outgoing packet""" + out.writeName(self.alias) - def __eq__(self, other): - """Tests equality on alias""" - if isinstance(other, DNSPointer): - return self.alias == other.alias - return 0 + def __eq__(self, other): + """Tests equality on alias""" + if isinstance(other, DNSPointer): + return self.alias == other.alias + return 0 - def __repr__(self): - """String representation""" - return self.toString(self.alias) + def __repr__(self): + """String representation""" + return self.toString(self.alias) class DNSText(DNSRecord): - """A DNS text record""" + """A DNS text record""" - def __init__(self, name, type, clazz, ttl, text): - DNSRecord.__init__(self, name, type, clazz, ttl) - self.text = text + def __init__(self, name, type, clazz, ttl, text): + DNSRecord.__init__(self, name, type, clazz, ttl) + self.text = text - def write(self, out): - """Used in constructing an outgoing packet""" - out.writeString(self.text, len(self.text)) + def write(self, out): + """Used in constructing an outgoing packet""" + out.writeString(self.text, len(self.text)) - def __eq__(self, other): - """Tests equality on text""" - if isinstance(other, DNSText): - return self.text == other.text - return 0 + def __eq__(self, other): + """Tests equality on text""" + if isinstance(other, DNSText): + return self.text == other.text + return 0 - def __repr__(self): - """String representation""" - if len(self.text) > 10: - return self.toString(self.text[:7] + "...") - else: - return self.toString(self.text) + def __repr__(self): + """String representation""" + if len(self.text) > 10: + return 
self.toString(self.text[:7] + "...") + else: + return self.toString(self.text) class DNSService(DNSRecord): - """A DNS service record""" + """A DNS service record""" - def __init__(self, name, type, clazz, ttl, priority, weight, port, server): - DNSRecord.__init__(self, name, type, clazz, ttl) - self.priority = priority - self.weight = weight - self.port = port - self.server = server + def __init__(self, name, type, clazz, ttl, priority, weight, port, server): + DNSRecord.__init__(self, name, type, clazz, ttl) + self.priority = priority + self.weight = weight + self.port = port + self.server = server - def write(self, out): - """Used in constructing an outgoing packet""" - out.writeShort(self.priority) - out.writeShort(self.weight) - out.writeShort(self.port) - out.writeName(self.server) + def write(self, out): + """Used in constructing an outgoing packet""" + out.writeShort(self.priority) + out.writeShort(self.weight) + out.writeShort(self.port) + out.writeName(self.server) - def __eq__(self, other): - """Tests equality on priority, weight, port and server""" - if isinstance(other, DNSService): - return self.priority == other.priority and self.weight == other.weight and self.port == other.port and self.server == other.server - return 0 + def __eq__(self, other): + """Tests equality on priority, weight, port and server""" + if isinstance(other, DNSService): + return (self.priority == other.priority and + self.weight == other.weight and + self.port == other.port and + self.server == other.server) + return 0 - def __repr__(self): - """String representation""" - return self.toString("%s:%s" % (self.server, self.port)) + def __repr__(self): + """String representation""" + return self.toString("%s:%s" % (self.server, self.port)) class DNSIncoming(object): - """Object representation of an incoming DNS packet""" + """Object representation of an incoming DNS packet""" - def __init__(self, data): - """Constructor from string holding bytes of packet""" - self.offset = 0 - 
self.data = data - self.questions = [] - self.answers = [] - self.numQuestions = 0 - self.numAnswers = 0 - self.numAuthorities = 0 - self.numAdditionals = 0 + def __init__(self, data): + """Constructor from string holding bytes of packet""" + self.offset = 0 + self.data = data + self.questions = [] + self.answers = [] + self.numquestions = 0 + self.numanswers = 0 + self.numauthorities = 0 + self.numadditionals = 0 - self.readHeader() - self.readQuestions() - self.readOthers() + self.readHeader() + self.readQuestions() + self.readOthers() - def readHeader(self): - """Reads header portion of packet""" - format = '!HHHHHH' - length = struct.calcsize(format) - info = struct.unpack(format, self.data[self.offset:self.offset+length]) - self.offset += length + def readHeader(self): + """Reads header portion of packet""" + format = '!HHHHHH' + length = struct.calcsize(format) + info = struct.unpack(format, + self.data[self.offset:self.offset + length]) + self.offset += length - self.id = info[0] - self.flags = info[1] - self.numQuestions = info[2] - self.numAnswers = info[3] - self.numAuthorities = info[4] - self.numAdditionals = info[5] + self.id = info[0] + self.flags = info[1] + self.numquestions = info[2] + self.numanswers = info[3] + self.numauthorities = info[4] + self.numadditionals = info[5] - def readQuestions(self): - """Reads questions section of packet""" - format = '!HH' - length = struct.calcsize(format) - for i in range(0, self.numQuestions): - name = self.readName() - info = struct.unpack(format, self.data[self.offset:self.offset+length]) - self.offset += length + def readQuestions(self): + """Reads questions section of packet""" + format = '!HH' + length = struct.calcsize(format) + for i in range(0, self.numquestions): + name = self.readName() + info = struct.unpack(format, + self.data[self.offset:self.offset + length]) + self.offset += length - try: - question = DNSQuestion(name, info[0], info[1]) - self.questions.append(question) - except 
NonLocalNameException: - pass + try: + question = DNSQuestion(name, info[0], info[1]) + self.questions.append(question) + except NonLocalNameException: + pass - def readInt(self): - """Reads an integer from the packet""" - format = '!I' - length = struct.calcsize(format) - info = struct.unpack(format, self.data[self.offset:self.offset+length]) - self.offset += length - return info[0] + def readInt(self): + """Reads an integer from the packet""" + format = '!I' + length = struct.calcsize(format) + info = struct.unpack(format, + self.data[self.offset:self.offset + length]) + self.offset += length + return info[0] - def readCharacterString(self): - """Reads a character string from the packet""" - length = ord(self.data[self.offset]) - self.offset += 1 - return self.readString(length) + def readCharacterString(self): + """Reads a character string from the packet""" + length = ord(self.data[self.offset]) + self.offset += 1 + return self.readString(length) - def readString(self, len): - """Reads a string of a given length from the packet""" - format = '!' + str(len) + 's' - length = struct.calcsize(format) - info = struct.unpack(format, self.data[self.offset:self.offset+length]) - self.offset += length - return info[0] + def readString(self, len): + """Reads a string of a given length from the packet""" + format = '!' 
+ str(len) + 's' + length = struct.calcsize(format) + info = struct.unpack(format, + self.data[self.offset:self.offset + length]) + self.offset += length + return info[0] - def readUnsignedShort(self): - """Reads an unsigned short from the packet""" - format = '!H' - length = struct.calcsize(format) - info = struct.unpack(format, self.data[self.offset:self.offset+length]) - self.offset += length - return info[0] + def readUnsignedShort(self): + """Reads an unsigned short from the packet""" + format = '!H' + length = struct.calcsize(format) + info = struct.unpack(format, + self.data[self.offset:self.offset + length]) + self.offset += length + return info[0] - def readOthers(self): - """Reads the answers, authorities and additionals section of the packet""" - format = '!HHiH' - length = struct.calcsize(format) - n = self.numAnswers + self.numAuthorities + self.numAdditionals - for i in range(0, n): - domain = self.readName() - info = struct.unpack(format, self.data[self.offset:self.offset+length]) - self.offset += length + def readOthers(self): + """Reads answers, authorities and additionals section of the packet""" + format = '!HHiH' + length = struct.calcsize(format) + n = self.numanswers + self.numauthorities + self.numadditionals + for i in range(0, n): + domain = self.readName() + info = struct.unpack(format, + self.data[self.offset:self.offset + length]) + self.offset += length - rec = None - if info[0] == _TYPE_A: - rec = DNSAddress(domain, info[0], info[1], info[2], self.readString(4)) - elif info[0] == _TYPE_CNAME or info[0] == _TYPE_PTR: - rec = DNSPointer(domain, info[0], info[1], info[2], self.readName()) - elif info[0] == _TYPE_TXT: - rec = DNSText(domain, info[0], info[1], info[2], self.readString(info[3])) - elif info[0] == _TYPE_SRV: - rec = DNSService(domain, info[0], info[1], info[2], self.readUnsignedShort(), self.readUnsignedShort(), self.readUnsignedShort(), self.readName()) - elif info[0] == _TYPE_HINFO: - rec = DNSHinfo(domain, info[0], 
info[1], info[2], self.readCharacterString(), self.readCharacterString()) - elif info[0] == _TYPE_AAAA: - rec = DNSAddress(domain, info[0], info[1], info[2], self.readString(16)) - else: - # Try to ignore types we don't know about - # this may mean the rest of the name is - # unable to be parsed, and may show errors - # so this is left for debugging. New types - # encountered need to be parsed properly. - # - #print "UNKNOWN TYPE = " + str(info[0]) - #raise BadTypeInNameException - self.offset += info[3] - - if rec is not None: - self.answers.append(rec) - - def isQuery(self): - """Returns true if this is a query""" - return (self.flags & _FLAGS_QR_MASK) == _FLAGS_QR_QUERY + rec = None + if info[0] == _TYPE_A: + rec = DNSAddress(domain, info[0], info[1], info[2], + self.readString(4)) + elif info[0] == _TYPE_CNAME or info[0] == _TYPE_PTR: + rec = DNSPointer(domain, info[0], info[1], info[2], + self.readName()) + elif info[0] == _TYPE_TXT: + rec = DNSText(domain, info[0], info[1], info[2], + self.readString(info[3])) + elif info[0] == _TYPE_SRV: + rec = DNSService(domain, info[0], info[1], info[2], + self.readUnsignedShort(), + self.readUnsignedShort(), + self.readUnsignedShort(), + self.readName()) + elif info[0] == _TYPE_HINFO: + rec = DNSHinfo(domain, info[0], info[1], info[2], + self.readCharacterString(), + self.readCharacterString()) + elif info[0] == _TYPE_AAAA: + rec = DNSAddress(domain, info[0], info[1], info[2], + self.readString(16)) + else: + # Try to ignore types we don't know about + # this may mean the rest of the name is + # unable to be parsed, and may show errors + # so this is left for debugging. New types + # encountered need to be parsed properly. 
+ # + #print "UNKNOWN TYPE = " + str(info[0]) + #raise BadTypeInNameException + self.offset += info[3] - def isResponse(self): - """Returns true if this is a response""" - return (self.flags & _FLAGS_QR_MASK) == _FLAGS_QR_RESPONSE + if rec is not None: + self.answers.append(rec) + + def isQuery(self): + """Returns true if this is a query""" + return (self.flags & _FLAGS_QR_MASK) == _FLAGS_QR_QUERY + + def isResponse(self): + """Returns true if this is a response""" + return (self.flags & _FLAGS_QR_MASK) == _FLAGS_QR_RESPONSE - def readUTF(self, offset, len): - """Reads a UTF-8 string of a given length from the packet""" - return self.data[offset:offset+len].decode('utf-8') + def readUTF(self, offset, len): + """Reads a UTF-8 string of a given length from the packet""" + return self.data[offset:offset + len].decode('utf-8') - def readName(self): - """Reads a domain name from the packet""" - result = '' - off = self.offset - next = -1 - first = off + def readName(self): + """Reads a domain name from the packet""" + result = '' + off = self.offset + next = -1 + first = off - while True: - len = ord(self.data[off]) - off += 1 - if len == 0: - break - t = len & 0xC0 - if t == 0x00: - result = ''.join((result, self.readUTF(off, len) + '.')) - off += len - elif t == 0xC0: - if next < 0: - next = off + 1 - off = ((len & 0x3F) << 8) | ord(self.data[off]) - if off >= first: - raise BadDomainNameCircular(off) - first = off - else: - raise BadDomainName(off) + while True: + len = ord(self.data[off]) + off += 1 + if len == 0: + break + t = len & 0xC0 + if t == 0x00: + result = ''.join((result, self.readUTF(off, len) + '.')) + off += len + elif t == 0xC0: + if next < 0: + next = off + 1 + off = ((len & 0x3F) << 8) | ord(self.data[off]) + if off >= first: + raise BadDomainNameCircular(off) + first = off + else: + raise BadDomainName(off) - if next >= 0: - self.offset = next - else: - self.offset = off + if next >= 0: + self.offset = next + else: + self.offset = off - return 
result + return result class DNSOutgoing(object): - """Object representation of an outgoing packet""" + """Object representation of an outgoing packet""" - def __init__(self, flags, multicast = 1): - self.finished = 0 - self.id = 0 - self.multicast = multicast - self.flags = flags - self.names = {} - self.data = [] - self.size = 12 + def __init__(self, flags, multicast=1): + self.finished = 0 + self.id = 0 + self.multicast = multicast + self.flags = flags + self.names = {} + self.data = [] + self.size = 12 - self.questions = [] - self.answers = [] - self.authorities = [] - self.additionals = [] + self.questions = [] + self.answers = [] + self.authorities = [] + self.additionals = [] - def addQuestion(self, record): - """Adds a question""" - self.questions.append(record) + def addQuestion(self, record): + """Adds a question""" + self.questions.append(record) - def addAnswer(self, inp, record): - """Adds an answer""" - if not record.suppressedBy(inp): - self.addAnswerAtTime(record, 0) + def addAnswer(self, inp, record): + """Adds an answer""" + if not record.suppressedBy(inp): + self.addAnswerAtTime(record, 0) - def addAnswerAtTime(self, record, now): - """Adds an answer if if does not expire by a certain time""" - if record is not None: - if now == 0 or not record.isExpired(now): - self.answers.append((record, now)) + def addAnswerAtTime(self, record, now): + """Adds an answer if if does not expire by a certain time""" + if record is not None: + if now == 0 or not record.isExpired(now): + self.answers.append((record, now)) - def addAuthoritativeAnswer(self, record): - """Adds an authoritative answer""" - self.authorities.append(record) + def addAuthoritativeAnswer(self, record): + """Adds an authoritative answer""" + self.authorities.append(record) - def addAdditionalAnswer(self, record): - """Adds an additional answer""" - self.additionals.append(record) + def addAdditionalAnswer(self, record): + """Adds an additional answer""" + self.additionals.append(record) - 
def writeByte(self, value): - """Writes a single byte to the packet""" - format = '!c' - self.data.append(struct.pack(format, chr(value))) - self.size += 1 + def writeByte(self, value): + """Writes a single byte to the packet""" + format = '!c' + self.data.append(struct.pack(format, chr(value))) + self.size += 1 - def insertShort(self, index, value): - """Inserts an unsigned short in a certain position in the packet""" - format = '!H' - self.data.insert(index, struct.pack(format, value)) - self.size += 2 + def insertShort(self, index, value): + """Inserts an unsigned short in a certain position in the packet""" + format = '!H' + self.data.insert(index, struct.pack(format, value)) + self.size += 2 - def writeShort(self, value): - """Writes an unsigned short to the packet""" - format = '!H' - self.data.append(struct.pack(format, value)) - self.size += 2 + def writeShort(self, value): + """Writes an unsigned short to the packet""" + format = '!H' + self.data.append(struct.pack(format, value)) + self.size += 2 - def writeInt(self, value): - """Writes an unsigned integer to the packet""" - format = '!I' - self.data.append(struct.pack(format, int(value))) - self.size += 4 + def writeInt(self, value): + """Writes an unsigned integer to the packet""" + format = '!I' + self.data.append(struct.pack(format, int(value))) + self.size += 4 - def writeString(self, value, length): - """Writes a string to the packet""" - format = '!' + str(length) + 's' - self.data.append(struct.pack(format, value)) - self.size += length + def writeString(self, value, length): + """Writes a string to the packet""" + format = '!' 
+ str(length) + 's' + self.data.append(struct.pack(format, value)) + self.size += length - def writeUTF(self, s): - """Writes a UTF-8 string of a given length to the packet""" - utfstr = s.encode('utf-8') - length = len(utfstr) - if length > 64: - raise NamePartTooLongException - self.writeByte(length) - self.writeString(utfstr, length) + def writeUTF(self, s): + """Writes a UTF-8 string of a given length to the packet""" + utfstr = s.encode('utf-8') + length = len(utfstr) + if length > 64: + raise NamePartTooLongException + self.writeByte(length) + self.writeString(utfstr, length) - def writeName(self, name): - """Writes a domain name to the packet""" + def writeName(self, name): + """Writes a domain name to the packet""" - try: - # Find existing instance of this name in packet - # - index = self.names[name] - except KeyError: - # No record of this name already, so write it - # out as normal, recording the location of the name - # for future pointers to it. - # - self.names[name] = self.size - parts = name.split('.') - if parts[-1] == '': - parts = parts[:-1] - for part in parts: - self.writeUTF(part) - self.writeByte(0) - return + try: + # Find existing instance of this name in packet + # + index = self.names[name] + except KeyError: + # No record of this name already, so write it + # out as normal, recording the location of the name + # for future pointers to it. 
+ # + self.names[name] = self.size + parts = name.split('.') + if parts[-1] == '': + parts = parts[:-1] + for part in parts: + self.writeUTF(part) + self.writeByte(0) + return - # An index was found, so write a pointer to it - # - self.writeByte((index >> 8) | 0xC0) - self.writeByte(index) + # An index was found, so write a pointer to it + # + self.writeByte((index >> 8) | 0xC0) + self.writeByte(index) - def writeQuestion(self, question): - """Writes a question to the packet""" - self.writeName(question.name) - self.writeShort(question.type) - self.writeShort(question.clazz) + def writeQuestion(self, question): + """Writes a question to the packet""" + self.writeName(question.name) + self.writeShort(question.type) + self.writeShort(question.clazz) - def writeRecord(self, record, now): - """Writes a record (answer, authoritative answer, additional) to - the packet""" - self.writeName(record.name) - self.writeShort(record.type) - if record.unique and self.multicast: - self.writeShort(record.clazz | _CLASS_UNIQUE) - else: - self.writeShort(record.clazz) - if now == 0: - self.writeInt(record.ttl) - else: - self.writeInt(record.getRemainingTTL(now)) - index = len(self.data) - # Adjust size for the short we will write before this record - # - self.size += 2 - record.write(self) - self.size -= 2 + def writeRecord(self, record, now): + """Writes a record (answer, authoritative answer, additional) to + the packet""" + self.writeName(record.name) + self.writeShort(record.type) + if record.unique and self.multicast: + self.writeShort(record.clazz | _CLASS_UNIQUE) + else: + self.writeShort(record.clazz) + if now == 0: + self.writeInt(record.ttl) + else: + self.writeInt(record.getRemainingTTL(now)) + index = len(self.data) + # Adjust size for the short we will write before this record + # + self.size += 2 + record.write(self) + self.size -= 2 - length = len(''.join(self.data[index:])) - self.insertShort(index, length) # Here is the short we adjusted for + length = 
len(''.join(self.data[index:])) + self.insertShort(index, length) # Here is the short we adjusted for - def packet(self): - """Returns a string containing the packet's bytes + def packet(self): + """Returns a string containing the packet's bytes - No further parts should be added to the packet once this - is done.""" - if not self.finished: - self.finished = 1 - for question in self.questions: - self.writeQuestion(question) - for answer, time in self.answers: - self.writeRecord(answer, time) - for authority in self.authorities: - self.writeRecord(authority, 0) - for additional in self.additionals: - self.writeRecord(additional, 0) + No further parts should be added to the packet once this + is done.""" + if not self.finished: + self.finished = 1 + for question in self.questions: + self.writeQuestion(question) + for answer, time_ in self.answers: + self.writeRecord(answer, time_) + for authority in self.authorities: + self.writeRecord(authority, 0) + for additional in self.additionals: + self.writeRecord(additional, 0) - self.insertShort(0, len(self.additionals)) - self.insertShort(0, len(self.authorities)) - self.insertShort(0, len(self.answers)) - self.insertShort(0, len(self.questions)) - self.insertShort(0, self.flags) - if self.multicast: - self.insertShort(0, 0) - else: - self.insertShort(0, self.id) - return ''.join(self.data) + self.insertShort(0, len(self.additionals)) + self.insertShort(0, len(self.authorities)) + self.insertShort(0, len(self.answers)) + self.insertShort(0, len(self.questions)) + self.insertShort(0, self.flags) + if self.multicast: + self.insertShort(0, 0) + else: + self.insertShort(0, self.id) + return ''.join(self.data) class DNSCache(object): - """A cache of DNS entries""" + """A cache of DNS entries""" - def __init__(self): - self.cache = {} + def __init__(self): + self.cache = {} - def add(self, entry): - """Adds an entry""" - try: - list = self.cache[entry.key] - except KeyError: - list = self.cache[entry.key] = [] - 
list.append(entry) + def add(self, entry): + """Adds an entry""" + try: + list = self.cache[entry.key] + except KeyError: + list = self.cache[entry.key] = [] + list.append(entry) - def remove(self, entry): - """Removes an entry""" - try: - list = self.cache[entry.key] - list.remove(entry) - except KeyError: - pass + def remove(self, entry): + """Removes an entry""" + try: + list = self.cache[entry.key] + list.remove(entry) + except KeyError: + pass - def get(self, entry): - """Gets an entry by key. Will return None if there is no - matching entry.""" - try: - list = self.cache[entry.key] - return list[list.index(entry)] - except (KeyError, ValueError): - return None - - def getByDetails(self, name, type, clazz): - """Gets an entry by details. Will return None if there is - no matching entry.""" - entry = DNSEntry(name, type, clazz) - return self.get(entry) + def get(self, entry): + """Gets an entry by key. Will return None if there is no + matching entry.""" + try: + list = self.cache[entry.key] + return list[list.index(entry)] + except (KeyError, ValueError): + return None - def entriesWithName(self, name): - """Returns a list of entries whose key matches the name.""" - try: - return self.cache[name] - except KeyError: - return [] + def getByDetails(self, name, type, clazz): + """Gets an entry by details. 
Will return None if there is + no matching entry.""" + entry = DNSEntry(name, type, clazz) + return self.get(entry) - def entries(self): - """Returns a list of all entries""" - def add(x, y): return x+y - try: - return reduce(add, self.cache.values()) - except Exception: - return [] + def entriesWithName(self, name): + """Returns a list of entries whose key matches the name.""" + try: + return self.cache[name] + except KeyError: + return [] + + def entries(self): + """Returns a list of all entries""" + try: + return list(itertools.chain.from_iterable(self.cache.values())) + except Exception: + return [] class Engine(threading.Thread): - """An engine wraps read access to sockets, allowing objects that - need to receive data from sockets to be called back when the - sockets are ready. + """An engine wraps read access to sockets, allowing objects that + need to receive data from sockets to be called back when the + sockets are ready. - A reader needs a handle_read() method, which is called when the socket - it is interested in is ready for reading. + A reader needs a handle_read() method, which is called when the socket + it is interested in is ready for reading. - Writers are not implemented here, because we only send short - packets. - """ + Writers are not implemented here, because we only send short + packets. 
+ """ - def __init__(self, zeroconf): - threading.Thread.__init__(self) - self.zeroconf = zeroconf - self.readers = {} # maps socket to reader - self.timeout = 5 - self.condition = threading.Condition() - self.start() + def __init__(self, zeroconf): + threading.Thread.__init__(self) + self.zeroconf = zeroconf + self.readers = {} # maps socket to reader + self.timeout = 5 + self.condition = threading.Condition() + self.start() - def run(self): - while not globals()['_GLOBAL_DONE']: - rs = self.getReaders() - if len(rs) == 0: - # No sockets to manage, but we wait for the timeout - # or addition of a socket - # - self.condition.acquire() - self.condition.wait(self.timeout) - self.condition.release() - else: - try: - rr, wr, er = select.select(rs, [], [], self.timeout) - for socket in rr: - try: - self.readers[socket].handle_read() - except Exception: - if not globals()['_GLOBAL_DONE']: - traceback.print_exc() - except Exception: - pass + def run(self): + while not globals()['_GLOBAL_DONE']: + rs = self.getReaders() + if len(rs) == 0: + # No sockets to manage, but we wait for the timeout + # or addition of a socket + # + self.condition.acquire() + self.condition.wait(self.timeout) + self.condition.release() + else: + try: + rr, wr, er = select.select(rs, [], [], self.timeout) + for sock in rr: + try: + self.readers[sock].handle_read() + except Exception: + if not globals()['_GLOBAL_DONE']: + traceback.print_exc() + except Exception: + pass - def getReaders(self): - self.condition.acquire() - result = self.readers.keys() - self.condition.release() - return result + def getReaders(self): + self.condition.acquire() + result = self.readers.keys() + self.condition.release() + return result - def addReader(self, reader, socket): - self.condition.acquire() - self.readers[socket] = reader - self.condition.notify() - self.condition.release() + def addReader(self, reader, socket): + self.condition.acquire() + self.readers[socket] = reader + self.condition.notify() + 
self.condition.release() - def delReader(self, socket): - self.condition.acquire() - del(self.readers[socket]) - self.condition.notify() - self.condition.release() + def delReader(self, socket): + self.condition.acquire() + del self.readers[socket] + self.condition.notify() + self.condition.release() - def notify(self): - self.condition.acquire() - self.condition.notify() - self.condition.release() + def notify(self): + self.condition.acquire() + self.condition.notify() + self.condition.release() class Listener(object): - """A Listener is used by this module to listen on the multicast - group to which DNS messages are sent, allowing the implementation - to cache information as it arrives. + """A Listener is used by this module to listen on the multicast + group to which DNS messages are sent, allowing the implementation + to cache information as it arrives. - It requires registration with an Engine object in order to have - the read() method called when a socket is available for reading.""" + It requires registration with an Engine object in order to have + the read() method called when a socket is available for reading.""" - def __init__(self, zeroconf): - self.zeroconf = zeroconf - self.zeroconf.engine.addReader(self, self.zeroconf.socket) + def __init__(self, zeroconf): + self.zeroconf = zeroconf + self.zeroconf.engine.addReader(self, self.zeroconf.socket) - def handle_read(self): - data, (addr, port) = self.zeroconf.socket.recvfrom(_MAX_MSG_ABSOLUTE) - self.data = data - msg = DNSIncoming(data) - if msg.isQuery(): - # Always multicast responses - # - if port == _MDNS_PORT: - self.zeroconf.handleQuery(msg, _MDNS_ADDR, _MDNS_PORT) - # If it's not a multicast query, reply via unicast - # and multicast - # - elif port == _DNS_PORT: - self.zeroconf.handleQuery(msg, addr, port) - self.zeroconf.handleQuery(msg, _MDNS_ADDR, _MDNS_PORT) - else: - self.zeroconf.handleResponse(msg) + def handle_read(self): + data, (addr, port) = 
self.zeroconf.socket.recvfrom(_MAX_MSG_ABSOLUTE) + self.data = data + msg = DNSIncoming(data) + if msg.isQuery(): + # Always multicast responses + # + if port == _MDNS_PORT: + self.zeroconf.handleQuery(msg, _MDNS_ADDR, _MDNS_PORT) + # If it's not a multicast query, reply via unicast + # and multicast + # + elif port == _DNS_PORT: + self.zeroconf.handleQuery(msg, addr, port) + self.zeroconf.handleQuery(msg, _MDNS_ADDR, _MDNS_PORT) + else: + self.zeroconf.handleResponse(msg) class Reaper(threading.Thread): - """A Reaper is used by this module to remove cache entries that - have expired.""" + """A Reaper is used by this module to remove cache entries that + have expired.""" - def __init__(self, zeroconf): - threading.Thread.__init__(self) - self.zeroconf = zeroconf - self.start() + def __init__(self, zeroconf): + threading.Thread.__init__(self) + self.zeroconf = zeroconf + self.start() - def run(self): - while True: - self.zeroconf.wait(10 * 1000) - if globals()['_GLOBAL_DONE']: - return - now = currentTimeMillis() - for record in self.zeroconf.cache.entries(): - if record.isExpired(now): - self.zeroconf.updateRecord(now, record) - self.zeroconf.cache.remove(record) + def run(self): + while True: + self.zeroconf.wait(10 * 1000) + if globals()['_GLOBAL_DONE']: + return + now = currentTimeMillis() + for record in self.zeroconf.cache.entries(): + if record.isExpired(now): + self.zeroconf.updateRecord(now, record) + self.zeroconf.cache.remove(record) class ServiceBrowser(threading.Thread): - """Used to browse for a service of a specific type. + """Used to browse for a service of a specific type. 
- The listener object will have its addService() and - removeService() methods called when this browser - discovers changes in the services availability.""" + The listener object will have its addService() and + removeService() methods called when this browser + discovers changes in the services availability.""" - def __init__(self, zeroconf, type, listener): - """Creates a browser for a specific type""" - threading.Thread.__init__(self) - self.zeroconf = zeroconf - self.type = type - self.listener = listener - self.services = {} - self.nextTime = currentTimeMillis() - self.delay = _BROWSER_TIME - self.list = [] + def __init__(self, zeroconf, type, listener): + """Creates a browser for a specific type""" + threading.Thread.__init__(self) + self.zeroconf = zeroconf + self.type = type + self.listener = listener + self.services = {} + self.nexttime = currentTimeMillis() + self.delay = _BROWSER_TIME + self.list = [] - self.done = 0 + self.done = 0 - self.zeroconf.addListener(self, DNSQuestion(self.type, _TYPE_PTR, _CLASS_IN)) - self.start() + self.zeroconf.addListener(self, DNSQuestion(self.type, _TYPE_PTR, + _CLASS_IN)) + self.start() - def updateRecord(self, zeroconf, now, record): - """Callback invoked by Zeroconf when new information arrives. + def updateRecord(self, zeroconf, now, record): + """Callback invoked by Zeroconf when new information arrives. 
- Updates information required by browser in the Zeroconf cache.""" - if record.type == _TYPE_PTR and record.name == self.type: - expired = record.isExpired(now) - try: - oldrecord = self.services[record.alias.lower()] - if not expired: - oldrecord.resetTTL(record) - else: - del(self.services[record.alias.lower()]) - callback = lambda x: self.listener.removeService(x, self.type, record.alias) - self.list.append(callback) - return - except Exception: - if not expired: - self.services[record.alias.lower()] = record - callback = lambda x: self.listener.addService(x, self.type, record.alias) - self.list.append(callback) + Updates information required by browser in the Zeroconf cache.""" + if record.type == _TYPE_PTR and record.name == self.type: + expired = record.isExpired(now) + try: + oldrecord = self.services[record.alias.lower()] + if not expired: + oldrecord.resetTTL(record) + else: + del self.services[record.alias.lower()] + callback = (lambda x: + self.listener.removeService(x, self.type, record.alias)) + self.list.append(callback) + return + except Exception: + if not expired: + self.services[record.alias.lower()] = record + callback = (lambda x: + self.listener.addService(x, self.type, record.alias)) + self.list.append(callback) - expires = record.getExpirationTime(75) - if expires < self.nextTime: - self.nextTime = expires - - def cancel(self): - self.done = 1 - self.zeroconf.notifyAll() + expires = record.getExpirationTime(75) + if expires < self.nexttime: + self.nexttime = expires - def run(self): - while True: - event = None - now = currentTimeMillis() - if len(self.list) == 0 and self.nextTime > now: - self.zeroconf.wait(self.nextTime - now) - if globals()['_GLOBAL_DONE'] or self.done: - return - now = currentTimeMillis() + def cancel(self): + self.done = 1 + self.zeroconf.notifyAll() + + def run(self): + while True: + event = None + now = currentTimeMillis() + if len(self.list) == 0 and self.nexttime > now: + self.zeroconf.wait(self.nexttime - now) + if 
globals()['_GLOBAL_DONE'] or self.done: + return + now = currentTimeMillis() - if self.nextTime <= now: - out = DNSOutgoing(_FLAGS_QR_QUERY) - out.addQuestion(DNSQuestion(self.type, _TYPE_PTR, _CLASS_IN)) - for record in self.services.values(): - if not record.isExpired(now): - out.addAnswerAtTime(record, now) - self.zeroconf.send(out) - self.nextTime = now + self.delay - self.delay = min(20 * 1000, self.delay * 2) + if self.nexttime <= now: + out = DNSOutgoing(_FLAGS_QR_QUERY) + out.addQuestion(DNSQuestion(self.type, _TYPE_PTR, _CLASS_IN)) + for record in self.services.values(): + if not record.isExpired(now): + out.addAnswerAtTime(record, now) + self.zeroconf.send(out) + self.nexttime = now + self.delay + self.delay = min(20 * 1000, self.delay * 2) - if len(self.list) > 0: - event = self.list.pop(0) + if len(self.list) > 0: + event = self.list.pop(0) - if event is not None: - event(self.zeroconf) + if event is not None: + event(self.zeroconf) class ServiceInfo(object): - """Service information""" - - def __init__(self, type, name, address=None, port=None, weight=0, priority=0, properties=None, server=None): - """Create a service description. + """Service information""" - type: fully qualified service type name - name: fully qualified service name - address: IP address as unsigned short, network byte order - port: port that the service runs on - weight: weight of the service - priority: priority of the service - properties: dictionary of properties (or a string holding the bytes for the text field) - server: fully qualified name for service host (defaults to name)""" + def __init__(self, type, name, address=None, port=None, weight=0, + priority=0, properties=None, server=None): + """Create a service description. 
- if not name.endswith(type): - raise BadTypeInNameException - self.type = type - self.name = name - self.address = address - self.port = port - self.weight = weight - self.priority = priority - if server: - self.server = server - else: - self.server = name - self.setProperties(properties) + type: fully qualified service type name + name: fully qualified service name + address: IP address as unsigned short, network byte order + port: port that the service runs on + weight: weight of the service + priority: priority of the service + properties: dictionary of properties (or a string holding the bytes for + the text field) + server: fully qualified name for service host (defaults to name)""" + + if not name.endswith(type): + raise BadTypeInNameException + self.type = type + self.name = name + self.address = address + self.port = port + self.weight = weight + self.priority = priority + if server: + self.server = server + else: + self.server = name + self.setProperties(properties) - def setProperties(self, properties): - """Sets properties and text of this info from a dictionary""" - if isinstance(properties, dict): - self.properties = properties - list = [] - result = '' - for key in properties: - value = properties[key] - if value is None: - suffix = '' - elif isinstance(value, str): - suffix = value - elif isinstance(value, int): - if value: - suffix = 'true' - else: - suffix = 'false' - else: - suffix = '' - list.append('='.join((key, suffix))) - for item in list: - result = ''.join((result, struct.pack('!c', chr(len(item))), item)) - self.text = result - else: - self.text = properties + def setProperties(self, properties): + """Sets properties and text of this info from a dictionary""" + if isinstance(properties, dict): + self.properties = properties + list = [] + result = '' + for key in properties: + value = properties[key] + if value is None: + suffix = '' + elif isinstance(value, str): + suffix = value + elif isinstance(value, int): + if value: + suffix = 
'true' + else: + suffix = 'false' + else: + suffix = '' + list.append('='.join((key, suffix))) + for item in list: + result = ''.join((result, struct.pack('!c', chr(len(item))), + item)) + self.text = result + else: + self.text = properties + + def setText(self, text): + """Sets properties and text given a text field""" + self.text = text + try: + result = {} + end = len(text) + index = 0 + strs = [] + while index < end: + length = ord(text[index]) + index += 1 + strs.append(text[index:index + length]) + index += length - def setText(self, text): - """Sets properties and text given a text field""" - self.text = text - try: - result = {} - end = len(text) - index = 0 - strs = [] - while index < end: - length = ord(text[index]) - index += 1 - strs.append(text[index:index+length]) - index += length + for s in strs: + eindex = s.find('=') + if eindex == -1: + # No equals sign at all + key = s + value = 0 + else: + key = s[:eindex] + value = s[eindex + 1:] + if value == 'true': + value = 1 + elif value == 'false' or not value: + value = 0 + + # Only update non-existent properties + if key and result.get(key) is None: + result[key] = value - for s in strs: - eindex = s.find('=') - if eindex == -1: - # No equals sign at all - key = s - value = 0 - else: - key = s[:eindex] - value = s[eindex+1:] - if value == 'true': - value = 1 - elif value == 'false' or not value: - value = 0 + self.properties = result + except Exception: + traceback.print_exc() + self.properties = None + + def getType(self): + """Type accessor""" + return self.type - # Only update non-existent properties - if key and result.get(key) == None: - result[key] = value - - self.properties = result - except Exception: - traceback.print_exc() - self.properties = None + def getName(self): + """Name accessor""" + if self.type is not None and self.name.endswith("." 
+ self.type): + return self.name[:len(self.name) - len(self.type) - 1] + return self.name - def getType(self): - """Type accessor""" - return self.type + def getAddress(self): + """Address accessor""" + return self.address - def getName(self): - """Name accessor""" - if self.type is not None and self.name.endswith("." + self.type): - return self.name[:len(self.name) - len(self.type) - 1] - return self.name + def getPort(self): + """Port accessor""" + return self.port - def getAddress(self): - """Address accessor""" - return self.address + def getPriority(self): + """Priority accessor""" + return self.priority - def getPort(self): - """Port accessor""" - return self.port + def getWeight(self): + """Weight accessor""" + return self.weight - def getPriority(self): - """Priority accessor""" - return self.priority + def getProperties(self): + """Properties accessor""" + return self.properties - def getWeight(self): - """Weight accessor""" - return self.weight + def getText(self): + """Text accessor""" + return self.text - def getProperties(self): - """Properties accessor""" - return self.properties + def getServer(self): + """Server accessor""" + return self.server - def getText(self): - """Text accessor""" - return self.text - - def getServer(self): - """Server accessor""" - return self.server - - def updateRecord(self, zeroconf, now, record): - """Updates service information from a DNS record""" - if record is not None and not record.isExpired(now): - if record.type == _TYPE_A: - #if record.name == self.name: - if record.name == self.server: - self.address = record.address - elif record.type == _TYPE_SRV: - if record.name == self.name: - self.server = record.server - self.port = record.port - self.weight = record.weight - self.priority = record.priority - #self.address = None - self.updateRecord(zeroconf, now, zeroconf.cache.getByDetails(self.server, _TYPE_A, _CLASS_IN)) - elif record.type == _TYPE_TXT: - if record.name == self.name: - self.setText(record.text) + def 
updateRecord(self, zeroconf, now, record): + """Updates service information from a DNS record""" + if record is not None and not record.isExpired(now): + if record.type == _TYPE_A: + #if record.name == self.name: + if record.name == self.server: + self.address = record.address + elif record.type == _TYPE_SRV: + if record.name == self.name: + self.server = record.server + self.port = record.port + self.weight = record.weight + self.priority = record.priority + #self.address = None + self.updateRecord(zeroconf, now, + zeroconf.cache.getByDetails(self.server, + _TYPE_A, _CLASS_IN)) + elif record.type == _TYPE_TXT: + if record.name == self.name: + self.setText(record.text) - def request(self, zeroconf, timeout): - """Returns true if the service could be discovered on the - network, and updates this object with details discovered. - """ - now = currentTimeMillis() - delay = _LISTENER_TIME - next = now + delay - last = now + timeout - result = 0 - try: - zeroconf.addListener(self, DNSQuestion(self.name, _TYPE_ANY, _CLASS_IN)) - while self.server is None or self.address is None or self.text is None: - if last <= now: - return 0 - if next <= now: - out = DNSOutgoing(_FLAGS_QR_QUERY) - out.addQuestion(DNSQuestion(self.name, _TYPE_SRV, _CLASS_IN)) - out.addAnswerAtTime(zeroconf.cache.getByDetails(self.name, _TYPE_SRV, _CLASS_IN), now) - out.addQuestion(DNSQuestion(self.name, _TYPE_TXT, _CLASS_IN)) - out.addAnswerAtTime(zeroconf.cache.getByDetails(self.name, _TYPE_TXT, _CLASS_IN), now) - if self.server is not None: - out.addQuestion(DNSQuestion(self.server, _TYPE_A, _CLASS_IN)) - out.addAnswerAtTime(zeroconf.cache.getByDetails(self.server, _TYPE_A, _CLASS_IN), now) - zeroconf.send(out) - next = now + delay - delay = delay * 2 + def request(self, zeroconf, timeout): + """Returns true if the service could be discovered on the + network, and updates this object with details discovered. 
+ """ + now = currentTimeMillis() + delay = _LISTENER_TIME + next = now + delay + last = now + timeout + result = 0 + try: + zeroconf.addListener(self, DNSQuestion(self.name, _TYPE_ANY, + _CLASS_IN)) + while (self.server is None or self.address is None or + self.text is None): + if last <= now: + return 0 + if next <= now: + out = DNSOutgoing(_FLAGS_QR_QUERY) + out.addQuestion(DNSQuestion(self.name, _TYPE_SRV, + _CLASS_IN)) + out.addAnswerAtTime( + zeroconf.cache.getByDetails(self.name, + _TYPE_SRV, + _CLASS_IN), + now) + out.addQuestion(DNSQuestion(self.name, _TYPE_TXT, + _CLASS_IN)) + out.addAnswerAtTime( + zeroconf.cache.getByDetails(self.name, _TYPE_TXT, + _CLASS_IN), + now) + if self.server is not None: + out.addQuestion( + DNSQuestion(self.server, _TYPE_A, _CLASS_IN)) + out.addAnswerAtTime( + zeroconf.cache.getByDetails(self.server, _TYPE_A, + _CLASS_IN), + now) + zeroconf.send(out) + next = now + delay + delay = delay * 2 - zeroconf.wait(min(next, last) - now) - now = currentTimeMillis() - result = 1 - finally: - zeroconf.removeListener(self) + zeroconf.wait(min(next, last) - now) + now = currentTimeMillis() + result = 1 + finally: + zeroconf.removeListener(self) - return result + return result - def __eq__(self, other): - """Tests equality of service name""" - if isinstance(other, ServiceInfo): - return other.name == self.name - return 0 + def __eq__(self, other): + """Tests equality of service name""" + if isinstance(other, ServiceInfo): + return other.name == self.name + return 0 - def __ne__(self, other): - """Non-equality test""" - return not self.__eq__(other) + def __ne__(self, other): + """Non-equality test""" + return not self.__eq__(other) - def __repr__(self): - """String representation""" - result = "service[%s,%s:%s," % (self.name, socket.inet_ntoa(self.getAddress()), self.port) - if self.text is None: - result += "None" - else: - if len(self.text) < 20: - result += self.text - else: - result += self.text[:17] + "..." 
- result += "]" - return result + def __repr__(self): + """String representation""" + result = ("service[%s,%s:%s," % + (self.name, socket.inet_ntoa(self.getAddress()), self.port)) + if self.text is None: + result += "None" + else: + if len(self.text) < 20: + result += self.text + else: + result += self.text[:17] + "..." + result += "]" + return result class Zeroconf(object): - """Implementation of Zeroconf Multicast DNS Service Discovery + """Implementation of Zeroconf Multicast DNS Service Discovery + + Supports registration, unregistration, queries and browsing. + """ + def __init__(self, bindaddress=None): + """Creates an instance of the Zeroconf class, establishing + multicast communications, listening and reaping threads.""" + globals()['_GLOBAL_DONE'] = 0 + if bindaddress is None: + self.intf = socket.gethostbyname(socket.gethostname()) + else: + self.intf = bindaddress + self.group = ('', _MDNS_PORT) + self.socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) + try: + self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) + self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEPORT, 1) + except Exception: + # SO_REUSEADDR should be equivalent to SO_REUSEPORT for + # multicast UDP sockets (p 731, "TCP/IP Illustrated, + # Volume 2"), but some BSD-derived systems require + # SO_REUSEPORT to be specified explicitly. Also, not all + # versions of Python have SO_REUSEPORT available. So + # if you're on a BSD-based system, and haven't upgraded + # to Python 2.3 yet, you may find this library doesn't + # work as expected. 
+ # + pass + self.socket.setsockopt(socket.SOL_IP, socket.IP_MULTICAST_TTL, "\xff") + self.socket.setsockopt(socket.SOL_IP, socket.IP_MULTICAST_LOOP, "\x01") + try: + self.socket.bind(self.group) + except Exception: + # Some versions of linux raise an exception even though + # SO_REUSEADDR and SO_REUSEPORT have been set, so ignore it + pass + self.socket.setsockopt(socket.SOL_IP, socket.IP_ADD_MEMBERSHIP, + socket.inet_aton(_MDNS_ADDR) + socket.inet_aton('0.0.0.0')) - Supports registration, unregistration, queries and browsing. - """ - def __init__(self, bindaddress=None): - """Creates an instance of the Zeroconf class, establishing - multicast communications, listening and reaping threads.""" - globals()['_GLOBAL_DONE'] = 0 - if bindaddress is None: - self.intf = socket.gethostbyname(socket.gethostname()) - else: - self.intf = bindaddress - self.group = ('', _MDNS_PORT) - self.socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) - try: - self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) - self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEPORT, 1) - except Exception: - # SO_REUSEADDR should be equivalent to SO_REUSEPORT for - # multicast UDP sockets (p 731, "TCP/IP Illustrated, - # Volume 2"), but some BSD-derived systems require - # SO_REUSEPORT to be specified explicitly. Also, not all - # versions of Python have SO_REUSEPORT available. So - # if you're on a BSD-based system, and haven't upgraded - # to Python 2.3 yet, you may find this library doesn't - # work as expected. 
- # - pass - self.socket.setsockopt(socket.SOL_IP, socket.IP_MULTICAST_TTL, 255) - self.socket.setsockopt(socket.SOL_IP, socket.IP_MULTICAST_LOOP, 1) - try: - self.socket.bind(self.group) - except Exception: - # Some versions of linux raise an exception even though - # SO_REUSEADDR and SO_REUSEPORT have been set, so ignore it - pass - self.socket.setsockopt(socket.SOL_IP, socket.IP_ADD_MEMBERSHIP, socket.inet_aton(_MDNS_ADDR) + socket.inet_aton('0.0.0.0')) + self.listeners = [] + self.browsers = [] + self.services = {} + self.servicetypes = {} + + self.cache = DNSCache() + + self.condition = threading.Condition() + + self.engine = Engine(self) + self.listener = Listener(self) + self.reaper = Reaper(self) + + def isLoopback(self): + return self.intf.startswith("127.0.0.1") + + def isLinklocal(self): + return self.intf.startswith("169.254.") - self.listeners = [] - self.browsers = [] - self.services = {} - self.servicetypes = {} - - self.cache = DNSCache() - - self.condition = threading.Condition() - - self.engine = Engine(self) - self.listener = Listener(self) - self.reaper = Reaper(self) - - def isLoopback(self): - return self.intf.startswith("127.0.0.1") + def wait(self, timeout): + """Calling thread waits for a given number of milliseconds or + until notified.""" + self.condition.acquire() + self.condition.wait(timeout / 1000) + self.condition.release() - def isLinklocal(self): - return self.intf.startswith("169.254.") + def notifyAll(self): + """Notifies all waiting threads""" + self.condition.acquire() + self.condition.notifyAll() + self.condition.release() - def wait(self, timeout): - """Calling thread waits for a given number of milliseconds or - until notified.""" - self.condition.acquire() - self.condition.wait(timeout/1000) - self.condition.release() - - def notifyAll(self): - """Notifies all waiting threads""" - self.condition.acquire() - self.condition.notifyAll() - self.condition.release() + def getServiceInfo(self, type, name, timeout=3000): + 
"""Returns network's service information for a particular + name and type, or None if no service matches by the timeout, + which defaults to 3 seconds.""" + info = ServiceInfo(type, name) + if info.request(self, timeout): + return info + return None - def getServiceInfo(self, type, name, timeout=3000): - """Returns network's service information for a particular - name and type, or None if no service matches by the timeout, - which defaults to 3 seconds.""" - info = ServiceInfo(type, name) - if info.request(self, timeout): - return info - return None + def addServiceListener(self, type, listener): + """Adds a listener for a particular service type. This object + will then have its updateRecord method called when information + arrives for that type.""" + self.removeServiceListener(listener) + self.browsers.append(ServiceBrowser(self, type, listener)) - def addServiceListener(self, type, listener): - """Adds a listener for a particular service type. This object - will then have its updateRecord method called when information - arrives for that type.""" - self.removeServiceListener(listener) - self.browsers.append(ServiceBrowser(self, type, listener)) - - def removeServiceListener(self, listener): - """Removes a listener from the set that is currently listening.""" - for browser in self.browsers: - if browser.listener == listener: - browser.cancel() - del(browser) + def removeServiceListener(self, listener): + """Removes a listener from the set that is currently listening.""" + for browser in self.browsers: + if browser.listener == listener: + browser.cancel() + del browser - def registerService(self, info, ttl=_DNS_TTL): - """Registers service information to the network with a default TTL - of 60 seconds. Zeroconf will then respond to requests for - information for that service. 
The name of the service may be - changed if needed to make it unique on the network.""" - self.checkService(info) - self.services[info.name.lower()] = info - if self.servicetypes.has_key(info.type): - self.servicetypes[info.type]+=1 - else: - self.servicetypes[info.type]=1 - now = currentTimeMillis() - nextTime = now - i = 0 - while i < 3: - if now < nextTime: - self.wait(nextTime - now) - now = currentTimeMillis() - continue - out = DNSOutgoing(_FLAGS_QR_RESPONSE | _FLAGS_AA) - out.addAnswerAtTime(DNSPointer(info.type, _TYPE_PTR, _CLASS_IN, ttl, info.name), 0) - out.addAnswerAtTime(DNSService(info.name, _TYPE_SRV, _CLASS_IN, ttl, info.priority, info.weight, info.port, info.server), 0) - out.addAnswerAtTime(DNSText(info.name, _TYPE_TXT, _CLASS_IN, ttl, info.text), 0) - if info.address: - out.addAnswerAtTime(DNSAddress(info.server, _TYPE_A, _CLASS_IN, ttl, info.address), 0) - self.send(out) - i += 1 - nextTime += _REGISTER_TIME + def registerService(self, info, ttl=_DNS_TTL): + """Registers service information to the network with a default TTL + of 60 seconds. Zeroconf will then respond to requests for + information for that service. 
The name of the service may be + changed if needed to make it unique on the network.""" + self.checkService(info) + self.services[info.name.lower()] = info + if info.type in self.servicetypes: + self.servicetypes[info.type] += 1 + else: + self.servicetypes[info.type] = 1 + now = currentTimeMillis() + nexttime = now + i = 0 + while i < 3: + if now < nexttime: + self.wait(nexttime - now) + now = currentTimeMillis() + continue + out = DNSOutgoing(_FLAGS_QR_RESPONSE | _FLAGS_AA) + out.addAnswerAtTime(DNSPointer(info.type, _TYPE_PTR, + _CLASS_IN, ttl, info.name), 0) + out.addAnswerAtTime( + DNSService( + info.name, _TYPE_SRV, + _CLASS_IN, ttl, info.priority, info.weight, info.port, + info.server), + 0) + out.addAnswerAtTime( + DNSText(info.name, _TYPE_TXT, _CLASS_IN, ttl, info.text), + 0) + if info.address: + out.addAnswerAtTime(DNSAddress(info.server, _TYPE_A, + _CLASS_IN, ttl, info.address), 0) + self.send(out) + i += 1 + nexttime += _REGISTER_TIME - def unregisterService(self, info): - """Unregister a service.""" - try: - del(self.services[info.name.lower()]) - if self.servicetypes[info.type]>1: - self.servicetypes[info.type]-=1 - else: - del self.servicetypes[info.type] - except KeyError: - pass - now = currentTimeMillis() - nextTime = now - i = 0 - while i < 3: - if now < nextTime: - self.wait(nextTime - now) - now = currentTimeMillis() - continue - out = DNSOutgoing(_FLAGS_QR_RESPONSE | _FLAGS_AA) - out.addAnswerAtTime(DNSPointer(info.type, _TYPE_PTR, _CLASS_IN, 0, info.name), 0) - out.addAnswerAtTime(DNSService(info.name, _TYPE_SRV, _CLASS_IN, 0, info.priority, info.weight, info.port, info.name), 0) - out.addAnswerAtTime(DNSText(info.name, _TYPE_TXT, _CLASS_IN, 0, info.text), 0) - if info.address: - out.addAnswerAtTime(DNSAddress(info.server, _TYPE_A, _CLASS_IN, 0, info.address), 0) - self.send(out) - i += 1 - nextTime += _UNREGISTER_TIME + def unregisterService(self, info): + """Unregister a service.""" + try: + del self.services[info.name.lower()] + if 
self.servicetypes[info.type] > 1: + self.servicetypes[info.type] -= 1 + else: + del self.servicetypes[info.type] + except KeyError: + pass + now = currentTimeMillis() + nexttime = now + i = 0 + while i < 3: + if now < nexttime: + self.wait(nexttime - now) + now = currentTimeMillis() + continue + out = DNSOutgoing(_FLAGS_QR_RESPONSE | _FLAGS_AA) + out.addAnswerAtTime( + DNSPointer(info.type, _TYPE_PTR, _CLASS_IN, 0, info.name), 0) + out.addAnswerAtTime( + DNSService(info.name, _TYPE_SRV, + _CLASS_IN, 0, info.priority, info.weight, info.port, + info.name), + 0) + out.addAnswerAtTime(DNSText(info.name, _TYPE_TXT, + _CLASS_IN, 0, info.text), 0) + if info.address: + out.addAnswerAtTime(DNSAddress(info.server, _TYPE_A, + _CLASS_IN, 0, info.address), 0) + self.send(out) + i += 1 + nexttime += _UNREGISTER_TIME - def unregisterAllServices(self): - """Unregister all registered services.""" - if len(self.services) > 0: - now = currentTimeMillis() - nextTime = now - i = 0 - while i < 3: - if now < nextTime: - self.wait(nextTime - now) - now = currentTimeMillis() - continue - out = DNSOutgoing(_FLAGS_QR_RESPONSE | _FLAGS_AA) - for info in self.services.values(): - out.addAnswerAtTime(DNSPointer(info.type, _TYPE_PTR, _CLASS_IN, 0, info.name), 0) - out.addAnswerAtTime(DNSService(info.name, _TYPE_SRV, _CLASS_IN, 0, info.priority, info.weight, info.port, info.server), 0) - out.addAnswerAtTime(DNSText(info.name, _TYPE_TXT, _CLASS_IN, 0, info.text), 0) - if info.address: - out.addAnswerAtTime(DNSAddress(info.server, _TYPE_A, _CLASS_IN, 0, info.address), 0) - self.send(out) - i += 1 - nextTime += _UNREGISTER_TIME + def unregisterAllServices(self): + """Unregister all registered services.""" + if len(self.services) > 0: + now = currentTimeMillis() + nexttime = now + i = 0 + while i < 3: + if now < nexttime: + self.wait(nexttime - now) + now = currentTimeMillis() + continue + out = DNSOutgoing(_FLAGS_QR_RESPONSE | _FLAGS_AA) + for info in self.services.values(): + 
out.addAnswerAtTime(DNSPointer(info.type, _TYPE_PTR, + _CLASS_IN, 0, info.name), 0) + out.addAnswerAtTime( + DNSService(info.name, _TYPE_SRV, + _CLASS_IN, 0, info.priority, info.weight, + info.port, info.server), + 0) + out.addAnswerAtTime(DNSText(info.name, _TYPE_TXT, + _CLASS_IN, 0, info.text), 0) + if info.address: + out.addAnswerAtTime(DNSAddress(info.server, _TYPE_A, + _CLASS_IN, 0, info.address), 0) + self.send(out) + i += 1 + nexttime += _UNREGISTER_TIME - def checkService(self, info): - """Checks the network for a unique service name, modifying the - ServiceInfo passed in if it is not unique.""" - now = currentTimeMillis() - nextTime = now - i = 0 - while i < 3: - for record in self.cache.entriesWithName(info.type): - if record.type == _TYPE_PTR and not record.isExpired(now) and record.alias == info.name: - if (info.name.find('.') < 0): - info.name = info.name + ".[" + info.address + ":" + info.port + "]." + info.type - self.checkService(info) - return - raise NonUniqueNameException - if now < nextTime: - self.wait(nextTime - now) - now = currentTimeMillis() - continue - out = DNSOutgoing(_FLAGS_QR_QUERY | _FLAGS_AA) - self.debug = out - out.addQuestion(DNSQuestion(info.type, _TYPE_PTR, _CLASS_IN)) - out.addAuthoritativeAnswer(DNSPointer(info.type, _TYPE_PTR, _CLASS_IN, _DNS_TTL, info.name)) - self.send(out) - i += 1 - nextTime += _CHECK_TIME + def checkService(self, info): + """Checks the network for a unique service name, modifying the + ServiceInfo passed in if it is not unique.""" + now = currentTimeMillis() + nexttime = now + i = 0 + while i < 3: + for record in self.cache.entriesWithName(info.type): + if (record.type == _TYPE_PTR and not record.isExpired(now) and + record.alias == info.name): + if (info.name.find('.') < 0): + info.name = ("%w.[%s:%d].%s" % + (info.name, info.address, info.port, info.type)) + self.checkService(info) + return + raise NonUniqueNameException + if now < nexttime: + self.wait(nexttime - now) + now = currentTimeMillis() + 
continue + out = DNSOutgoing(_FLAGS_QR_QUERY | _FLAGS_AA) + self.debug = out + out.addQuestion(DNSQuestion(info.type, _TYPE_PTR, _CLASS_IN)) + out.addAuthoritativeAnswer(DNSPointer(info.type, _TYPE_PTR, + _CLASS_IN, _DNS_TTL, info.name)) + self.send(out) + i += 1 + nexttime += _CHECK_TIME - def addListener(self, listener, question): - """Adds a listener for a given question. The listener will have - its updateRecord method called when information is available to - answer the question.""" - now = currentTimeMillis() - self.listeners.append(listener) - if question is not None: - for record in self.cache.entriesWithName(question.name): - if question.answeredBy(record) and not record.isExpired(now): - listener.updateRecord(self, now, record) - self.notifyAll() + def addListener(self, listener, question): + """Adds a listener for a given question. The listener will have + its updateRecord method called when information is available to + answer the question.""" + now = currentTimeMillis() + self.listeners.append(listener) + if question is not None: + for record in self.cache.entriesWithName(question.name): + if question.answeredBy(record) and not record.isExpired(now): + listener.updateRecord(self, now, record) + self.notifyAll() - def removeListener(self, listener): - """Removes a listener.""" - try: - self.listeners.remove(listener) - self.notifyAll() - except Exception: - pass + def removeListener(self, listener): + """Removes a listener.""" + try: + self.listeners.remove(listener) + self.notifyAll() + except Exception: + pass - def updateRecord(self, now, rec): - """Used to notify listeners of new information that has updated - a record.""" - for listener in self.listeners: - listener.updateRecord(self, now, rec) - self.notifyAll() + def updateRecord(self, now, rec): + """Used to notify listeners of new information that has updated + a record.""" + for listener in self.listeners: + listener.updateRecord(self, now, rec) + self.notifyAll() - def handleResponse(self, 
msg): - """Deal with incoming response packets. All answers - are held in the cache, and listeners are notified.""" - now = currentTimeMillis() - for record in msg.answers: - expired = record.isExpired(now) - if record in self.cache.entries(): - if expired: - self.cache.remove(record) - else: - entry = self.cache.get(record) - if entry is not None: - entry.resetTTL(record) - record = entry - else: - self.cache.add(record) + def handleResponse(self, msg): + """Deal with incoming response packets. All answers + are held in the cache, and listeners are notified.""" + now = currentTimeMillis() + for record in msg.answers: + expired = record.isExpired(now) + if record in self.cache.entries(): + if expired: + self.cache.remove(record) + else: + entry = self.cache.get(record) + if entry is not None: + entry.resetTTL(record) + record = entry + else: + self.cache.add(record) + + self.updateRecord(now, record) + + def handleQuery(self, msg, addr, port): + """Deal with incoming query packets. Provides a response if + possible.""" + out = None - self.updateRecord(now, record) - - def handleQuery(self, msg, addr, port): - """Deal with incoming query packets. 
Provides a response if - possible.""" - out = None + # Support unicast client responses + # + if port != _MDNS_PORT: + out = DNSOutgoing(_FLAGS_QR_RESPONSE | _FLAGS_AA, 0) + for question in msg.questions: + out.addQuestion(question) - # Support unicast client responses - # - if port != _MDNS_PORT: - out = DNSOutgoing(_FLAGS_QR_RESPONSE | _FLAGS_AA, 0) - for question in msg.questions: - out.addQuestion(question) + for question in msg.questions: + if question.type == _TYPE_PTR: + if question.name == "_services._dns-sd._udp.local.": + for stype in self.servicetypes.keys(): + if out is None: + out = DNSOutgoing(_FLAGS_QR_RESPONSE | _FLAGS_AA) + out.addAnswer(msg, + DNSPointer( + "_services._dns-sd._udp.local.", + _TYPE_PTR, _CLASS_IN, + _DNS_TTL, stype)) + for service in self.services.values(): + if question.name == service.type: + if out is None: + out = DNSOutgoing(_FLAGS_QR_RESPONSE | _FLAGS_AA) + out.addAnswer(msg, DNSPointer(service.type, _TYPE_PTR, + _CLASS_IN, _DNS_TTL, service.name)) + else: + try: + if out is None: + out = DNSOutgoing(_FLAGS_QR_RESPONSE | _FLAGS_AA) - for question in msg.questions: - if question.type == _TYPE_PTR: - if question.name == "_services._dns-sd._udp.local.": - for stype in self.servicetypes.keys(): - if out is None: - out = DNSOutgoing(_FLAGS_QR_RESPONSE | _FLAGS_AA) - out.addAnswer(msg, DNSPointer("_services._dns-sd._udp.local.", _TYPE_PTR, _CLASS_IN, _DNS_TTL, stype)) - for service in self.services.values(): - if question.name == service.type: - if out is None: - out = DNSOutgoing(_FLAGS_QR_RESPONSE | _FLAGS_AA) - out.addAnswer(msg, DNSPointer(service.type, _TYPE_PTR, _CLASS_IN, _DNS_TTL, service.name)) - else: - try: - if out is None: - out = DNSOutgoing(_FLAGS_QR_RESPONSE | _FLAGS_AA) + # Answer A record queries for any service addresses we know + if question.type == _TYPE_A or question.type == _TYPE_ANY: + for service in self.services.values(): + if service.server == question.name.lower(): + out.addAnswer(msg, + 
DNSAddress(question.name, _TYPE_A, + _CLASS_IN | _CLASS_UNIQUE, + _DNS_TTL, service.address)) - # Answer A record queries for any service addresses we know - if question.type == _TYPE_A or question.type == _TYPE_ANY: - for service in self.services.values(): - if service.server == question.name.lower(): - out.addAnswer(msg, DNSAddress(question.name, _TYPE_A, _CLASS_IN | _CLASS_UNIQUE, _DNS_TTL, service.address)) - - service = self.services.get(question.name.lower(), None) - if not service: continue + service = self.services.get(question.name.lower(), None) + if not service: continue - if question.type == _TYPE_SRV or question.type == _TYPE_ANY: - out.addAnswer(msg, DNSService(question.name, _TYPE_SRV, _CLASS_IN | _CLASS_UNIQUE, _DNS_TTL, service.priority, service.weight, service.port, service.server)) - if question.type == _TYPE_TXT or question.type == _TYPE_ANY: - out.addAnswer(msg, DNSText(question.name, _TYPE_TXT, _CLASS_IN | _CLASS_UNIQUE, _DNS_TTL, service.text)) - if question.type == _TYPE_SRV: - out.addAdditionalAnswer(DNSAddress(service.server, _TYPE_A, _CLASS_IN | _CLASS_UNIQUE, _DNS_TTL, service.address)) - except Exception: - traceback.print_exc() - - if out is not None and out.answers: - out.id = msg.id - self.send(out, addr, port) + if (question.type == _TYPE_SRV or + question.type == _TYPE_ANY): + out.addAnswer(msg, + DNSService(question.name, _TYPE_SRV, + _CLASS_IN | _CLASS_UNIQUE, + _DNS_TTL, service.priority, + service.weight, service.port, + service.server)) + if (question.type == _TYPE_TXT or + question.type == _TYPE_ANY): + out.addAnswer(msg, DNSText(question.name, _TYPE_TXT, + _CLASS_IN | _CLASS_UNIQUE, _DNS_TTL, service.text)) + if question.type == _TYPE_SRV: + out.addAdditionalAnswer( + DNSAddress(service.server, _TYPE_A, + _CLASS_IN | _CLASS_UNIQUE, + _DNS_TTL, service.address)) + except Exception: + traceback.print_exc() - def send(self, out, addr = _MDNS_ADDR, port = _MDNS_PORT): - """Sends an outgoing packet.""" - # This is a quick test to 
see if we can parse the packets we generate - #temp = DNSIncoming(out.packet()) - try: - self.socket.sendto(out.packet(), 0, (addr, port)) - except Exception: - # Ignore this, it may be a temporary loss of network connection - pass + if out is not None and out.answers: + out.id = msg.id + self.send(out, addr, port) - def close(self): - """Ends the background threads, and prevent this instance from - servicing further queries.""" - if globals()['_GLOBAL_DONE'] == 0: - globals()['_GLOBAL_DONE'] = 1 - self.notifyAll() - self.engine.notify() - self.unregisterAllServices() - self.socket.setsockopt(socket.SOL_IP, socket.IP_DROP_MEMBERSHIP, socket.inet_aton(_MDNS_ADDR) + socket.inet_aton('0.0.0.0')) - self.socket.close() + def send(self, out, addr=_MDNS_ADDR, port=_MDNS_PORT): + """Sends an outgoing packet.""" + # This is a quick test to see if we can parse the packets we generate + #temp = DNSIncoming(out.packet()) + try: + self.socket.sendto(out.packet(), 0, (addr, port)) + except Exception: + # Ignore this, it may be a temporary loss of network connection + pass + + def close(self): + """Ends the background threads, and prevent this instance from + servicing further queries.""" + if globals()['_GLOBAL_DONE'] == 0: + globals()['_GLOBAL_DONE'] = 1 + self.notifyAll() + self.engine.notify() + self.unregisterAllServices() + self.socket.setsockopt(socket.SOL_IP, socket.IP_DROP_MEMBERSHIP, + socket.inet_aton(_MDNS_ADDR) + socket.inet_aton('0.0.0.0')) + self.socket.close() # Test a few module features, including service registration, service # query (for Zoe), and service unregistration. if __name__ == '__main__': - print "Multicast DNS Service Discovery for Python, version", __version__ - r = Zeroconf() - print "1. Testing registration of a service..." - desc = {'version':'0.10','a':'test value', 'b':'another value'} - info = ServiceInfo("_http._tcp.local.", "My Service Name._http._tcp.local.", socket.inet_aton("127.0.0.1"), 1234, 0, 0, desc) - print " Registering service..." 
- r.registerService(info) - print " Registration done." - print "2. Testing query of service information..." - print " Getting ZOE service:", str(r.getServiceInfo("_http._tcp.local.", "ZOE._http._tcp.local.")) - print " Query done." - print "3. Testing query of own service..." - print " Getting self:", str(r.getServiceInfo("_http._tcp.local.", "My Service Name._http._tcp.local.")) - print " Query done." - print "4. Testing unregister of service information..." - r.unregisterService(info) - print " Unregister done." - r.close() - -# no-check-code + print("Multicast DNS Service Discovery for Python, version", __version__) + r = Zeroconf() + print("1. Testing registration of a service...") + desc = {'version':'0.10','a':'test value', 'b':'another value'} + info = ServiceInfo("_http._tcp.local.", + "My Service Name._http._tcp.local.", + socket.inet_aton("127.0.0.1"), 1234, 0, 0, desc) + print(" Registering service...") + r.registerService(info) + print(" Registration done.") + print("2. Testing query of service information...") + print(" Getting ZOE service:", + str(r.getServiceInfo("_http._tcp.local.", "ZOE._http._tcp.local."))) + print(" Query done.") + print("3. Testing query of own service...") + print(" Getting self:", + str(r.getServiceInfo("_http._tcp.local.", + "My Service Name._http._tcp.local."))) + print(" Query done.") + print("4. Testing unregister of service information...") + r.unregisterService(info) + print(" Unregister done.") + r.close()
--- a/hgext/zeroconf/__init__.py Sun Mar 13 02:29:11 2016 +0100 +++ b/hgext/zeroconf/__init__.py Tue Mar 15 14:10:46 2016 -0700 @@ -4,7 +4,6 @@ # # This software may be used and distributed according to the terms of the # GNU General Public License version 2 or any later version. - '''discover and advertise repositories on the local network Zeroconf-enabled repositories will be announced in a network without @@ -23,13 +22,23 @@ $ hg paths zc-test = http://example.com:8000/test ''' +from __future__ import absolute_import -import socket, time, os +import os +import socket +import time -import Zeroconf -from mercurial import ui, hg, encoding, dispatch -from mercurial import extensions -from mercurial.hgweb import server as servermod +from . import Zeroconf +from mercurial import ( + dispatch, + encoding, + extensions, + hg, + ui as uimod, +) +from mercurial.hgweb import ( + server as servermod +) # Note for extension authors: ONLY specify testedwith = 'internal' for # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should @@ -169,6 +178,16 @@ repos += getzcpaths() return repos +def configsuboptions(orig, self, section, name, *args, **kwargs): + opt, sub = orig(self, section, name, *args, **kwargs) + if section == "paths" and name.startswith("zc-"): + # We have to find the URL in the zeroconf paths. We can't cons up any + # suboptions, so we use any that we found in the original config. 
+ for zcname, zcurl in getzcpaths(): + if zcname == name: + return zcurl, sub + return opt, sub + def defaultdest(orig, source): for name, path in getzcpaths(): if path == source: @@ -187,7 +206,8 @@ extensions.wrapfunction(dispatch, '_runcommand', cleanupafterdispatch) -extensions.wrapfunction(ui.ui, 'config', config) -extensions.wrapfunction(ui.ui, 'configitems', configitems) +extensions.wrapfunction(uimod.ui, 'config', config) +extensions.wrapfunction(uimod.ui, 'configitems', configitems) +extensions.wrapfunction(uimod.ui, 'configsuboptions', configsuboptions) extensions.wrapfunction(hg, 'defaultdest', defaultdest) extensions.wrapfunction(servermod, 'create_server', zc_create_server)
--- a/i18n/posplit Sun Mar 13 02:29:11 2016 +0100 +++ b/i18n/posplit Tue Mar 15 14:10:46 2016 -0700 @@ -57,11 +57,13 @@ if mdirective: if not msgid[mdirective.end():].rstrip(): # only directive, nothing to translate here + delta += 2 continue directive = mdirective.group(1) if directive in ('container', 'include'): if msgid.rstrip('\n').count('\n') == 0: # only rst syntax, nothing to translate + delta += 2 continue else: # lines following directly, unexpected
--- a/mercurial/__init__.py Sun Mar 13 02:29:11 2016 +0100 +++ b/mercurial/__init__.py Tue Mar 15 14:10:46 2016 -0700 @@ -19,11 +19,14 @@ # c - require C extensions # allow - allow pure Python implementation when C loading fails # py - only load pure Python modules -modulepolicy = '@MODULELOADPOLICY@' - +# # By default, require the C extensions for performance reasons. -if modulepolicy == '@' 'MODULELOADPOLICY' '@': - modulepolicy = 'c' +modulepolicy = 'c' +try: + from . import __modulepolicy__ + modulepolicy = __modulepolicy__.modulepolicy +except ImportError: + pass # PyPy doesn't load C extensions. # @@ -32,6 +35,11 @@ if '__pypy__' in sys.builtin_module_names: modulepolicy = 'py' +# Our C extensions aren't yet compatible with Python 3. So use pure Python +# on Python 3 for now. +if sys.version_info[0] >= 3: + modulepolicy = 'py' + # Environment variable can always force settings. modulepolicy = os.environ.get('HGMODULEPOLICY', modulepolicy)
--- a/mercurial/archival.py Sun Mar 13 02:29:11 2016 +0100 +++ b/mercurial/archival.py Tue Mar 15 14:10:46 2016 -0700 @@ -331,7 +331,7 @@ if subrepos: for subpath in sorted(ctx.substate): sub = ctx.workingsub(subpath) - submatch = matchmod.narrowmatcher(subpath, matchfn) + submatch = matchmod.subdirmatcher(subpath, matchfn) total += sub.archive(archiver, prefix, submatch) if total == 0:
--- a/mercurial/bookmarks.py Sun Mar 13 02:29:11 2016 +0100 +++ b/mercurial/bookmarks.py Tue Mar 15 14:10:46 2016 -0700 @@ -182,6 +182,11 @@ fp.write("%s %s\n" % (hex(node), encoding.fromlocal(name))) self._clean = True + def expandname(self, bname): + if bname == '.': + return self.active + return bname + def _readactive(repo, marks): """ Get the active bookmark. We can have an active bookmark that updates
--- a/mercurial/branchmap.py Sun Mar 13 02:29:11 2016 +0100 +++ b/mercurial/branchmap.py Tue Mar 15 14:10:46 2016 -0700 @@ -55,6 +55,7 @@ if not partial.validfor(repo): # invalidate the cache raise ValueError('tip differs') + cl = repo.changelog for l in lines: if not l: continue @@ -62,9 +63,9 @@ if state not in 'oc': raise ValueError('invalid branch state') label = encoding.tolocal(label.strip()) - if not node in repo: - raise ValueError('node %s does not exist' % node) node = bin(node) + if not cl.hasnode(node): + raise ValueError('node %s does not exist' % hex(node)) partial.setdefault(label, []).append(node) if state == 'c': partial._closednodes.add(node)
--- a/mercurial/changegroup.py Sun Mar 13 02:29:11 2016 +0100 +++ b/mercurial/changegroup.py Tue Mar 15 14:10:46 2016 -0700 @@ -306,6 +306,7 @@ self.manifestheader() repo.manifest.addgroup(self, revmap, trp) repo.ui.progress(_('manifests'), None) + self.callback = None def apply(self, repo, srctype, url, emptyok=False, targetphase=phases.draft, expectedtotal=None): @@ -363,7 +364,7 @@ efiles = set() def onchangelog(cl, node): - efiles.update(cl.read(node)[3]) + efiles.update(cl.readfiles(node)) self.changelogheader() srccontent = cl.addgroup(self, csmap, trp, @@ -375,6 +376,7 @@ clend = len(cl) changesets = clend - clstart repo.ui.progress(_('changesets'), None) + self.callback = None # pull off the manifest group repo.ui.status(_("adding manifests\n")) @@ -393,10 +395,8 @@ # process the files repo.ui.status(_("adding file changes\n")) - self.callback = None - pr = prog(_('files'), efiles) newrevs, newfiles = _addchangegroupfiles( - repo, self, revmap, trp, pr, needfiles) + repo, self, revmap, trp, efiles, needfiles) revisions += newrevs files += newfiles @@ -553,27 +553,6 @@ return d return readexactly(self._fh, n) -def _moddirs(files): - """Given a set of modified files, find the list of modified directories. - - This returns a list of (path to changed dir, changed dir) tuples, - as that's what the one client needs anyway. 
- - >>> _moddirs(['a/b/c.py', 'a/b/c.txt', 'a/d/e/f/g.txt', 'i.txt', ]) - [('/', 'a/'), ('a/', 'b/'), ('a/', 'd/'), ('a/d/', 'e/'), ('a/d/e/', 'f/')] - - """ - alldirs = set() - for f in files: - path = f.split('/')[:-1] - for i in xrange(len(path) - 1, -1, -1): - dn = '/'.join(path[:i]) - current = dn + '/', path[i] + '/' - if current in alldirs: - break - alldirs.add(current) - return sorted(alldirs) - class cg1packer(object): deltaheader = _CHANGEGROUPV1_DELTA_HEADER version = '01' @@ -659,33 +638,25 @@ rr, rl = revlog.rev, revlog.linkrev return [n for n in missing if rl(rr(n)) not in commonrevs] - def _packmanifests(self, mfnodes, tmfnodes, lookuplinknode): + def _packmanifests(self, dir, mfnodes, lookuplinknode): """Pack flat manifests into a changegroup stream.""" - ml = self._repo.manifest - size = 0 - for chunk in self.group( - mfnodes, ml, lookuplinknode, units=_('manifests')): - size += len(chunk) + assert not dir + for chunk in self.group(mfnodes, self._repo.manifest, + lookuplinknode, units=_('manifests')): yield chunk - self._verbosenote(_('%8.i (manifests)\n') % size) - # It looks odd to assert this here, but tmfnodes doesn't get - # filled in until after we've called lookuplinknode for - # sending root manifests, so the only way to tell the streams - # got crossed is to check after we've done all the work. - assert not tmfnodes + + def _manifestsdone(self): + return '' def generate(self, commonrevs, clnodes, fastpathlinkrev, source): '''yield a sequence of changegroup chunks (strings)''' repo = self._repo cl = repo.changelog - ml = repo.manifest clrevorder = {} mfs = {} # needed manifests - tmfnodes = {} fnodes = {} # needed file nodes - # maps manifest node id -> set(changed files) - mfchangedfiles = {} + changedfiles = set() # Callback for the changelog, used to collect changed files and manifest # nodes. @@ -698,7 +669,7 @@ mfs.setdefault(n, x) # Record a complete list of potentially-changed files in # this manifest. 
- mfchangedfiles.setdefault(n, set()).update(c[3]) + changedfiles.update(c[3]) return x self._verbosenote(_('uncompressed size of bundle content:\n')) @@ -729,12 +700,47 @@ # send along with files. This could probably be fixed. fastpathlinkrev = fastpathlinkrev and ( 'treemanifest' not in repo.requirements) + + for chunk in self.generatemanifests(commonrevs, clrevorder, + fastpathlinkrev, mfs, fnodes): + yield chunk + mfs.clear() + clrevs = set(cl.rev(x) for x in clnodes) + + if not fastpathlinkrev: + def linknodes(unused, fname): + return fnodes.get(fname, {}) + else: + cln = cl.node + def linknodes(filerevlog, fname): + llr = filerevlog.linkrev + fln = filerevlog.node + revs = ((r, llr(r)) for r in filerevlog) + return dict((fln(r), cln(lr)) for r, lr in revs if lr in clrevs) + + for chunk in self.generatefiles(changedfiles, linknodes, commonrevs, + source): + yield chunk + + yield self.close() + + if clnodes: + repo.hook('outgoing', node=hex(clnodes[0]), source=source) + + def generatemanifests(self, commonrevs, clrevorder, fastpathlinkrev, mfs, + fnodes): + repo = self._repo + dirlog = repo.manifest.dirlog + tmfnodes = {'': mfs} + # Callback for the manifest, used to collect linkrevs for filelog # revisions. # Returns the linkrev node (collected in lookupcl). - if fastpathlinkrev: - lookupmflinknode = mfs.__getitem__ - else: + def makelookupmflinknode(dir): + if fastpathlinkrev: + assert not dir + return mfs.__getitem__ + def lookupmflinknode(x): """Callback for looking up the linknode for manifests. @@ -751,75 +757,36 @@ the client before you can trust the list of files and treemanifests to send. """ - clnode = mfs[x] - # We no longer actually care about reading deltas of - # the manifest here, because we already know the list - # of changed files, so for treemanifests (which - # lazily-load anyway to *generate* a readdelta) we can - # just load them with read() and then we'll actually - # be able to correctly load node IDs from the - # submanifest entries. 
- if 'treemanifest' in repo.requirements: - mdata = ml.read(x) - else: - mdata = ml.readfast(x) - for f in mfchangedfiles[x]: - try: - n = mdata[f] - except KeyError: - continue - # record the first changeset introducing this filelog - # version - fclnodes = fnodes.setdefault(f, {}) - fclnode = fclnodes.setdefault(n, clnode) - if clrevorder[clnode] < clrevorder[fclnode]: - fclnodes[n] = clnode - # gather list of changed treemanifest nodes - if 'treemanifest' in repo.requirements: - submfs = {'/': mdata} - for dn, bn in _moddirs(mfchangedfiles[x]): - try: - submf = submfs[dn] - submf = submf._dirs[bn] - except KeyError: - continue # deleted directory, so nothing to send - submfs[submf.dir()] = submf - tmfclnodes = tmfnodes.setdefault(submf.dir(), {}) - tmfclnode = tmfclnodes.setdefault(submf._node, clnode) + clnode = tmfnodes[dir][x] + mdata = dirlog(dir).readshallowfast(x) + for p, n, fl in mdata.iterentries(): + if fl == 't': # subdirectory manifest + subdir = dir + p + '/' + tmfclnodes = tmfnodes.setdefault(subdir, {}) + tmfclnode = tmfclnodes.setdefault(n, clnode) if clrevorder[clnode] < clrevorder[tmfclnode]: tmfclnodes[n] = clnode + else: + f = dir + p + fclnodes = fnodes.setdefault(f, {}) + fclnode = fclnodes.setdefault(n, clnode) + if clrevorder[clnode] < clrevorder[fclnode]: + fclnodes[n] = clnode return clnode - - mfnodes = self.prune(ml, mfs, commonrevs) - for x in self._packmanifests( - mfnodes, tmfnodes, lookupmflinknode): - yield x - - mfs.clear() - clrevs = set(cl.rev(x) for x in clnodes) + return lookupmflinknode - if not fastpathlinkrev: - def linknodes(unused, fname): - return fnodes.get(fname, {}) - else: - cln = cl.node - def linknodes(filerevlog, fname): - llr = filerevlog.linkrev - fln = filerevlog.node - revs = ((r, llr(r)) for r in filerevlog) - return dict((fln(r), cln(lr)) for r, lr in revs if lr in clrevs) - - changedfiles = set() - for x in mfchangedfiles.itervalues(): - changedfiles.update(x) - for chunk in 
self.generatefiles(changedfiles, linknodes, commonrevs, - source): - yield chunk - - yield self.close() - - if clnodes: - repo.hook('outgoing', node=hex(clnodes[0]), source=source) + size = 0 + while tmfnodes: + dir = min(tmfnodes) + nodes = tmfnodes[dir] + prunednodes = self.prune(dirlog(dir), nodes, commonrevs) + for x in self._packmanifests(dir, prunednodes, + makelookupmflinknode(dir)): + size += len(x) + yield x + del tmfnodes[dir] + self._verbosenote(_('%8.i (manifests)\n') % size) + yield self._manifestsdone() # The 'source' parameter is useful for extensions def generatefiles(self, changedfiles, linknodes, commonrevs, source): @@ -920,23 +887,15 @@ version = '03' deltaheader = _CHANGEGROUPV3_DELTA_HEADER - def _packmanifests(self, mfnodes, tmfnodes, lookuplinknode): - # Note that debug prints are super confusing in this code, as - # tmfnodes gets populated by the calls to lookuplinknode in - # the superclass's manifest packer. In the future we should - # probably see if we can refactor this somehow to be less - # confusing. - for x in super(cg3packer, self)._packmanifests( - mfnodes, {}, lookuplinknode): - yield x - dirlog = self._repo.manifest.dirlog - for name, nodes in tmfnodes.iteritems(): - # For now, directory headers are simply file headers with - # a trailing '/' on the path (already in the name). 
- yield self.fileheader(name) - for chunk in self.group(nodes, dirlog(name), nodes.get): - yield chunk - yield self.close() + def _packmanifests(self, dir, mfnodes, lookuplinknode): + if dir: + yield self.fileheader(dir) + for chunk in self.group(mfnodes, self._repo.manifest.dirlog(dir), + lookuplinknode, units=_('manifests')): + yield chunk + + def _manifestsdone(self): + return self.close() def builddeltaheader(self, node, p1n, p2n, basenode, linknode, flags): return struct.pack( @@ -1109,16 +1068,18 @@ # to avoid a race we use changegroupsubset() (issue1320) return changegroupsubset(repo, basenodes, repo.heads(), source) -def _addchangegroupfiles(repo, source, revmap, trp, pr, needfiles): +def _addchangegroupfiles(repo, source, revmap, trp, expectedfiles, needfiles): revisions = 0 files = 0 while True: chunkdata = source.filelogheader() if not chunkdata: break + files += 1 f = chunkdata["filename"] repo.ui.debug("adding %s revisions\n" % f) - pr() + repo.ui.progress(_('files'), files, unit=_('files'), + total=expectedfiles) fl = repo.file(f) o = len(fl) try: @@ -1127,7 +1088,6 @@ except error.CensoredBaseError as e: raise error.Abort(_("received delta base is censored: %s") % e) revisions += len(fl) - o - files += 1 if f in needfiles: needs = needfiles[f] for new in xrange(o, len(fl)):
--- a/mercurial/changelog.py Sun Mar 13 02:29:11 2016 +0100 +++ b/mercurial/changelog.py Tue Mar 15 14:10:46 2016 -0700 @@ -7,6 +7,8 @@ from __future__ import absolute_import +import collections + from .i18n import _ from .node import ( bin, @@ -136,6 +138,122 @@ return appender(opener, name, mode, buf) return _delay +_changelogrevision = collections.namedtuple('changelogrevision', + ('manifest', 'user', 'date', + 'files', 'description', 'extra')) + +class changelogrevision(object): + """Holds results of a parsed changelog revision. + + Changelog revisions consist of multiple pieces of data, including + the manifest node, user, and date. This object exposes a view into + the parsed object. + """ + + __slots__ = ( + '_offsets', + '_text', + ) + + def __new__(cls, text): + if not text: + return _changelogrevision( + manifest=nullid, + user='', + date=(0, 0), + files=[], + description='', + extra=_defaultextra, + ) + + self = super(changelogrevision, cls).__new__(cls) + # We could return here and implement the following as an __init__. + # But doing it here is equivalent and saves an extra function call. + + # format used: + # nodeid\n : manifest node in ascii + # user\n : user, no \n or \r allowed + # time tz extra\n : date (time is int or float, timezone is int) + # : extra is metadata, encoded and separated by '\0' + # : older versions ignore it + # files\n\n : files modified by the cset, no \n or \r allowed + # (.*) : comment (free text, ideally utf-8) + # + # changelog v0 doesn't use extra + + nl1 = text.index('\n') + nl2 = text.index('\n', nl1 + 1) + nl3 = text.index('\n', nl2 + 1) + + # The list of files may be empty. Which means nl3 is the first of the + # double newline that precedes the description. 
+ if text[nl3 + 1] == '\n': + doublenl = nl3 + else: + doublenl = text.index('\n\n', nl3 + 1) + + self._offsets = (nl1, nl2, nl3, doublenl) + self._text = text + + return self + + @property + def manifest(self): + return bin(self._text[0:self._offsets[0]]) + + @property + def user(self): + off = self._offsets + return encoding.tolocal(self._text[off[0] + 1:off[1]]) + + @property + def _rawdate(self): + off = self._offsets + dateextra = self._text[off[1] + 1:off[2]] + return dateextra.split(' ', 2)[0:2] + + @property + def _rawextra(self): + off = self._offsets + dateextra = self._text[off[1] + 1:off[2]] + fields = dateextra.split(' ', 2) + if len(fields) != 3: + return None + + return fields[2] + + @property + def date(self): + raw = self._rawdate + time = float(raw[0]) + # Various tools did silly things with the timezone. + try: + timezone = int(raw[1]) + except ValueError: + timezone = 0 + + return time, timezone + + @property + def extra(self): + raw = self._rawextra + if raw is None: + return _defaultextra + + return decodeextra(raw) + + @property + def files(self): + off = self._offsets + if off[2] == off[3]: + return [] + + return self._text[off[2] + 1:off[3]].split('\n') + + @property + def description(self): + return encoding.tolocal(self._text[self._offsets[3] + 2:]) + class changelog(revlog.revlog): def __init__(self, opener): revlog.revlog.__init__(self, opener, "00changelog.i") @@ -323,42 +441,34 @@ revlog.revlog.checkinlinesize(self, tr, fp) def read(self, node): - """ - format used: - nodeid\n : manifest node in ascii - user\n : user, no \n or \r allowed - time tz extra\n : date (time is int or float, timezone is int) - : extra is metadata, encoded and separated by '\0' - : older versions ignore it - files\n\n : files modified by the cset, no \n or \r allowed - (.*) : comment (free text, ideally utf-8) + """Obtain data from a parsed changelog revision. 
+ + Returns a 6-tuple of: - changelog v0 doesn't use extra + - manifest node in binary + - author/user as a localstr + - date as a 2-tuple of (time, timezone) + - list of files + - commit message as a localstr + - dict of extra metadata + + Unless you need to access all fields, consider calling + ``changelogrevision`` instead, as it is faster for partial object + access. """ - text = self.revision(node) - if not text: - return (nullid, "", (0, 0), [], "", _defaultextra) - last = text.index("\n\n") - desc = encoding.tolocal(text[last + 2:]) - l = text[:last].split('\n') - manifest = bin(l[0]) - user = encoding.tolocal(l[1]) + c = changelogrevision(self.revision(node)) + return ( + c.manifest, + c.user, + c.date, + c.files, + c.description, + c.extra + ) - tdata = l[2].split(' ', 2) - if len(tdata) != 3: - time = float(tdata[0]) - try: - # various tools did silly things with the time zone field. - timezone = int(tdata[1]) - except ValueError: - timezone = 0 - extra = _defaultextra - else: - time, timezone = float(tdata[0]), int(tdata[1]) - extra = decodeextra(tdata[2]) - - files = l[3:] - return (manifest, user, (time, timezone), files, desc, extra) + def changelogrevision(self, nodeorrev): + """Obtain a ``changelogrevision`` for a node or revision.""" + return changelogrevision(self.revision(nodeorrev)) def readfiles(self, node): """
--- a/mercurial/cmdutil.py Sun Mar 13 02:29:11 2016 +0100 +++ b/mercurial/cmdutil.py Tue Mar 15 14:10:46 2016 -0700 @@ -5,18 +5,47 @@ # This software may be used and distributed according to the terms of the # GNU General Public License version 2 or any later version. -from node import hex, bin, nullid, nullrev, short -from i18n import _ -import os, sys, errno, re, tempfile, cStringIO -import util, scmutil, templater, patch, error, templatekw, revlog, copies -import match as matchmod -import repair, graphmod, revset, phases, obsolete, pathutil -import changelog -import bookmarks -import encoding -import formatter -import crecord as crecordmod -import lock as lockmod +from __future__ import absolute_import + +import cStringIO +import errno +import os +import re +import sys +import tempfile + +from .i18n import _ +from .node import ( + bin, + hex, + nullid, + nullrev, + short, +) + +from . import ( + bookmarks, + changelog, + copies, + crecord as crecordmod, + encoding, + error, + formatter, + graphmod, + lock as lockmod, + match as matchmod, + obsolete, + patch, + pathutil, + phases, + repair, + revlog, + revset, + scmutil, + templatekw, + templater, + util, +) def ishunk(x): hunkclasses = (crecordmod.uihunk, patch.recordhunk) @@ -78,8 +107,7 @@ def dorecord(ui, repo, commitfunc, cmdsuggest, backupall, filterfn, *pats, **opts): - import merge as mergemod - + from . 
import merge as mergemod if not ui.interactive(): if cmdsuggest: msg = _('running non-interactively, use %s instead') % cmdsuggest @@ -758,14 +786,14 @@ fp.write(str(pid) + '\n') fp.close() - if opts['daemon'] and not opts['daemon_pipefds']: + if opts['daemon'] and not opts['daemon_postexec']: # Signal child process startup with file removal lockfd, lockpath = tempfile.mkstemp(prefix='hg-service-') os.close(lockfd) try: if not runargs: runargs = util.hgcmd() + sys.argv[1:] - runargs.append('--daemon-pipefds=%s' % lockpath) + runargs.append('--daemon-postexec=unlink:%s' % lockpath) # Don't pass --cwd to the child process, because we've already # changed directory. for i in xrange(1, len(runargs)): @@ -796,15 +824,22 @@ initfn() if not opts['daemon']: - writepid(os.getpid()) - - if opts['daemon_pipefds']: - lockpath = opts['daemon_pipefds'] + writepid(util.getpid()) + + if opts['daemon_postexec']: try: os.setsid() except AttributeError: pass - os.unlink(lockpath) + for inst in opts['daemon_postexec']: + if inst.startswith('unlink:'): + lockpath = inst[7:] + os.unlink(lockpath) + elif inst.startswith('chdir:'): + os.chdir(inst[6:]) + elif inst != 'none': + raise error.Abort(_('invalid value for --daemon-postexec: %s') + % inst) util.hidewindow() sys.stdout.flush() sys.stderr.flush() @@ -863,7 +898,7 @@ updatefunc(<repo>, <node>) """ # avoid cycle context -> subrepo -> cmdutil - import context + from . import context extractdata = patch.extract(ui, hunk) tmpname = extractdata.get('filename') message = extractdata.get('message') @@ -1142,7 +1177,7 @@ # node2 (inclusive). Thus, ctx2's substate won't contain that # subpath. The best we can do is to ignore it. 
tempnode2 = None - submatch = matchmod.narrowmatcher(subpath, match) + submatch = matchmod.subdirmatcher(subpath, match) sub.diff(ui, diffopts, tempnode2, submatch, changes=changes, stat=stat, fp=fp, prefix=prefix) @@ -1470,6 +1505,7 @@ props['templ'] = self.t props['ctx'] = ctx props['repo'] = self.repo + props['ui'] = self.repo.ui props['revcache'] = {'copies': copies} props['cache'] = self.cache @@ -2173,6 +2209,7 @@ def formatnode(repo, ctx): props['ctx'] = ctx props['repo'] = repo + props['ui'] = repo.ui props['revcache'] = {} return templater.stringify(templ('graphnode', **props)) return formatnode @@ -2180,7 +2217,7 @@ def displaygraph(ui, repo, dag, displayer, edgefn, getrenamed=None, filematcher=None): formatnode = _graphnodeformatter(ui, displayer) - seen, state = [], graphmod.asciistate() + state = graphmod.asciistate() for rev, type, ctx, parents in dag: char = formatnode(repo, ctx) copies = None @@ -2198,7 +2235,7 @@ if not lines[-1]: del lines[-1] displayer.flush(ctx) - edges = edgefn(type, char, lines, seen, rev, parents) + edges = edgefn(type, char, lines, state, rev, parents) for type, char, lines, coldata in edges: graphmod.ascii(ui, state, type, char, lines, coldata) displayer.close() @@ -2260,7 +2297,7 @@ for subpath in sorted(wctx.substate): sub = wctx.sub(subpath) try: - submatch = matchmod.narrowmatcher(subpath, match) + submatch = matchmod.subdirmatcher(subpath, match) if opts.get('subrepos'): bad.extend(sub.add(ui, submatch, prefix, False, **opts)) else: @@ -2289,7 +2326,7 @@ for subpath in sorted(wctx.substate): sub = wctx.sub(subpath) try: - submatch = matchmod.narrowmatcher(subpath, match) + submatch = matchmod.subdirmatcher(subpath, match) subbad, subforgot = sub.forget(submatch, prefix) bad.extend([subpath + '/' + f for f in subbad]) forgot.extend([subpath + '/' + f for f in subforgot]) @@ -2346,7 +2383,7 @@ if subrepos or matchessubrepo(subpath): sub = ctx.sub(subpath) try: - submatch = matchmod.narrowmatcher(subpath, m) + submatch = 
matchmod.subdirmatcher(subpath, m) recurse = m.exact(subpath) or subrepos if sub.printfiles(ui, submatch, fm, fmt, recurse) == 0: ret = 0 @@ -2376,7 +2413,7 @@ if subrepos or matchessubrepo(m, subpath): sub = wctx.sub(subpath) try: - submatch = matchmod.narrowmatcher(subpath, m) + submatch = matchmod.subdirmatcher(subpath, m) if sub.removefiles(submatch, prefix, after, force, subrepos): ret = 1 except error.LookupError: @@ -2474,7 +2511,7 @@ for subpath in sorted(ctx.substate): sub = ctx.sub(subpath) try: - submatch = matchmod.narrowmatcher(subpath, matcher) + submatch = matchmod.subdirmatcher(subpath, matcher) if not sub.cat(submatch, os.path.join(prefix, sub._path), **opts): @@ -2504,7 +2541,7 @@ def amend(ui, repo, commitfunc, old, extra, pats, opts): # avoid cycle context -> subrepo -> cmdutil - import context + from . import context # amend will reuse the existing user if not specified, but the obsolete # marker creation requires that the current user's name is specified. @@ -3129,13 +3166,26 @@ """ parent, p2 = parents node = ctx.node() + excluded_files = [] + matcher_opts = {"exclude": excluded_files} + def checkout(f): fc = ctx[f] repo.wwrite(f, fc.data(), fc.flags()) audit_path = pathutil.pathauditor(repo.root) for f in actions['forget'][0]: - repo.dirstate.drop(f) + if interactive: + choice = \ + repo.ui.promptchoice( + _("forget added file %s (yn)?$$ &Yes $$ &No") + % f) + if choice == 0: + repo.dirstate.drop(f) + else: + excluded_files.append(repo.wjoin(f)) + else: + repo.dirstate.drop(f) for f in actions['remove'][0]: audit_path(f) try: @@ -3161,7 +3211,7 @@ if interactive: # Prompt the user for changes to revert torevert = [repo.wjoin(f) for f in actions['revert'][0]] - m = scmutil.match(ctx, torevert, {}) + m = scmutil.match(ctx, torevert, matcher_opts) diffopts = patch.difffeatureopts(repo.ui, whitespace=True) diffopts.nodates = True diffopts.git = True @@ -3254,24 +3304,13 @@ def cmd(name, options=(), synopsis=None, norepo=False, 
optionalrepo=False, inferrepo=False): def decorator(func): + func.norepo = norepo + func.optionalrepo = optionalrepo + func.inferrepo = inferrepo if synopsis: table[name] = func, list(options), synopsis else: table[name] = func, list(options) - - if norepo: - # Avoid import cycle. - import commands - commands.norepo += ' %s' % ' '.join(parsealiases(name)) - - if optionalrepo: - import commands - commands.optionalrepo += ' %s' % ' '.join(parsealiases(name)) - - if inferrepo: - import commands - commands.inferrepo += ' %s' % ' '.join(parsealiases(name)) - return func return decorator @@ -3333,13 +3372,56 @@ _('hg graft --continue')), ] -def checkafterresolved(repo): - contmsg = _("continue: %s\n") +def howtocontinue(repo): + '''Check for an unfinished operation and return the command to finish + it. + + afterresolvedstates tupples define a .hg/{file} and the corresponding + command needed to finish it. + + Returns a (msg, warning) tuple. 'msg' is a string and 'warning' is + a boolean. + ''' + contmsg = _("continue: %s") for f, msg in afterresolvedstates: if repo.vfs.exists(f): - repo.ui.warn(contmsg % msg) - return - repo.ui.note(contmsg % _("hg commit")) + return contmsg % msg, True + workingctx = repo[None] + dirty = any(repo.status()) or any(workingctx.sub(s).dirty() + for s in workingctx.substate) + if dirty: + return contmsg % _("hg commit"), False + return None, None + +def checkafterresolved(repo): + '''Inform the user about the next action after completing hg resolve + + If there's a matching afterresolvedstates, howtocontinue will yield + repo.ui.warn as the reporter. + + Otherwise, it will yield repo.ui.note. + ''' + msg, warning = howtocontinue(repo) + if msg is not None: + if warning: + repo.ui.warn("%s\n" % msg) + else: + repo.ui.note("%s\n" % msg) + +def wrongtooltocontinue(repo, task): + '''Raise an abort suggesting how to properly continue if there is an + active task. + + Uses howtocontinue() to find the active task. 
+ + If there's no task (repo.ui.note for 'hg commit'), it does not offer + a hint. + ''' + after = howtocontinue(repo) + hint = None + if after[1]: + hint = after[0] + raise error.Abort(_('no %s in progress') % task, hint=hint) class dirstateguard(object): '''Restore dirstate at unexpected failure.
--- a/mercurial/commands.py Sun Mar 13 02:29:11 2016 +0100 +++ b/mercurial/commands.py Tue Mar 15 14:10:46 2016 -0700 @@ -5,42 +5,80 @@ # This software may be used and distributed according to the terms of the # GNU General Public License version 2 or any later version. -from node import hex, bin, nullhex, nullid, nullrev, short -from lock import release -from i18n import _ -import os, re, difflib, time, tempfile, errno, shlex -import sys, socket -import hg, scmutil, util, revlog, copies, error, bookmarks -import patch, help, encoding, templatekw, discovery -import archival, changegroup, cmdutil, hbisect -import sshserver, hgweb -import extensions -import merge as mergemod -import minirst, revset, fileset -import dagparser, context, simplemerge, graphmod, copies -import random, operator -import setdiscovery, treediscovery, dagutil, pvec, localrepo, destutil -import phases, obsolete, exchange, bundle2, repair, lock as lockmod -import ui as uimod -import streamclone -import commandserver +from __future__ import absolute_import + +import difflib +import errno +import operator +import os +import random +import re +import shlex +import socket +import sys +import tempfile +import time + +from .i18n import _ +from .node import ( + bin, + hex, + nullhex, + nullid, + nullrev, + short, +) +from . import ( + archival, + bookmarks, + bundle2, + changegroup, + cmdutil, + commandserver, + context, + copies, + dagparser, + dagutil, + destutil, + discovery, + encoding, + error, + exchange, + extensions, + fileset, + graphmod, + hbisect, + help, + hg, + hgweb, + localrepo, + lock as lockmod, + merge as mergemod, + minirst, + obsolete, + patch, + phases, + pvec, + repair, + revlog, + revset, + scmutil, + setdiscovery, + simplemerge, + sshserver, + streamclone, + templatekw, + treediscovery, + ui as uimod, + util, +) + +release = lockmod.release table = {} command = cmdutil.command(table) -# Space delimited list of commands that don't require local repositories. 
-# This should be populated by passing norepo=True into the @command decorator. -norepo = '' -# Space delimited list of commands that optionally require local repositories. -# This should be populated by passing optionalrepo=True into the @command -# decorator. -optionalrepo = '' -# Space delimited list of commands that will examine arguments looking for -# a repository. This should be populated by passing inferrepo=True into the -# @command decorator. -inferrepo = '' - # label constants # until 3.5, bookmarks.current was the advertised name, not # bookmarks.active, so we must use both to avoid breaking old @@ -1682,6 +1720,15 @@ if not allowunstable and old.children(): raise error.Abort(_('cannot amend changeset with children')) + # Currently histedit gets confused if an amend happens while histedit + # is in progress. Since we have a checkunfinished command, we are + # temporarily honoring it. + # + # Note: eventually this guard will be removed. Please do not expect + # this behavior to remain. 
+ if not obsolete.isenabled(repo, obsolete.createmarkersopt): + cmdutil.checkunfinished(repo) + # commitfunc is used only for temporary amend commit by cmdutil.amend def commitfunc(ui, repo, message, match, opts): return repo.commit(message, @@ -2457,20 +2504,21 @@ raise error.Abort(_("no ignore patterns found")) else: for f in files: + nf = util.normpath(f) ignored = None ignoredata = None - if f != '.': - if ignore(f): - ignored = f - ignoredata = repo.dirstate._ignorefileandline(f) + if nf != '.': + if ignore(nf): + ignored = nf + ignoredata = repo.dirstate._ignorefileandline(nf) else: - for p in util.finddirs(f): + for p in util.finddirs(nf): if ignore(p): ignored = p ignoredata = repo.dirstate._ignorefileandline(p) break if ignored: - if ignored == f: + if ignored == nf: ui.write("%s is ignored\n" % f) else: ui.write("%s is ignored because of containing folder %s\n" @@ -2652,8 +2700,8 @@ fm.end() -@command('debuginstall', [], '', norepo=True) -def debuginstall(ui): +@command('debuginstall', [] + formatteropts, '', norepo=True) +def debuginstall(ui, **opts): '''test Mercurial installation Returns 0 on success. 
@@ -2668,86 +2716,110 @@ problems = 0 + fm = ui.formatter('debuginstall', opts) + fm.startitem() + # encoding - ui.status(_("checking encoding (%s)...\n") % encoding.encoding) + fm.write('encoding', _("checking encoding (%s)...\n"), encoding.encoding) + err = None try: encoding.fromlocal("test") except error.Abort as inst: - ui.write(" %s\n" % inst) - ui.write(_(" (check that your locale is properly set)\n")) + err = inst problems += 1 + fm.condwrite(err, 'encodingerror', _(" %s\n" + " (check that your locale is properly set)\n"), err) # Python - ui.status(_("checking Python executable (%s)\n") % sys.executable) - ui.status(_("checking Python version (%s)\n") - % ("%s.%s.%s" % sys.version_info[:3])) - ui.status(_("checking Python lib (%s)...\n") - % os.path.dirname(os.__file__)) + fm.write('pythonexe', _("checking Python executable (%s)\n"), + sys.executable) + fm.write('pythonver', _("checking Python version (%s)\n"), + ("%s.%s.%s" % sys.version_info[:3])) + fm.write('pythonlib', _("checking Python lib (%s)...\n"), + os.path.dirname(os.__file__)) # compiled modules - ui.status(_("checking installed modules (%s)...\n") - % os.path.dirname(__file__)) + fm.write('hgmodules', _("checking installed modules (%s)...\n"), + os.path.dirname(__file__)) + + err = None try: - import bdiff, mpatch, base85, osutil + from . import ( + base85, + bdiff, + mpatch, + osutil, + ) dir(bdiff), dir(mpatch), dir(base85), dir(osutil) # quiet pyflakes except Exception as inst: - ui.write(" %s\n" % inst) - ui.write(_(" One or more extensions could not be found")) - ui.write(_(" (check that you compiled the extensions)\n")) + err = inst problems += 1 + fm.condwrite(err, 'extensionserror', " %s\n", err) # templates - import templater + from . 
import templater p = templater.templatepaths() - ui.status(_("checking templates (%s)...\n") % ' '.join(p)) + fm.write('templatedirs', 'checking templates (%s)...\n', ' '.join(p)) + fm.condwrite(not p, '', _(" no template directories found\n")) if p: m = templater.templatepath("map-cmdline.default") if m: # template found, check if it is working + err = None try: templater.templater(m) except Exception as inst: - ui.write(" %s\n" % inst) + err = inst p = None + fm.condwrite(err, 'defaulttemplateerror', " %s\n", err) else: - ui.write(_(" template 'default' not found\n")) p = None - else: - ui.write(_(" no template directories found\n")) + fm.condwrite(p, 'defaulttemplate', + _("checking default template (%s)\n"), m) + fm.condwrite(not m, 'defaulttemplatenotfound', + _(" template '%s' not found\n"), "default") if not p: - ui.write(_(" (templates seem to have been installed incorrectly)\n")) problems += 1 + fm.condwrite(not p, '', + _(" (templates seem to have been installed incorrectly)\n")) # editor - ui.status(_("checking commit editor...\n")) editor = ui.geteditor() editor = util.expandpath(editor) + fm.write('editor', _("checking commit editor... 
(%s)\n"), editor) cmdpath = util.findexe(shlex.split(editor)[0]) - if not cmdpath: - if editor == 'vi': - ui.write(_(" No commit editor set and can't find vi in PATH\n")) - ui.write(_(" (specify a commit editor in your configuration" - " file)\n")) - else: - ui.write(_(" Can't find editor '%s' in PATH\n") % editor) - ui.write(_(" (specify a commit editor in your configuration" - " file)\n")) - problems += 1 + fm.condwrite(not cmdpath and editor == 'vi', 'vinotfound', + _(" No commit editor set and can't find %s in PATH\n" + " (specify a commit editor in your configuration" + " file)\n"), not cmdpath and editor == 'vi' and editor) + fm.condwrite(not cmdpath and editor != 'vi', 'editornotfound', + _(" Can't find editor '%s' in PATH\n" + " (specify a commit editor in your configuration" + " file)\n"), not cmdpath and editor) + if not cmdpath and editor != 'vi': + problems += 1 # check username - ui.status(_("checking username...\n")) + username = None + err = None try: - ui.username() + username = ui.username() except error.Abort as e: - ui.write(" %s\n" % e) - ui.write(_(" (specify a username in your configuration file)\n")) + err = e problems += 1 + fm.condwrite(username, 'username', _("checking username (%s)\n"), username) + fm.condwrite(err, 'usernameerror', _("checking username...\n %s\n" + " (specify a username in your configuration file)\n"), err) + + fm.condwrite(not problems, '', + _("no problems detected\n")) if not problems: - ui.status(_("no problems detected\n")) - else: - ui.write(_("%s problems detected," - " please check your install!\n") % problems) + fm.data(problems=problems) + fm.condwrite(problems, 'problems', + _("%s problems detected," + " please check your install!\n"), problems) + fm.end() return problems @@ -2813,6 +2885,17 @@ % (afile, _hashornull(anode))) ui.write((' other path: %s (node %s)\n') % (ofile, _hashornull(onode))) + elif rtype == 'f': + filename, rawextras = record.split('\0', 1) + extras = rawextras.split('\0') + i = 0 + 
extrastrings = [] + while i < len(extras): + extrastrings.append('%s = %s' % (extras[i], extras[i + 1])) + i += 2 + + ui.write(('file extras: %s (%s)\n') + % (filename, ', '.join(extrastrings))) else: ui.write(('unrecognized entry: %s\t%s\n') % (rtype, record.replace('\0', '\t'))) @@ -3204,12 +3287,16 @@ ts = ts + rs heads -= set(r.parentrevs(rev)) heads.add(rev) + try: + compression = ts / r.end(rev) + except ZeroDivisionError: + compression = 0 ui.write("%5d %5d %5d %5d %5d %10d %4d %4d %4d %7d %9d " "%11d %5d %8d\n" % (rev, p1, p2, r.start(rev), r.end(rev), r.start(dbase), r.start(cbase), r.start(p1), r.start(p2), - rs, ts, ts / r.end(rev), len(heads), clen)) + rs, ts, compression, len(heads), clen)) return 0 v = r.version @@ -3917,7 +4004,7 @@ except IOError as inst: if inst.errno != errno.ENOENT: raise - raise error.Abort(_("no graft state found, can't continue")) + cmdutil.wrongtooltocontinue(repo, _('graft')) else: cmdutil.checkunfinished(repo) cmdutil.bailifchanged(repo) @@ -5231,7 +5318,8 @@ try: # ui.forcemerge is an internal variable, do not document repo.ui.setconfig('ui', 'forcemerge', opts.get('tool', ''), 'merge') - return hg.merge(repo, node, force=opts.get('force')) + force = opts.get('force') + return hg.merge(repo, node, force=force, mergeforce=force) finally: ui.setconfig('ui', 'forcemerge', '', 'merge') @@ -5526,27 +5614,25 @@ ui.warn(_('no phases changed\n')) return ret -def postincoming(ui, repo, modheads, optupdate, checkout): +def postincoming(ui, repo, modheads, optupdate, checkout, brev): + """Run after a changegroup has been added via pull/unbundle + + This takes arguments below: + + :modheads: change of heads by pull/unbundle + :optupdate: updating working directory is needed or not + :checkout: update destination revision (or None to default destination) + :brev: a name, which might be a bookmark to be activated after updating + """ if modheads == 0: return if optupdate: try: - brev = checkout - movemarkfrom = None - if not checkout: - 
updata = destutil.destupdate(repo) - checkout, movemarkfrom, brev = updata - ret = hg.update(repo, checkout) + return hg.updatetotally(ui, repo, checkout, brev) except error.UpdateAbort as inst: msg = _("not updating: %s") % str(inst) hint = inst.hint raise error.UpdateAbort(msg, hint=hint) - if not ret and movemarkfrom: - if movemarkfrom == repo['.'].node(): - pass # no-op update - elif bookmarks.update(repo, [movemarkfrom], repo['.'].node()): - ui.status(_("updating bookmark %s\n") % repo._activebookmark) - return ret if modheads > 1: currentbranchheads = len(repo.branchheads()) if currentbranchheads == modheads: @@ -5634,11 +5720,28 @@ force=opts.get('force'), bookmarks=opts.get('bookmark', ()), opargs=pullopargs).cgresult + + # brev is a name, which might be a bookmark to be activated at + # the end of the update. In other words, it is an explicit + # destination of the update + brev = None + if checkout: checkout = str(repo.changelog.rev(checkout)) + + # order below depends on implementation of + # hg.addbranchrevs(). opts['bookmark'] is ignored, + # because 'checkout' is determined without it. + if opts.get('rev'): + brev = opts['rev'][0] + elif opts.get('branch'): + brev = opts['branch'][0] + else: + brev = branches[0] repo._subtoppath = source try: - ret = postincoming(ui, repo, modheads, opts.get('update'), checkout) + ret = postincoming(ui, repo, modheads, opts.get('update'), + checkout, brev) finally: del repo._subtoppath @@ -5687,7 +5790,8 @@ If -B/--bookmark is used, the specified bookmarked revision, its ancestors, and the bookmark will be pushed to the remote - repository. + repository. Specifying ``.`` is equivalent to specifying the active + bookmark's name. Please see :hg:`help urls` for important details about ``ssh://`` URLs. If DESTINATION is omitted, a default path will be used. 
@@ -5699,6 +5803,7 @@ ui.setconfig('bookmarks', 'pushing', opts['bookmark'], 'push') for b in opts['bookmark']: # translate -B options to -r so changesets get pushed + b = repo._bookmarks.expandname(b) if b in repo._bookmarks: opts.setdefault('rev', []).append(b) else: @@ -5891,8 +5996,9 @@ Returns 0 on success, 1 if any files fail a resolve attempt. """ + flaglist = 'all mark unmark list no_status'.split() all, mark, unmark, show, nostatus = \ - [opts.get(o) for o in 'all mark unmark list no_status'.split()] + [opts.get(o) for o in flaglist] if (show and (mark or unmark)) or (mark and unmark): raise error.Abort(_("too many options specified")) @@ -6021,7 +6127,22 @@ ms.recordactions() if not didwork and pats: + hint = None + if not any([p for p in pats if p.find(':') >= 0]): + pats = ['path:%s' % p for p in pats] + m = scmutil.match(wctx, pats, opts) + for f in ms: + if not m(f): + continue + flags = ''.join(['-%s ' % o[0] for o in flaglist + if opts.get(o)]) + hint = _("(try: hg resolve %s%s)\n") % ( + flags, + ' '.join(pats)) + break ui.warn(_("arguments do not match paths that need resolving\n")) + if hint: + ui.warn(hint) elif ms.mergedriver and ms.mdstate() != 's': # run conclude step when either a driver-resolved file is requested # or there are no driver-resolved files @@ -6185,7 +6306,7 @@ [('A', 'accesslog', '', _('name of access log file to write to'), _('FILE')), ('d', 'daemon', None, _('run server in background')), - ('', 'daemon-pipefds', '', _('used internally by daemon mode'), _('FILE')), + ('', 'daemon-postexec', [], _('used internally by daemon mode')), ('E', 'errorlog', '', _('name of error log file to write to'), _('FILE')), # use string type, then we can check if something was passed ('p', 'port', '', _('port to listen on (default: 8000)'), _('PORT')), @@ -6875,7 +6996,7 @@ else: modheads = gen.apply(repo, 'unbundle', 'bundle:' + fname) - return postincoming(ui, repo, modheads, opts.get('update'), None) + return postincoming(ui, repo, modheads, 
opts.get('update'), None, None) @command('^update|up|checkout|co', [('C', 'clean', None, _('discard uncommitted changes (no backup)')), @@ -6936,62 +7057,34 @@ Returns 0 on success, 1 if there are unresolved files. """ - movemarkfrom = None if rev and node: raise error.Abort(_("please specify just one revision")) if rev is None or rev == '': rev = node + if date and rev is not None: + raise error.Abort(_("you can't specify a revision and a date")) + + if check and clean: + raise error.Abort(_("cannot specify both -c/--check and -C/--clean")) + with repo.wlock(): cmdutil.clearunfinished(repo) if date: - if rev is not None: - raise error.Abort(_("you can't specify a revision and a date")) rev = cmdutil.finddate(ui, repo, date) # if we defined a bookmark, we have to remember the original name brev = rev rev = scmutil.revsingle(repo, rev, rev).rev() - if check and clean: - raise error.Abort(_("cannot specify both -c/--check and -C/--clean") - ) - if check: cmdutil.bailifchanged(repo, merge=False) - if rev is None: - updata = destutil.destupdate(repo, clean=clean, check=check) - rev, movemarkfrom, brev = updata repo.ui.setconfig('ui', 'forcemerge', tool, 'update') - if clean: - ret = hg.clean(repo, rev) - else: - ret = hg.update(repo, rev) - - if not ret and movemarkfrom: - if movemarkfrom == repo['.'].node(): - pass # no-op update - elif bookmarks.update(repo, [movemarkfrom], repo['.'].node()): - ui.status(_("updating bookmark %s\n") % repo._activebookmark) - else: - # this can happen with a non-linear update - ui.status(_("(leaving bookmark %s)\n") % - repo._activebookmark) - bookmarks.deactivate(repo) - elif brev in repo._bookmarks: - bookmarks.activate(repo, brev) - ui.status(_("(activating bookmark %s)\n") % brev) - elif brev: - if repo._activebookmark: - ui.status(_("(leaving bookmark %s)\n") % - repo._activebookmark) - bookmarks.deactivate(repo) - - return ret + return hg.updatetotally(ui, repo, rev, brev, clean=clean, check=check) @command('verify', []) def 
verify(ui, repo): @@ -7030,10 +7123,25 @@ # format names and versions into columns names = [] vers = [] + place = [] for name, module in extensions.extensions(): names.append(name) vers.append(extensions.moduleversion(module)) + if extensions.ismoduleinternal(module): + place.append(_("internal")) + else: + place.append(_("external")) if names: maxnamelen = max(len(n) for n in names) for i, name in enumerate(names): - ui.write(" %-*s %s\n" % (maxnamelen, name, vers[i])) + ui.write(" %-*s %s %s\n" % + (maxnamelen, name, place[i], vers[i])) + +def loadcmdtable(ui, name, cmdtable): + """Load command functions from specified cmdtable + """ + overrides = [cmd for cmd in cmdtable if cmd in table] + if overrides: + ui.warn(_("extension '%s' overrides commands: %s\n") + % (name, " ".join(overrides))) + table.update(cmdtable)
--- a/mercurial/commandserver.py Sun Mar 13 02:29:11 2016 +0100 +++ b/mercurial/commandserver.py Tue Mar 15 14:10:46 2016 -0700 @@ -190,16 +190,31 @@ return data + def _readstr(self): + """read a string from the channel + + format: + data length (uint32), data + """ + length = struct.unpack('>I', self._read(4))[0] + if not length: + return '' + return self._read(length) + + def _readlist(self): + """read a list of NULL separated strings from the channel""" + s = self._readstr() + if s: + return s.split('\0') + else: + return [] + def runcommand(self): """ reads a list of \0 terminated arguments, executes and writes the return code to the result channel """ from . import dispatch # avoid cycle - length = struct.unpack('>I', self._read(4))[0] - if not length: - args = [] - else: - args = self._read(length).split('\0') + args = self._readlist() # copy the uis so changes (e.g. --config or --verbose) don't # persist between requests @@ -262,7 +277,7 @@ hellomsg += '\n' hellomsg += 'encoding: ' + encoding.encoding hellomsg += '\n' - hellomsg += 'pid: %d' % os.getpid() + hellomsg += 'pid: %d' % util.getpid() # write the hello msg in -one- chunk self.cout.write(hellomsg) @@ -323,8 +338,9 @@ def handle(self): ui = self.server.ui repo = self.server.repo - sv = server(ui, repo, self.rfile, self.wfile) + sv = None try: + sv = server(ui, repo, self.rfile, self.wfile) try: sv.serve() # handle exceptions that may be raised by command server. most of @@ -339,7 +355,11 @@ except: # re-raises # also write traceback to error channel. otherwise client cannot # see it because it is written to server's stderr by default. - traceback.print_exc(file=sv.cerr) + if sv: + cerr = sv.cerr + else: + cerr = channeledoutput(self.wfile, 'e') + traceback.print_exc(file=cerr) raise class unixservice(object):
--- a/mercurial/context.py Sun Mar 13 02:29:11 2016 +0100 +++ b/mercurial/context.py Tue Mar 15 14:10:46 2016 -0700 @@ -259,7 +259,7 @@ if path in self._manifestdelta: return (self._manifestdelta[path], self._manifestdelta.flags(path)) - node, flag = self._repo.manifest.find(self._changeset[0], path) + node, flag = self._repo.manifest.find(self._changeset.manifest, path) if not node: raise error.ManifestLookupError(self._node, path, _('not found in manifest')) @@ -365,7 +365,7 @@ # node1 and node2 (inclusive). Thus, ctx2's substate # won't contain that subpath. The best we can do ignore it. rev2 = None - submatch = matchmod.narrowmatcher(subpath, match) + submatch = matchmod.subdirmatcher(subpath, match) s = sub.status(rev2, match=submatch, ignored=listignored, clean=listclean, unknown=listunknown, listsubrepos=True) @@ -524,15 +524,15 @@ @propertycache def _changeset(self): - return self._repo.changelog.read(self.rev()) + return self._repo.changelog.changelogrevision(self.rev()) @propertycache def _manifest(self): - return self._repo.manifest.read(self._changeset[0]) + return self._repo.manifest.read(self._changeset.manifest) @propertycache def _manifestdelta(self): - return self._repo.manifest.readdelta(self._changeset[0]) + return self._repo.manifest.readdelta(self._changeset.manifest) @propertycache def _parents(self): @@ -543,24 +543,32 @@ return [changectx(repo, p1), changectx(repo, p2)] def changeset(self): - return self._changeset + c = self._changeset + return ( + c.manifest, + c.user, + c.date, + c.files, + c.description, + c.extra, + ) def manifestnode(self): - return self._changeset[0] + return self._changeset.manifest def user(self): - return self._changeset[1] + return self._changeset.user def date(self): - return self._changeset[2] + return self._changeset.date def files(self): - return self._changeset[3] + return self._changeset.files def description(self): - return self._changeset[4] + return self._changeset.description def branch(self): - return 
encoding.tolocal(self._changeset[5].get("branch")) + return encoding.tolocal(self._changeset.extra.get("branch")) def closesbranch(self): - return 'close' in self._changeset[5] + return 'close' in self._changeset.extra def extra(self): - return self._changeset[5] + return self._changeset.extra def tags(self): return self._repo.nodetags(self._node) def bookmarks(self): @@ -789,7 +797,7 @@ if fctx._customcmp: return fctx.cmp(self) - if (fctx._filerev is None + if (fctx._filenode is None and (self._repo._encodefilterpats # if file data starts with '\1\n', empty metadata block is # prepended, which adds 4 bytes to filelog.size(). @@ -1892,9 +1900,9 @@ p2node = nullid p = pctx[f].parents() # if file isn't in pctx, check p2? if len(p) > 0: - p1node = p[0].node() + p1node = p[0].filenode() if len(p) > 1: - p2node = p[1].node() + p2node = p[1].filenode() man[f] = revlog.hash(self[f].data(), p1node, p2node) for f in self._status.added:
--- a/mercurial/copies.py Sun Mar 13 02:29:11 2016 +0100 +++ b/mercurial/copies.py Tue Mar 15 14:10:46 2016 -0700 @@ -10,7 +10,9 @@ import heapq from . import ( + node, pathutil, + scmutil, util, ) @@ -175,7 +177,18 @@ # we currently don't try to find where old files went, too expensive # this means we can miss a case like 'hg rm b; hg cp a b' cm = {} - missing = _computeforwardmissing(a, b, match=match) + + # Computing the forward missing is quite expensive on large manifests, since + # it compares the entire manifests. We can optimize it in the common use + # case of computing what copies are in a commit versus its parent (like + # during a rebase or histedit). Note, we exclude merge commits from this + # optimization, since the ctx.files() for a merge commit is not correct for + # this comparison. + forwardmissingmatch = match + if not match and b.p1() == a and b.p2().node() == node.nullid: + forwardmissingmatch = scmutil.matchfiles(a._repo, b.files()) + missing = _computeforwardmissing(a, b, match=forwardmissingmatch) + ancestrycontext = a._repo.changelog.ancestors([b.rev()], inclusive=True) for f in missing: fctx = b[f]
--- a/mercurial/demandimport.py Sun Mar 13 02:29:11 2016 +0100 +++ b/mercurial/demandimport.py Tue Mar 15 14:10:46 2016 -0700 @@ -174,7 +174,12 @@ """ symbol = getattr(mod, attr, nothing) if symbol is nothing: - symbol = _demandmod(attr, mod.__dict__, locals, level=1) + mn = '%s.%s' % (mod.__name__, attr) + if mn in ignore: + importfunc = _origimport + else: + importfunc = _demandmod + symbol = importfunc(attr, mod.__dict__, locals, level=1) setattr(mod, attr, symbol) # Record the importing module references this symbol so we can @@ -252,6 +257,7 @@ '_sre', # issue4920 'rfc822', 'mimetools', + 'sqlalchemy.events', # has import-time side effects (issue5085) # setuptools 8 expects this module to explode early when not on windows 'distutils.msvc9compiler' ]
--- a/mercurial/destutil.py Sun Mar 13 02:29:11 2016 +0100 +++ b/mercurial/destutil.py Tue Mar 15 14:10:46 2016 -0700 @@ -88,27 +88,55 @@ return node, movemark, activemark def _destupdatebranch(repo, clean, check): - """decide on an update destination from current branch""" + """decide on an update destination from current branch + + This ignores closed branch heads. + """ wc = repo[None] movemark = node = None - try: - node = repo.branchtip(wc.branch()) + currentbranch = wc.branch() + if currentbranch in repo.branchmap(): + heads = repo.branchheads(currentbranch) + if heads: + node = repo.revs('max(.::(%ln))', heads).first() if bookmarks.isactivewdirparent(repo): movemark = repo['.'].node() - except error.RepoLookupError: - if wc.branch() == 'default': # no default branch! - node = repo.lookup('tip') # update to tip + else: + if currentbranch == 'default': # no default branch! + # update to the tipmost non-closed branch head + node = repo.revs('max(head() and not closed())').first() else: - raise error.Abort(_("branch %s not found") % wc.branch()) + raise error.Abort(_("branch %s not found") % currentbranch) + return node, movemark, None + +def _destupdatebranchfallback(repo, clean, check): + """decide on an update destination from closed heads in current branch""" + wc = repo[None] + currentbranch = wc.branch() + movemark = None + if currentbranch in repo.branchmap(): + # here, all descendant branch heads are closed + heads = repo.branchheads(currentbranch, closed=True) + assert heads, "any branch has at least one head" + node = repo.revs('max(.::(%ln))', heads).first() + assert node is not None, ("any revision has at least " + "one descendant branch head") + if bookmarks.isactivewdirparent(repo): + movemark = repo['.'].node() + else: + # here, no "default" branch, and all branches are closed + node = repo.lookup('tip') + assert node is not None, "'tip' exists even in empty repository" return node, movemark, None # order in which each step should be evalutated # 
steps are run until one finds a destination -destupdatesteps = ['evolution', 'bookmark', 'branch'] +destupdatesteps = ['evolution', 'bookmark', 'branch', 'branchfallback'] # mapping to ease extension overriding steps. destupdatestepmap = {'evolution': _destupdateobs, 'bookmark': _destupdatebook, 'branch': _destupdatebranch, + 'branchfallback': _destupdatebranchfallback, } def destupdate(repo, clean=False, check=False): @@ -133,7 +161,102 @@ return rev, movemark, activemark -def _destmergebook(repo): +msgdestmerge = { + # too many matching divergent bookmark + 'toomanybookmarks': + {'merge': + (_("multiple matching bookmarks to merge -" + " please merge with an explicit rev or bookmark"), + _("run 'hg heads' to see all heads")), + 'rebase': + (_("multiple matching bookmarks to rebase -" + " please rebase to an explicit rev or bookmark"), + _("run 'hg heads' to see all heads")), + }, + # no other matching divergent bookmark + 'nootherbookmarks': + {'merge': + (_("no matching bookmark to merge - " + "please merge with an explicit rev or bookmark"), + _("run 'hg heads' to see all heads")), + 'rebase': + (_("no matching bookmark to rebase - " + "please rebase to an explicit rev or bookmark"), + _("run 'hg heads' to see all heads")), + }, + # branch have too many unbookmarked heads, no obvious destination + 'toomanyheads': + {'merge': + (_("branch '%s' has %d heads - please merge with an explicit rev"), + _("run 'hg heads .' to see heads")), + 'rebase': + (_("branch '%s' has %d heads - please rebase to an explicit rev"), + _("run 'hg heads .' 
to see heads")), + }, + # branch have no other unbookmarked heads + 'bookmarkedheads': + {'merge': + (_("heads are bookmarked - please merge with an explicit rev"), + _("run 'hg heads' to see all heads")), + 'rebase': + (_("heads are bookmarked - please rebase to an explicit rev"), + _("run 'hg heads' to see all heads")), + }, + # branch have just a single heads, but there is other branches + 'nootherbranchheads': + {'merge': + (_("branch '%s' has one head - please merge with an explicit rev"), + _("run 'hg heads' to see all heads")), + 'rebase': + (_("branch '%s' has one head - please rebase to an explicit rev"), + _("run 'hg heads' to see all heads")), + }, + # repository have a single head + 'nootherheads': + {'merge': + (_('nothing to merge'), + None), + 'rebase': + (_('nothing to rebase'), + None), + }, + # repository have a single head and we are not on it + 'nootherheadsbehind': + {'merge': + (_('nothing to merge'), + _("use 'hg update' instead")), + 'rebase': + (_('nothing to rebase'), + _("use 'hg update' instead")), + }, + # We are not on a head + 'notatheads': + {'merge': + (_('working directory not at a head revision'), + _("use 'hg update' or merge with an explicit revision")), + 'rebase': + (_('working directory not at a head revision'), + _("use 'hg update' or rebase to an explicit revision")) + }, + 'emptysourceset': + {'merge': + (_('source set is empty'), + None), + 'rebase': + (_('source set is empty'), + None), + }, + 'multiplebranchessourceset': + {'merge': + (_('source set is rooted in multiple branches'), + None), + 'rebase': + (_('rebaseset is rooted in multiple named branches'), + _('specify an explicit destination with --dest')), + }, + } + +def _destmergebook(repo, action='merge', sourceset=None): """find merge destination in the active bookmark case""" node = None bmheads = repo.bookmarkheads(repo._activebookmark) @@ -144,61 +267,90 @@ else: node = bmheads[0] elif len(bmheads) > 2: - raise error.Abort(_("multiple matching bookmarks to 
merge - " - "please merge with an explicit rev or bookmark"), - hint=_("run 'hg heads' to see all heads")) + msg, hint = msgdestmerge['toomanybookmarks'][action] + raise error.ManyMergeDestAbort(msg, hint=hint) elif len(bmheads) <= 1: - raise error.Abort(_("no matching bookmark to merge - " - "please merge with an explicit rev or bookmark"), - hint=_("run 'hg heads' to see all heads")) + msg, hint = msgdestmerge['nootherbookmarks'][action] + raise error.NoMergeDestAbort(msg, hint=hint) assert node is not None return node -def _destmergebranch(repo): +def _destmergebranch(repo, action='merge', sourceset=None, onheadcheck=True): """find merge destination based on branch heads""" node = None - branch = repo[None].branch() - bheads = repo.branchheads(branch) - nbhs = [bh for bh in bheads if not repo[bh].bookmarks()] - if len(nbhs) > 2: - raise error.Abort(_("branch '%s' has %d heads - " - "please merge with an explicit rev") - % (branch, len(bheads)), - hint=_("run 'hg heads .' to see heads")) + if sourceset is None: + sourceset = [repo[repo.dirstate.p1()].rev()] + branch = repo.dirstate.branch() + elif not sourceset: + msg, hint = msgdestmerge['emptysourceset'][action] + raise error.NoMergeDestAbort(msg, hint=hint) + else: + branch = None + for ctx in repo.set('roots(%ld::%ld)', sourceset, sourceset): + if branch is not None and ctx.branch() != branch: + msg, hint = msgdestmerge['multiplebranchessourceset'][action] + raise error.ManyMergeDestAbort(msg, hint=hint) + branch = ctx.branch() - parent = repo.dirstate.p1() - if len(nbhs) <= 1: - if len(bheads) > 1: - raise error.Abort(_("heads are bookmarked - " - "please merge with an explicit rev"), - hint=_("run 'hg heads' to see all heads")) - if len(repo.heads()) > 1: - raise error.Abort(_("branch '%s' has one head - " - "please merge with an explicit rev") - % branch, - hint=_("run 'hg heads' to see all heads")) - msg, hint = _('nothing to merge'), None - if parent != repo.lookup(branch): - hint = _("use 'hg update' 
instead") + bheads = repo.branchheads(branch) + onhead = repo.revs('%ld and %ln', sourceset, bheads) + if onheadcheck and not onhead: + # Case A: working copy if not on a head. (merge only) + # + # This is probably a user mistake We bailout pointing at 'hg update' + if len(repo.heads()) <= 1: + msg, hint = msgdestmerge['nootherheadsbehind'][action] + else: + msg, hint = msgdestmerge['notatheads'][action] raise error.Abort(msg, hint=hint) - - if parent not in bheads: - raise error.Abort(_('working directory not at a head revision'), - hint=_("use 'hg update' or merge with an " - "explicit revision")) - if parent == nbhs[0]: - node = nbhs[-1] + # remove heads descendants of source from the set + bheads = list(repo.revs('%ln - (%ld::)', bheads, sourceset)) + # filters out bookmarked heads + nbhs = list(repo.revs('%ld - bookmark()', bheads)) + if len(nbhs) > 1: + # Case B: There is more than 1 other anonymous heads + # + # This means that there will be more than 1 candidate. This is + # ambiguous. We abort asking the user to pick as explicit destination + # instead. + msg, hint = msgdestmerge['toomanyheads'][action] + msg %= (branch, len(bheads) + 1) + raise error.ManyMergeDestAbort(msg, hint=hint) + elif not nbhs: + # Case B: There is no other anonymous heads + # + # This means that there is no natural candidate to merge with. + # We abort, with various messages for various cases. + if bheads: + msg, hint = msgdestmerge['bookmarkedheads'][action] + elif len(repo.heads()) > 1: + msg, hint = msgdestmerge['nootherbranchheads'][action] + msg %= branch + elif not onhead: + # if 'onheadcheck == False' (rebase case), + # this was not caught in Case A. 
+ msg, hint = msgdestmerge['nootherheadsbehind'][action] + else: + msg, hint = msgdestmerge['nootherheads'][action] + raise error.NoMergeDestAbort(msg, hint=hint) else: node = nbhs[0] assert node is not None return node -def destmerge(repo): +def destmerge(repo, action='merge', sourceset=None, onheadcheck=True): + """return the default destination for a merge + + (or raise exception about why it can't pick one) + + :action: the action being performed, controls emitted error message + """ if repo._activebookmark: - node = _destmergebook(repo) + node = _destmergebook(repo, action=action, sourceset=sourceset) else: - node = _destmergebranch(repo) + node = _destmergebranch(repo, action=action, sourceset=sourceset, + onheadcheck=onheadcheck) return repo[node].rev() histeditdefaultrevset = 'reverse(only(.) and not public() and not ::merge())' @@ -218,3 +370,53 @@ return revs.first() return None + +def _statusotherbook(ui, repo): + bmheads = repo.bookmarkheads(repo._activebookmark) + curhead = repo[repo._activebookmark].node() + if repo.revs('%n and parents()', curhead): + # we are on the active bookmark + bmheads = [b for b in bmheads if curhead != b] + if bmheads: + msg = _('%i other divergent bookmarks for "%s"\n') + ui.status(msg % (len(bmheads), repo._activebookmark)) + +def _statusotherbranchheads(ui, repo): + currentbranch = repo.dirstate.branch() + allheads = repo.branchheads(currentbranch, closed=True) + heads = repo.branchheads(currentbranch) + if repo.revs('%ln and parents()', allheads): + # we are on a head, even though it might be closed + # + # on closed otherheads + # ========= ========== + # o 0 all heads for current branch are closed + # N only descendant branch heads are closed + # x 0 there is only one non-closed branch head + # N there are some non-closed branch heads + # ========= ========== + otherheads = repo.revs('%ln - parents()', heads) + if repo['.'].closesbranch(): + ui.status(_('updated to a closed branch head, ' + 'because all descendant 
heads are closed.\n' + 'beware of re-opening closed head ' + 'by subsequent commit here.\n')) + if otherheads: + ui.status(_('%i other heads for branch "%s"\n') % + (len(otherheads), currentbranch)) + else: + ui.status(_('all heads for branch "%s" are closed.\n') % + currentbranch) + elif otherheads: + ui.status(_('%i other heads for branch "%s"\n') % + (len(otherheads), currentbranch)) + +def statusotherdests(ui, repo): + """Print message about other head""" + # XXX we should probably include a hint: + # - about what to do + # - how to see such heads + if repo._activebookmark: + _statusotherbook(ui, repo) + else: + _statusotherbranchheads(ui, repo)
--- a/mercurial/dispatch.py Sun Mar 13 02:29:11 2016 +0100 +++ b/mercurial/dispatch.py Tue Mar 15 14:10:46 2016 -0700 @@ -31,8 +31,10 @@ error, extensions, fancyopts, + fileset, hg, hook, + revset, ui as uimod, util, ) @@ -79,6 +81,8 @@ else: write(_("hg: parse error: %s\n") % inst.args[0]) _reportsimilar(write, similar) + if inst.hint: + write(_("(%s)\n") % inst.hint) def dispatch(req): "run the command specified in req.args" @@ -109,8 +113,6 @@ return -1 except error.ParseError as inst: _formatparse(ferr.write, inst) - if inst.hint: - ferr.write(_("(%s)\n") % inst.hint) return -1 msg = ' '.join(' ' in a and repr(a) or a for a in req.args) @@ -118,11 +120,18 @@ ret = None try: ret = _runcatch(req) - return ret + except KeyboardInterrupt: + try: + req.ui.warn(_("interrupted!\n")) + except IOError as inst: + if inst.errno != errno.EPIPE: + raise + ret = -1 finally: duration = time.time() - starttime req.ui.log("commandfinish", "%s exited %s after %0.2f seconds\n", msg, ret or 0, duration) + return ret def _runcatch(req): def catchterm(*args): @@ -206,8 +215,6 @@ (inst.args[0], " ".join(inst.args[1]))) except error.ParseError as inst: _formatparse(ui.warn, inst) - if inst.hint: - ui.warn(_("(%s)\n") % inst.hint) return -1 except error.LockHeld as inst: if inst.errno == errno.ETIMEDOUT: @@ -313,11 +320,7 @@ else: ui.warn(_("abort: %s\n") % inst.strerror) except KeyboardInterrupt: - try: - ui.warn(_("interrupted!\n")) - except IOError as inst: - if inst.errno != errno.EPIPE: - raise + raise except MemoryError: ui.warn(_("abort: out of memory\n")) except SystemExit as inst: @@ -496,11 +499,11 @@ self.fn, self.opts = tableentry self.args = aliasargs(self.fn, args) - if cmd not in commands.norepo.split(' '): + if not self.fn.norepo: self.norepo = False - if cmd in commands.optionalrepo.split(' '): + if self.fn.optionalrepo: self.optionalrepo = True - if cmd in commands.inferrepo.split(' '): + if self.fn.inferrepo: self.inferrepo = True if self.help.startswith("hg " + 
cmd): # drop prefix in old-style help lines so hg shows the alias @@ -556,12 +559,6 @@ pass cmdtable[aliasdef.name] = (aliasdef, aliasdef.opts, aliasdef.help) - if aliasdef.norepo: - commands.norepo += ' %s' % alias - if aliasdef.optionalrepo: - commands.optionalrepo += ' %s' % alias - if aliasdef.inferrepo: - commands.inferrepo += ' %s' % alias def _parse(ui, args): options = {} @@ -609,7 +606,8 @@ for cfg in config: try: - name, value = cfg.split('=', 1) + name, value = [cfgelem.strip() + for cfgelem in cfg.split('=', 1)] section, name = name.split('.', 1) if not section or not name: raise IndexError @@ -684,16 +682,17 @@ result=ret, pats=cmdpats, opts=cmdoptions) return ret -def _getlocal(ui, rpath): +def _getlocal(ui, rpath, wd=None): """Return (path, local ui object) for the given target path. Takes paths in [cwd]/.hg/hgrc into account." """ - try: - wd = os.getcwd() - except OSError as e: - raise error.Abort(_("error getting current working directory: %s") % - e.strerror) + if wd is None: + try: + wd = os.getcwd() + except OSError as e: + raise error.Abort(_("error getting current working directory: %s") % + e.strerror) path = cmdutil.findrepo(wd) or "" if not path: lui = ui @@ -726,26 +725,16 @@ if precheck: strict = True - norepo = commands.norepo - optionalrepo = commands.optionalrepo - inferrepo = commands.inferrepo - def restorecommands(): - commands.norepo = norepo - commands.optionalrepo = optionalrepo - commands.inferrepo = inferrepo cmdtable = commands.table.copy() addaliases(lui, cmdtable) else: strict = False - def restorecommands(): - pass cmdtable = commands.table cmd = args[0] try: aliases, entry = cmdutil.findcmd(cmd, cmdtable, strict) except (error.AmbiguousCommand, error.UnknownCommand): - restorecommands() return cmd = aliases[0] @@ -756,9 +745,20 @@ return lambda: runcommand(lui, None, cmd, args[:1], ui, options, d, [], {}) - restorecommands() +_loaded = set() -_loaded = set() +# list of (objname, loadermod, loadername) tuple: +# - objname 
is the name of an object in extension module, from which +# extra information is loaded +# - loadermod is the module where loader is placed +# - loadername is the name of the function, which takes (ui, extensionname, +# extraobj) arguments +extraloaders = [ + ('cmdtable', commands, 'loadcmdtable'), + ('filesetpredicate', fileset, 'loadpredicate'), + ('revsetpredicate', revset, 'loadpredicate'), +] + def _dispatch(req): args = req.args ui = req.ui @@ -788,12 +788,10 @@ # (uisetup and extsetup are handled in extensions.loadall) for name, module in exts: - cmdtable = getattr(module, 'cmdtable', {}) - overrides = [cmd for cmd in cmdtable if cmd in commands.table] - if overrides: - ui.warn(_("extension '%s' overrides commands: %s\n") - % (name, " ".join(overrides))) - commands.table.update(cmdtable) + for objname, loadermod, loadername in extraloaders: + extraobj = getattr(module, objname, None) + if extraobj is not None: + getattr(loadermod, loadername)(ui, name, extraobj) _loaded.add(name) # (reposetup is handled in hg.repository) @@ -874,7 +872,7 @@ repo = None cmdpats = args[:] - if cmd not in commands.norepo.split(): + if not func.norepo: # use the repo from the request only if we don't have -R if not rpath and not cwd: repo = req.repo @@ -895,9 +893,9 @@ except error.RepoError: if rpath and rpath[-1]: # invalid -R path raise - if cmd not in commands.optionalrepo.split(): - if (cmd in commands.inferrepo.split() and - args and not path): # try to infer -R from command args + if not func.optionalrepo: + if func.inferrepo and args and not path: + # try to infer -R from command args repos = map(cmdutil.findrepo, args) guess = repos[0] if guess and repos.count(guess) == len(repos):
--- a/mercurial/encoding.py Sun Mar 13 02:29:11 2016 +0100 +++ b/mercurial/encoding.py Tue Mar 15 14:10:46 2016 -0700 @@ -7,14 +7,19 @@ from __future__ import absolute_import +import array import locale import os +import sys import unicodedata from . import ( error, ) +if sys.version_info[0] >= 3: + unichr = chr + # These unicode characters are ignored by HFS+ (Apple Technote 1150, # "Unicode Subtleties"), so we need to ignore them in some places for # sanity. @@ -22,7 +27,10 @@ "200c 200d 200e 200f 202a 202b 202c 202d 202e " "206a 206b 206c 206d 206e 206f feff".split()] # verify the next function will work -assert set([i[0] for i in _ignore]) == set(["\xe2", "\xef"]) +if sys.version_info[0] >= 3: + assert set(i[0] for i in _ignore) == set([ord(b'\xe2'), ord(b'\xef')]) +else: + assert set(i[0] for i in _ignore) == set(["\xe2", "\xef"]) def hfsignoreclean(s): """Remove codepoints ignored by HFS+ from s. @@ -378,9 +386,23 @@ upper = 1 other = 0 -_jsonmap = {} +_jsonmap = [] +_jsonmap.extend("\\u%04x" % x for x in range(32)) +_jsonmap.extend(chr(x) for x in range(32, 127)) +_jsonmap.append('\\u007f') +_jsonmap[0x09] = '\\t' +_jsonmap[0x0a] = '\\n' +_jsonmap[0x22] = '\\"' +_jsonmap[0x5c] = '\\\\' +_jsonmap[0x08] = '\\b' +_jsonmap[0x0c] = '\\f' +_jsonmap[0x0d] = '\\r' +_paranoidjsonmap = _jsonmap[:] +_paranoidjsonmap[0x3c] = '\\u003c' # '<' (e.g. escape "</script>") +_paranoidjsonmap[0x3e] = '\\u003e' # '>' +_jsonmap.extend(chr(x) for x in range(128, 256)) -def jsonescape(s): +def jsonescape(s, paranoid=False): '''returns a string suitable for JSON JSON is problematic for us because it doesn't support non-Unicode @@ -405,24 +427,36 @@ 'utf-8: caf\\xc3\\xa9' >>> jsonescape('') '' + + If paranoid, non-ascii and common troublesome characters are also escaped. + This is suitable for web output. 
+ + >>> jsonescape('escape boundary: \\x7e \\x7f \\xc2\\x80', paranoid=True) + 'escape boundary: ~ \\\\u007f \\\\u0080' + >>> jsonescape('a weird byte: \\xdd', paranoid=True) + 'a weird byte: \\\\udcdd' + >>> jsonescape('utf-8: caf\\xc3\\xa9', paranoid=True) + 'utf-8: caf\\\\u00e9' + >>> jsonescape('non-BMP: \\xf0\\x9d\\x84\\x9e', paranoid=True) + 'non-BMP: \\\\ud834\\\\udd1e' + >>> jsonescape('<foo@example.org>', paranoid=True) + '\\\\u003cfoo@example.org\\\\u003e' ''' - if not _jsonmap: - for x in xrange(32): - _jsonmap[chr(x)] = "\\u%04x" % x - for x in xrange(32, 256): - c = chr(x) - _jsonmap[c] = c - _jsonmap['\x7f'] = '\\u007f' - _jsonmap['\t'] = '\\t' - _jsonmap['\n'] = '\\n' - _jsonmap['\"'] = '\\"' - _jsonmap['\\'] = '\\\\' - _jsonmap['\b'] = '\\b' - _jsonmap['\f'] = '\\f' - _jsonmap['\r'] = '\\r' + if paranoid: + jm = _paranoidjsonmap + else: + jm = _jsonmap - return ''.join(_jsonmap[c] for c in toutf8b(s)) + u8chars = toutf8b(s) + try: + return ''.join(jm[x] for x in bytearray(u8chars)) # fast path + except IndexError: + pass + # non-BMP char is represented as UTF-16 surrogate pair + u16codes = array.array('H', u8chars.decode('utf-8').encode('utf-16')) + u16codes.pop(0) # drop BOM + return ''.join(jm[x] if x < 128 else '\\u%04x' % x for x in u16codes) _utf8len = [0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 3, 4]
--- a/mercurial/error.py Sun Mar 13 02:29:11 2016 +0100 +++ b/mercurial/error.py Tue Mar 15 14:10:46 2016 -0700 @@ -72,6 +72,15 @@ class UpdateAbort(Abort): """Raised when an update is aborted for destination issue""" +class MergeDestAbort(Abort): + """Raised when an update is aborted for destination issues""" + +class NoMergeDestAbort(MergeDestAbort): + """Raised when an update is aborted because there is nothing to merge""" + +class ManyMergeDestAbort(MergeDestAbort): + """Raised when an update is aborted because destination is ambiguous""" + class ResponseExpected(Abort): """Raised when an EOF is received for a prompt""" def __init__(self):
--- a/mercurial/exchange.py Sun Mar 13 02:29:11 2016 +0100 +++ b/mercurial/exchange.py Tue Mar 15 14:10:46 2016 -0700 @@ -266,10 +266,10 @@ class pushoperation(object): """A object that represent a single push operation - It purpose is to carry push related state and very common operation. + Its purpose is to carry push related state and very common operations. - A new should be created at the beginning of each push and discarded - afterward. + A new pushoperation should be created at the beginning of each push and + discarded afterward. """ def __init__(self, repo, remote, force=False, revs=None, newbranch=False, @@ -576,7 +576,8 @@ ancestors = repo.changelog.ancestors(revnums, inclusive=True) remotebookmark = remote.listkeys('bookmarks') - explicit = set(pushop.bookmarks) + explicit = set([repo._bookmarks.expandname(bookmark) + for bookmark in pushop.bookmarks]) comp = bookmod.compare(repo, repo._bookmarks, remotebookmark, srchex=hex) addsrc, adddst, advsrc, advdst, diverge, differ, invalid, same = comp
--- a/mercurial/extensions.py Sun Mar 13 02:29:11 2016 +0100 +++ b/mercurial/extensions.py Tue Mar 15 14:10:46 2016 -0700 @@ -71,6 +71,20 @@ exc.filename = path # python does not fill this raise +def _importh(name): + """import and return the <name> module""" + mod = __import__(name) + components = name.split('.') + for comp in components[1:]: + mod = getattr(mod, comp) + return mod + +def _reportimporterror(ui, err, failed, next): + ui.debug('could not import %s (%s): trying %s\n' + % (failed, err, next)) + if ui.debugflag: + ui.traceback() + def load(ui, name, path): if name.startswith('hgext.') or name.startswith('hgext/'): shortname = name[6:] @@ -87,20 +101,11 @@ # conflicts with other modules mod = loadpath(path, 'hgext.%s' % name) else: - def importh(name): - mod = __import__(name) - components = name.split('.') - for comp in components[1:]: - mod = getattr(mod, comp) - return mod try: - mod = importh("hgext.%s" % name) + mod = _importh("hgext.%s" % name) except ImportError as err: - ui.debug('could not import hgext.%s (%s): trying %s\n' - % (name, err, name)) - if ui.debugflag: - ui.traceback() - mod = importh(name) + _reportimporterror(ui, err, "hgext.%s" % name, name) + mod = _importh(name) # Before we do anything with the extension, check against minimum stated # compatibility. 
This gives extension authors a mechanism to have their @@ -195,6 +200,12 @@ return func(*(args + a), **kw) return closure +def _updatewrapper(wrap, origfn): + '''Copy attributes to wrapper function''' + wrap.__module__ = getattr(origfn, '__module__') + wrap.__doc__ = getattr(origfn, '__doc__') + wrap.__dict__.update(getattr(origfn, '__dict__', {})) + def wrapcommand(table, command, wrapper, synopsis=None, docstring=None): '''Wrap the command named `command' in table @@ -233,13 +244,9 @@ origfn = entry[0] wrap = bind(util.checksignature(wrapper), util.checksignature(origfn)) - - wrap.__module__ = getattr(origfn, '__module__') - - doc = getattr(origfn, '__doc__') + _updatewrapper(wrap, origfn) if docstring is not None: - doc += docstring - wrap.__doc__ = doc + wrap.__doc__ += docstring newentry = list(entry) newentry[0] = wrap @@ -285,7 +292,9 @@ origfn = getattr(container, funcname) assert callable(origfn) - setattr(container, funcname, bind(wrapper, origfn)) + wrap = bind(wrapper, origfn) + _updatewrapper(wrap, origfn) + setattr(container, funcname, wrap) return origfn def _disabledpaths(strip_init=False): @@ -456,6 +465,10 @@ return exts +def notloaded(): + '''return short names of extensions that failed to load''' + return [name for name, mod in _extensions.iteritems() if mod is None] + def moduleversion(module): '''return version information from given module as a string''' if (util.safehasattr(module, 'getversion') @@ -468,3 +481,7 @@ if isinstance(version, (list, tuple)): version = '.'.join(str(o) for o in version) return version + +def ismoduleinternal(module): + exttestedwith = getattr(module, 'testedwith', None) + return exttestedwith == "internal"
--- a/mercurial/fileset.py Sun Mar 13 02:29:11 2016 +0100 +++ b/mercurial/fileset.py Tue Mar 15 14:10:46 2016 -0700 @@ -14,6 +14,7 @@ error, merge, parser, + registrar, util, ) @@ -144,34 +145,7 @@ # filesets using matchctx.existing() _existingcallers = set() -def predicate(decl, callstatus=False, callexisting=False): - """Return a decorator for fileset predicate function - - 'decl' argument is the declaration (including argument list like - 'adds(pattern)') or the name (for internal use only) of predicate. - - Optional 'callstatus' argument indicates whether predicate implies - 'matchctx.status()' at runtime or not (False, by default). - - Optional 'callexisting' argument indicates whether predicate - implies 'matchctx.existing()' at runtime or not (False, by - default). - """ - def decorator(func): - i = decl.find('(') - if i > 0: - name = decl[:i] - else: - name = decl - symbols[name] = func - if callstatus: - _statuscallers.add(name) - if callexisting: - _existingcallers.add(name) - if func.__doc__: - func.__doc__ = "``%s``\n %s" % (decl, func.__doc__.strip()) - return func - return decorator +predicate = registrar.filesetpredicate() @predicate('modified()', callstatus=True) def modified(mctx, x): @@ -560,5 +534,18 @@ def prettyformat(tree): return parser.prettyformat(tree, ('string', 'symbol')) +def loadpredicate(ui, extname, registrarobj): + """Load fileset predicates from specified registrarobj + """ + for name, func in registrarobj._table.iteritems(): + symbols[name] = func + if func._callstatus: + _statuscallers.add(name) + if func._callexisting: + _existingcallers.add(name) + +# load built-in predicates explicitly to setup _statuscallers/_existingcallers +loadpredicate(None, None, predicate) + # tell hggettext to extract docstrings from these functions: i18nfunctions = symbols.values()
--- a/mercurial/formatter.py Sun Mar 13 02:29:11 2016 +0100 +++ b/mercurial/formatter.py Tue Mar 15 14:10:46 2016 -0700 @@ -153,7 +153,7 @@ self._topic = topic self._t = gettemplater(ui, topic, opts.get('template', '')) def _showitem(self): - g = self._t(self._topic, **self._item) + g = self._t(self._topic, ui=self._ui, **self._item) self._ui.write(templater.stringify(g)) def lookuptemplate(ui, topic, tmpl):
--- a/mercurial/graphmod.py Sun Mar 13 02:29:11 2016 +0100 +++ b/mercurial/graphmod.py Tue Mar 15 14:10:46 2016 -0700 @@ -28,6 +28,9 @@ ) CHANGESET = 'C' +PARENT = 'P' +GRANDPARENT = 'G' +MISSINGPARENT = 'M' def groupbranchiter(revs, parentsfunc, firstbranch=()): """Yield revisions from heads to roots one (topo) branch at a time. @@ -228,12 +231,16 @@ yield r def dagwalker(repo, revs): - """cset DAG generator yielding (id, CHANGESET, ctx, [parentids]) tuples + """cset DAG generator yielding (id, CHANGESET, ctx, [parentinfo]) tuples This generator function walks through revisions (which should be ordered - from bigger to lower). It returns a tuple for each node. The node and parent - ids are arbitrary integers which identify a node in the context of the graph + from bigger to lower). It returns a tuple for each node. + + Each parentinfo entry is a tuple with (edgetype, parentid), where edgetype + is one of PARENT, GRANDPARENT or MISSINGPARENT. The node and parent ids + are arbitrary integers which identify a node in the context of the graph returned. + """ if not revs: return @@ -252,10 +259,13 @@ for rev in revs: ctx = repo[rev] - parents = sorted(set([p.rev() for p in ctx.parents() - if p.rev() in revs])) - mpars = [p.rev() for p in ctx.parents() if - p.rev() != nullrev and p.rev() not in parents] + # partition into parents in the rev set and missing parents, then + # augment the lists with markers, to inform graph drawing code about + # what kind of edge to draw between nodes. 
+ pset = set(p.rev() for p in ctx.parents() if p.rev() in revs) + mpars = [p.rev() for p in ctx.parents() + if p.rev() != nullrev and p.rev() not in pset] + parents = [(PARENT, p) for p in sorted(pset)] for mpar in mpars: gp = gpcache.get(mpar) @@ -264,11 +274,14 @@ # through all revs (issue4782) if not isinstance(revs, revset.baseset): revs = revset.baseset(revs) - gp = gpcache[mpar] = revset.reachableroots(repo, revs, [mpar]) + gp = gpcache[mpar] = sorted(set(revset.reachableroots( + repo, revs, [mpar]))) if not gp: - parents.append(mpar) + parents.append((MISSINGPARENT, mpar)) + pset.add(mpar) else: - parents.extend(g for g in gp if g not in parents) + parents.extend((GRANDPARENT, g) for g in gp if g not in pset) + pset.update(gp) yield (ctx.rev(), CHANGESET, ctx, parents) @@ -281,7 +294,8 @@ include = set(nodes) for node in nodes: ctx = repo[node] - parents = set([p.rev() for p in ctx.parents() if p.node() in include]) + parents = set((PARENT, p.rev()) for p in ctx.parents() + if p.node() in include) yield (ctx.rev(), CHANGESET, ctx, sorted(parents)) def colored(dag, repo): @@ -330,7 +344,7 @@ next = seen[:] # Add parents to next - addparents = [p for p in parents if p not in next] + addparents = [p for pt, p in parents if p not in next] next[col:col + 1] = addparents # Set colors for the parents @@ -351,7 +365,7 @@ bconf.get('width', -1), bconf.get('color', ''))) elif eid == cur: - for p in parents: + for ptype, p in parents: bconf = getconf(p) edges.append(( ecol, next.index(p), color, @@ -362,15 +376,16 @@ yield (cur, type, data, (col, color), edges) seen = next -def asciiedges(type, char, lines, seen, rev, parents): +def asciiedges(type, char, lines, state, rev, parents): """adds edge info to changelog DAG walk suitable for ascii()""" + seen = state['seen'] if rev not in seen: seen.append(rev) nodeidx = seen.index(rev) knownparents = [] newparents = [] - for parent in parents: + for ptype, parent in parents: if parent in seen: knownparents.append(parent) 
else: @@ -461,7 +476,7 @@ def asciistate(): """returns the initial value for the "state" argument to ascii()""" - return [0, 0] + return {'seen': [], 'lastcoldiff': 0, 'lastindex': 0} def ascii(ui, state, type, char, text, coldata): """prints an ASCII graph of the DAG @@ -519,8 +534,8 @@ nodeline.extend([char, " "]) nodeline.extend( - _getnodelineedgestail(idx, state[1], ncols, coldiff, - state[0], fix_nodeline_tail)) + _getnodelineedgestail(idx, state['lastindex'], ncols, coldiff, + state['lastcoldiff'], fix_nodeline_tail)) # shift_interline is the line containing the non-vertical # edges between this entry and the next @@ -562,5 +577,5 @@ ui.write(ln.rstrip() + '\n') # ... and start over - state[0] = coldiff - state[1] = idx + state['lastcoldiff'] = coldiff + state['lastindex'] = idx
--- a/mercurial/help.py Sun Mar 13 02:29:11 2016 +0100 +++ b/mercurial/help.py Tue Mar 15 14:10:46 2016 -0700 @@ -149,6 +149,8 @@ for name, docs in itertools.chain( extensions.enabled(False).iteritems(), extensions.disabled().iteritems()): + if not docs: + continue mod = extensions.load(ui, name, '') name = name.rpartition('.')[-1] if lowercontains(name) or lowercontains(docs): @@ -186,6 +188,8 @@ loaddoc('bundles', subdir='internals')), (['changegroups'], _('representation of revlog data'), loaddoc('changegroups', subdir='internals')), + (['requirements'], _('repository requirements'), + loaddoc('requirements', subdir='internals')), (['revlogs'], _('revision storage mechanism'), loaddoc('revlogs', subdir='internals')), ])
--- a/mercurial/help/config.txt Sun Mar 13 02:29:11 2016 +0100 +++ b/mercurial/help/config.txt Tue Mar 15 14:10:46 2016 -0700 @@ -800,7 +800,7 @@ ``outgoing`` Run after sending changes from local repository to another. ID of first changeset sent is in ``$HG_NODE``. Source of operation is in - ``$HG_SOURCE``; Also see :hg:`help config.preoutgoing` hook. + ``$HG_SOURCE``; Also see :hg:`help config.hooks.preoutgoing` hook. ``post-<command>`` Run after successful invocations of the associated command. The @@ -881,11 +881,11 @@ ``txnclose`` Run after any repository transaction has been committed. At this point, the transaction can no longer be rolled back. The hook will run - after the lock is released. See :hg:`help config.pretxnclose` docs for + after the lock is released. See :hg:`help config.hooks.pretxnclose` docs for details about available variables. ``txnabort`` - Run when a transaction is aborted. See :hg:`help config.pretxnclose` + Run when a transaction is aborted. See :hg:`help config.hooks.pretxnclose` docs for details about available variables. ``pretxnchangegroup`` @@ -968,10 +968,16 @@ -------------------- Fingerprints of the certificates of known HTTPS servers. + A HTTPS connection to a server with a fingerprint configured here will only succeed if the servers certificate matches the fingerprint. This is very similar to how ssh known hosts works. + The fingerprint is the SHA-1 hash value of the DER encoded certificate. +Multiple values can be specified (separated by spaces or commas). This can +be used to define both old and new fingerprints while a host transitions +to a new certificate. + The CA chain and web.cacerts is not used for servers with a fingerprint. For example:: @@ -1007,6 +1013,25 @@ Optional. Always use the proxy, even for localhost and any entries in ``http_proxy.no``. (default: False) +``merge`` +--------- + +This section specifies behavior during merges and updates. 
+ +``checkignored`` + Controls behavior when an ignored file on disk has the same name as a tracked + file in the changeset being merged or updated to, and has different + contents. Options are ``abort``, ``warn`` and ``ignore``. With ``abort``, + abort on such files. With ``warn``, warn on such files and back them up as + .orig. With ``ignore``, don't print a warning and back them up as + .orig. (default: ``abort``) + +``checkunknown`` + Controls behavior when an unknown file that isn't ignored has the same name + as a tracked file in the changeset being merged or updated to, and has + different contents. Similar to ``merge.checkignored``, except for files that + are not ignored. (default: ``abort``) + ``merge-patterns`` ------------------
--- a/mercurial/help/environment.txt Sun Mar 13 02:29:11 2016 +0100 +++ b/mercurial/help/environment.txt Tue Mar 15 14:10:46 2016 -0700 @@ -69,6 +69,8 @@ Preserve internationalization. ``revsetalias`` Don't remove revset aliases. + ``progress`` + Don't hide progress output. Setting HGPLAINEXCEPT to anything (even an empty string) will enable plain mode.
--- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/mercurial/help/internals/requirements.txt Tue Mar 15 14:10:46 2016 -0700 @@ -0,0 +1,110 @@ +Requirements +============ + +Repositories contain a file (``.hg/requires``) containing a list of +features/capabilities that are *required* for clients to interface +with the repository. This file has been present in Mercurial since +version 0.9.2 (released December 2006). + +One of the first things clients do when opening a repository is read +``.hg/requires`` and verify that all listed requirements are supported, +aborting if not. Requirements are therefore a strong mechanism to +prevent incompatible clients from reading from unknown repository +formats or even corrupting them by writing to them. + +Extensions may add requirements. When they do this, clients not running +an extension will be unable to read from repositories. + +The following sections describe the requirements defined by the +Mercurial core distribution. + +revlogv1 +-------- + +When present, revlogs are version 1 (RevlogNG). RevlogNG was introduced +in 2006. The ``revlogv1`` requirement has been enabled by default +since the ``requires`` file was introduced in Mercurial 0.9.2. + +If this requirement is not present, version 0 revlogs are assumed. + +store +----- + +The *store* repository layout should be used. + +This requirement has been enabled by default since the ``requires`` file +was introduced in Mercurial 0.9.2. + +fncache +------- + +The *fncache* repository layout should be used. + +The *fncache* layout hash encodes filenames with long paths and +encodes reserved filenames. + +This requirement is enabled by default when the *store* requirement is +enabled (which is the default behavior). It was introduced in Mercurial +1.1 (released December 2008). + +shared +------ + +Denotes that the store for a repository is shared from another location +(defined by the ``.hg/sharedpath`` file). 
+ +This requirement is set when a repository is created via :hg:`share`. + +The requirement was added in Mercurial 1.3 (released July 2009). + +dotencode +--------- + +The *dotencode* repository layout should be used. + +The *dotencode* layout encodes the first period or space in filenames +to prevent issues on OS X and Windows. + +This requirement is enabled by default when the *store* requirement +is enabled (which is the default behavior). It was introduced in +Mercurial 1.7 (released November 2010). + +parentdelta +----------- + +Denotes a revlog delta encoding format that was experimental and +replaced by *generaldelta*. It should not be seen in the wild because +it was never enabled by default. + +This requirement was added in Mercurial 1.7 and removed in Mercurial +1.9. + +generaldelta +------------ + +Revlogs should be created with the *generaldelta* flag enabled. The +generaldelta flag will cause deltas to be encoded against a parent +revision instead of the previous revision in the revlog. + +Support for this requirement was added in Mercurial 1.9 (released +July 2011). The requirement was disabled on new repositories by +default until Mercurial 3.7 (released February 2016). + +manifestv2 +---------- + +Denotes that version 2 of manifests are being used. + +Support for this requirement was added in Mercurial 3.4 (released +May 2015). The requirement is currently experimental and is disabled +by default. + +treemanifest +------------ + +Denotes that tree manifests are being used. Tree manifests are +one manifest per directory (as opposed to a single flat manifest). + +Support for this requirement was added in Mercurial 3.4 (released +August 2015). The requirement is currently experimental and is +disabled by default.
--- a/mercurial/hg.py Sun Mar 13 02:29:11 2016 +0100 +++ b/mercurial/hg.py Tue Mar 15 14:10:46 2016 -0700 @@ -19,6 +19,7 @@ bookmarks, bundlerepo, cmdutil, + destutil, discovery, error, exchange, @@ -236,20 +237,7 @@ r = repository(ui, destwvfs.base) postshare(srcrepo, r, bookmarks=bookmarks) - - if update: - r.ui.status(_("updating working directory\n")) - if update is not True: - checkout = update - for test in (checkout, 'default', 'tip'): - if test is None: - continue - try: - uprev = r.lookup(test) - break - except error.RepoLookupError: - continue - _update(r, uprev) + _postshareupdate(r, update, checkout=checkout) def postshare(sourcerepo, destrepo, bookmarks=True): """Called after a new shared repo is created. @@ -272,6 +260,27 @@ fp.write('bookmarks\n') fp.close() +def _postshareupdate(repo, update, checkout=None): + """Maybe perform a working directory update after a shared repo is created. + + ``update`` can be a boolean or a revision to update to. + """ + if not update: + return + + repo.ui.status(_("updating working directory\n")) + if update is not True: + checkout = update + for test in (checkout, 'default', 'tip'): + if test is None: + continue + try: + uprev = repo.lookup(test) + break + except error.RepoLookupError: + continue + _update(repo, uprev) + def copystore(ui, srcrepo, destpath): '''copy files from store of srcrepo in destpath @@ -361,7 +370,7 @@ rev=rev, update=False, stream=stream) sharerepo = repository(ui, path=sharepath) - share(ui, sharerepo, dest=dest, update=update, bookmarks=False) + share(ui, sharerepo, dest=dest, update=False, bookmarks=False) # We need to perform a pull against the dest repo to fetch bookmarks # and other non-store data that isn't shared by default. 
In the case of @@ -371,6 +380,8 @@ destrepo = repository(ui, path=dest) exchange.pull(destrepo, srcpeer, heads=revs) + _postshareupdate(destrepo, update) + return srcpeer, peer(ui, peeropts, dest) def clone(ui, peeropts, source, dest=None, pull=False, rev=None, @@ -684,10 +695,67 @@ _showstats(repo, stats, quietempty) return stats[3] > 0 -def merge(repo, node, force=None, remind=True): +# naming conflict in updatetotally() +_clean = clean + +def updatetotally(ui, repo, checkout, brev, clean=False, check=False): + """Update the working directory with extra care for non-file components + + This takes care of non-file components below: + + :bookmark: might be advanced or (in)activated + + This takes arguments below: + + :checkout: to which revision the working directory is updated + :brev: a name, which might be a bookmark to be activated after updating + :clean: whether changes in the working directory can be discarded + :check: whether changes in the working directory should be checked + + This returns whether conflict is detected at updating or not. 
+ """ + with repo.wlock(): + movemarkfrom = None + warndest = False + if checkout is None: + updata = destutil.destupdate(repo, clean=clean, check=check) + checkout, movemarkfrom, brev = updata + warndest = True + + if clean: + ret = _clean(repo, checkout) + else: + ret = _update(repo, checkout) + + if not ret and movemarkfrom: + if movemarkfrom == repo['.'].node(): + pass # no-op update + elif bookmarks.update(repo, [movemarkfrom], repo['.'].node()): + ui.status(_("updating bookmark %s\n") % repo._activebookmark) + else: + # this can happen with a non-linear update + ui.status(_("(leaving bookmark %s)\n") % + repo._activebookmark) + bookmarks.deactivate(repo) + elif brev in repo._bookmarks: + if brev != repo._activebookmark: + ui.status(_("(activating bookmark %s)\n") % brev) + bookmarks.activate(repo, brev) + elif brev: + if repo._activebookmark: + ui.status(_("(leaving bookmark %s)\n") % + repo._activebookmark) + bookmarks.deactivate(repo) + + if warndest: + destutil.statusotherdests(ui, repo) + + return ret + +def merge(repo, node, force=None, remind=True, mergeforce=False): """Branch merge with node, resolving changes. Return true if any unresolved conflicts.""" - stats = mergemod.update(repo, node, True, force) + stats = mergemod.update(repo, node, True, force, mergeforce=mergeforce) _showstats(repo, stats) if stats[3]: repo.ui.status(_("use 'hg resolve' to retry unresolved file merges " @@ -878,6 +946,7 @@ assert isinstance(repo, localrepo.localrepository) self._repo = repo self._state, self.mtime = self._repostate() + self._filtername = repo.filtername def fetch(self): """Refresh (if necessary) and return a repository. 
@@ -897,7 +966,11 @@ if state == self._state: return self._repo, False - self._repo = repository(self._repo.baseui, self._repo.url()) + repo = repository(self._repo.baseui, self._repo.url()) + if self._filtername: + self._repo = repo.filtered(self._filtername) + else: + self._repo = repo.unfiltered() self._state = state self.mtime = mtime @@ -925,6 +998,10 @@ completely independent of the original. """ repo = repository(self._repo.baseui, self._repo.origroot) + if self._filtername: + repo = repo.filtered(self._filtername) + else: + repo = repo.unfiltered() c = cachedlocalrepo(repo) c._state = self._state c.mtime = self.mtime
--- a/mercurial/hgweb/protocol.py Sun Mar 13 02:29:11 2016 +0100 +++ b/mercurial/hgweb/protocol.py Tue Mar 15 14:10:46 2016 -0700 @@ -45,6 +45,11 @@ return [data[k] for k in keys] def _args(self): args = self.req.form.copy() + postlen = int(self.req.env.get('HTTP_X_HGARGS_POST', 0)) + if postlen: + args.update(cgi.parse_qs( + self.req.read(postlen), keep_blank_values=True)) + return args chunks = [] i = 1 while True:
--- a/mercurial/hgweb/webcommands.py Sun Mar 13 02:29:11 2016 +0100 +++ b/mercurial/hgweb/webcommands.py Tue Mar 15 14:10:46 2016 -0700 @@ -1135,7 +1135,7 @@ max([edge[1] for edge in edges] or [0])) return cols - def graphdata(usetuples, **map): + def graphdata(usetuples, encodestr): data = [] row = 0 @@ -1143,11 +1143,11 @@ if type != graphmod.CHANGESET: continue node = str(ctx) - age = templatefilters.age(ctx.date()) - desc = templatefilters.firstline(ctx.description()) + age = encodestr(templatefilters.age(ctx.date())) + desc = templatefilters.firstline(encodestr(ctx.description())) desc = cgi.escape(templatefilters.nonempty(desc)) - user = cgi.escape(templatefilters.person(ctx.user())) - branch = cgi.escape(ctx.branch()) + user = cgi.escape(templatefilters.person(encodestr(ctx.user()))) + branch = cgi.escape(encodestr(ctx.branch())) try: branchnode = web.repo.branchtip(branch) except error.RepoLookupError: @@ -1156,8 +1156,9 @@ if usetuples: data.append((node, vtx, edges, desc, user, age, branch, - [cgi.escape(x) for x in ctx.tags()], - [cgi.escape(x) for x in ctx.bookmarks()])) + [cgi.escape(encodestr(x)) for x in ctx.tags()], + [cgi.escape(encodestr(x)) + for x in ctx.bookmarks()])) else: edgedata = [{'col': edge[0], 'nextcol': edge[1], 'color': (edge[2] - 1) % 6 + 1, @@ -1195,8 +1196,9 @@ canvaswidth=(cols + 1) * bg_height, truecanvasheight=rows * bg_height, canvasheight=canvasheight, bg_height=bg_height, - jsdata=lambda **x: graphdata(True, **x), - nodes=lambda **x: graphdata(False, **x), + # {jsdata} will be passed to |json, so it must be in utf-8 + jsdata=lambda **x: graphdata(True, encoding.fromlocal), + nodes=lambda **x: graphdata(False, str), node=ctx.hex(), changenav=changenav) def _getdoc(e):
--- a/mercurial/hook.py Sun Mar 13 02:29:11 2016 +0100 +++ b/mercurial/hook.py Tue Mar 15 14:10:46 2016 -0700 @@ -36,7 +36,7 @@ d = funcname.rfind('.') if d == -1: raise error.HookLoadError( - _('%s hook is invalid ("%s" not in a module)') + _('%s hook is invalid: "%s" not in a module') % (hname, funcname)) modname = funcname[:d] oldpaths = sys.path @@ -49,13 +49,13 @@ with demandimport.deactivated(): try: obj = __import__(modname) - except ImportError: - e1 = sys.exc_type, sys.exc_value, sys.exc_traceback + except (ImportError, SyntaxError): + e1 = sys.exc_info() try: # extensions are loaded with hgext_ prefix obj = __import__("hgext_%s" % modname) - except ImportError: - e2 = sys.exc_type, sys.exc_value, sys.exc_traceback + except (ImportError, SyntaxError): + e2 = sys.exc_info() if ui.tracebackflag: ui.warn(_('exception from first failed import ' 'attempt:\n')) @@ -64,20 +64,26 @@ ui.warn(_('exception from second failed import ' 'attempt:\n')) ui.traceback(e2) + + if not ui.tracebackflag: + tracebackhint = _( + 'run with --traceback for stack trace') + else: + tracebackhint = None raise error.HookLoadError( - _('%s hook is invalid (import of "%s" failed)') % - (hname, modname)) + _('%s hook is invalid: import of "%s" failed') % + (hname, modname), hint=tracebackhint) sys.path = oldpaths try: for p in funcname.split('.')[1:]: obj = getattr(obj, p) except AttributeError: raise error.HookLoadError( - _('%s hook is invalid ("%s" is not defined)') + _('%s hook is invalid: "%s" is not defined') % (hname, funcname)) if not callable(obj): raise error.HookLoadError( - _('%s hook is invalid ("%s" is not callable)') + _('%s hook is invalid: "%s" is not callable') % (hname, funcname)) ui.note(_("calling hook %s: %s\n") % (hname, funcname)) @@ -100,6 +106,8 @@ '%s\n') % (hname, exc)) if throw: raise + if not ui.tracebackflag: + ui.warn(_('(run with --traceback for stack trace)\n')) ui.traceback() return True, True finally:
--- a/mercurial/httpconnection.py Sun Mar 13 02:29:11 2016 +0100 +++ b/mercurial/httpconnection.py Tue Mar 15 14:10:46 2016 -0700 @@ -33,9 +33,6 @@ """ def __init__(self, ui, *args, **kwargs): - # We can't just "self._data = open(*args, **kwargs)" here because there - # is an "open" function defined in this module that shadows the global - # one self.ui = ui self._data = open(*args, **kwargs) self.seek = self._data.seek
--- a/mercurial/httppeer.py Sun Mar 13 02:29:11 2016 +0100 +++ b/mercurial/httppeer.py Tue Mar 15 14:10:46 2016 -0700 @@ -92,43 +92,58 @@ if cmd == 'pushkey': args['data'] = '' data = args.pop('data', None) + headers = args.pop('headers', {}) + + self.ui.debug("sending %s command\n" % cmd) + q = [('cmd', cmd)] + headersize = 0 + # Important: don't use self.capable() here or else you end up + # with infinite recursion when trying to look up capabilities + # for the first time. + postargsok = self.caps is not None and 'httppostargs' in self.caps + # TODO: support for httppostargs when data is a file-like + # object rather than a basestring + canmungedata = not data or isinstance(data, basestring) + if postargsok and canmungedata: + strargs = urllib.urlencode(sorted(args.items())) + if strargs: + if not data: + data = strargs + elif isinstance(data, basestring): + data = strargs + data + headers['X-HgArgs-Post'] = len(strargs) + else: + if len(args) > 0: + httpheader = self.capable('httpheader') + if httpheader: + headersize = int(httpheader.split(',', 1)[0]) + if headersize > 0: + # The headers can typically carry more data than the URL. 
+ encargs = urllib.urlencode(sorted(args.items())) + headerfmt = 'X-HgArg-%s' + contentlen = headersize - len(headerfmt % '000' + ': \r\n') + headernum = 0 + varyheaders = [] + for i in xrange(0, len(encargs), contentlen): + headernum += 1 + header = headerfmt % str(headernum) + headers[header] = encargs[i:i + contentlen] + varyheaders.append(header) + headers['Vary'] = ','.join(varyheaders) + else: + q += sorted(args.items()) + qs = '?%s' % urllib.urlencode(q) + cu = "%s%s" % (self._url, qs) size = 0 if util.safehasattr(data, 'length'): size = data.length elif data is not None: size = len(data) - headers = args.pop('headers', {}) - if data is not None and 'Content-Type' not in headers: - headers['Content-Type'] = 'application/mercurial-0.1' - - if size and self.ui.configbool('ui', 'usehttp2', False): headers['Expect'] = '100-Continue' headers['X-HgHttp2'] = '1' - - self.ui.debug("sending %s command\n" % cmd) - q = [('cmd', cmd)] - headersize = 0 - if len(args) > 0: - httpheader = self.capable('httpheader') - if httpheader: - headersize = int(httpheader.split(',')[0]) - if headersize > 0: - # The headers can typically carry more data than the URL. - encargs = urllib.urlencode(sorted(args.items())) - headerfmt = 'X-HgArg-%s' - contentlen = headersize - len(headerfmt % '000' + ': \r\n') - headernum = 0 - for i in xrange(0, len(encargs), contentlen): - headernum += 1 - header = headerfmt % str(headernum) - headers[header] = encargs[i:i + contentlen] - varyheaders = [headerfmt % str(h) for h in range(1, headernum + 1)] - headers['Vary'] = ','.join(varyheaders) - else: - q += sorted(args.items()) - qs = '?%s' % urllib.urlencode(q) - cu = "%s%s" % (self._url, qs) + if data is not None and 'Content-Type' not in headers: + headers['Content-Type'] = 'application/mercurial-0.1' req = self.requestbuilder(cu, data, headers) if data is not None: self.ui.debug("sending %s bytes\n" % size)
--- a/mercurial/keepalive.py Sun Mar 13 02:29:11 2016 +0100 +++ b/mercurial/keepalive.py Tue Mar 15 14:10:46 2016 -0700 @@ -345,7 +345,7 @@ h.putheader('Content-length', '%d' % len(data)) else: h.putrequest('GET', req.get_selector(), **skipheaders) - except (socket.error) as err: + except socket.error as err: raise urllib2.URLError(err) for k, v in headers.items(): h.putheader(k, v)
--- a/mercurial/localrepo.py Sun Mar 13 02:29:11 2016 +0100 +++ b/mercurial/localrepo.py Tue Mar 15 14:10:46 2016 -0700 @@ -242,9 +242,6 @@ # only functions defined in module of enabled extensions are invoked featuresetupfuncs = set() - def _baserequirements(self, create): - return ['revlogv1'] - def __init__(self, baseui, path=None, create=False): self.requirements = set() self.wvfs = scmutil.vfs(path, expandpath=True, realpath=True) @@ -282,29 +279,21 @@ if not self.vfs.isdir(): if create: + self.requirements = newreporequirements(self) + if not self.wvfs.exists(): self.wvfs.makedirs() self.vfs.makedir(notindexed=True) - self.requirements.update(self._baserequirements(create)) - if self.ui.configbool('format', 'usestore', True): + + if 'store' in self.requirements: self.vfs.mkdir("store") - self.requirements.add("store") - if self.ui.configbool('format', 'usefncache', True): - self.requirements.add("fncache") - if self.ui.configbool('format', 'dotencode', True): - self.requirements.add('dotencode') + # create an invalid changelog self.vfs.append( "00changelog.i", '\0\0\0\2' # represents revlogv2 ' dummy changelog to prevent using the old repo layout' ) - if scmutil.gdinitconfig(self.ui): - self.requirements.add("generaldelta") - if self.ui.configbool('experimental', 'treemanifest', False): - self.requirements.add("treemanifest") - if self.ui.configbool('experimental', 'manifestv2', False): - self.requirements.add("manifestv2") else: raise error.RepoError(_("repository %s not found") % path) elif create: @@ -985,7 +974,7 @@ data = self.wvfs.read(filename) return self._filter(self._encodefilterpats, filename, data) - def wwrite(self, filename, data, flags): + def wwrite(self, filename, data, flags, backgroundclose=False): """write ``data`` into ``filename`` in the working directory This returns length of written (maybe decoded) data. 
@@ -994,7 +983,7 @@ if 'l' in flags: self.wvfs.symlink(data, filename) else: - self.wvfs.write(filename, data) + self.wvfs.write(filename, data, backgroundclose=backgroundclose) if 'x' in flags: self.wvfs.setflags(filename, False, True) return len(data) @@ -1962,3 +1951,27 @@ def islocal(path): return True + +def newreporequirements(repo): + """Determine the set of requirements for a new local repository. + + Extensions can wrap this function to specify custom requirements for + new repositories. + """ + ui = repo.ui + requirements = set(['revlogv1']) + if ui.configbool('format', 'usestore', True): + requirements.add('store') + if ui.configbool('format', 'usefncache', True): + requirements.add('fncache') + if ui.configbool('format', 'dotencode', True): + requirements.add('dotencode') + + if scmutil.gdinitconfig(ui): + requirements.add('generaldelta') + if ui.configbool('experimental', 'treemanifest', False): + requirements.add('treemanifest') + if ui.configbool('experimental', 'manifestv2', False): + requirements.add('manifestv2') + + return requirements
--- a/mercurial/lock.py Sun Mar 13 02:29:11 2016 +0100 +++ b/mercurial/lock.py Tue Mar 15 14:10:46 2016 -0700 @@ -9,7 +9,6 @@ import contextlib import errno -import os import socket import time import warnings @@ -77,8 +76,8 @@ self.release() def _getpid(self): - # wrapper around os.getpid() to make testing easier - return os.getpid() + # wrapper around util.getpid() to make testing easier + return util.getpid() def lock(self): timeout = self.timeout
--- a/mercurial/mail.py Sun Mar 13 02:29:11 2016 +0100 +++ b/mercurial/mail.py Tue Mar 15 14:10:46 2016 -0700 @@ -332,3 +332,21 @@ if not display: s, cs = _encode(ui, s, charsets) return mimetextqp(s, 'plain', cs) + +def headdecode(s): + '''Decodes RFC-2047 header''' + uparts = [] + for part, charset in email.Header.decode_header(s): + if charset is not None: + try: + uparts.append(part.decode(charset)) + continue + except UnicodeDecodeError: + pass + try: + uparts.append(part.decode('UTF-8')) + continue + except UnicodeDecodeError: + pass + uparts.append(part.decode('ISO-8859-1')) + return encoding.tolocal(u' '.join(uparts).encode('UTF-8'))
--- a/mercurial/manifest.py Sun Mar 13 02:29:11 2016 +0100 +++ b/mercurial/manifest.py Tue Mar 15 14:10:46 2016 -0700 @@ -325,6 +325,9 @@ def iteritems(self): return (x[:2] for x in self._lm.iterentries()) + def iterentries(self): + return self._lm.iterentries() + def text(self, usemanifestv2=False): if usemanifestv2: return _textv2(self._lm.iterentries()) @@ -517,6 +520,15 @@ self._node = node self._dirty = False + def iterentries(self): + self._load() + for p, n in sorted(self._dirs.items() + self._files.items()): + if p in self._files: + yield self._subpath(p), n, self._flags.get(p, '') + else: + for x in n.iterentries(): + yield x + def iteritems(self): self._load() for p, n in sorted(self._dirs.items() + self._files.items()): @@ -627,7 +639,6 @@ def setflag(self, f, flags): """Set the flags (symlink, executable) for path f.""" - assert 't' not in flags self._load() dir, subpath = _splittopdir(f) if dir: @@ -849,9 +860,7 @@ def text(self, usemanifestv2=False): """Get the full data of this manifest as a bytestring.""" self._load() - flags = self.flags - return _text(((f, self[f], flags(f)) for f in self.keys()), - usemanifestv2) + return _text(self.iterentries(), usemanifestv2) def dirtext(self, usemanifestv2=False): """Get the full data of this directory as a bytestring. Make sure that @@ -920,7 +929,8 @@ return manifestdict(data) def dirlog(self, dir): - assert self._treeondisk + if dir: + assert self._treeondisk if dir not in self._dirlogcache: self._dirlogcache[dir] = manifest(self.opener, dir, self._dirlogcache) @@ -945,6 +955,22 @@ d = mdiff.patchtext(self.revdiff(self.deltaparent(r), r)) return self._newmanifest(d) + def readshallowdelta(self, node): + '''For flat manifests, this is the same as readdelta(). For + treemanifests, this will read the delta for this revlog's directory, + without recursively reading subdirectory manifests. Instead, any + subdirectory entry will be reported as it appears in the manifests, i.e. 
+ the subdirectory will be reported among files and distinguished only by + its 't' flag.''' + if not self._treeondisk: + return self.readdelta(node) + if self._usemanifestv2: + raise error.Abort( + "readshallowdelta() not implemented for manifestv2") + r = self.rev(node) + d = mdiff.patchtext(self.revdiff(self.deltaparent(r), r)) + return manifestdict(d) + def readfast(self, node): '''use the faster of readdelta or read @@ -959,6 +985,15 @@ return self.readdelta(node) return self.read(node) + def readshallowfast(self, node): + '''like readfast(), but calls readshallowdelta() instead of readdelta() + ''' + r = self.rev(node) + deltaparent = self.deltaparent(r) + if deltaparent != revlog.nullrev and deltaparent in self.parentrevs(r): + return self.readshallowdelta(node) + return self.readshallow(node) + def read(self, node): if node == revlog.nullid: return self._newmanifest() # don't upset local cache @@ -980,6 +1015,13 @@ self._mancache[node] = (m, arraytext) return m + def readshallow(self, node): + '''Reads the manifest in this directory. When using flat manifests, + this manifest will generally have files in subdirectories in it. Does + not cache the manifest as the callers generally do not read the same + version twice.''' + return manifestdict(self.revision(node)) + def find(self, node, f): '''look up entry for a single file efficiently. return (node, flags) pair if found, (None, None) if not.'''
--- a/mercurial/match.py Sun Mar 13 02:29:11 2016 +0100 +++ b/mercurial/match.py Tue Mar 15 14:10:46 2016 -0700 @@ -334,13 +334,13 @@ m.bad = badfn return m -class narrowmatcher(match): +class subdirmatcher(match): """Adapt a matcher to work on a subdirectory only. The paths are remapped to remove/insert the path as needed: >>> m1 = match('root', '', ['a.txt', 'sub/b.txt']) - >>> m2 = narrowmatcher('sub', m1) + >>> m2 = subdirmatcher('sub', m1) >>> bool(m2('a.txt')) False >>> bool(m2('b.txt')) @@ -381,7 +381,16 @@ self._always = any(f == path for f in matcher._files) self._anypats = matcher._anypats + # Some information is lost in the superclass's constructor, so we + # can not accurately create the matching function for the subdirectory + # from the inputs. Instead, we override matchfn() and visitdir() to + # call the original matcher with the subdirectory path prepended. self.matchfn = lambda fn: matcher.matchfn(self._path + "/" + fn) + def visitdir(dir): + if dir == '.': + return matcher.visitdir(self._path) + return matcher.visitdir(self._path + "/" + dir) + self.visitdir = visitdir self._fileroots = set(self._files) def abs(self, f):
--- a/mercurial/merge.py Sun Mar 13 02:29:11 2016 +0100 +++ b/mercurial/merge.py Tue Mar 15 14:10:46 2016 -0700 @@ -65,6 +65,7 @@ (experimental) m: the external merge driver defined for this merge plus its run state (experimental) + f: a (filename, dictonary) tuple of optional values for a given file X: unsupported mandatory record type (used in tests) x: unsupported advisory record type (used in tests) @@ -102,6 +103,7 @@ def reset(self, node=None, other=None): self._state = {} + self._stateextras = {} self._local = None self._other = None for var in ('localctx', 'otherctx'): @@ -126,6 +128,7 @@ of on disk file. """ self._state = {} + self._stateextras = {} self._local = None self._other = None for var in ('localctx', 'otherctx'): @@ -152,6 +155,16 @@ elif rtype in 'FDC': bits = record.split('\0') self._state[bits[0]] = bits[1:] + elif rtype == 'f': + filename, rawextras = record.split('\0', 1) + extraparts = rawextras.split('\0') + extras = {} + i = 0 + while i < len(extraparts): + extras[extraparts[i]] = extraparts[i + 1] + i += 2 + + self._stateextras[filename] = extras elif not rtype.islower(): unsupported.add(rtype) self._results = {} @@ -298,7 +311,7 @@ @util.propertycache def otherctx(self): if self._other is None: - raise RuntimeError("localctx accessed but self._local isn't set") + raise RuntimeError("otherctx accessed but self._other isn't set") return self._repo[self._other] def active(self): @@ -336,6 +349,10 @@ records.append(('C', '\0'.join([d] + v))) else: records.append(('F', '\0'.join([d] + v))) + for filename, extras in sorted(self._stateextras.iteritems()): + rawextras = '\0'.join('%s\0%s' % (k, v) for k, v in + extras.iteritems()) + records.append(('f', '%s\0%s' % (filename, rawextras))) return records def _writerecords(self, records): @@ -388,6 +405,7 @@ fca.path(), hex(fca.filenode()), fco.path(), hex(fco.filenode()), fcl.flags()] + self._stateextras[fd] = { 'ancestorlinknode' : hex(fca.node()) } self._dirty = True def __contains__(self, 
dfile): @@ -423,6 +441,9 @@ if entry[0] == 'd': yield f + def extras(self, filename): + return self._stateextras.setdefault(filename, {}) + def _resolve(self, preresolve, dfile, wctx, labels=None): """rerun merge process for file path `dfile`""" if self[dfile] in 'rd': @@ -430,10 +451,16 @@ stateentry = self._state[dfile] state, hash, lfile, afile, anode, ofile, onode, flags = stateentry octx = self._repo[self._other] + extras = self.extras(dfile) + anccommitnode = extras.get('ancestorlinknode') + if anccommitnode: + actx = self._repo[anccommitnode] + else: + actx = None fcd = self._filectxorabsent(hash, wctx, dfile) fco = self._filectxorabsent(onode, octx, ofile) # TODO: move this to filectxorabsent - fca = self._repo.filectx(afile, fileid=anode) + fca = self._repo.filectx(afile, fileid=anode, changeid=actx) # "premerge" x flags flo = fco.flags() fla = fca.flags() @@ -462,6 +489,7 @@ if r is None: # no real conflict del self._state[dfile] + self._stateextras.pop(dfile, None) self._dirty = True elif not r: self.mark(dfile, 'r') @@ -570,29 +598,29 @@ def _checkunknownfile(repo, wctx, mctx, f, f2=None): if f2 is None: f2 = f - return (repo.wvfs.isfileorlink(f) - and repo.wvfs.audit.check(f) + return (repo.wvfs.audit.check(f) + and repo.wvfs.isfileorlink(f) and repo.dirstate.normalize(f) not in repo.dirstate and mctx[f2].cmp(wctx[f])) -def _checkunknownfiles(repo, wctx, mctx, force, actions): +def _checkunknownfiles(repo, wctx, mctx, force, actions, mergeforce): """ Considers any actions that care about the presence of conflicting unknown files. For some actions, the result is to abort; for others, it is to choose a different action. 
""" conflicts = set() + warnconflicts = set() + abortconflicts = set() + unknownconfig = _getcheckunknownconfig(repo, 'merge', 'checkunknown') + ignoredconfig = _getcheckunknownconfig(repo, 'merge', 'checkignored') if not force: - abortconflicts = set() - warnconflicts = set() def collectconflicts(conflicts, config): if config == 'abort': abortconflicts.update(conflicts) elif config == 'warn': warnconflicts.update(conflicts) - unknownconfig = _getcheckunknownconfig(repo, 'merge', 'checkunknown') - ignoredconfig = _getcheckunknownconfig(repo, 'merge', 'checkignored') for f, (m, args, msg) in actions.iteritems(): if m in ('c', 'dc'): if _checkunknownfile(repo, wctx, mctx, f): @@ -606,28 +634,54 @@ unknownconflicts = conflicts - ignoredconflicts collectconflicts(ignoredconflicts, ignoredconfig) collectconflicts(unknownconflicts, unknownconfig) - for f in sorted(abortconflicts): - repo.ui.warn(_("%s: untracked file differs\n") % f) - if abortconflicts: - raise error.Abort(_("untracked files in working directory " - "differ from files in requested revision")) + else: + for f, (m, args, msg) in actions.iteritems(): + if m == 'cm': + fl2, anc = args + different = _checkunknownfile(repo, wctx, mctx, f) + if repo.dirstate._ignore(f): + config = ignoredconfig + else: + config = unknownconfig - for f in sorted(warnconflicts): - repo.ui.warn(_("%s: replacing untracked file\n") % f) + # The behavior when force is True is described by this table: + # config different mergeforce | action backup + # * n * | get n + # * y y | merge - + # abort y n | merge - (1) + # warn y n | warn + get y + # ignore y n | get y + # + # (1) this is probably the wrong behavior here -- we should + # probably abort, but some actions like rebases currently + # don't like an abort happening in the middle of + # merge.update. 
+ if not different: + actions[f] = ('g', (fl2, False), "remote created") + elif mergeforce or config == 'abort': + actions[f] = ('m', (f, f, None, False, anc), + "remote differs from untracked local") + elif config == 'abort': + abortconflicts.add(f) + else: + if config == 'warn': + warnconflicts.add(f) + actions[f] = ('g', (fl2, True), "remote created") + + for f in sorted(abortconflicts): + repo.ui.warn(_("%s: untracked file differs\n") % f) + if abortconflicts: + raise error.Abort(_("untracked files in working directory " + "differ from files in requested revision")) + + for f in sorted(warnconflicts): + repo.ui.warn(_("%s: replacing untracked file\n") % f) for f, (m, args, msg) in actions.iteritems(): backup = f in conflicts if m == 'c': flags, = args actions[f] = ('g', (flags, backup), msg) - elif m == 'cm': - fl2, anc = args - different = _checkunknownfile(repo, wctx, mctx, f) - if different: - actions[f] = ('m', (f, f, None, False, anc), - "remote differs from untracked local") - else: - actions[f] = ('g', (fl2, backup), "remote created") def _forgetremoved(wctx, mctx, branchmerge): """ @@ -747,10 +801,8 @@ if '.hgsubstate' in m1: # check whether sub state is modified - for s in sorted(wctx.substate): - if wctx.sub(s).dirty(): - m1['.hgsubstate'] += '+' - break + if any(wctx.sub(s).dirty() for s in wctx.substate): + m1['.hgsubstate'] += '+' # Compare manifests if matcher is not None: @@ -876,13 +928,14 @@ del actions[f] # don't get = keep local deleted def calculateupdates(repo, wctx, mctx, ancestors, branchmerge, force, - acceptremote, followcopies, matcher=None): + acceptremote, followcopies, matcher=None, + mergeforce=False): "Calculate the actions needed to merge mctx into wctx using ancestors" if len(ancestors) == 1: # default actions, diverge, renamedelete = manifestmerge( repo, wctx, mctx, ancestors[0], branchmerge, force, matcher, acceptremote, followcopies) - _checkunknownfiles(repo, wctx, mctx, force, actions) + _checkunknownfiles(repo, wctx, mctx, 
force, actions, mergeforce) else: # only when merge.preferancestor=* - the default repo.ui.note( @@ -897,7 +950,7 @@ actions, diverge1, renamedelete1 = manifestmerge( repo, wctx, mctx, ancestor, branchmerge, force, matcher, acceptremote, followcopies) - _checkunknownfiles(repo, wctx, mctx, force, actions) + _checkunknownfiles(repo, wctx, mctx, force, actions, mergeforce) # Track the shortest set of warning on the theory that bid # merge will correctly incorporate more information @@ -1003,29 +1056,30 @@ wwrite = repo.wwrite ui = repo.ui i = 0 - for f, (flags, backup), msg in actions: - repo.ui.debug(" %s: %s -> g\n" % (f, msg)) - if verbose: - repo.ui.note(_("getting %s\n") % f) + with repo.wvfs.backgroundclosing(ui, expectedcount=len(actions)): + for f, (flags, backup), msg in actions: + repo.ui.debug(" %s: %s -> g\n" % (f, msg)) + if verbose: + repo.ui.note(_("getting %s\n") % f) - if backup: - absf = repo.wjoin(f) - orig = scmutil.origpath(ui, repo, absf) - try: - # TODO Mercurial has always aborted if an untracked directory - # is replaced by a tracked file, or generally with - # file/directory merges. This needs to be sorted out. - if repo.wvfs.isfileorlink(f): - util.rename(absf, orig) - except OSError as e: - if e.errno != errno.ENOENT: - raise + if backup: + absf = repo.wjoin(f) + orig = scmutil.origpath(ui, repo, absf) + try: + # TODO Mercurial has always aborted if an untracked + # directory is replaced by a tracked file, or generally + # with file/directory merges. This needs to be sorted out. 
+ if repo.wvfs.isfileorlink(f): + util.rename(absf, orig) + except OSError as e: + if e.errno != errno.ENOENT: + raise - wwrite(f, fctx(f).data(), flags) - if i == 100: - yield i, f - i = 0 - i += 1 + wwrite(f, fctx(f).data(), flags, backgroundclose=True) + if i == 100: + yield i, f + i = 0 + i += 1 if i > 0: yield i, f @@ -1315,7 +1369,7 @@ repo.dirstate.normal(f) def update(repo, node, branchmerge, force, ancestor=None, - mergeancestor=False, labels=None, matcher=None): + mergeancestor=False, labels=None, matcher=None, mergeforce=False): """ Perform a merge between the working directory and the given node @@ -1328,6 +1382,9 @@ If false, merging with an ancestor (fast-forward) is only allowed between different named branches. This flag is used by rebase extension as a temporary fix and should be avoided in general. + labels = labels to use for base, local and other + mergeforce = whether the merge was run with 'merge --force' (deprecated): if + this is True, then 'force' should be True as well. The table below shows all the behaviors of the update command given the -c and -C or no options, whether the working directory @@ -1463,7 +1520,7 @@ ### calculate phase actionbyfile, diverge, renamedelete = calculateupdates( repo, wc, p2, pas, branchmerge, force, mergeancestor, - followcopies, matcher=matcher) + followcopies, matcher=matcher, mergeforce=mergeforce) # Prompt and create actions. Most of this is in the resolve phase # already, but we can't handle .hgsubstate in filemerge or
--- a/mercurial/parsers.c Sun Mar 13 02:29:11 2016 +0100 +++ b/mercurial/parsers.c Tue Mar 15 14:10:46 2016 -0700 @@ -1446,20 +1446,26 @@ goto bail; } - for (i = 0; i < len; i++) { + for (i = len - 1; i >= 0; i--) { int isfiltered; int parents[2]; - isfiltered = check_filter(filter, i); - if (isfiltered == -1) { - PyErr_SetString(PyExc_TypeError, - "unable to check filter"); - goto bail; - } - - if (isfiltered) { - nothead[i] = 1; - continue; + /* If nothead[i] == 1, it means we've seen an unfiltered child of this + * node already, and therefore this node is not filtered. So we can skip + * the expensive check_filter step. + */ + if (nothead[i] != 1) { + isfiltered = check_filter(filter, i); + if (isfiltered == -1) { + PyErr_SetString(PyExc_TypeError, + "unable to check filter"); + goto bail; + } + + if (isfiltered) { + nothead[i] = 1; + continue; + } } if (index_get_parents(self, i, parents, (int)len - 1) < 0)
--- a/mercurial/patch.py Sun Mar 13 02:29:11 2016 +0100 +++ b/mercurial/patch.py Tue Mar 15 14:10:46 2016 -0700 @@ -31,6 +31,7 @@ diffhelpers, encoding, error, + mail, mdiff, pathutil, scmutil, @@ -210,8 +211,8 @@ try: msg = email.Parser.Parser().parse(fileobj) - subject = msg['Subject'] - data['user'] = msg['From'] + subject = msg['Subject'] and mail.headdecode(msg['Subject']) + data['user'] = msg['From'] and mail.headdecode(msg['From']) if not subject and not data['user']: # Not an email, restore parsed headers if any subject = '\n'.join(': '.join(h) for h in msg.items()) + '\n'
--- a/mercurial/pathutil.py Sun Mar 13 02:29:11 2016 +0100 +++ b/mercurial/pathutil.py Tue Mar 15 14:10:46 2016 -0700 @@ -83,16 +83,17 @@ parts.pop() normparts.pop() prefixes = [] - while parts: - prefix = os.sep.join(parts) - normprefix = os.sep.join(normparts) + # It's important that we check the path parts starting from the root. + # This means we won't accidentaly traverse a symlink into some other + # filesystem (which is potentially expensive to access). + for i in range(len(parts)): + prefix = os.sep.join(parts[:i + 1]) + normprefix = os.sep.join(normparts[:i + 1]) if normprefix in self.auditeddir: - break + continue if self._realfs: self._checkfs(prefix, path) prefixes.append(normprefix) - parts.pop() - normparts.pop() self.audited.add(normpath) # only add prefixes to the cache after checking everything: we don't
--- a/mercurial/peer.py Sun Mar 13 02:29:11 2016 +0100 +++ b/mercurial/peer.py Tue Mar 15 14:10:46 2016 -0700 @@ -39,7 +39,15 @@ return resref return call def submit(self): - pass + raise NotImplementedError() + +class iterbatcher(batcher): + + def submit(self): + raise NotImplementedError() + + def results(self): + raise NotImplementedError() class localbatch(batcher): '''performs the queued calls directly''' @@ -50,6 +58,19 @@ for name, args, opts, resref in self.calls: resref.set(getattr(self.local, name)(*args, **opts)) +class localiterbatcher(iterbatcher): + def __init__(self, local): + super(iterbatcher, self).__init__() + self.local = local + + def submit(self): + # submit for a local iter batcher is a noop + pass + + def results(self): + for name, args, opts, resref in self.calls: + yield getattr(self.local, name)(*args, **opts) + def batchable(f): '''annotation for batchable methods @@ -91,6 +112,14 @@ def batch(self): return localbatch(self) + def iterbatch(self): + """Batch requests but allow iterating over the results. + + This is to allow interleaving responses with things like + progress updates for clients. + """ + return localiterbatcher(self) + def capable(self, name): '''tell whether repo supports named capability. return False if not supported.
--- a/mercurial/phases.py Sun Mar 13 02:29:11 2016 +0100 +++ b/mercurial/phases.py Tue Mar 15 14:10:46 2016 -0700 @@ -435,11 +435,11 @@ continue node = bin(nhex) phase = int(phase) - if phase == 0: + if phase == public: if node != nullid: repo.ui.warn(_('ignoring inconsistent public root' ' from remote: %s\n') % nhex) - elif phase == 1: + elif phase == draft: if node in nodemap: draftroots.append(node) else:
--- a/mercurial/progress.py Sun Mar 13 02:29:11 2016 +0100 +++ b/mercurial/progress.py Tue Mar 15 14:10:46 2016 -0700 @@ -18,7 +18,7 @@ return ' '.join(s for s in args if s) def shouldprint(ui): - return not (ui.quiet or ui.plain()) and ( + return not (ui.quiet or ui.plain('progress')) and ( ui._isatty(sys.stderr) or ui.configbool('progress', 'assume-tty')) def fmtremaining(seconds):
--- a/mercurial/registrar.py Sun Mar 13 02:29:11 2016 +0100 +++ b/mercurial/registrar.py Tue Mar 15 14:10:46 2016 -0700 @@ -11,16 +11,20 @@ util, ) -class funcregistrar(object): +class _funcregistrarbase(object): """Base of decorator to register a fuction for specific purpose - The least derived class can be defined by overriding 'table' and - 'formatdoc', for example:: + This decorator stores decorated functions into own dict 'table'. + + The least derived class can be defined by overriding 'formatdoc', + for example:: - symbols = {} - class keyword(funcregistrar): - table = symbols - formatdoc = ":%s: %s" + class keyword(_funcregistrarbase): + _docformat = ":%s: %s" + + This should be used as below: + + keyword = registrar.keyword() @keyword('bar') def barfunc(*args, **kwargs): @@ -30,99 +34,130 @@ In this case: - - 'barfunc' is registered as 'bar' in 'symbols' - - online help uses ":bar: Explanation of bar keyword" + - 'barfunc' is stored as 'bar' in '_table' of an instance 'keyword' above + - 'barfunc.__doc__' becomes ":bar: Explanation of bar keyword" """ - - def __init__(self, decl): - """'decl' is a name or more descriptive string of a function + def __init__(self, table=None): + if table is None: + self._table = {} + else: + self._table = table - Specification of 'decl' depends on registration purpose. 
- """ - self.decl = decl + def __call__(self, decl, *args, **kwargs): + return lambda func: self._doregister(func, decl, *args, **kwargs) - table = None - - def __call__(self, func): - """Execute actual registration for specified function - """ - name = self.getname() + def _doregister(self, func, decl, *args, **kwargs): + name = self._getname(decl) if func.__doc__ and not util.safehasattr(func, '_origdoc'): doc = func.__doc__.strip() func._origdoc = doc - if callable(self.formatdoc): - func.__doc__ = self.formatdoc(doc) - else: - # convenient shortcut for simple format - func.__doc__ = self.formatdoc % (self.decl, doc) + func.__doc__ = self._formatdoc(decl, doc) - self.table[name] = func - self.extraaction(name, func) + self._table[name] = func + self._extrasetup(name, func, *args, **kwargs) return func - def getname(self): - """Return the name of the registered function from self.decl + def _parsefuncdecl(self, decl): + """Parse function declaration and return the name of function in it + """ + i = decl.find('(') + if i >= 0: + return decl[:i] + else: + return decl + + def _getname(self, decl): + """Return the name of the registered function from decl Derived class should override this, if it allows more descriptive 'decl' string than just a name. """ - return self.decl + return decl - def parsefuncdecl(self): - """Parse function declaration and return the name of function in it - """ - i = self.decl.find('(') - if i > 0: - return self.decl[:i] - else: - return self.decl + _docformat = None - def formatdoc(self, doc): + def _formatdoc(self, decl, doc): """Return formatted document of the registered function for help 'doc' is '__doc__.strip()' of the registered function. + """ + return self._docformat % (decl, doc) - If this is overridden by non-callable object in derived class, - such value is treated as "format string" and used to format - document by 'self.formatdoc % (self.decl, doc)' for convenience. 
- """ - raise NotImplementedError() - - def extraaction(self, name, func): - """Execute exra action for registered function, if needed + def _extrasetup(self, name, func): + """Execute exra setup for registered function, if needed """ pass -class delayregistrar(object): - """Decorator to delay actual registration until uisetup or so +class revsetpredicate(_funcregistrarbase): + """Decorator to register revset predicate + + Usage:: + + revsetpredicate = registrar.revsetpredicate() - For example, the decorator class to delay registration by - 'keyword' funcregistrar can be defined as below:: + @revsetpredicate('mypredicate(arg1, arg2[, arg3])') + def mypredicatefunc(repo, subset, x): + '''Explanation of this revset predicate .... + ''' + pass + + The first string argument is used also in online help. + + Optional argument 'safe' indicates whether a predicate is safe for + DoS attack (False by default). - class extkeyword(delayregistrar): - registrar = keyword + 'revsetpredicate' instance in example above can be used to + decorate multiple functions. + + Decorated functions are registered automatically at loading + extension, if an instance named as 'revsetpredicate' is used for + decorating in extension. + + Otherwise, explicit 'revset.loadpredicate()' is needed. 
""" - def __init__(self): - self._list = [] + _getname = _funcregistrarbase._parsefuncdecl + _docformat = "``%s``\n %s" - registrar = None + def _extrasetup(self, name, func, safe=False): + func._safe = safe + +class filesetpredicate(_funcregistrarbase): + """Decorator to register fileset predicate - def __call__(self, *args, **kwargs): - """Return the decorator to delay actual registration until setup - """ - assert self.registrar is not None - def decorator(func): - # invocation of self.registrar() here can detect argument - # mismatching immediately - self._list.append((func, self.registrar(*args, **kwargs))) - return func - return decorator + Usage:: + + filesetpredicate = registrar.filesetpredicate() + + @filesetpredicate('mypredicate()') + def mypredicatefunc(mctx, x): + '''Explanation of this fileset predicate .... + ''' + pass + + The first string argument is used also in online help. + + Optional argument 'callstatus' indicates whether a predicate + implies 'matchctx.status()' at runtime or not (False, by + default). - def setup(self): - """Execute actual registration - """ - while self._list: - func, decorator = self._list.pop(0) - decorator(func) + Optional argument 'callexisting' indicates whether a predicate + implies 'matchctx.existing()' at runtime or not (False, by + default). + + 'filesetpredicate' instance in example above can be used to + decorate multiple functions. + + Decorated functions are registered automatically at loading + extension, if an instance named as 'filesetpredicate' is used for + decorating in extension. + + Otherwise, explicit 'fileset.loadpredicate()' is needed. + """ + _getname = _funcregistrarbase._parsefuncdecl + _docformat = "``%s``\n %s" + + def _extrasetup(self, name, func, callstatus=False, callexisting=False): + func._callstatus = callstatus + func._callexisting = callexisting
--- a/mercurial/repair.py Sun Mar 13 02:29:11 2016 +0100 +++ b/mercurial/repair.py Tue Mar 15 14:10:46 2016 -0700 @@ -254,7 +254,8 @@ repolen = len(repo) for rev in repo: - ui.progress(_('changeset'), rev, total=repolen) + ui.progress(_('rebuilding'), rev, total=repolen, + unit=_('changesets')) ctx = repo[rev] for f in ctx.files(): @@ -271,7 +272,17 @@ if repo.store._exists(d): newentries.add(d) - ui.progress(_('changeset'), None) + ui.progress(_('rebuilding'), None) + + if 'treemanifest' in repo.requirements: # safe but unnecessary otherwise + for dir in util.dirs(seenfiles): + i = 'meta/%s/00manifest.i' % dir + d = 'meta/%s/00manifest.d' % dir + + if repo.store._exists(i): + newentries.add(i) + if repo.store._exists(d): + newentries.add(d) addcount = len(newentries - oldentries) removecount = len(oldentries - newentries)
--- a/mercurial/repoview.py Sun Mar 13 02:29:11 2016 +0100 +++ b/mercurial/repoview.py Tue Mar 15 14:10:46 2016 -0700 @@ -315,7 +315,10 @@ revs = filterrevs(unfi, self.filtername) cl = self._clcache newkey = (unfilen, unfinode, hash(revs), unfichangelog._delayed) - if cl is not None and newkey != self._clcachekey: + # if cl.index is not unfiindex, unfi.changelog would be + # recreated, and our clcache refers to garbage object + if (cl is not None and + (cl.index is not unfiindex or newkey != self._clcachekey)): cl = None # could have been made None by the previous if if cl is None:
--- a/mercurial/revset.py Sun Mar 13 02:29:11 2016 +0100 +++ b/mercurial/revset.py Tue Mar 15 14:10:46 2016 -0700 @@ -436,6 +436,9 @@ def andset(repo, subset, x, y): return getset(repo, getset(repo, subset, x), y) +def differenceset(repo, subset, x, y): + return getset(repo, subset, x) - getset(repo, subset, y) + def orset(repo, subset, *xs): assert xs if len(xs) == 1: @@ -479,58 +482,7 @@ # functions that just return a lot of changesets (like all) don't count here safesymbols = set() -class predicate(registrar.funcregistrar): - """Decorator to register revset predicate - - Usage:: - - @predicate('mypredicate(arg1, arg2[, arg3])') - def mypredicatefunc(repo, subset, x): - '''Explanation of this revset predicate .... - ''' - pass - - The first string argument of the constructor is used also in - online help. - - Use 'extpredicate' instead of this to register revset predicate in - extensions. - """ - table = symbols - formatdoc = "``%s``\n %s" - getname = registrar.funcregistrar.parsefuncdecl - - def __init__(self, decl, safe=False): - """'safe' indicates whether a predicate is safe for DoS attack - """ - super(predicate, self).__init__(decl) - self.safe = safe - - def extraaction(self, name, func): - if self.safe: - safesymbols.add(name) - -class extpredicate(registrar.delayregistrar): - """Decorator to register revset predicate in extensions - - Usage:: - - revsetpredicate = revset.extpredicate() - - @revsetpredicate('mypredicate(arg1, arg2[, arg3])') - def mypredicatefunc(repo, subset, x): - '''Explanation of this revset predicate .... - ''' - pass - - def uisetup(ui): - revsetpredicate.setup() - - 'revsetpredicate' instance above can be used to decorate multiple - functions, and 'setup()' on it registers all such functions at - once. 
- """ - registrar = predicate +predicate = registrar.revsetpredicate() @predicate('_destupdate') def _destupdate(repo, subset, x): @@ -541,8 +493,10 @@ @predicate('_destmerge') def _destmerge(repo, subset, x): # experimental revset for merge destination - getargs(x, 0, 0, _("_mergedefaultdest takes no arguments")) - return subset & baseset([destutil.destmerge(repo)]) + sourceset = None + if x is not None: + sourceset = getset(repo, fullreposet(repo), x) + return subset & baseset([destutil.destmerge(repo, sourceset=sourceset)]) @predicate('adds(pattern)', safe=True) def adds(repo, subset, x): @@ -624,7 +578,8 @@ # i18n: "author" is a keyword n = encoding.lower(getstring(x, _("author requires a string"))) kind, pattern, matcher = _substringmatcher(n) - return subset.filter(lambda x: matcher(encoding.lower(repo[x].user()))) + return subset.filter(lambda x: matcher(encoding.lower(repo[x].user())), + condrepr=('<user %r>', n)) @predicate('bisect(string)', safe=True) def bisect(repo, subset, x): @@ -710,19 +665,22 @@ # note: falls through to the revspec case if no branch with # this name exists and pattern kind is not specified explicitly if pattern in repo.branchmap(): - return subset.filter(lambda r: matcher(getbi(r)[0])) + return subset.filter(lambda r: matcher(getbi(r)[0]), + condrepr=('<branch %r>', b)) if b.startswith('literal:'): raise error.RepoLookupError(_("branch '%s' does not exist") % pattern) else: - return subset.filter(lambda r: matcher(getbi(r)[0])) + return subset.filter(lambda r: matcher(getbi(r)[0]), + condrepr=('<branch %r>', b)) s = getset(repo, fullreposet(repo), x) b = set() for r in s: b.add(getbi(r)[0]) c = s.__contains__ - return subset.filter(lambda r: c(r) or getbi(r)[0] in b) + return subset.filter(lambda r: c(r) or getbi(r)[0] in b, + condrepr=lambda: '<branch %r>' % sorted(b)) @predicate('bumped()', safe=True) def bumped(repo, subset, x): @@ -777,7 +735,7 @@ if m(f): return True - return subset.filter(matches) + return 
subset.filter(matches, condrepr=('<status[%r] %r>', field, pat)) def _children(repo, narrow, parentset): if not parentset: @@ -809,7 +767,8 @@ """ # i18n: "closed" is a keyword getargs(x, 0, 0, _("closed takes no arguments")) - return subset.filter(lambda r: repo[r].closesbranch()) + return subset.filter(lambda r: repo[r].closesbranch(), + condrepr='<branch closed>') @predicate('contains(pattern)') def contains(repo, subset, x): @@ -836,7 +795,7 @@ return True return False - return subset.filter(matches) + return subset.filter(matches, condrepr=('<contains %r>', pat)) @predicate('converted([id])', safe=True) def converted(repo, subset, x): @@ -858,7 +817,8 @@ source = repo[r].extra().get('convert_revision', None) return source is not None and (rev is None or source.startswith(rev)) - return subset.filter(lambda r: _matchvalue(r)) + return subset.filter(lambda r: _matchvalue(r), + condrepr=('<converted %r>', rev)) @predicate('date(interval)', safe=True) def date(repo, subset, x): @@ -867,7 +827,8 @@ # i18n: "date" is a keyword ds = getstring(x, _("date requires a string")) dm = util.matchdate(ds) - return subset.filter(lambda x: dm(repo[x].date()[0])) + return subset.filter(lambda x: dm(repo[x].date()[0]), + condrepr=('<date %r>', ds)) @predicate('desc(string)', safe=True) def desc(repo, subset, x): @@ -880,7 +841,7 @@ c = repo[x] return ds in encoding.lower(c.description()) - return subset.filter(matches) + return subset.filter(matches, condrepr=('<desc %r>', ds)) def _descendants(repo, subset, x, followfirst=False): roots = getset(repo, fullreposet(repo), x) @@ -955,7 +916,8 @@ r = src src = _getrevsource(repo, r) - return subset.filter(dests.__contains__) + return subset.filter(dests.__contains__, + condrepr=lambda: '<destination %r>' % sorted(dests)) @predicate('divergent()', safe=True) def divergent(repo, subset, x): @@ -1004,7 +966,8 @@ extra = repo[r].extra() return label in extra and (value is None or matcher(extra[label])) - return subset.filter(lambda r: 
_matchvalue(r)) + return subset.filter(lambda r: _matchvalue(r), + condrepr=('<extra[%r] %r>', label, value)) @predicate('filelog(pattern)', safe=True) def filelog(repo, subset, x): @@ -1086,13 +1049,14 @@ matcher = matchmod.match(repo.root, repo.getcwd(), [x], ctx=repo[None], default='path') + files = c.manifest().walk(matcher) + s = set() - for fname in c: - if matcher(fname): - fctx = c[fname] - s = s.union(set(c.rev() for c in fctx.ancestors(followfirst))) - # include the revision responsible for the most recent version - s.add(fctx.introrev()) + for fname in files: + fctx = c[fname] + s = s.union(set(c.rev() for c in fctx.ancestors(followfirst))) + # include the revision responsible for the most recent version + s.add(fctx.introrev()) else: s = _revancestors(repo, baseset([c.rev()]), followfirst) @@ -1141,7 +1105,7 @@ return True return False - return subset.filter(matches) + return subset.filter(matches, condrepr=('<grep %r>', gr.pattern)) @predicate('_matchfiles', safe=True) def _matchfiles(repo, subset, x): @@ -1157,13 +1121,11 @@ # initialized. Use 'd:' to set the default matching mode, default # to 'glob'. At most one 'r:' and 'd:' argument can be passed. 
- # i18n: "_matchfiles" is a keyword - l = getargs(x, 1, -1, _("_matchfiles requires at least one argument")) + l = getargs(x, 1, -1, "_matchfiles requires at least one argument") pats, inc, exc = [], [], [] rev, default = None, None for arg in l: - # i18n: "_matchfiles" is a keyword - s = getstring(arg, _("_matchfiles requires string arguments")) + s = getstring(arg, "_matchfiles requires string arguments") prefix, value = s[:2], s[2:] if prefix == 'p:': pats.append(value) @@ -1173,20 +1135,17 @@ exc.append(value) elif prefix == 'r:': if rev is not None: - # i18n: "_matchfiles" is a keyword - raise error.ParseError(_('_matchfiles expected at most one ' - 'revision')) + raise error.ParseError('_matchfiles expected at most one ' + 'revision') if value != '': # empty means working directory; leave rev as None rev = value elif prefix == 'd:': if default is not None: - # i18n: "_matchfiles" is a keyword - raise error.ParseError(_('_matchfiles expected at most one ' - 'default mode')) + raise error.ParseError('_matchfiles expected at most one ' + 'default mode') default = value else: - # i18n: "_matchfiles" is a keyword - raise error.ParseError(_('invalid _matchfiles prefix: %s') % prefix) + raise error.ParseError('invalid _matchfiles prefix: %s' % prefix) if not default: default = 'glob' @@ -1207,7 +1166,10 @@ return True return False - return subset.filter(matches) + return subset.filter(matches, + condrepr=('<matchfiles patterns=%r, include=%r ' + 'exclude=%r, default=%r, rev=%r>', + pats, inc, exc, default, rev)) @predicate('file(pattern)', safe=True) def hasfile(repo, subset, x): @@ -1268,7 +1230,7 @@ return any(kw in encoding.lower(t) for t in c.files() + [c.user(), c.description()]) - return subset.filter(matches) + return subset.filter(matches, condrepr=('<keyword %r>', kw)) @predicate('limit(set[, n[, offset]])', safe=True) def limit(repo, subset, x): @@ -1304,7 +1266,8 @@ break elif y in subset: result.append(y) - return baseset(result) + return 
baseset(result, datarepr=('<limit n=%d, offset=%d, %r, %r>', + lim, ofs, subset, os)) @predicate('last(set, [n])', safe=True) def last(repo, subset, x): @@ -1330,7 +1293,7 @@ break elif y in subset: result.append(y) - return baseset(result) + return baseset(result, datarepr=('<last n=%d, %r, %r>', lim, subset, os)) @predicate('max(set)', safe=True) def maxrev(repo, subset, x): @@ -1340,12 +1303,12 @@ try: m = os.max() if m in subset: - return baseset([m]) + return baseset([m], datarepr=('<max %r, %r>', subset, os)) except ValueError: # os.max() throws a ValueError when the collection is empty. # Same as python's max(). pass - return baseset() + return baseset(datarepr=('<max %r, %r>', subset, os)) @predicate('merge()', safe=True) def merge(repo, subset, x): @@ -1354,7 +1317,8 @@ # i18n: "merge" is a keyword getargs(x, 0, 0, _("merge takes no arguments")) cl = repo.changelog - return subset.filter(lambda r: cl.parentrevs(r)[1] != -1) + return subset.filter(lambda r: cl.parentrevs(r)[1] != -1, + condrepr='<merge>') @predicate('branchpoint()', safe=True) def branchpoint(repo, subset, x): @@ -1373,7 +1337,8 @@ for p in cl.parentrevs(r): if p >= baserev: parentscount[p - baserev] += 1 - return subset.filter(lambda r: parentscount[r - baserev] > 1) + return subset.filter(lambda r: parentscount[r - baserev] > 1, + condrepr='<branchpoint>') @predicate('min(set)', safe=True) def minrev(repo, subset, x): @@ -1383,12 +1348,12 @@ try: m = os.min() if m in subset: - return baseset([m]) + return baseset([m], datarepr=('<min %r, %r>', subset, os)) except ValueError: # os.min() throws a ValueError when the collection is empty. # Same as python's min(). 
pass - return baseset() + return baseset(datarepr=('<min %r, %r>', subset, os)) @predicate('modifies(pattern)', safe=True) def modifies(repo, subset, x): @@ -1630,7 +1595,8 @@ else: phase = repo._phasecache.phase condition = lambda r: phase(repo, r) == target - return subset.filter(condition, cache=False) + return subset.filter(condition, condrepr=('<phase %r>', target), + cache=False) @predicate('draft()', safe=True) def draft(repo, subset, x): @@ -1703,7 +1669,8 @@ phase = repo._phasecache.phase target = phases.public condition = lambda r: phase(repo, r) != target - return subset.filter(condition, cache=False) + return subset.filter(condition, condrepr=('<phase %r>', target), + cache=False) @predicate('public()', safe=True) def public(repo, subset, x): @@ -1713,7 +1680,8 @@ phase = repo._phasecache.phase target = phases.public condition = lambda r: phase(repo, r) == target - return subset.filter(condition, cache=False) + return subset.filter(condition, condrepr=('<phase %r>', target), + cache=False) @predicate('remote([id [,path]])', safe=True) def remote(repo, subset, x): @@ -1888,7 +1856,7 @@ return True return False - return subset.filter(matches) + return subset.filter(matches, condrepr=('<matching%r %r>', fields, revs)) @predicate('reverse(set)', safe=True) def reverse(repo, subset, x): @@ -1909,7 +1877,7 @@ if 0 <= p and p in s: return False return True - return subset & s.filter(filter) + return subset & s.filter(filter, condrepr='<roots>') @predicate('sort(set[, [-]key...])', safe=True) def sort(repo, subset, x): @@ -1981,6 +1949,7 @@ """ # i18n: "subrepo" is a keyword args = getargs(x, 0, 1, _('subrepo takes at most one argument')) + pat = None if len(args) != 0: pat = getstring(args[0], _("subrepo requires a pattern")) @@ -1996,7 +1965,7 @@ c = repo[x] s = repo.status(c.p1().node(), c.node(), match=m) - if len(args) == 0: + if pat is None: return s.added or s.modified or s.removed if s.added: @@ -2015,7 +1984,7 @@ return False - return 
subset.filter(matches) + return subset.filter(matches, condrepr=('<subrepo %r>', pat)) def _substringmatcher(pattern): kind, pattern, matcher = util.stringmatcher(pattern) @@ -2144,6 +2113,7 @@ "and": andset, "or": orset, "not": notset, + "difference": differenceset, "list": listset, "keyvalue": keyvaluepair, "func": func, @@ -2204,6 +2174,9 @@ if isonly(tb, ta): return w, ('func', ('symbol', 'only'), ('list', tb[2], ta[1][2])) + if tb is not None and tb[0] == 'not': + return wa, ('difference', ta, tb[1]) + if wa > wb: return w, (op, tb, ta) return w, (op, ta, tb) @@ -2753,6 +2726,29 @@ funcs.add(tree[1][1]) return funcs +def _formatsetrepr(r): + """Format an optional printable representation of a set + + ======== ================================= + type(r) example + ======== ================================= + tuple ('<not %r>', other) + str '<branch closed>' + callable lambda: '<branch %r>' % sorted(b) + object other + ======== ================================= + """ + if r is None: + return '' + elif isinstance(r, tuple): + return r[0] % r[1:] + elif isinstance(r, str): + return r + elif callable(r): + return r() + else: + return repr(r) + class abstractsmartset(object): def __nonzero__(self): @@ -2833,7 +2829,7 @@ This is part of the mandatory API for smartset.""" if isinstance(other, fullreposet): return self - return self.filter(other.__contains__, cache=False) + return self.filter(other.__contains__, condrepr=other, cache=False) def __add__(self, other): """Returns a new object with the union of the two collections. @@ -2846,19 +2842,21 @@ This is part of the mandatory API for smartset.""" c = other.__contains__ - return self.filter(lambda r: not c(r), cache=False) - - def filter(self, condition, cache=True): + return self.filter(lambda r: not c(r), condrepr=('<not %r>', other), + cache=False) + + def filter(self, condition, condrepr=None, cache=True): """Returns this smartset filtered by condition as a new smartset. 
`condition` is a callable which takes a revision number and returns a - boolean. + boolean. Optional `condrepr` provides a printable representation of + the given `condition`. This is part of the mandatory API for smartset.""" # builtin cannot be cached. but do not needs to if cache and util.safehasattr(condition, 'func_code'): condition = util.cachefunc(condition) - return filteredset(self, condition) + return filteredset(self, condition, condrepr) class baseset(abstractsmartset): """Basic data structure that represents a revset and contains the basic @@ -2866,12 +2864,17 @@ Every method in this class should be implemented by any smartset class. """ - def __init__(self, data=()): + def __init__(self, data=(), datarepr=None): + """ + datarepr: a tuple of (format, obj, ...), a function or an object that + provides a printable representation of the given data. + """ if not isinstance(data, list): if isinstance(data, set): self._set = data data = list(data) self._list = data + self._datarepr = datarepr self._ascending = None @util.propertycache @@ -2955,20 +2958,26 @@ def __repr__(self): d = {None: '', False: '-', True: '+'}[self._ascending] - return '<%s%s %r>' % (type(self).__name__, d, self._list) + s = _formatsetrepr(self._datarepr) + if not s: + s = repr(self._list) + return '<%s%s %s>' % (type(self).__name__, d, s) class filteredset(abstractsmartset): """Duck type for baseset class which iterates lazily over the revisions in the subset and contains a function which tests for membership in the revset """ - def __init__(self, subset, condition=lambda x: True): + def __init__(self, subset, condition=lambda x: True, condrepr=None): """ condition: a function that decide whether a revision in the subset belongs to the revset or not. + condrepr: a tuple of (format, obj, ...), a function or an object that + provides a printable representation of the given condition. 
""" self._subset = subset self._condition = condition + self._condrepr = condrepr def __contains__(self, x): return x in self._subset and self._condition(x) @@ -3048,7 +3057,11 @@ return x def __repr__(self): - return '<%s %r>' % (type(self).__name__, self._subset) + xs = [repr(self._subset)] + s = _formatsetrepr(self._condrepr) + if s: + xs.append(s) + return '<%s %s>' % (type(self).__name__, ', '.join(xs)) def _iterordered(ascending, iter1, iter2): """produce an ordered iteration from two iterators with the same order @@ -3622,5 +3635,16 @@ p = q return '\n'.join(' ' * l + s for l, s in lines) +def loadpredicate(ui, extname, registrarobj): + """Load revset predicates from specified registrarobj + """ + for name, func in registrarobj._table.iteritems(): + symbols[name] = func + if func._safe: + safesymbols.add(name) + +# load built-in predicates explicitly to setup safesymbols +loadpredicate(None, None, predicate) + # tell hggettext to extract docstrings from these functions: i18nfunctions = symbols.values()
--- a/mercurial/scmutil.py Sun Mar 13 02:29:11 2016 +0100 +++ b/mercurial/scmutil.py Tue Mar 15 14:10:46 2016 -0700 @@ -276,8 +276,8 @@ with self(path, mode=mode) as fp: return fp.readlines() - def write(self, path, data): - with self(path, 'wb') as fp: + def write(self, path, data, backgroundclose=False): + with self(path, 'wb', backgroundclose=backgroundclose) as fp: return fp.write(data) def writelines(self, path, data, mode='wb', notindexed=False): @@ -913,7 +913,7 @@ if opts.get('subrepos') or matchessubrepo(m, subpath): sub = wctx.sub(subpath) try: - submatch = matchmod.narrowmatcher(subpath, m) + submatch = matchmod.subdirmatcher(subpath, m) if sub.addremove(submatch, prefix, opts, dry_run, similarity): ret = 1 except error.LookupError:
--- a/mercurial/setdiscovery.py Sun Mar 13 02:29:11 2016 +0100 +++ b/mercurial/setdiscovery.py Tue Mar 15 14:10:46 2016 -0700 @@ -147,12 +147,11 @@ sample = _limitsample(ownheads, initialsamplesize) # indices between sample and externalized version must match sample = list(sample) - batch = remote.batch() - srvheadhashesref = batch.heads() - yesnoref = batch.known(dag.externalizeall(sample)) + batch = remote.iterbatch() + batch.heads() + batch.known(dag.externalizeall(sample)) batch.submit() - srvheadhashes = srvheadhashesref.value - yesno = yesnoref.value + srvheadhashes, yesno = batch.results() if cl.tip() == nullid: if srvheadhashes != [nullid]:
--- a/mercurial/similar.py Sun Mar 13 02:29:11 2016 +0100 +++ b/mercurial/similar.py Tue Mar 15 14:10:46 2016 -0700 @@ -25,14 +25,15 @@ # Get hashes of removed files. hashes = {} for i, fctx in enumerate(removed): - repo.ui.progress(_('searching for exact renames'), i, total=numfiles) + repo.ui.progress(_('searching for exact renames'), i, total=numfiles, + unit=_('files')) h = util.sha1(fctx.data()).digest() hashes[h] = fctx # For each added file, see if it corresponds to a removed file. for i, fctx in enumerate(added): repo.ui.progress(_('searching for exact renames'), i + len(removed), - total=numfiles) + total=numfiles, unit=_('files')) h = util.sha1(fctx.data()).digest() if h in hashes: yield (hashes[h], fctx) @@ -49,7 +50,7 @@ copies = {} for i, r in enumerate(removed): repo.ui.progress(_('searching for similar files'), i, - total=len(removed)) + total=len(removed), unit=_('files')) # lazily load text @util.cachefunc
--- a/mercurial/simplemerge.py Sun Mar 13 02:29:11 2016 +0100 +++ b/mercurial/simplemerge.py Tue Mar 15 14:10:46 2016 -0700 @@ -92,7 +92,8 @@ mid_marker='=======', end_marker='>>>>>>>', base_marker=None, - localorother=None): + localorother=None, + minimize=False): """Return merge in cvs-like form. """ self.conflicts = False @@ -109,6 +110,8 @@ if name_base and base_marker: base_marker = base_marker + ' ' + name_base merge_regions = self.merge_regions() + if minimize: + merge_regions = self.minimize(merge_regions) for t in merge_regions: what = t[0] if what == 'unchanged': @@ -195,6 +198,9 @@ 'a', start, end Non-clashing insertion from a[start:end] + 'conflict', zstart, zend, astart, aend, bstart, bend + Conflict between a and b, with z as common ancestor + Method is as follows: The two sequences align only on regions which match the base @@ -266,6 +272,45 @@ ia = aend ib = bend + def minimize(self, merge_regions): + """Trim conflict regions of lines where A and B sides match. + + Lines where both A and B have made the same changes at the begining + or the end of each merge region are eliminated from the conflict + region and are instead considered the same. + """ + for region in merge_regions: + if region[0] != "conflict": + yield region + continue + issue, z1, z2, a1, a2, b1, b2 = region + alen = a2 - a1 + blen = b2 - b1 + + # find matches at the front + ii = 0 + while ii < alen and ii < blen and \ + self.a[a1 + ii] == self.b[b1 + ii]: + ii += 1 + startmatches = ii + + # find matches at the end + ii = 0 + while ii < alen and ii < blen and \ + self.a[a2 - ii - 1] == self.b[b2 - ii - 1]: + ii += 1 + endmatches = ii + + if startmatches > 0: + yield 'same', a1, a1 + startmatches + + yield ('conflict', z1, z2, + a1 + startmatches, a2 - endmatches, + b1 + startmatches, b2 - endmatches) + + if endmatches > 0: + yield 'same', a2 - endmatches, a2 + def find_sync_regions(self): """Return a list of sync regions, where both descendants match the base. 
@@ -399,7 +444,10 @@ out = sys.stdout m3 = Merge3Text(basetext, localtext, othertext) - extrakwargs = {"localorother": opts.get("localorother", None)} + extrakwargs = { + "localorother": opts.get("localorother", None), + 'minimize': True, + } if mode == 'union': extrakwargs['start_marker'] = None extrakwargs['mid_marker'] = None @@ -407,6 +455,7 @@ elif name_base is not None: extrakwargs['base_marker'] = '|||||||' extrakwargs['name_base'] = name_base + extrakwargs['minimize'] = False for line in m3.merge_lines(name_a=name_a, name_b=name_b, **extrakwargs): out.write(line)
--- a/mercurial/sshpeer.py Sun Mar 13 02:29:11 2016 +0100 +++ b/mercurial/sshpeer.py Tue Mar 15 14:10:46 2016 -0700 @@ -231,6 +231,31 @@ __del__ = cleanup + def _submitbatch(self, req): + cmds = [] + for op, argsdict in req: + args = ','.join('%s=%s' % (wireproto.escapearg(k), + wireproto.escapearg(v)) + for k, v in argsdict.iteritems()) + cmds.append('%s %s' % (op, args)) + rsp = self._callstream("batch", cmds=';'.join(cmds)) + available = self._getamount() + # TODO this response parsing is probably suboptimal for large + # batches with large responses. + toread = min(available, 1024) + work = rsp.read(toread) + available -= toread + chunk = work + while chunk: + while ';' in work: + one, work = work.split(';', 1) + yield wireproto.unescapearg(one) + toread = min(available, 1024) + chunk = rsp.read(toread) + available -= toread + work += chunk + yield wireproto.unescapearg(work) + def _callstream(self, cmd, **args): self.ui.debug("sending %s command\n" % cmd) self.pipeo.write("%s\n" % cmd) @@ -291,7 +316,7 @@ self._send("", flush=True) return self.pipei - def _recv(self): + def _getamount(self): l = self.pipei.readline() if l == '\n': self.readerr() @@ -299,10 +324,12 @@ self._abort(error.OutOfBandError(hint=msg)) self.readerr() try: - l = int(l) + return int(l) except ValueError: self._abort(error.ResponseError(_("unexpected response:"), l)) - return self.pipei.read(l) + + def _recv(self): + return self.pipei.read(self._getamount()) def _send(self, data, flush=False): self.pipeo.write("%d\n" % len(data))
--- a/mercurial/sslutil.py Sun Mar 13 02:29:11 2016 +0100 +++ b/mercurial/sslutil.py Tue Mar 15 14:10:46 2016 -0700 @@ -162,7 +162,7 @@ def __call__(self, sock, strict=False): host = self.host cacerts = self.ui.config('web', 'cacerts') - hostfingerprint = self.ui.config('hostfingerprints', host) + hostfingerprints = self.ui.configlist('hostfingerprints', host) if not sock.cipher(): # work around http://bugs.python.org/issue13721 raise error.Abort(_('%s ssl connection error') % host) @@ -178,9 +178,14 @@ peerfingerprint = util.sha1(peercert).hexdigest() nicefingerprint = ":".join([peerfingerprint[x:x + 2] for x in xrange(0, len(peerfingerprint), 2)]) - if hostfingerprint: - if peerfingerprint.lower() != \ - hostfingerprint.replace(':', '').lower(): + if hostfingerprints: + fingerprintmatch = False + for hostfingerprint in hostfingerprints: + if peerfingerprint.lower() == \ + hostfingerprint.replace(':', '').lower(): + fingerprintmatch = True + break + if not fingerprintmatch: raise error.Abort(_('certificate for %s has unexpected ' 'fingerprint %s') % (host, nicefingerprint), hint=_('check hostfingerprint configuration'))
--- a/mercurial/store.py Sun Mar 13 02:29:11 2016 +0100 +++ b/mercurial/store.py Tue Mar 15 14:10:46 2016 -0700 @@ -290,7 +290,7 @@ mode = None return mode -_data = ('data 00manifest.d 00manifest.i 00changelog.d 00changelog.i' +_data = ('data meta 00manifest.d 00manifest.i 00changelog.d 00changelog.i' ' phaseroots obsstore') class basicstore(object): @@ -330,7 +330,7 @@ return l def datafiles(self): - return self._walk('data', True) + return self._walk('data', True) + self._walk('meta', True) def topfiles(self): # yield manifest before changelog @@ -378,7 +378,7 @@ self.opener = self.vfs def datafiles(self): - for a, b, size in self._walk('data', True): + for a, b, size in super(encodedstore, self).datafiles(): try: a = decodefilename(a) except KeyError: @@ -460,7 +460,8 @@ self.encode = encode def __call__(self, path, mode='r', *args, **kw): - if mode not in ('r', 'rb') and path.startswith('data/'): + if mode not in ('r', 'rb') and (path.startswith('data/') or + path.startswith('meta/')): self.fncache.add(path) return self.vfs(self.encode(path), mode, *args, **kw) @@ -504,7 +505,7 @@ raise def copylist(self): - d = ('data dh fncache phaseroots obsstore' + d = ('data meta dh fncache phaseroots obsstore' ' 00manifest.d 00manifest.i 00changelog.d 00changelog.i') return (['requires', '00changelog.i'] + ['store/' + f for f in d.split()])
--- a/mercurial/streamclone.py Sun Mar 13 02:29:11 2016 +0100 +++ b/mercurial/streamclone.py Tue Mar 15 14:10:46 2016 -0700 @@ -271,11 +271,12 @@ assert compression == 'UN' seen = 0 - repo.ui.progress(_('bundle'), 0, total=bytecount) + repo.ui.progress(_('bundle'), 0, total=bytecount, unit=_('bytes')) for chunk in it: seen += len(chunk) - repo.ui.progress(_('bundle'), seen, total=bytecount) + repo.ui.progress(_('bundle'), seen, total=bytecount, + unit=_('bytes')) yield chunk repo.ui.progress(_('bundle'), None) @@ -295,7 +296,7 @@ repo.ui.status(_('%d files to transfer, %s of data\n') % (filecount, util.bytecount(bytecount))) handled_bytes = 0 - repo.ui.progress(_('clone'), 0, total=bytecount) + repo.ui.progress(_('clone'), 0, total=bytecount, unit=_('bytes')) start = time.time() with repo.transaction('clone'): @@ -318,7 +319,7 @@ for chunk in util.filechunkiter(fp, limit=size): handled_bytes += len(chunk) repo.ui.progress(_('clone'), handled_bytes, - total=bytecount) + total=bytecount, unit=_('bytes')) ofp.write(chunk) # Writing straight to files circumvented the inmemory caches
--- a/mercurial/subrepo.py Sun Mar 13 02:29:11 2016 +0100 +++ b/mercurial/subrepo.py Tue Mar 15 14:10:46 2016 -0700 @@ -774,7 +774,7 @@ ctx = self._repo[rev] for subpath in ctx.substate: s = subrepo(ctx, subpath, True) - submatch = matchmod.narrowmatcher(subpath, match) + submatch = matchmod.subdirmatcher(subpath, match) total += s.archive(archiver, prefix + self._path + '/', submatch) return total
--- a/mercurial/templatefilters.py Sun Mar 13 02:29:11 2016 +0100 +++ b/mercurial/templatefilters.py Tue Mar 15 14:10:46 2016 -0700 @@ -197,15 +197,8 @@ return {None: 'null', False: 'false', True: 'true'}[obj] elif isinstance(obj, int) or isinstance(obj, float): return str(obj) - elif isinstance(obj, encoding.localstr): - u = encoding.fromlocal(obj).decode('utf-8') # can round-trip - return '"%s"' % jsonescape(u) elif isinstance(obj, str): - # no encoding.fromlocal() because it may abort if obj can't be decoded - u = unicode(obj, encoding.encoding, 'replace') - return '"%s"' % jsonescape(u) - elif isinstance(obj, unicode): - return '"%s"' % jsonescape(obj) + return '"%s"' % encoding.jsonescape(obj, paranoid=True) elif util.safehasattr(obj, 'keys'): out = [] for k, v in sorted(obj.iteritems()): @@ -222,23 +215,6 @@ else: raise TypeError('cannot encode type %s' % obj.__class__.__name__) -def _uescape(c): - if 0x20 <= ord(c) < 0x80: - return c - else: - return '\\u%04x' % ord(c) - -_escapes = [ - ('\\', '\\\\'), ('"', '\\"'), ('\t', '\\t'), ('\n', '\\n'), - ('\r', '\\r'), ('\f', '\\f'), ('\b', '\\b'), - ('<', '\\u003c'), ('>', '\\u003e'), ('\0', '\\u0000') -] - -def jsonescape(s): - for k, v in _escapes: - s = s.replace(k, v) - return ''.join(_uescape(c) for c in s) - def lower(text): """:lower: Any text. Converts the text to lowercase.""" return encoding.lower(text) @@ -377,6 +353,10 @@ """:emailuser: Any text. Returns the user portion of an email address.""" return util.emailuser(text) +def utf8(text): + """:utf8: Any text. Converts from the local character encoding to UTF-8.""" + return encoding.fromlocal(text) + def xmlescape(text): text = (text .replace('&', '&') @@ -402,7 +382,6 @@ "isodate": isodate, "isodatesec": isodatesec, "json": json, - "jsonescape": jsonescape, "lower": lower, "nonempty": nonempty, "obfuscate": obfuscate, @@ -423,6 +402,7 @@ "urlescape": urlescape, "user": userfilter, "emailuser": emailuser, + "utf8": utf8, "xmlescape": xmlescape, }
--- a/mercurial/templatekw.py Sun Mar 13 02:29:11 2016 +0100 +++ b/mercurial/templatekw.py Tue Mar 15 14:10:46 2016 -0700 @@ -9,6 +9,7 @@ from .node import hex, nullid from . import ( + encoding, error, hbisect, patch, @@ -257,7 +258,12 @@ def showdescription(repo, ctx, templ, **args): """:desc: String. The text of the changeset description.""" - return ctx.description().strip() + s = ctx.description() + if isinstance(s, encoding.localstr): + # try hard to preserve utf-8 bytes + return encoding.tolocal(encoding.fromlocal(s).strip()) + else: + return s.strip() def showdiffstat(repo, ctx, templ, **args): """:diffstat: String. Statistics of changes with the following format: @@ -468,11 +474,14 @@ revision) nothing is shown.""" repo = args['repo'] ctx = args['ctx'] + pctxs = scmutil.meaningfulparents(repo, ctx) + prevs = [str(p.rev()) for p in pctxs] # ifcontains() needs a list of str parents = [[('rev', p.rev()), ('node', p.hex()), ('phase', p.phasestr())] - for p in scmutil.meaningfulparents(repo, ctx)] - return showlist('parent', parents, **args) + for p in pctxs] + f = _showlist('parent', parents, **args) + return _hybrid(f, prevs, lambda x: {'ctx': repo[int(x)], 'revcache': {}}) def showphase(repo, ctx, templ, **args): """:phase: String. The changeset phase name.""" @@ -490,9 +499,10 @@ """helper to generate a list of revisions in which a mapped template will be evaluated""" repo = args['ctx'].repo() + revs = [str(r) for r in revs] # ifcontains() needs a list of str f = _showlist(name, revs, **args) return _hybrid(f, revs, - lambda x: {name: x, 'ctx': repo[x], 'revcache': {}}) + lambda x: {name: x, 'ctx': repo[int(x)], 'revcache': {}}) def showsubrepos(**args): """:subrepos: List of strings. Updated subrepositories in the changeset."""
--- a/mercurial/templater.py Sun Mar 13 02:29:11 2016 +0100 +++ b/mercurial/templater.py Tue Mar 15 14:10:46 2016 -0700 @@ -220,6 +220,27 @@ thing = stringify(thing) return thing +def evalinteger(context, mapping, arg, err): + v = evalfuncarg(context, mapping, arg) + try: + return int(v) + except (TypeError, ValueError): + raise error.ParseError(err) + +def evalstring(context, mapping, arg): + func, data = arg + return stringify(func(context, mapping, data)) + +def evalstringliteral(context, mapping, arg): + """Evaluate given argument as string template, but returns symbol name + if it is unknown""" + func, data = arg + if func is runsymbol: + thing = func(context, mapping, data, default=data) + else: + thing = func(context, mapping, data) + return stringify(thing) + def runinteger(context, mapping, data): return int(data) @@ -234,7 +255,7 @@ def _runrecursivesymbol(context, mapping, key): raise error.Abort(_("recursive reference '%s' in template") % key) -def runsymbol(context, mapping, key): +def runsymbol(context, mapping, key, default=''): v = mapping.get(key) if v is None: v = context._defaults.get(key) @@ -246,7 +267,7 @@ try: v = context.process(key, safemapping) except TemplateNotFound: - v = '' + v = default if callable(v): return v(**mapping) return v @@ -294,11 +315,18 @@ func, data, ctmpl = data d = func(context, mapping, data) if util.safehasattr(d, 'itermaps'): - d = d.itermaps() + diter = d.itermaps() + else: + try: + diter = iter(d) + except TypeError: + if func is runsymbol: + raise error.ParseError(_("keyword '%s' is not iterable") % data) + else: + raise error.ParseError(_("%r is not iterable") % d) - lm = mapping.copy() - - for i in d: + for i in diter: + lm = mapping.copy() if isinstance(i, dict): lm.update(i) lm['originalnode'] = mapping.get('node') @@ -330,10 +358,10 @@ # i18n: "date" is a keyword raise error.ParseError(_("date expects one or two arguments")) - date = args[0][0](context, mapping, args[0][1]) + date = evalfuncarg(context, 
mapping, args[0]) fmt = None if len(args) == 2: - fmt = stringify(args[1][0](context, mapping, args[1][1])) + fmt = evalstring(context, mapping, args[1]) try: if fmt is None: return util.datestr(date) @@ -352,7 +380,7 @@ def getpatterns(i): if i < len(args): - s = stringify(args[i][0](context, mapping, args[i][1])).strip() + s = evalstring(context, mapping, args[i]).strip() if s: return [s] return [] @@ -369,19 +397,17 @@ # i18n: "fill" is a keyword raise error.ParseError(_("fill expects one to four arguments")) - text = stringify(args[0][0](context, mapping, args[0][1])) + text = evalstring(context, mapping, args[0]) width = 76 initindent = '' hangindent = '' if 2 <= len(args) <= 4: + width = evalinteger(context, mapping, args[1], + # i18n: "fill" is a keyword + _("fill expects an integer width")) try: - width = int(stringify(args[1][0](context, mapping, args[1][1]))) - except ValueError: - # i18n: "fill" is a keyword - raise error.ParseError(_("fill expects an integer width")) - try: - initindent = stringify(args[2][0](context, mapping, args[2][1])) - hangindent = stringify(args[3][0](context, mapping, args[3][1])) + initindent = evalstring(context, mapping, args[2]) + hangindent = evalstring(context, mapping, args[3]) except IndexError: pass @@ -394,14 +420,16 @@ # i18n: "pad" is a keyword raise error.ParseError(_("pad() expects two to four arguments")) - width = int(args[1][1]) + width = evalinteger(context, mapping, args[1], + # i18n: "pad" is a keyword + _("pad() expects an integer width")) - text = stringify(args[0][0](context, mapping, args[0][1])) + text = evalstring(context, mapping, args[0]) right = False fillchar = ' ' if len(args) > 2: - fillchar = stringify(args[2][0](context, mapping, args[2][1])) + fillchar = evalstring(context, mapping, args[2]) if len(args) > 3: right = util.parsebool(args[3][1]) @@ -419,11 +447,11 @@ # i18n: "indent" is a keyword raise error.ParseError(_("indent() expects two or three arguments")) - text = 
stringify(args[0][0](context, mapping, args[0][1])) - indent = stringify(args[1][0](context, mapping, args[1][1])) + text = evalstring(context, mapping, args[0]) + indent = evalstring(context, mapping, args[1]) if len(args) == 3: - firstline = stringify(args[2][0](context, mapping, args[2][1])) + firstline = evalstring(context, mapping, args[2]) else: firstline = indent @@ -438,12 +466,12 @@ # i18n: "get" is a keyword raise error.ParseError(_("get() expects two arguments")) - dictarg = args[0][0](context, mapping, args[0][1]) + dictarg = evalfuncarg(context, mapping, args[0]) if not util.safehasattr(dictarg, 'get'): # i18n: "get" is a keyword raise error.ParseError(_("get() expects a dict as first argument")) - key = args[1][0](context, mapping, args[1][1]) + key = evalfuncarg(context, mapping, args[1]) return dictarg.get(key) def if_(context, mapping, args): @@ -453,7 +481,7 @@ # i18n: "if" is a keyword raise error.ParseError(_("if expects two or three arguments")) - test = stringify(args[0][0](context, mapping, args[0][1])) + test = evalstring(context, mapping, args[0]) if test: yield args[1][0](context, mapping, args[1][1]) elif len(args) == 3: @@ -466,8 +494,8 @@ # i18n: "ifcontains" is a keyword raise error.ParseError(_("ifcontains expects three or four arguments")) - item = stringify(args[0][0](context, mapping, args[0][1])) - items = args[1][0](context, mapping, args[1][1]) + item = evalstring(context, mapping, args[0]) + items = evalfuncarg(context, mapping, args[1]) if item in items: yield args[2][0](context, mapping, args[2][1]) @@ -481,8 +509,8 @@ # i18n: "ifeq" is a keyword raise error.ParseError(_("ifeq expects three or four arguments")) - test = stringify(args[0][0](context, mapping, args[0][1])) - match = stringify(args[1][0](context, mapping, args[1][1])) + test = evalstring(context, mapping, args[0]) + match = evalstring(context, mapping, args[1]) if test == match: yield args[2][0](context, mapping, args[2][1]) elif len(args) == 4: @@ -501,7 +529,7 
@@ joiner = " " if len(args) > 1: - joiner = stringify(args[1][0](context, mapping, args[1][1])) + joiner = evalstring(context, mapping, args[1]) first = True for x in joinset: @@ -519,8 +547,13 @@ # i18n: "label" is a keyword raise error.ParseError(_("label expects two arguments")) - # ignore args[0] (the label string) since this is supposed to be a a no-op - yield args[1][0](context, mapping, args[1][1]) + ui = mapping['ui'] + thing = evalstring(context, mapping, args[1]) + # preserve unknown symbol as literal so effects like 'red', 'bold', + # etc. don't need to be quoted + label = evalstringliteral(context, mapping, args[0]) + + return ui.label(thing, label) def latesttag(context, mapping, args): """:latesttag([pattern]): The global tags matching the given pattern on the @@ -531,7 +564,7 @@ pattern = None if len(args) == 1: - pattern = stringify(args[0][0](context, mapping, args[0][1])) + pattern = evalstring(context, mapping, args[0]) return templatekw.showlatesttags(pattern, **mapping) @@ -570,7 +603,7 @@ # i18n: "revset" is a keyword raise error.ParseError(_("revset expects one or more arguments")) - raw = stringify(args[0][0](context, mapping, args[0][1])) + raw = evalstring(context, mapping, args[0]) ctx = mapping['ctx'] repo = ctx.repo() @@ -579,16 +612,16 @@ return m(repo) if len(args) > 1: - formatargs = list([a[0](context, mapping, a[1]) for a in args[1:]]) + formatargs = [evalfuncarg(context, mapping, a) for a in args[1:]] revs = query(revsetmod.formatspec(raw, *formatargs)) - revs = list([str(r) for r in revs]) + revs = list(revs) else: revsetcache = mapping['cache'].setdefault("revsetcache", {}) if raw in revsetcache: revs = revsetcache[raw] else: revs = query(raw) - revs = list([str(r) for r in revs]) + revs = list(revs) revsetcache[raw] = revs return templatekw.showrevslist("revision", revs, **mapping) @@ -599,8 +632,8 @@ # i18n: "rstdoc" is a keyword raise error.ParseError(_("rstdoc expects two arguments")) - text = stringify(args[0][0](context, 
mapping, args[0][1])) - style = stringify(args[1][0](context, mapping, args[1][1])) + text = evalstring(context, mapping, args[0]) + style = evalstring(context, mapping, args[1]) return minirst.format(text, style=style, keep=['verbose']) @@ -611,11 +644,13 @@ # i18n: "shortest" is a keyword raise error.ParseError(_("shortest() expects one or two arguments")) - node = stringify(args[0][0](context, mapping, args[0][1])) + node = evalstring(context, mapping, args[0]) minlength = 4 if len(args) > 1: - minlength = int(args[1][1]) + minlength = evalinteger(context, mapping, args[1], + # i18n: "shortest" is a keyword + _("shortest() expects an integer minlength")) cl = mapping['ctx']._repo.changelog def isvalid(test): @@ -663,9 +698,9 @@ # i18n: "strip" is a keyword raise error.ParseError(_("strip expects one or two arguments")) - text = stringify(args[0][0](context, mapping, args[0][1])) + text = evalstring(context, mapping, args[0]) if len(args) == 2: - chars = stringify(args[1][0](context, mapping, args[1][1])) + chars = evalstring(context, mapping, args[1]) return text.strip(chars) return text.strip() @@ -676,9 +711,9 @@ # i18n: "sub" is a keyword raise error.ParseError(_("sub expects three arguments")) - pat = stringify(args[0][0](context, mapping, args[0][1])) - rpl = stringify(args[1][0](context, mapping, args[1][1])) - src = stringify(args[2][0](context, mapping, args[2][1])) + pat = evalstring(context, mapping, args[0]) + rpl = evalstring(context, mapping, args[1]) + src = evalstring(context, mapping, args[2]) try: patre = re.compile(pat) except re.error: @@ -697,8 +732,8 @@ # i18n: "startswith" is a keyword raise error.ParseError(_("startswith expects two arguments")) - patn = stringify(args[0][0](context, mapping, args[0][1])) - text = stringify(args[1][0](context, mapping, args[1][1])) + patn = evalstring(context, mapping, args[0]) + text = evalstring(context, mapping, args[1]) if text.startswith(patn): return text return '' @@ -711,14 +746,12 @@ raise 
error.ParseError(_("word expects two or three arguments, got %d") % len(args)) - try: - num = int(stringify(args[0][0](context, mapping, args[0][1]))) - except ValueError: - # i18n: "word" is a keyword - raise error.ParseError(_("word expects an integer index")) - text = stringify(args[1][0](context, mapping, args[1][1])) + num = evalinteger(context, mapping, args[0], + # i18n: "word" is a keyword + _("word expects an integer index")) + text = evalstring(context, mapping, args[1]) if len(args) == 3: - splitter = stringify(args[2][0](context, mapping, args[2][1])) + splitter = evalstring(context, mapping, args[2]) else: splitter = None @@ -854,6 +887,8 @@ stylelist = [] for file in dirlist: split = file.split(".") + if split[-1] in ('orig', 'rej'): + continue if split[0] == "map-cmdline": stylelist.append(split[1]) return ", ".join(sorted(stylelist))
--- a/mercurial/templates/json/map Sun Mar 13 02:29:11 2016 +0100 +++ b/mercurial/templates/json/map Tue Mar 15 14:10:46 2016 -0700 @@ -8,26 +8,26 @@ changelistentry = '\{ "node": {node|json}, "date": {date|json}, - "desc": {desc|json}, + "desc": {desc|utf8|json}, "bookmarks": [{join(bookmarks%changelistentryname, ", ")}], "tags": [{join(tags%changelistentryname, ", ")}], - "user": {author|json} + "user": {author|utf8|json} }' -changelistentryname = '{name|json}' +changelistentryname = '{name|utf8|json}' changeset = '\{ "node": {node|json}, "date": {date|json}, - "desc": {desc|json}, + "desc": {desc|utf8|json}, "branch": {if(branch, branch%changesetbranch, "default"|json)}, "bookmarks": [{join(changesetbookmark, ", ")}], "tags": [{join(changesettag, ", ")}], - "user": {author|json}, + "user": {author|utf8|json}, "parents": [{join(parent%changesetparent, ", ")}], "phase": {phase|json} }' -changesetbranch = '{name|json}' -changesetbookmark = '{bookmark|json}' -changesettag = '{tag|json}' +changesetbranch = '{name|utf8|json}' +changesetbookmark = '{bookmark|utf8|json}' +changesettag = '{tag|utf8|json}' changesetparent = '{node|json}' manifest = '\{ "node": {node|json}, @@ -37,7 +37,7 @@ "bookmarks": [{join(bookmarks%name, ", ")}], "tags": [{join(tags%name, ", ")}] }' -name = '{name|json}' +name = '{name|utf8|json}' direntry = '\{ "abspath": {path|json}, "basename": {basename|json}, @@ -55,7 +55,7 @@ "tags": [{join(entriesnotip%tagentry, ", ")}] }' tagentry = '\{ - "tag": {tag|json}, + "tag": {tag|utf8|json}, "node": {node|json}, "date": {date|json} }' @@ -64,7 +64,7 @@ "bookmarks": [{join(entries%bookmarkentry, ", ")}] }' bookmarkentry = '\{ - "bookmark": {bookmark|json}, + "bookmark": {bookmark|utf8|json}, "node": {node|json}, "date": {date|json} }' @@ -72,7 +72,7 @@ "branches": [{join(entries%branchentry, ", ")}] }' branchentry = '\{ - "branch": {branch|json}, + "branch": {branch|utf8|json}, "node": {node|json}, "date": {date|json}, "status": {status|json} @@ -82,8 
+82,8 @@ "path": {file|json}, "node": {node|json}, "date": {date|json}, - "desc": {desc|json}, - "author": {author|json}, + "desc": {desc|utf8|json}, + "author": {author|utf8|json}, "parents": [{join(parent%changesetparent, ", ")}], "children": [{join(child%changesetparent, ", ")}], "diff": [{join(diff%diffblock, ", ")}] @@ -116,8 +116,8 @@ "path": {file|json}, "node": {node|json}, "date": {date|json}, - "desc": {desc|json}, - "author": {author|json}, + "desc": {desc|utf8|json}, + "author": {author|utf8|json}, "parents": [{join(parent%changesetparent, ", ")}], "children": [{join(child%changesetparent, ", ")}], "leftnode": {leftnode|json}, @@ -137,9 +137,9 @@ fileannotate = '\{ "abspath": {file|json}, "node": {node|json}, - "author": {author|json}, + "author": {author|utf8|json}, "date": {date|json}, - "desc": {desc|json}, + "desc": {desc|utf8|json}, "parents": [{join(parent%changesetparent, ", ")}], "children": [{join(child%changesetparent, ", ")}], "permissions": {permissions|json}, @@ -147,8 +147,8 @@ }' fileannotation = '\{ "node": {node|json}, - "author": {author|json}, - "desc": {desc|json}, + "author": {author|utf8|json}, + "desc": {desc|utf8|json}, "abspath": {file|json}, "targetline": {targetline|json}, "line": {line|json}, @@ -163,12 +163,21 @@ "othercommands": [{join(othercommands%helptopicentry, ", ")}] }' helptopicentry = '\{ - "topic": {topic|json}, - "summary": {summary|json} + "topic": {topic|utf8|json}, + "summary": {summary|utf8|json} }' help = '\{ - "topic": {topic|json}, - "rawdoc": {doc|json} + "topic": {topic|utf8|json}, + "rawdoc": {doc|utf8|json} }' filenodelink = '' filenolink = '' +index = '\{ + "entries": [{join(entries%indexentry, ", ")}] + }' +indexentry = '\{ + "name": {name|utf8|json}, + "description": {description|utf8|json}, + "contact": {contact|utf8|json}, + "lastchange": {lastchange|json} + }'
--- a/mercurial/ui.py Sun Mar 13 02:29:11 2016 +0100 +++ b/mercurial/ui.py Tue Mar 15 14:10:46 2016 -0700 @@ -42,7 +42,6 @@ # (see "hg help extensions" for more info) # # pager = -# progress = # color =""", 'cloned': @@ -86,7 +85,6 @@ # (see "hg help extensions" for more info) # # blackbox = -# progress = # color = # pager =""", } @@ -1071,11 +1069,15 @@ stacklevel += 1 # get in develwarn if self.tracebackflag: util.debugstacktrace(msg, stacklevel, self.ferr, self.fout) + self.log('develwarn', '%s at:\n%s' % + (msg, ''.join(util.getstackframes(stacklevel)))) else: curframe = inspect.currentframe() calframe = inspect.getouterframes(curframe, 2) self.write_err('%s at: %s:%s (%s)\n' % ((msg,) + calframe[stacklevel][1:4])) + self.log('develwarn', '%s at: %s:%s (%s)\n', + msg, *calframe[stacklevel][1:4]) def deprecwarn(self, msg, version): """issue a deprecation warning
--- a/mercurial/util.py Sun Mar 13 02:29:11 2016 +0100 +++ b/mercurial/util.py Tue Mar 15 14:10:46 2016 -0700 @@ -65,6 +65,7 @@ findexe = platform.findexe gethgcmd = platform.gethgcmd getuser = platform.getuser +getpid = os.getpid groupmembers = platform.groupmembers groupname = platform.groupname hidewindow = platform.hidewindow @@ -2549,21 +2550,39 @@ results.append(hook(*args)) return results +def getstackframes(skip=0, line=' %-*s in %s\n', fileline='%s:%s'): + '''Yields lines for a nicely formatted stacktrace. + Skips the 'skip' last entries. + Each file+linenumber is formatted according to fileline. + Each line is formatted according to line. + If line is None, it yields: + length of longest filepath+line number, + filepath+linenumber, + function + + Not be used in production code but very convenient while developing. + ''' + entries = [(fileline % (fn, ln), func) + for fn, ln, func, _text in traceback.extract_stack()[:-skip - 1]] + if entries: + fnmax = max(len(entry[0]) for entry in entries) + for fnln, func in entries: + if line is None: + yield (fnmax, fnln, func) + else: + yield line % (fnmax, fnln, func) + def debugstacktrace(msg='stacktrace', skip=0, f=sys.stderr, otherf=sys.stdout): '''Writes a message to f (stderr) with a nicely formatted stacktrace. Skips the 'skip' last entries. By default it will flush stdout first. - It can be used everywhere and do intentionally not require an ui object. + It can be used everywhere and intentionally does not require an ui object. Not be used in production code but very convenient while developing. ''' if otherf: otherf.flush() f.write('%s at:\n' % msg) - entries = [('%s:%s' % (fn, ln), func) - for fn, ln, func, _text in traceback.extract_stack()[:-skip - 1]] - if entries: - fnmax = max(len(entry[0]) for entry in entries) - for fnln, func in entries: - f.write(' %-*s in %s\n' % (fnmax, fnln, func)) + for line in getstackframes(skip + 1): + f.write(line) f.flush() class dirs(object):
--- a/mercurial/verify.py Sun Mar 13 02:29:11 2016 +0100 +++ b/mercurial/verify.py Tue Mar 15 14:10:46 2016 -0700 @@ -147,9 +147,9 @@ mflinkrevs, filelinkrevs = self._verifychangelog() filenodes = self._verifymanifest(mflinkrevs) + del mflinkrevs - self._crosscheckfiles(mflinkrevs, filelinkrevs, filenodes) - del mflinkrevs + self._crosscheckfiles(filelinkrevs, filenodes) totalfiles, filerevisions = self._verifyfiles(filenodes, filelinkrevs) @@ -197,60 +197,111 @@ ui.progress(_('checking'), None) return mflinkrevs, filelinkrevs - def _verifymanifest(self, mflinkrevs): + def _verifymanifest(self, mflinkrevs, dir="", storefiles=None, + progress=None): repo = self.repo ui = self.ui - mf = self.repo.manifest + mf = self.repo.manifest.dirlog(dir) + + if not dir: + self.ui.status(_("checking manifests\n")) - ui.status(_("checking manifests\n")) filenodes = {} + subdirnodes = {} seen = {} + label = "manifest" + if dir: + label = dir + revlogfiles = mf.files() + storefiles.difference_update(revlogfiles) + if progress: # should be true since we're in a subdirectory + progress() if self.refersmf: # Do not check manifest if there are only changelog entries with # null manifests. 
- self.checklog(mf, "manifest", 0) + self.checklog(mf, label, 0) total = len(mf) for i in mf: - ui.progress(_('checking'), i, total=total, unit=_('manifests')) + if not dir: + ui.progress(_('checking'), i, total=total, unit=_('manifests')) n = mf.node(i) - lr = self.checkentry(mf, i, n, seen, mflinkrevs.get(n, []), - "manifest") + lr = self.checkentry(mf, i, n, seen, mflinkrevs.get(n, []), label) if n in mflinkrevs: del mflinkrevs[n] + elif dir: + self.err(lr, _("%s not in parent-directory manifest") % + short(n), label) else: - self.err(lr, _("%s not in changesets") % short(n), "manifest") + self.err(lr, _("%s not in changesets") % short(n), label) try: - for f, fn in mf.readdelta(n).iteritems(): + for f, fn, fl in mf.readshallowdelta(n).iterentries(): if not f: - self.err(lr, _("file without name in manifest")) - elif f != "/dev/null": # ignore this in very old repos - if _validpath(repo, f): - filenodes.setdefault( - _normpath(f), {}).setdefault(fn, lr) + self.err(lr, _("entry without name in manifest")) + elif f == "/dev/null": # ignore this in very old repos + continue + fullpath = dir + _normpath(f) + if not _validpath(repo, fullpath): + continue + if fl == 't': + subdirnodes.setdefault(fullpath + '/', {}).setdefault( + fn, []).append(lr) + else: + filenodes.setdefault(fullpath, {}).setdefault(fn, lr) except Exception as inst: - self.exc(lr, _("reading manifest delta %s") % short(n), inst) - ui.progress(_('checking'), None) + self.exc(lr, _("reading delta %s") % short(n), inst, label) + if not dir: + ui.progress(_('checking'), None) + + if self.havemf: + for c, m in sorted([(c, m) for m in mflinkrevs + for c in mflinkrevs[m]]): + if dir: + self.err(c, _("parent-directory manifest refers to unknown " + "revision %s") % short(m), label) + else: + self.err(c, _("changeset refers to unknown revision %s") % + short(m), label) + + if not dir and subdirnodes: + self.ui.status(_("checking directory manifests\n")) + storefiles = set() + subdirs = set() + revlogv1 = 
self.revlogv1 + for f, f2, size in repo.store.datafiles(): + if not f: + self.err(None, _("cannot decode filename '%s'") % f2) + elif (size > 0 or not revlogv1) and f.startswith('meta/'): + storefiles.add(_normpath(f)) + subdirs.add(os.path.dirname(f)) + subdircount = len(subdirs) + currentsubdir = [0] + def progress(): + currentsubdir[0] += 1 + ui.progress(_('checking'), currentsubdir[0], total=subdircount, + unit=_('manifests')) + + for subdir, linkrevs in subdirnodes.iteritems(): + subdirfilenodes = self._verifymanifest(linkrevs, subdir, storefiles, + progress) + for f, onefilenodes in subdirfilenodes.iteritems(): + filenodes.setdefault(f, {}).update(onefilenodes) + + if not dir and subdirnodes: + ui.progress(_('checking'), None) + for f in sorted(storefiles): + self.warn(_("warning: orphan revlog '%s'") % f) return filenodes - def _crosscheckfiles(self, mflinkrevs, filelinkrevs, filenodes): + def _crosscheckfiles(self, filelinkrevs, filenodes): repo = self.repo ui = self.ui ui.status(_("crosschecking files in changesets and manifests\n")) - total = len(mflinkrevs) + len(filelinkrevs) + len(filenodes) + total = len(filelinkrevs) + len(filenodes) count = 0 if self.havemf: - for c, m in sorted([(c, m) for m in mflinkrevs - for c in mflinkrevs[m]]): - count += 1 - if m == nullid: - continue - ui.progress(_('crosschecking'), count, total=total) - self.err(c, _("changeset refers to unknown manifest %s") % - short(m)) - for f in sorted(filelinkrevs): count += 1 ui.progress(_('crosschecking'), count, total=total) @@ -284,14 +335,14 @@ for f, f2, size in repo.store.datafiles(): if not f: self.err(None, _("cannot decode filename '%s'") % f2) - elif size > 0 or not revlogv1: + elif (size > 0 or not revlogv1) and f.startswith('data/'): storefiles.add(_normpath(f)) files = sorted(set(filenodes) | set(filelinkrevs)) total = len(files) revisions = 0 for i, f in enumerate(files): - ui.progress(_('checking'), i, item=f, total=total) + ui.progress(_('checking'), i, item=f, 
total=total, unit=_('files')) try: linkrevs = filelinkrevs[f] except KeyError: @@ -374,11 +425,11 @@ if f in filenodes: fns = [(lr, n) for n, lr in filenodes[f].iteritems()] for lr, node in sorted(fns): - self.err(lr, _("%s in manifests not found") % short(node), - f) + self.err(lr, _("manifest refers to unknown revision %s") % + short(node), f) ui.progress(_('checking'), None) - for f in storefiles: + for f in sorted(storefiles): self.warn(_("warning: orphan revlog '%s'") % f) return len(files), revisions
--- a/mercurial/wireproto.py Sun Mar 13 02:29:11 2016 +0100 +++ b/mercurial/wireproto.py Tue Mar 15 14:10:46 2016 -0700 @@ -7,6 +7,7 @@ from __future__ import absolute_import +import itertools import os import sys import tempfile @@ -114,6 +115,41 @@ encresref.set(encres) resref.set(batchable.next()) +class remoteiterbatcher(peer.iterbatcher): + def __init__(self, remote): + super(remoteiterbatcher, self).__init__() + self._remote = remote + + def __getattr__(self, name): + if not getattr(self._remote, name, False): + raise AttributeError( + 'Attempted to iterbatch non-batchable call to %r' % name) + return super(remoteiterbatcher, self).__getattr__(name) + + def submit(self): + """Break the batch request into many patch calls and pipeline them. + + This is mostly valuable over http where request sizes can be + limited, but can be used in other places as well. + """ + req, rsp = [], [] + for name, args, opts, resref in self.calls: + mtd = getattr(self._remote, name) + batchable = mtd.batchable(mtd.im_self, *args, **opts) + encargsorres, encresref = batchable.next() + assert encresref + req.append((name, encargsorres)) + rsp.append((batchable, encresref)) + if req: + self._resultiter = self._remote._submitbatch(req) + self._rsp = rsp + + def results(self): + for (batchable, encresref), encres in itertools.izip( + self._rsp, self._resultiter): + encresref.set(encres) + yield batchable.next() + # Forward a couple of names from peer to make wireproto interactions # slightly more sensible. batchable = peer.batchable @@ -183,16 +219,34 @@ else: return peer.localbatch(self) def _submitbatch(self, req): + """run batch request <req> on the server + + Returns an iterator of the raw responses from the server. 
+ """ cmds = [] for op, argsdict in req: args = ','.join('%s=%s' % (escapearg(k), escapearg(v)) for k, v in argsdict.iteritems()) cmds.append('%s %s' % (op, args)) - rsp = self._call("batch", cmds=';'.join(cmds)) - return [unescapearg(r) for r in rsp.split(';')] + rsp = self._callstream("batch", cmds=';'.join(cmds)) + # TODO this response parsing is probably suboptimal for large + # batches with large responses. + work = rsp.read(1024) + chunk = work + while chunk: + while ';' in work: + one, work = work.split(';', 1) + yield unescapearg(one) + chunk = rsp.read(1024) + work += chunk + yield unescapearg(work) + def _submitone(self, op, args): return self._call(op, **args) + def iterbatch(self): + return remoteiterbatcher(self) + @batchable def lookup(self, key): self.requirecap('lookup', _('look up remote revision')) @@ -396,9 +450,12 @@ def _callstream(self, cmd, **args): """execute <cmd> on the server - The command is expected to return a stream. + The command is expected to return a stream. Note that if the + command doesn't return a stream, _callstream behaves + differently for ssh and http peers. - returns the server reply as a file like object.""" + returns the server reply as a file like object. + """ raise NotImplementedError() def _callcompressable(self, cmd, **args): @@ -631,6 +688,8 @@ caps.append('unbundle=%s' % ','.join(changegroupmod.bundlepriority)) caps.append( 'httpheader=%d' % repo.ui.configint('server', 'maxhttpheaderlen', 1024)) + if repo.ui.configbool('experimental', 'httppostargs', False): + caps.append('httppostargs') return caps # If you are writing an extension and consider wrapping this function. Wrap
--- a/mercurial/worker.py Sun Mar 13 02:29:11 2016 +0100 +++ b/mercurial/worker.py Tue Mar 15 14:10:46 2016 -0700 @@ -152,14 +152,33 @@ _exitstatus = _posixexitstatus def partition(lst, nslices): - '''partition a list into N slices of equal size''' - n = len(lst) - chunk, slop = n / nslices, n % nslices - end = 0 - for i in xrange(nslices): - start = end - end = start + chunk - if slop: - end += 1 - slop -= 1 - yield lst[start:end] + '''partition a list into N slices of roughly equal size + + The current strategy takes every Nth element from the input. If + we ever write workers that need to preserve grouping in input + we should consider allowing callers to specify a partition strategy. + + mpm is not a fan of this partitioning strategy when files are involved. + In his words: + + Single-threaded Mercurial makes a point of creating and visiting + files in a fixed order (alphabetical). When creating files in order, + a typical filesystem is likely to allocate them on nearby regions on + disk. Thus, when revisiting in the same order, locality is maximized + and various forms of OS and disk-level caching and read-ahead get a + chance to work. + + This effect can be quite significant on spinning disks. I discovered it + circa Mercurial v0.4 when revlogs were named by hashes of filenames. + Tarring a repo and copying it to another disk effectively randomized + the revlog ordering on disk by sorting the revlogs by hash and suddenly + performance of my kernel checkout benchmark dropped by ~10x because the + "working set" of sectors visited no longer fit in the drive's cache and + the workload switched from streaming to random I/O. + + What we should really be doing is have workers read filenames from a + ordered queue. This preserves locality and also keeps any worker from + getting more than one file out of balance. + ''' + for i in range(nslices): + yield lst[i::nslices]
--- a/setup.py Sun Mar 13 02:29:11 2016 +0100 +++ b/setup.py Tue Mar 15 14:10:46 2016 -0700 @@ -84,19 +84,6 @@ from distutils.sysconfig import get_python_inc, get_config_var from distutils.version import StrictVersion -convert2to3 = '--c2to3' in sys.argv -if convert2to3: - try: - from distutils.command.build_py import build_py_2to3 as build_py - from lib2to3.refactor import get_fixers_from_package as getfixers - except ImportError: - if sys.version_info[0] < 3: - raise SystemExit("--c2to3 is only compatible with python3.") - raise - sys.path.append('contrib') -elif sys.version_info[0] >= 3: - raise SystemExit("setup.py with python3 needs --c2to3 (experimental)") - scripts = ['hg'] if os.name == 'nt': # We remove hg.bat if we are able to build hg.exe. @@ -220,30 +207,27 @@ version = kw.get('node', '')[:12] if version: - f = open("mercurial/__version__.py", "w") - f.write('# this file is autogenerated by setup.py\n') - f.write('version = "%s"\n' % version) - f.close() - + with open("mercurial/__version__.py", "w") as f: + f.write('# this file is autogenerated by setup.py\n') + f.write('version = "%s"\n' % version) try: + oldpolicy = os.environ.get('HGMODULEPOLICY', None) + os.environ['HGMODULEPOLICY'] = 'py' from mercurial import __version__ version = __version__.version except ImportError: version = 'unknown' +finally: + if oldpolicy is None: + del os.environ['HGMODULEPOLICY'] + else: + os.environ['HGMODULEPOLICY'] = oldpolicy class hgbuild(build): # Insert hgbuildmo first so that files in mercurial/locale/ are found # when build_py is run next. - sub_commands = [('build_mo', None), - - # We also need build_ext before build_py. Otherwise, when 2to3 is - # called (in build_py), it will not find osutil & friends, - # thinking that those modules are global and, consequently, making - # a mess, now that all module imports are global. 
- - ('build_ext', build.has_ext_modules), - ] + build.sub_commands + sub_commands = [('build_mo', None)] + build.sub_commands class hgbuildmo(build): @@ -282,8 +266,6 @@ global_options = Distribution.global_options + \ [('pure', None, "use pure (slow) Python " "code instead of C extensions"), - ('c2to3', None, "(experimental!) convert " - "code with 2to3"), ] def has_ext_modules(self): @@ -328,10 +310,6 @@ return build_scripts.run(self) class hgbuildpy(build_py): - if convert2to3: - fixer_names = sorted(set(getfixers("lib2to3.fixes") + - getfixers("hgfixes"))) - def finalize_options(self): build_py.finalize_options(self) @@ -343,21 +321,16 @@ raise SystemExit('Python headers are required to build ' 'Mercurial but weren\'t found in %s' % h) - def copy_file(self, *args, **kwargs): - dst, copied = build_py.copy_file(self, *args, **kwargs) + def run(self): + if self.distribution.pure: + modulepolicy = 'py' + else: + modulepolicy = 'c' + with open("mercurial/__modulepolicy__.py", "w") as f: + f.write('# this file is autogenerated by setup.py\n') + f.write('modulepolicy = "%s"\n' % modulepolicy) - if copied and dst.endswith('__init__.py'): - if self.distribution.pure: - modulepolicy = 'py' - else: - modulepolicy = 'c' - content = open(dst, 'rb').read() - content = content.replace(b'@MODULELOADPOLICY@', - modulepolicy.encode(libdir_escape)) - with open(dst, 'wb') as fh: - fh.write(content) - - return dst, copied + build_py.run(self) class buildhgextindex(Command): description = 'generate prebuilt index of hgext (for frozen package)' @@ -372,9 +345,8 @@ def run(self): if os.path.exists(self._indexfilename): - f = open(self._indexfilename, 'w') - f.write('# empty\n') - f.close() + with open(self._indexfilename, 'w') as f: + f.write('# empty\n') # here no extension enabled, disabled() lists up everything code = ('import pprint; from mercurial import extensions; ' @@ -383,11 +355,10 @@ if err: raise DistutilsExecError(err) - f = open(self._indexfilename, 'w') - f.write('# 
this file is autogenerated by setup.py\n') - f.write('docs = ') - f.write(out) - f.close() + with open(self._indexfilename, 'w') as f: + f.write('# this file is autogenerated by setup.py\n') + f.write('docs = ') + f.write(out) class buildhgexe(build_ext): description = 'compile hg.exe from mercurial/exewrapper.c' @@ -400,10 +371,9 @@ self.compiler.dll_libraries = [] # no -lmsrvc90 hv = sys.hexversion pythonlib = 'python%d%d' % (hv >> 24, (hv >> 16) & 0xff) - f = open('mercurial/hgpythonlib.h', 'wb') - f.write('/* this file is autogenerated by setup.py */\n') - f.write('#define HGPYTHONLIB "%s"\n' % pythonlib) - f.close() + with open('mercurial/hgpythonlib.h', 'wb') as f: + f.write('/* this file is autogenerated by setup.py */\n') + f.write('#define HGPYTHONLIB "%s"\n' % pythonlib) objects = self.compiler.compile(['mercurial/exewrapper.c'], output_dir=self.build_temp) dir = os.path.dirname(self.get_ext_fullpath('dummy')) @@ -503,9 +473,8 @@ libdir = uplevel * ('..' + os.sep) + self.install_lib[len(common):] for outfile in self.outfiles: - fp = open(outfile, 'rb') - data = fp.read() - fp.close() + with open(outfile, 'rb') as fp: + data = fp.read() # skip binary files if b'\0' in data: @@ -520,9 +489,8 @@ continue data = data.replace(b'@LIBDIR@', libdir.encode(libdir_escape)) - fp = open(outfile, 'wb') - fp.write(data) - fp.close() + with open(outfile, 'wb') as fp: + fp.write(data) cmdclass = {'build': hgbuild, 'build_mo': hgbuildmo, @@ -564,6 +532,8 @@ Extension('mercurial.osutil', ['mercurial/osutil.c'], extra_link_args=osutil_ldflags, depends=common_depends), + Extension('hgext.fsmonitor.pywatchman.bser', + ['hgext/fsmonitor/pywatchman/bser.c']), ] try:
--- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/tests/blackbox-readonly-dispatch.py Tue Mar 15 14:10:46 2016 -0700 @@ -0,0 +1,36 @@ +from __future__ import absolute_import, print_function +import os +from mercurial import ( + dispatch, +) + +def testdispatch(cmd): + """Simple wrapper around dispatch.dispatch() + + Prints command and result value, but does not handle quoting. + """ + print("running: %s" % (cmd,)) + req = dispatch.request(cmd.split()) + result = dispatch.dispatch(req) + print("result: %r" % (result,)) + +# create file 'foo', add and commit +f = open('foo', 'wb') +f.write('foo\n') +f.close() +testdispatch("add foo") +testdispatch("commit -m commit1 -d 2000-01-01 foo") + +# append to file 'foo' and commit +f = open('foo', 'ab') +f.write('bar\n') +f.close() +# remove blackbox.log directory (proxy for readonly log file) +os.rmdir(".hg/blackbox.log") +# replace it with the real blackbox.log file +os.rename(".hg/blackbox.log-", ".hg/blackbox.log") +testdispatch("commit -m commit2 -d 2000-01-02 foo") + +# check 88803a69b24 (fancyopts modified command table) +testdispatch("log -r 0") +testdispatch("log -r tip")
--- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/tests/blacklists/fsmonitor Tue Mar 15 14:10:46 2016 -0700 @@ -0,0 +1,52 @@ +# Blacklist for a full testsuite run with fsmonitor enabled. +# Use with +# run-tests --blacklist=blacklists/fsmonitor \ +# --extra-config="extensions.fsmonitor=" +# The following tests all fail because they either use extensions that conflict +# with fsmonitor, use subrepositories, or don't anticipate the extra file in +# the .hg directory that fsmonitor adds. +test-basic.t +test-blackbox.t +test-check-commit.t +test-commandserver.t +test-copy.t +test-debugextensions.t +test-eol-add.t +test-eol-clone.t +test-eol-hook.t +test-eol-patch.t +test-eol-tag.t +test-eol-update.t +test-eol.t +test-eolfilename.t +test-extension.t +test-fncache.t +test-hardlinks.t +test-help.t +test-inherit-mode.t +test-issue3084.t +test-largefiles-cache.t +test-largefiles-misc.t +test-largefiles-small-disk.t +test-largefiles-update.t +test-largefiles-wireproto.t +test-largefiles.t +test-lfconvert.t +test-merge-tools.t +test-nested-repo.t +test-permissions.t +test-push-warn.t +test-subrepo-deep-nested-change.t +test-subrepo-recursion.t +test-subrepo.t +test-tags.t + +# The following tests remain enabled; they fail *too*, but only because they +# occasionally use blacklisted extensions and don't anticipate the warning +# generated. +#test-log.t +#test-hook.t +#test-rename.t +#test-histedit-fold.t +#test-fileset-generated.t +#test-init.t
--- a/tests/dumbhttp.py Sun Mar 13 02:29:11 2016 +0100 +++ b/tests/dumbhttp.py Tue Mar 15 14:10:46 2016 -0700 @@ -38,7 +38,7 @@ parser.add_option('-f', '--foreground', dest='foreground', action='store_true', help='do not start the HTTP server in the background') - parser.add_option('--daemon-pipefds') + parser.add_option('--daemon-postexec', action='append') (options, args) = parser.parse_args() @@ -49,7 +49,7 @@ opts = {'pid_file': options.pid, 'daemon': not options.foreground, - 'daemon_pipefds': options.daemon_pipefds} + 'daemon_postexec': options.daemon_postexec} service = simplehttpservice(options.host, options.port) cmdutil.service(opts, initfn=service.init, runfn=service.run, runargs=[sys.executable, __file__] + sys.argv[1:])
--- a/tests/f Sun Mar 13 02:29:11 2016 +0100 +++ b/tests/f Tue Mar 15 14:10:46 2016 -0700 @@ -64,7 +64,7 @@ if opts.size and not isdir: facts.append('size=%s' % stat.st_size) if opts.mode and not islink: - facts.append('mode=%o' % (stat.st_mode & 0777)) + facts.append('mode=%o' % (stat.st_mode & 0o777)) if opts.links: facts.append('links=%s' % stat.st_nlink) if opts.newer: @@ -106,7 +106,7 @@ chunk = chunk[opts.bytes:] if opts.hexdump: for i in range(0, len(chunk), 16): - s = chunk[i:i+16] + s = chunk[i:i + 16] outfile.write('%04x: %-47s |%s|\n' % (i, ' '.join('%02x' % ord(c) for c in s), re.sub('[^ -~]', '.', s)))
--- a/tests/hghave Sun Mar 13 02:29:11 2016 +0100 +++ b/tests/hghave Tue Mar 15 14:10:46 2016 -0700 @@ -3,6 +3,9 @@ if all features are there, non-zero otherwise. If a feature name is prefixed with "no-", the absence of feature is tested. """ + +from __future__ import print_function + import optparse import os, sys import hghave @@ -12,7 +15,7 @@ def list_features(): for name, feature in sorted(checks.iteritems()): desc = feature[1] - print name + ':', desc + print(name + ':', desc) def test_features(): failed = 0 @@ -20,8 +23,8 @@ check, _ = feature try: check() - except Exception, e: - print "feature %s failed: %s" % (name, e) + except Exception as e: + print("feature %s failed: %s" % (name, e)) failed += 1 return failed @@ -45,7 +48,7 @@ sys.path.insert(0, path) try: import hghaveaddon - except BaseException, inst: + except BaseException as inst: sys.stderr.write('failed to import hghaveaddon.py from %r: %s\n' % (path, inst)) sys.exit(2)
--- a/tests/hghave.py Sun Mar 13 02:29:11 2016 +0100 +++ b/tests/hghave.py Tue Mar 15 14:10:46 2016 -0700 @@ -335,21 +335,6 @@ except ImportError: return False -@check("json", "some json module available") -def has_json(): - try: - import json - json.dumps - return True - except ImportError: - try: - import simplejson as json - json.dumps - return True - except ImportError: - pass - return False - @check("outer-repo", "outer repo") def has_outer_repo(): # failing for other reasons than 'no repo' imply that there is a repo @@ -469,7 +454,7 @@ def has_slow(): return os.environ.get('HGTEST_SLOW') == 'slow' -@check("hypothesis", "is Hypothesis installed") +@check("hypothesis", "Hypothesis automated test generation") def has_hypothesis(): try: import hypothesis
--- a/tests/hypothesishelpers.py Sun Mar 13 02:29:11 2016 +0100 +++ b/tests/hypothesishelpers.py Tue Mar 15 14:10:46 2016 -0700 @@ -33,7 +33,8 @@ # Fixed in version 1.13 (released 2015 october 29th) f.__module__ = '__anon__' try: - given(*args, settings=settings(max_examples=2000), **kwargs)(f)() + with settings(max_examples=2000): + given(*args, **kwargs)(f)() except Exception: traceback.print_exc(file=sys.stdout) sys.exit(1)
--- a/tests/mockblackbox.py Sun Mar 13 02:29:11 2016 +0100 +++ b/tests/mockblackbox.py Tue Mar 15 14:10:46 2016 -0700 @@ -4,8 +4,11 @@ return 0, 0 def getuser(): return 'bob' +def getpid(): + return 5000 # mock the date and user apis so the output is always the same def uisetup(ui): util.makedate = makedate util.getuser = getuser + util.getpid = getpid
--- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/tests/pdiff Tue Mar 15 14:10:46 2016 -0700 @@ -0,0 +1,58 @@ +#!/bin/sh + +# Script to get stable diff output on any platform. +# +# Output of this script is almost equivalent to GNU diff with "-Nru". +# +# Use this script as "hg pdiff" via extdiff extension with preparation +# below in test scripts: +# +# $ cat >> $HGRCPATH <<EOF +# > [extdiff] +# > pdiff = sh "$RUNTESTDIR/pdiff" +# > EOF + +filediff(){ + # USAGE: filediff file1 file2 [header] + + # compare with /dev/null if file doesn't exist (as "-N" option) + file1="$1" + if test ! -f "$file1"; then + file1=/dev/null + fi + file2="$2" + if test ! -f "$file2"; then + file2=/dev/null + fi + + if cmp -s "$file1" "$file2" 2> /dev/null; then + # Return immediately, because comparison isn't needed. This + # also avoids redundant message of diff like "No differences + # encountered" (on Solaris) + return + fi + + if test -n "$3"; then + # show header only in recursive case + echo "$3" + fi + + # replace "/dev/null" by corresponded filename (as "-N" option) + diff -u "$file1" "$file2" | + sed "s@^--- /dev/null\(.*\)\$@--- $1\1@" | + sed "s@^\+\+\+ /dev/null\(.*\)\$@+++ $2\1@" +} + +if test -d "$1" -o -d "$2"; then + # ensure comparison in dictionary order + ( + if test -d "$1"; then (cd "$1" && find . -type f); fi + if test -d "$2"; then (cd "$2" && find . -type f); fi + ) | + sed 's@^\./@@g' | sort | uniq | + while read file; do + filediff "$1/$file" "$2/$file" "diff -Nru $1/$file $2/$file" + done +else + filediff "$1" "$2" +fi
--- a/tests/run-tests.py Sun Mar 13 02:29:11 2016 +0100 +++ b/tests/run-tests.py Tue Mar 15 14:10:46 2016 -0700 @@ -48,6 +48,7 @@ from distutils import version import difflib import errno +import json import optparse import os import shutil @@ -69,15 +70,6 @@ import unittest osenvironb = getattr(os, 'environb', os.environ) - -try: - import json -except ImportError: - try: - import simplejson as json - except ImportError: - json = None - processlock = threading.Lock() if sys.version_info > (3, 5, 0): @@ -257,6 +249,10 @@ metavar="HG", help="test using specified hg script rather than a " "temporary installation") + parser.add_option("--chg", action="store_true", + help="install and use chg wrapper in place of hg") + parser.add_option("--with-chg", metavar="CHG", + help="use specified chg wrapper in place of hg") parser.add_option("-3", "--py3k-warnings", action="store_true", help="enable Py3k warnings on Python 2.6+") parser.add_option('--extra-config-opt', action="append", @@ -285,11 +281,12 @@ options.pure = True if options.with_hg: - options.with_hg = os.path.expanduser(options.with_hg) + options.with_hg = os.path.realpath( + os.path.expanduser(_bytespath(options.with_hg))) if not (os.path.isfile(options.with_hg) and os.access(options.with_hg, os.X_OK)): parser.error('--with-hg must specify an executable hg script') - if not os.path.basename(options.with_hg) == 'hg': + if not os.path.basename(options.with_hg) == b'hg': sys.stderr.write('warning: --with-hg should specify an hg script\n') if options.local: testdir = os.path.dirname(_bytespath(os.path.realpath(sys.argv[0]))) @@ -299,6 +296,20 @@ % hgbin) options.with_hg = hgbin + if (options.chg or options.with_chg) and os.name == 'nt': + parser.error('chg does not work on %s' % os.name) + if options.with_chg: + options.chg = False # no installation to temporary location + options.with_chg = os.path.realpath( + os.path.expanduser(_bytespath(options.with_chg))) + if not (os.path.isfile(options.with_chg) and + 
os.access(options.with_chg, os.X_OK)): + parser.error('--with-chg must specify a chg executable') + if options.chg and options.with_hg: + # chg shares installation location with hg + parser.error('--chg does not work when --with-hg is specified ' + '(use --with-chg instead)') + options.anycoverage = options.cover or options.annotate or options.htmlcov if options.anycoverage: try: @@ -443,7 +454,7 @@ debug=False, timeout=defaults['timeout'], startport=defaults['port'], extraconfigopts=None, - py3kwarnings=False, shell=None, + py3kwarnings=False, shell=None, hgcommand=None, slowtimeout=defaults['slowtimeout']): """Create a test from parameters. @@ -490,6 +501,7 @@ self._extraconfigopts = extraconfigopts or [] self._py3kwarnings = py3kwarnings self._shell = _bytespath(shell) + self._hgcommand = hgcommand or b'hg' self._aborted = False self._daemonpids = [] @@ -686,7 +698,8 @@ if self._keeptmpdir: log('\nKeeping testtmp dir: %s\nKeeping threadtmp dir: %s' % - (self._testtmp, self._threadtmp)) + (self._testtmp.decode('utf-8'), + self._threadtmp.decode('utf-8'))) else: shutil.rmtree(self._testtmp, True) shutil.rmtree(self._threadtmp, True) @@ -708,6 +721,10 @@ """Terminate execution of this test.""" self._aborted = True + def _portmap(self, i): + offset = b'' if i == 0 else b'%d' % i + return (br':%d\b' % (self._startport + i), b':$HGPORT%s' % offset) + def _getreplacements(self): """Obtain a mapping of text replacements to apply to test output. @@ -716,31 +733,39 @@ occur. 
""" r = [ - (br':%d\b' % self._startport, b':$HGPORT'), - (br':%d\b' % (self._startport + 1), b':$HGPORT1'), - (br':%d\b' % (self._startport + 2), b':$HGPORT2'), + # This list should be parallel to defineport in _getenv + self._portmap(0), + self._portmap(1), + self._portmap(2), (br'(?m)^(saved backup bundle to .*\.hg)( \(glob\))?$', br'\1 (glob)'), ] - - if os.name == 'nt': - r.append( - (b''.join(c.isalpha() and b'[%s%s]' % (c.lower(), c.upper()) or - c in b'/\\' and br'[/\\]' or c.isdigit() and c or b'\\' + c - for c in self._testtmp), b'$TESTTMP')) - else: - r.append((re.escape(self._testtmp), b'$TESTTMP')) + r.append((self._escapepath(self._testtmp), b'$TESTTMP')) return r + def _escapepath(self, p): + if os.name == 'nt': + return ( + (b''.join(c.isalpha() and b'[%s%s]' % (c.lower(), c.upper()) or + c in b'/\\' and br'[/\\]' or c.isdigit() and c or b'\\' + c + for c in p)) + ) + else: + return re.escape(p) + def _getenv(self): """Obtain environment variables to use during test execution.""" + def defineport(i): + offset = '' if i == 0 else '%s' % i + env["HGPORT%s" % offset] = '%s' % (self._startport + i) env = os.environ.copy() env['TESTTMP'] = self._testtmp env['HOME'] = self._testtmp - env["HGPORT"] = str(self._startport) - env["HGPORT1"] = str(self._startport + 1) - env["HGPORT2"] = str(self._startport + 2) + # This number should match portneeded in _getport + for port in xrange(3): + # This list should be parallel to _portmap in _getreplacements + defineport(port) env["HGRCPATH"] = os.path.join(self._threadtmp, b'.hgrc') env["DAEMON_PIDS"] = os.path.join(self._threadtmp, b'daemon.pids') env["HGEDITOR"] = ('"' + sys.executable + '"' @@ -885,8 +910,8 @@ class TTest(Test): """A "t test" is a test backed by a .t file.""" - SKIPPED_PREFIX = 'skipped: ' - FAILED_PREFIX = 'hghave check failed: ' + SKIPPED_PREFIX = b'skipped: ' + FAILED_PREFIX = b'hghave check failed: ' NEEDESCAPE = re.compile(br'[\x00-\x08\x0b-\x1f\x7f-\xff]').search ESCAPESUB = 
re.compile(br'[\x00-\x08\x0b-\x1f\\\x7f-\xff]').sub @@ -981,6 +1006,8 @@ if self._debug: script.append(b'set -x\n') + if self._hgcommand != b'hg': + script.append(b'alias hg="%s"\n' % self._hgcommand) if os.getenv('MSYSTEM'): script.append(b'alias pwd="pwd -W"\n') @@ -1111,14 +1138,13 @@ elif warnonly == 1: # Is "not yet" and line is warn only. warnonly = 2 # Yes do warn. break - - # clean up any optional leftovers - while expected.get(pos, None): - el = expected[pos].pop(0) - if not el.endswith(b" (?)\n"): - expected[pos].insert(0, el) - break - postout.append(b' ' + el) + else: + # clean up any optional leftovers + while expected.get(pos, None): + el = expected[pos].pop(0) + if el and not el.endswith(b" (?)\n"): + break + postout.append(b' ' + el) if lcmd: # Add on last return code. @@ -1187,7 +1213,7 @@ if el: if el.endswith(b" (?)\n"): retry = "retry" - el = el[:-5] + "\n" + el = el[:-5] + b"\n" if el.endswith(b" (esc)\n"): if PYTHON3: el = el[:-7].decode('unicode_escape') + '\n' @@ -1219,10 +1245,10 @@ for line in lines: if line.startswith(TTest.SKIPPED_PREFIX): line = line.splitlines()[0] - missing.append(line[len(TTest.SKIPPED_PREFIX):]) + missing.append(line[len(TTest.SKIPPED_PREFIX):].decode('utf-8')) elif line.startswith(TTest.FAILED_PREFIX): line = line.splitlines()[0] - failed.append(line[len(TTest.FAILED_PREFIX):]) + failed.append(line[len(TTest.FAILED_PREFIX):].decode('utf-8')) return missing, failed @@ -1347,7 +1373,6 @@ return accepted = False - failed = False lines = [] with iolock: @@ -1388,7 +1413,7 @@ else: rename(test.errpath, '%s.out' % test.path) accepted = True - if not accepted and not failed: + if not accepted: self.faildata[test.name] = b''.join(lines) return accepted @@ -1481,7 +1506,7 @@ def get(): num_tests[0] += 1 if getattr(test, 'should_reload', False): - return self._loadtest(test.bname, num_tests[0]) + return self._loadtest(test.path, num_tests[0]) return test if not os.path.exists(test.path): result.addSkip(test, "Doesn't exist") 
@@ -1617,7 +1642,7 @@ def loadtimes(testdir): times = [] try: - with open(os.path.join(testdir, '.testtimes-')) as fp: + with open(os.path.join(testdir, b'.testtimes-')) as fp: for line in fp: ts = line.split() times.append((ts[0], [float(t) for t in ts[1:]])) @@ -1637,12 +1662,12 @@ ts.append(real) ts[:] = ts[-maxruns:] - fd, tmpname = tempfile.mkstemp(prefix='.testtimes', + fd, tmpname = tempfile.mkstemp(prefix=b'.testtimes', dir=testdir, text=True) with os.fdopen(fd, 'w') as fp: - for name, ts in sorted(saved.iteritems()): + for name, ts in sorted(saved.items()): fp.write('%s %s\n' % (name, ' '.join(['%.3f' % (t,) for t in ts]))) - timepath = os.path.join(testdir, '.testtimes') + timepath = os.path.join(testdir, b'.testtimes') try: os.unlink(timepath) except OSError: @@ -1714,9 +1739,7 @@ xuf.write(doc.toprettyxml(indent=' ', encoding='utf-8')) if self._runner.options.json: - if json is None: - raise ImportError("json module not installed") - jsonpath = os.path.join(self._runner._testdir, 'report.json') + jsonpath = os.path.join(self._runner._testdir, b'report.json') with open(jsonpath, 'w') as fp: timesd = {} for tdata in result.times: @@ -1731,14 +1754,14 @@ for res, testcases in groups: for tc, __ in testcases: if tc.name in timesd: + diff = result.faildata.get(tc.name, b'') tres = {'result': res, 'time': ('%0.3f' % timesd[tc.name][2]), 'cuser': ('%0.3f' % timesd[tc.name][0]), 'csys': ('%0.3f' % timesd[tc.name][1]), 'start': ('%0.3f' % timesd[tc.name][3]), 'end': ('%0.3f' % timesd[tc.name][4]), - 'diff': result.faildata.get(tc.name, - ''), + 'diff': diff.decode('unicode_escape'), } else: # blacklisted test @@ -1809,7 +1832,9 @@ self._pythondir = None self._coveragefile = None self._createdfiles = [] + self._hgcommand = None self._hgpath = None + self._chgsockdir = None self._portoffset = 0 self._ports = {} @@ -1871,7 +1896,7 @@ for kw, mul in slow.items(): if kw in f: val *= mul - if f.endswith('.py'): + if f.endswith(b'.py'): val /= 10.0 perf[f] = val / 
1000.0 return perf[f] @@ -1915,14 +1940,9 @@ if self.options.with_hg: self._installdir = None whg = self.options.with_hg - # If --with-hg is not specified, we have bytes already, - # but if it was specified in python3 we get a str, so we - # have to encode it back into a bytes. - if PYTHON3: - if not isinstance(whg, bytes): - whg = _bytespath(whg) self._bindir = os.path.dirname(os.path.realpath(whg)) assert isinstance(self._bindir, bytes) + self._hgcommand = os.path.basename(whg) self._tmpbindir = os.path.join(self._hgtmp, b'install', b'bin') os.makedirs(self._tmpbindir) @@ -1934,11 +1954,24 @@ self._pythondir = self._bindir else: self._installdir = os.path.join(self._hgtmp, b"install") - self._bindir = osenvironb[b"BINDIR"] = \ - os.path.join(self._installdir, b"bin") + self._bindir = os.path.join(self._installdir, b"bin") + self._hgcommand = b'hg' self._tmpbindir = self._bindir self._pythondir = os.path.join(self._installdir, b"lib", b"python") + # set up crafted chg environment, then replace "hg" command by "chg" + chgbindir = self._bindir + if self.options.chg or self.options.with_chg: + self._chgsockdir = d = os.path.join(self._hgtmp, b'chgsock') + os.mkdir(d) + osenvironb[b'CHGSOCKNAME'] = os.path.join(d, b"server") + osenvironb[b'CHGHG'] = os.path.join(self._bindir, self._hgcommand) + if self.options.chg: + self._hgcommand = b'chg' + elif self.options.with_chg: + chgbindir = os.path.dirname(os.path.realpath(self.options.with_chg)) + self._hgcommand = os.path.basename(self.options.with_chg) + osenvironb[b"BINDIR"] = self._bindir osenvironb[b"PYTHON"] = PYTHON @@ -1955,6 +1988,8 @@ realfile = os.path.realpath(fileb) realdir = os.path.abspath(os.path.dirname(realfile)) path.insert(2, realdir) + if chgbindir != self._bindir: + path.insert(1, chgbindir) if self._testdir != runtestdir: path = [self._testdir] + path if self._tmpbindir != self._bindir: @@ -2023,6 +2058,9 @@ self._checkhglib("Testing") else: self._usecorrectpython() + if self.options.chg: + assert 
self._installdir + self._installchg() if self.options.restart: orig = list(tests) @@ -2116,12 +2154,15 @@ startport=self._getport(count), extraconfigopts=self.options.extra_config_opt, py3kwarnings=self.options.py3k_warnings, - shell=self.options.shell) + shell=self.options.shell, + hgcommand=self._hgcommand) t.should_reload = True return t def _cleanup(self): """Clean up state from this test invocation.""" + if self._chgsockdir: + self._killchgdaemons() if self.options.keep_tmpdir: return @@ -2182,13 +2223,11 @@ pure = b"--pure" else: pure = b"" - py3 = '' # Run installer in hg root script = os.path.realpath(sys.argv[0]) exe = sys.executable if PYTHON3: - py3 = b'--c2to3' compiler = _bytespath(compiler) script = _bytespath(script) exe = _bytespath(exe) @@ -2202,12 +2241,12 @@ # least on Windows for now, deal with .pydistutils.cfg bugs # when they happen. nohome = b'' - cmd = (b'%(exe)s setup.py %(py3)s %(pure)s clean --all' + cmd = (b'%(exe)s setup.py %(pure)s clean --all' b' build %(compiler)s --build-base="%(base)s"' b' install --force --prefix="%(prefix)s"' b' --install-lib="%(libdir)s"' b' --install-scripts="%(bindir)s" %(nohome)s >%(logfile)s 2>&1' - % {b'exe': exe, b'py3': py3, b'pure': pure, + % {b'exe': exe, b'pure': pure, b'compiler': compiler, b'base': os.path.join(self._hgtmp, b"build"), b'prefix': self._installdir, b'libdir': self._pythondir, @@ -2321,6 +2360,34 @@ return self._hgpath + def _installchg(self): + """Install chg into the test environment""" + vlog('# Performing temporary installation of CHG') + assert os.path.dirname(self._bindir) == self._installdir + assert self._hgroot, 'must be called after _installhg()' + cmd = (b'"%(make)s" clean install PREFIX="%(prefix)s"' + % {b'make': 'make', # TODO: switch by option or environment? 
+ b'prefix': self._installdir}) + cwd = os.path.join(self._hgroot, b'contrib', b'chg') + vlog("# Running", cmd) + proc = subprocess.Popen(cmd, shell=True, cwd=cwd, + stdin=subprocess.PIPE, stdout=subprocess.PIPE, + stderr=subprocess.STDOUT) + out, _err = proc.communicate() + if proc.returncode != 0: + if PYTHON3: + sys.stdout.buffer.write(out) + else: + sys.stdout.write(out) + sys.exit(1) + + def _killchgdaemons(self): + """Kill all background chg command servers spawned by tests""" + for f in os.listdir(self._chgsockdir): + if '.' in f: + continue + os.unlink(os.path.join(self._chgsockdir, f)) + def _outputcoverage(self): """Produce code coverage output.""" from coverage import coverage
--- a/tests/test-1102.t Sun Mar 13 02:29:11 2016 +0100 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 @@ -1,17 +0,0 @@ - $ rm -rf a - $ hg init a - $ cd a - $ echo a > a - $ hg ci -Am0 - adding a - $ hg tag t1 # 1 - $ hg tag --remove t1 # 2 - - $ hg co 1 - 1 files updated, 0 files merged, 0 files removed, 0 files unresolved - $ hg tag -f -r0 t1 - $ hg tags - tip 3:a49829c4fc11 - t1 0:f7b1eb17ad24 - - $ cd ..
--- a/tests/test-1993.t Sun Mar 13 02:29:11 2016 +0100 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 @@ -1,48 +0,0 @@ - $ hg init a - $ cd a - $ echo a > a - $ hg ci -Am0 - adding a - $ echo b > b - $ hg ci -Am1 - adding b - $ hg tag -r0 default - warning: tag default conflicts with existing branch name - $ hg log - changeset: 2:30a83d1e4a1e - tag: tip - user: test - date: Thu Jan 01 00:00:00 1970 +0000 - summary: Added tag default for changeset f7b1eb17ad24 - - changeset: 1:925d80f479bb - user: test - date: Thu Jan 01 00:00:00 1970 +0000 - summary: 1 - - changeset: 0:f7b1eb17ad24 - tag: default - user: test - date: Thu Jan 01 00:00:00 1970 +0000 - summary: 0 - - $ hg update 'tag(default)' - 0 files updated, 0 files merged, 2 files removed, 0 files unresolved - $ hg parents - changeset: 0:f7b1eb17ad24 - tag: default - user: test - date: Thu Jan 01 00:00:00 1970 +0000 - summary: 0 - - $ hg update 'branch(default)' - 2 files updated, 0 files merged, 0 files removed, 0 files unresolved - $ hg parents - changeset: 2:30a83d1e4a1e - tag: tip - user: test - date: Thu Jan 01 00:00:00 1970 +0000 - summary: Added tag default for changeset f7b1eb17ad24 - - - $ cd ..
--- a/tests/test-586.t Sun Mar 13 02:29:11 2016 +0100 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 @@ -1,92 +0,0 @@ -Issue586: removing remote files after merge appears to corrupt the -dirstate - - $ hg init a - $ cd a - $ echo a > a - $ hg ci -Ama - adding a - - $ hg init ../b - $ cd ../b - $ echo b > b - $ hg ci -Amb - adding b - - $ hg pull -f ../a - pulling from ../a - searching for changes - warning: repository is unrelated - requesting all changes - adding changesets - adding manifests - adding file changes - added 1 changesets with 1 changes to 1 files (+1 heads) - (run 'hg heads' to see heads, 'hg merge' to merge) - $ hg merge - 1 files updated, 0 files merged, 0 files removed, 0 files unresolved - (branch merge, don't forget to commit) - $ hg rm -f a - $ hg ci -Amc - - $ hg st -A - C b - $ cd .. - -Issue1433: Traceback after two unrelated pull, two move, a merge and -a commit (related to issue586) - -create test repos - - $ hg init repoa - $ touch repoa/a - $ hg -R repoa ci -Am adda - adding a - - $ hg init repob - $ touch repob/b - $ hg -R repob ci -Am addb - adding b - - $ hg init repoc - $ cd repoc - $ hg pull ../repoa - pulling from ../repoa - requesting all changes - adding changesets - adding manifests - adding file changes - added 1 changesets with 1 changes to 1 files - (run 'hg update' to get a working copy) - $ hg update - 1 files updated, 0 files merged, 0 files removed, 0 files unresolved - $ mkdir tst - $ hg mv * tst - $ hg ci -m "import a in tst" - $ hg pull -f ../repob - pulling from ../repob - searching for changes - warning: repository is unrelated - requesting all changes - adding changesets - adding manifests - adding file changes - added 1 changesets with 1 changes to 1 files (+1 heads) - (run 'hg heads' to see heads, 'hg merge' to merge) - -merge both repos - - $ hg merge - 1 files updated, 0 files merged, 0 files removed, 0 files unresolved - (branch merge, don't forget to commit) - $ mkdir src - -move b content - - $ hg mv b src - $ 
hg ci -m "import b in src" - $ hg manifest - src/b - tst/a - - $ cd ..
--- a/tests/test-add.t Sun Mar 13 02:29:11 2016 +0100 +++ b/tests/test-add.t Tue Mar 15 14:10:46 2016 -0700 @@ -226,7 +226,7 @@ $ hg diff capsdir1/capsdir diff -r * CapsDir1/CapsDir/SubDir/Def.txt (glob) --- a/CapsDir1/CapsDir/SubDir/Def.txt Thu Jan 01 00:00:00 1970 +0000 - +++ b/CapsDir1/CapsDir/SubDir/Def.txt * +0000 (glob) + +++ b/CapsDir1/CapsDir/SubDir/Def.txt * (glob) @@ -1,1 +1,1 @@ -xyz +def
--- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/tests/test-automv.t Tue Mar 15 14:10:46 2016 -0700 @@ -0,0 +1,338 @@ +Tests for the automv extension; detect moved files at commit time. + + $ cat >> $HGRCPATH << EOF + > [extensions] + > automv= + > rebase= + > EOF + +Setup repo + + $ hg init repo + $ cd repo + +Test automv command for commit + + $ printf 'foo\nbar\nbaz\n' > a.txt + $ hg add a.txt + $ hg commit -m 'init repo with a' + +mv/rm/add + $ mv a.txt b.txt + $ hg rm a.txt + $ hg add b.txt + $ hg status -C + A b.txt + R a.txt + $ hg commit -m 'msg' + detected move of 1 files + $ hg status --change . -C + A b.txt + a.txt + R a.txt + $ hg up -r 0 + 1 files updated, 0 files merged, 1 files removed, 0 files unresolved + +mv/rm/add/modif + $ mv a.txt b.txt + $ hg rm a.txt + $ hg add b.txt + $ printf '\n' >> b.txt + $ hg status -C + A b.txt + R a.txt + $ hg commit -m 'msg' + detected move of 1 files + created new head + $ hg status --change . -C + A b.txt + a.txt + R a.txt + $ hg up -r 0 + 1 files updated, 0 files merged, 1 files removed, 0 files unresolved + +mv/rm/add/modif + $ mv a.txt b.txt + $ hg rm a.txt + $ hg add b.txt + $ printf '\nfoo\n' >> b.txt + $ hg status -C + A b.txt + R a.txt + $ hg commit -m 'msg' + created new head + $ hg status --change . -C + A b.txt + R a.txt + $ hg up -r 0 + 1 files updated, 0 files merged, 1 files removed, 0 files unresolved + +mv/rm/add/modif/changethreshold + $ mv a.txt b.txt + $ hg rm a.txt + $ hg add b.txt + $ printf '\nfoo\n' >> b.txt + $ hg status -C + A b.txt + R a.txt + $ hg commit --config automv.similarity='60' -m 'msg' + detected move of 1 files + created new head + $ hg status --change . -C + A b.txt + a.txt + R a.txt + $ hg up -r 0 + 1 files updated, 0 files merged, 1 files removed, 0 files unresolved + +mv + $ mv a.txt b.txt + $ hg status -C + ! a.txt + ? b.txt + $ hg commit -m 'msg' + nothing changed (1 missing files, see 'hg status') + [1] + $ hg status -C + ! a.txt + ? 
b.txt + $ hg revert -aqC + $ rm b.txt + +mv/rm/add/notincommitfiles + $ mv a.txt b.txt + $ hg rm a.txt + $ hg add b.txt + $ echo 'bar' > c.txt + $ hg add c.txt + $ hg status -C + A b.txt + A c.txt + R a.txt + $ hg commit c.txt -m 'msg' + created new head + $ hg status --change . -C + A c.txt + $ hg status -C + A b.txt + R a.txt + $ hg up -r 0 + 0 files updated, 0 files merged, 1 files removed, 0 files unresolved + $ hg rm a.txt + $ echo 'bar' > c.txt + $ hg add c.txt + $ hg commit -m 'msg' + detected move of 1 files + created new head + $ hg status --change . -C + A b.txt + a.txt + A c.txt + R a.txt + $ hg up -r 0 + 1 files updated, 0 files merged, 2 files removed, 0 files unresolved + +mv/rm/add/--no-automv + $ mv a.txt b.txt + $ hg rm a.txt + $ hg add b.txt + $ hg status -C + A b.txt + R a.txt + $ hg commit --no-automv -m 'msg' + created new head + $ hg status --change . -C + A b.txt + R a.txt + $ hg up -r 0 + 1 files updated, 0 files merged, 1 files removed, 0 files unresolved + +Test automv command for commit --amend + +mv/rm/add + $ echo 'c' > c.txt + $ hg add c.txt + $ hg commit -m 'revision to amend to' + created new head + $ mv a.txt b.txt + $ hg rm a.txt + $ hg add b.txt + $ hg status -C + A b.txt + R a.txt + $ hg commit --amend -m 'amended' + detected move of 1 files + saved backup bundle to $TESTTMP/repo/.hg/strip-backup/*-amend-backup.hg (glob) + $ hg status --change . -C + A b.txt + a.txt + A c.txt + R a.txt + $ hg up -r 0 + 1 files updated, 0 files merged, 2 files removed, 0 files unresolved + +mv/rm/add/modif + $ echo 'c' > c.txt + $ hg add c.txt + $ hg commit -m 'revision to amend to' + created new head + $ mv a.txt b.txt + $ hg rm a.txt + $ hg add b.txt + $ printf '\n' >> b.txt + $ hg status -C + A b.txt + R a.txt + $ hg commit --amend -m 'amended' + detected move of 1 files + saved backup bundle to $TESTTMP/repo/.hg/strip-backup/*-amend-backup.hg (glob) + $ hg status --change . 
-C + A b.txt + a.txt + A c.txt + R a.txt + $ hg up -r 0 + 1 files updated, 0 files merged, 2 files removed, 0 files unresolved + +mv/rm/add/modif + $ echo 'c' > c.txt + $ hg add c.txt + $ hg commit -m 'revision to amend to' + created new head + $ mv a.txt b.txt + $ hg rm a.txt + $ hg add b.txt + $ printf '\nfoo\n' >> b.txt + $ hg status -C + A b.txt + R a.txt + $ hg commit --amend -m 'amended' + saved backup bundle to $TESTTMP/repo/.hg/strip-backup/*-amend-backup.hg (glob) + $ hg status --change . -C + A b.txt + A c.txt + R a.txt + $ hg up -r 0 + 1 files updated, 0 files merged, 2 files removed, 0 files unresolved + +mv/rm/add/modif/changethreshold + $ echo 'c' > c.txt + $ hg add c.txt + $ hg commit -m 'revision to amend to' + created new head + $ mv a.txt b.txt + $ hg rm a.txt + $ hg add b.txt + $ printf '\nfoo\n' >> b.txt + $ hg status -C + A b.txt + R a.txt + $ hg commit --amend --config automv.similarity='60' -m 'amended' + detected move of 1 files + saved backup bundle to $TESTTMP/repo/.hg/strip-backup/*-amend-backup.hg (glob) + $ hg status --change . -C + A b.txt + a.txt + A c.txt + R a.txt + $ hg up -r 0 + 1 files updated, 0 files merged, 2 files removed, 0 files unresolved + +mv + $ echo 'c' > c.txt + $ hg add c.txt + $ hg commit -m 'revision to amend to' + created new head + $ mv a.txt b.txt + $ hg status -C + ! a.txt + ? b.txt + $ hg commit --amend -m 'amended' + saved backup bundle to $TESTTMP/repo/.hg/strip-backup/*-amend-backup.hg (glob) + $ hg status -C + ! a.txt + ? 
b.txt + $ hg up -Cr 0 + 1 files updated, 0 files merged, 1 files removed, 0 files unresolved + +mv/rm/add/notincommitfiles + $ echo 'c' > c.txt + $ hg add c.txt + $ hg commit -m 'revision to amend to' + created new head + $ mv a.txt b.txt + $ hg rm a.txt + $ hg add b.txt + $ echo 'bar' > d.txt + $ hg add d.txt + $ hg status -C + A b.txt + A d.txt + R a.txt + $ hg commit --amend -m 'amended' d.txt + saved backup bundle to $TESTTMP/repo/.hg/strip-backup/*-amend-backup.hg (glob) + $ hg status --change . -C + A c.txt + A d.txt + $ hg status -C + A b.txt + R a.txt + $ hg commit --amend -m 'amended' + detected move of 1 files + saved backup bundle to $TESTTMP/repo/.hg/strip-backup/*-amend-backup.hg (glob) + $ hg status --change . -C + A b.txt + a.txt + A c.txt + A d.txt + R a.txt + $ hg up -r 0 + 1 files updated, 0 files merged, 3 files removed, 0 files unresolved + +mv/rm/add/--no-automv + $ echo 'c' > c.txt + $ hg add c.txt + $ hg commit -m 'revision to amend to' + created new head + $ mv a.txt b.txt + $ hg rm a.txt + $ hg add b.txt + $ hg status -C + A b.txt + R a.txt + $ hg commit --amend -m 'amended' --no-automv + saved backup bundle to $TESTTMP/repo/.hg/strip-backup/*-amend-backup.hg (glob) + $ hg status --change . -C + A b.txt + A c.txt + R a.txt + $ hg up -r 0 + 1 files updated, 0 files merged, 2 files removed, 0 files unresolved + +mv/rm/commit/add/amend + $ echo 'c' > c.txt + $ hg add c.txt + $ hg commit -m 'revision to amend to' + created new head + $ mv a.txt b.txt + $ hg rm a.txt + $ hg status -C + R a.txt + ? b.txt + $ hg commit -m "removed a" + $ hg add b.txt + $ hg commit --amend -m 'amended' + saved backup bundle to $TESTTMP/repo/.hg/strip-backup/*-amend-backup.hg (glob) + $ hg status --change . -C + A b.txt + R a.txt + +error conditions + + $ cat >> $HGRCPATH << EOF + > [automv] + > similarity=110 + > EOF + $ hg commit -m 'revision to amend to' + abort: automv.similarity must be between 0 and 100 + [255]
--- a/tests/test-backout.t Sun Mar 13 02:29:11 2016 +0100 +++ b/tests/test-backout.t Tue Mar 15 14:10:46 2016 -0700 @@ -686,6 +686,7 @@ * version 2 records local: b71750c4b0fdf719734971e3ef90dbeab5919a2d other: a30dd8addae3ce71b8667868478542bc417439e6 + file extras: foo (ancestorlinknode = 91360952243723bd5b1138d5f26bd8c8564cb553) file: foo (record type "F", state "u", hash 0beec7b5ea3f0fdbc95d0dd47f3c5bc275da8a33) local path: foo (flags "") ancestor path: foo (node f89532f44c247a0e993d63e3a734dd781ab04708)
--- a/tests/test-bad-extension.t Sun Mar 13 02:29:11 2016 +0100 +++ b/tests/test-bad-extension.t Tue Mar 15 14:10:46 2016 -0700 @@ -1,11 +1,19 @@ $ echo 'raise Exception("bit bucket overflow")' > badext.py - $ abspath=`pwd`/badext.py + $ abspathexc=`pwd`/badext.py + + $ cat >baddocext.py <<EOF + > """ + > baddocext is bad + > """ + > EOF + $ abspathdoc=`pwd`/baddocext.py $ cat <<EOF >> $HGRCPATH > [extensions] > gpg = > hgext.gpg = - > badext = $abspath + > badext = $abspathexc + > baddocext = $abspathdoc > badext2 = > EOF @@ -23,6 +31,19 @@ Traceback (most recent call last): ImportError: No module named badext2 +names of extensions failed to load can be accessed via extensions.notloaded() + + $ cat <<EOF > showbadexts.py + > from mercurial import cmdutil, commands, extensions + > cmdtable = {} + > command = cmdutil.command(cmdtable) + > @command('showbadexts', norepo=True) + > def showbadexts(ui, *pats, **opts): + > ui.write('BADEXTS: %s\n' % ' '.join(sorted(extensions.notloaded()))) + > EOF + $ hg --config extensions.badexts=showbadexts.py showbadexts 2>&1 | grep '^BADEXTS' + BADEXTS: badext badext2 + show traceback for ImportError of hgext.name if debug is set (note that --debug option isn't applied yet when loading extensions) @@ -38,3 +59,12 @@ *** failed to import extension badext2: No module named badext2 Traceback (most recent call last): ImportError: No module named badext2 + +confirm that there's no crash when an extension's documentation is bad + + $ hg help --keyword baddocext + *** failed to import extension badext from $TESTTMP/badext.py: bit bucket overflow + *** failed to import extension badext2: No module named badext2 + Topics: + + extensions Using Additional Features
--- a/tests/test-bisect2.t Sun Mar 13 02:29:11 2016 +0100 +++ b/tests/test-bisect2.t Tue Mar 15 14:10:46 2016 -0700 @@ -244,6 +244,7 @@ $ hg up -C 0 files updated, 0 files merged, 0 files removed, 0 files unresolved + 3 other heads for branch "default" complex bisect test 1 # first bad rev is 9
--- a/tests/test-blackbox.t Sun Mar 13 02:29:11 2016 +0100 +++ b/tests/test-blackbox.t Tue Mar 15 14:10:46 2016 -0700 @@ -12,9 +12,10 @@ $ echo a > a $ hg add a - $ hg blackbox - 1970/01/01 00:00:00 bob (*)> add a (glob) - 1970/01/01 00:00:00 bob (*)> add a exited 0 after * seconds (glob) + $ hg blackbox --config blackbox.dirty=True + 1970/01/01 00:00:00 bob @0000000000000000000000000000000000000000 (5000)> add a + 1970/01/01 00:00:00 bob @0000000000000000000000000000000000000000 (5000)> add a exited 0 after * seconds (glob) + 1970/01/01 00:00:00 bob @0000000000000000000000000000000000000000+ (5000)> blackbox incoming change tracking @@ -43,22 +44,23 @@ adding file changes added 1 changesets with 1 changes to 1 files (run 'hg update' to get a working copy) - $ hg blackbox -l 5 - 1970/01/01 00:00:00 bob (*)> pull (glob) - 1970/01/01 00:00:00 bob (*)> updated served branch cache in ?.???? seconds (glob) - 1970/01/01 00:00:00 bob (*)> wrote served branch cache with 1 labels and 2 nodes (glob) - 1970/01/01 00:00:00 bob (*)> 1 incoming changes - new heads: d02f48003e62 (glob) - 1970/01/01 00:00:00 bob (*)> pull exited 0 after * seconds (glob) + $ hg blackbox -l 6 + 1970/01/01 00:00:00 bob @6563da9dcf87b1949716e38ff3e3dfaa3198eb06 (5000)> pull + 1970/01/01 00:00:00 bob @6563da9dcf87b1949716e38ff3e3dfaa3198eb06 (5000)> updated served branch cache in * seconds (glob) + 1970/01/01 00:00:00 bob @6563da9dcf87b1949716e38ff3e3dfaa3198eb06 (5000)> wrote served branch cache with 1 labels and 2 nodes + 1970/01/01 00:00:00 bob @6563da9dcf87b1949716e38ff3e3dfaa3198eb06 (5000)> 1 incoming changes - new heads: d02f48003e62 + 1970/01/01 00:00:00 bob @6563da9dcf87b1949716e38ff3e3dfaa3198eb06 (5000)> pull exited 0 after * seconds (glob) + 1970/01/01 00:00:00 bob @6563da9dcf87b1949716e38ff3e3dfaa3198eb06 (5000)> blackbox -l 6 we must not cause a failure if we cannot write to the log $ hg rollback repository tip rolled back to revision 1 (undo pull) -#if unix-permissions no-root - $ chmod 
000 .hg/blackbox.log + $ mv .hg/blackbox.log .hg/blackbox.log- + $ mkdir .hg/blackbox.log $ hg --debug incoming - warning: cannot write to blackbox.log: Permission denied + warning: cannot write to blackbox.log: * (glob) comparing with $TESTTMP/blackboxtest (glob) query 1; heads searching for changes @@ -77,7 +79,6 @@ c -#endif $ hg pull pulling from $TESTTMP/blackboxtest (glob) searching for changes @@ -87,14 +88,14 @@ added 1 changesets with 1 changes to 1 files (run 'hg update' to get a working copy) -a failure reading from the log is fine -#if unix-permissions no-root +a failure reading from the log is fatal + $ hg blackbox -l 3 - abort: Permission denied: $TESTTMP/blackboxtest2/.hg/blackbox.log + abort: *$TESTTMP/blackboxtest2/.hg/blackbox.log* (glob) [255] - $ chmod 600 .hg/blackbox.log -#endif + $ rmdir .hg/blackbox.log + $ mv .hg/blackbox.log- .hg/blackbox.log backup bundles get logged @@ -105,12 +106,13 @@ $ hg strip tip 0 files updated, 0 files merged, 1 files removed, 0 files unresolved saved backup bundle to $TESTTMP/blackboxtest2/.hg/strip-backup/*-backup.hg (glob) - $ hg blackbox -l 5 - 1970/01/01 00:00:00 bob (*)> strip tip (glob) - 1970/01/01 00:00:00 bob (*)> saved backup bundle to $TESTTMP/blackboxtest2/.hg/strip-backup/*-backup.hg (glob) - 1970/01/01 00:00:00 bob (*)> updated base branch cache in ?.???? 
seconds (glob) - 1970/01/01 00:00:00 bob (*)> wrote base branch cache with 1 labels and 2 nodes (glob) - 1970/01/01 00:00:00 bob (*)> strip tip exited 0 after * seconds (glob) + $ hg blackbox -l 6 + 1970/01/01 00:00:00 bob @73f6ee326b27d820b0472f1a825e3a50f3dc489b (5000)> strip tip + 1970/01/01 00:00:00 bob @6563da9dcf87b1949716e38ff3e3dfaa3198eb06 (5000)> saved backup bundle to $TESTTMP/blackboxtest2/.hg/strip-backup/73f6ee326b27-7612e004-backup.hg + 1970/01/01 00:00:00 bob @6563da9dcf87b1949716e38ff3e3dfaa3198eb06 (5000)> updated base branch cache in * seconds (glob) + 1970/01/01 00:00:00 bob @6563da9dcf87b1949716e38ff3e3dfaa3198eb06 (5000)> wrote base branch cache with 1 labels and 2 nodes + 1970/01/01 00:00:00 bob @6563da9dcf87b1949716e38ff3e3dfaa3198eb06 (5000)> strip tip exited 0 after * seconds (glob) + 1970/01/01 00:00:00 bob @6563da9dcf87b1949716e38ff3e3dfaa3198eb06 (5000)> blackbox -l 6 extension and python hooks - use the eol extension for a pythonhook @@ -121,12 +123,14 @@ $ hg update hooked 1 files updated, 0 files merged, 0 files removed, 0 files unresolved - $ hg blackbox -l 5 - 1970/01/01 00:00:00 bob (*)> update (glob) - 1970/01/01 00:00:00 bob (*)> writing .hg/cache/tags2-visible with 0 tags (glob) - 1970/01/01 00:00:00 bob (*)> pythonhook-preupdate: hgext.eol.preupdate finished in * seconds (glob) - 1970/01/01 00:00:00 bob (*)> exthook-update: echo hooked finished in * seconds (glob) - 1970/01/01 00:00:00 bob (*)> update exited 0 after * seconds (glob) + 1 other heads for branch "default" + $ hg blackbox -l 6 + 1970/01/01 00:00:00 bob @6563da9dcf87b1949716e38ff3e3dfaa3198eb06 (5000)> update + 1970/01/01 00:00:00 bob @6563da9dcf87b1949716e38ff3e3dfaa3198eb06 (5000)> writing .hg/cache/tags2-visible with 0 tags + 1970/01/01 00:00:00 bob @6563da9dcf87b1949716e38ff3e3dfaa3198eb06 (5000)> pythonhook-preupdate: hgext.eol.preupdate finished in * seconds (glob) + 1970/01/01 00:00:00 bob @d02f48003e62c24e2659d97d30f2a83abe5d5d51 (5000)> exthook-update: 
echo hooked finished in * seconds (glob) + 1970/01/01 00:00:00 bob @d02f48003e62c24e2659d97d30f2a83abe5d5d51 (5000)> update exited 0 after * seconds (glob) + 1970/01/01 00:00:00 bob @d02f48003e62c24e2659d97d30f2a83abe5d5d51 (5000)> blackbox -l 6 log rotation @@ -142,6 +146,64 @@ .hg/blackbox.log .hg/blackbox.log.1 .hg/blackbox.log.2 + $ cd .. + + $ hg init blackboxtest3 + $ cd blackboxtest3 + $ hg blackbox + 1970/01/01 00:00:00 bob @0000000000000000000000000000000000000000 (5000)> blackbox + $ mv .hg/blackbox.log .hg/blackbox.log- + $ mkdir .hg/blackbox.log + $ sed -e 's/\(.*test1.*\)/#\1/; s#\(.*commit2.*\)#os.rmdir(".hg/blackbox.log")\ + > os.rename(".hg/blackbox.log-", ".hg/blackbox.log")\ + > \1#' $TESTDIR/test-dispatch.py > ../test-dispatch.py + $ python $TESTDIR/blackbox-readonly-dispatch.py + running: add foo + result: 0 + running: commit -m commit1 -d 2000-01-01 foo + result: None + running: commit -m commit2 -d 2000-01-02 foo + result: None + running: log -r 0 + changeset: 0:0e4634943879 + user: test + date: Sat Jan 01 00:00:00 2000 +0000 + summary: commit1 + + result: None + running: log -r tip + changeset: 1:45589e459b2e + tag: tip + user: test + date: Sun Jan 02 00:00:00 2000 +0000 + summary: commit2 + + result: None + $ hg blackbox + 1970/01/01 00:00:00 bob @0e46349438790c460c5c9f7546bfcd39b267bbd2 (5000)> commit -m commit2 -d 2000-01-02 foo + 1970/01/01 00:00:00 bob @0e46349438790c460c5c9f7546bfcd39b267bbd2 (5000)> updated served branch cache in * seconds (glob) + 1970/01/01 00:00:00 bob @0e46349438790c460c5c9f7546bfcd39b267bbd2 (5000)> wrote served branch cache with 1 labels and 1 nodes + 1970/01/01 00:00:00 bob @45589e459b2edfbf3dbde7e01f611d2c1e7453d7 (5000)> commit -m commit2 -d 2000-01-02 foo exited 0 after * seconds (glob) + 1970/01/01 00:00:00 bob @45589e459b2edfbf3dbde7e01f611d2c1e7453d7 (5000)> log -r 0 + 1970/01/01 00:00:00 bob @45589e459b2edfbf3dbde7e01f611d2c1e7453d7 (5000)> writing .hg/cache/tags2-visible with 0 tags + 1970/01/01 00:00:00 
bob @45589e459b2edfbf3dbde7e01f611d2c1e7453d7 (5000)> log -r 0 exited 0 after * seconds (glob) + 1970/01/01 00:00:00 bob @45589e459b2edfbf3dbde7e01f611d2c1e7453d7 (5000)> log -r tip + 1970/01/01 00:00:00 bob @45589e459b2edfbf3dbde7e01f611d2c1e7453d7 (5000)> log -r tip exited 0 after * seconds (glob) + 1970/01/01 00:00:00 bob @45589e459b2edfbf3dbde7e01f611d2c1e7453d7 (5000)> blackbox + +Test log recursion from dirty status check + + $ cat > ../r.py <<EOF + > from mercurial import context, error, extensions + > x=[False] + > def status(orig, *args, **opts): + > args[0].repo().ui.log("broken", "recursion?") + > return orig(*args, **opts) + > def reposetup(ui, repo): + > extensions.wrapfunction(context.basectx, 'status', status) + > EOF + $ hg id --config extensions.x=../r.py --config blackbox.dirty=True + 45589e459b2e tip cleanup $ cd ..
--- a/tests/test-bookmarks-current.t Sun Mar 13 02:29:11 2016 +0100 +++ b/tests/test-bookmarks-current.t Tue Mar 15 14:10:46 2016 -0700 @@ -22,9 +22,10 @@ update to bookmark X + $ hg bookmarks + * X -1:000000000000 $ hg update X 0 files updated, 0 files merged, 0 files removed, 0 files unresolved - (activating bookmark X) list bookmarks @@ -202,3 +203,22 @@ Z $ hg log -T '{bookmarks % "{active}\n"}' -r Z Z + +test that updating to closed branch head also advances active bookmark + + $ hg commit --close-branch -m "closed" + $ hg update -q ".^1" + $ hg bookmark Y + $ hg bookmarks + X 3:4d6bd4bfb1ae + * Y 3:4d6bd4bfb1ae + Z 0:719295282060 + $ hg update + 0 files updated, 0 files merged, 0 files removed, 0 files unresolved + updating bookmark Y + $ hg bookmarks + X 3:4d6bd4bfb1ae + * Y 4:8fa964221e8e + Z 0:719295282060 + $ hg parents -q + 4:8fa964221e8e
--- a/tests/test-bookmarks-pushpull.t Sun Mar 13 02:29:11 2016 +0100 +++ b/tests/test-bookmarks-pushpull.t Tue Mar 15 14:10:46 2016 -0700 @@ -103,6 +103,29 @@ deleting remote bookmark W [1] +export the active bookmark + + $ hg bookmark V + $ hg push -B . ../a + pushing to ../a + searching for changes + no changes found + exporting bookmark V + [1] + +delete the bookmark + + $ hg book -d V + $ hg push -B V ../a + pushing to ../a + searching for changes + no changes found + deleting remote bookmark V + [1] + $ hg up foobar + 0 files updated, 0 files merged, 0 files removed, 0 files unresolved + (activating bookmark foobar) + push/pull name that doesn't exist $ hg push -B badname ../a @@ -680,12 +703,12 @@ pushing an existing but divergent bookmark with -B still requires -f - $ hg clone -q . r + $ hg clone -q . ../r $ hg up -q X $ echo 1 > f2 $ hg ci -qAml - $ cd r + $ cd ../r $ hg up -q X $ echo 2 > f2 $ hg ci -qAmr @@ -696,7 +719,7 @@ abort: push creates new remote head 54694f811df9 with bookmark 'X'! (pull and merge or see "hg help push" for details about pushing new heads) [255] - $ cd .. + $ cd ../addmarks Check summary output for incoming/outgoing bookmarks
--- a/tests/test-bookmarks.t Sun Mar 13 02:29:11 2016 +0100 +++ b/tests/test-bookmarks.t Tue Mar 15 14:10:46 2016 -0700 @@ -573,6 +573,7 @@ $ hg bookmark -r3 Y moving bookmark 'Y' forward from db815d6d32e6 $ cp -r ../cloned-bookmarks-update ../cloned-bookmarks-manual-update + $ cp -r ../cloned-bookmarks-update ../cloned-bookmarks-manual-update-with-divergence (manual version) @@ -598,7 +599,6 @@ $ hg -R ../cloned-bookmarks-manual-update update updating to active bookmark Y 1 files updated, 0 files merged, 0 files removed, 0 files unresolved - (activating bookmark Y) (all in one version) @@ -617,6 +617,33 @@ updating to active bookmark Y 1 files updated, 0 files merged, 0 files removed, 0 files unresolved +We warn about divergent during bare update to the active bookmark + + $ hg -R ../cloned-bookmarks-manual-update-with-divergence update Y + 0 files updated, 0 files merged, 0 files removed, 0 files unresolved + (activating bookmark Y) + $ hg -R ../cloned-bookmarks-manual-update-with-divergence bookmarks -r X2 Y@1 + $ hg -R ../cloned-bookmarks-manual-update-with-divergence bookmarks + X2 1:925d80f479bb + * Y 2:db815d6d32e6 + Y@1 1:925d80f479bb + Z 2:db815d6d32e6 + x y 2:db815d6d32e6 + $ hg -R ../cloned-bookmarks-manual-update-with-divergence pull + pulling from $TESTTMP + searching for changes + adding changesets + adding manifests + adding file changes + added 2 changesets with 2 changes to 2 files (+1 heads) + updating bookmark Y + updating bookmark Z + (run 'hg heads' to see heads, 'hg merge' to merge) + $ hg -R ../cloned-bookmarks-manual-update-with-divergence update + updating to active bookmark Y + 1 files updated, 0 files merged, 0 files removed, 0 files unresolved + 1 other divergent bookmarks for "Y" + test wrongly formated bookmark $ echo '' >> .hg/bookmarks @@ -706,34 +733,14 @@ date: Thu Jan 01 00:00:00 1970 +0000 summary: 0 -test non-linear update not clearing active bookmark - - $ hg up 1 - 1 files updated, 0 files merged, 2 files removed, 0 files 
unresolved - (leaving bookmark four) - $ hg book drop - $ hg up -C - 1 files updated, 0 files merged, 1 files removed, 0 files unresolved - (leaving bookmark drop) - $ hg sum - parent: 2:db815d6d32e6 - 2 - branch: default - bookmarks: should-end-on-two - commit: 2 unknown (clean) - update: 1 new changesets, 2 branch heads (merge) - phases: 4 draft - $ hg book - drop 1:925d80f479bb - four 3:9ba5f110a0b3 - should-end-on-two 2:db815d6d32e6 - $ hg book -d drop - $ hg up four - 1 files updated, 0 files merged, 0 files removed, 0 files unresolved - (activating bookmark four) no-op update doesn't deactive bookmarks + $ hg bookmarks + * four 3:9ba5f110a0b3 + should-end-on-two 2:db815d6d32e6 + $ hg up four + 0 files updated, 0 files merged, 0 files removed, 0 files unresolved $ hg up 0 files updated, 0 files merged, 0 files removed, 0 files unresolved $ hg sum
--- a/tests/test-branches.t Sun Mar 13 02:29:11 2016 +0100 +++ b/tests/test-branches.t Tue Mar 15 14:10:46 2016 -0700 @@ -544,15 +544,15 @@ 0060: e3 d4 9c 05 80 00 00 02 e2 3b 55 05 00 00 00 02 |.........;U.....| 0070: f8 94 c2 56 80 00 00 03 |...V....| -#if unix-permissions no-root no errors when revbranchcache is not writable $ echo >> .hg/cache/rbc-revs-v1 - $ chmod a-w .hg/cache/rbc-revs-v1 + $ mv .hg/cache/rbc-revs-v1 .hg/cache/rbc-revs-v1_ + $ mkdir .hg/cache/rbc-revs-v1 $ rm -f .hg/cache/branch* && hg head a -T '{rev}\n' 5 - $ chmod a+w .hg/cache/rbc-revs-v1 -#endif + $ rmdir .hg/cache/rbc-revs-v1 + $ mv .hg/cache/rbc-revs-v1_ .hg/cache/rbc-revs-v1 recovery from invalid cache revs file with trailing data $ echo >> .hg/cache/rbc-revs-v1
--- a/tests/test-bundle2-format.t Sun Mar 13 02:29:11 2016 +0100 +++ b/tests/test-bundle2-format.t Tue Mar 15 14:10:46 2016 -0700 @@ -9,8 +9,8 @@ $ cat > bundle2.py << EOF > """A small extension to test bundle2 implementation > - > Current bundle2 implementation is far too limited to be used in any core - > code. We still need to be able to test it while it grow up. + > This extension allows detailed testing of the various bundle2 API and + > behaviors. > """ > > import sys, os, gc
--- a/tests/test-check-code.t Sun Mar 13 02:29:11 2016 +0100 +++ b/tests/test-check-code.t Tue Mar 15 14:10:46 2016 -0700 @@ -8,7 +8,11 @@ $ hg locate | sed 's-\\-/-g' | > xargs "$check_code" --warnings --per-file=0 || false - Skipping hgext/zeroconf/Zeroconf.py it has no-che?k-code (glob) + Skipping hgext/fsmonitor/pywatchman/__init__.py it has no-che?k-code (glob) + Skipping hgext/fsmonitor/pywatchman/bser.c it has no-che?k-code (glob) + Skipping hgext/fsmonitor/pywatchman/capabilities.py it has no-che?k-code (glob) + Skipping hgext/fsmonitor/pywatchman/msc_stdint.h it has no-che?k-code (glob) + Skipping hgext/fsmonitor/pywatchman/pybser.py it has no-che?k-code (glob) Skipping i18n/polib.py it has no-che?k-code (glob) Skipping mercurial/httpclient/__init__.py it has no-che?k-code (glob) Skipping mercurial/httpclient/_readers.py it has no-che?k-code (glob)
--- a/tests/test-check-commit.t Sun Mar 13 02:29:11 2016 +0100 +++ b/tests/test-check-commit.t Tue Mar 15 14:10:46 2016 -0700 @@ -14,7 +14,7 @@ $ for node in `hg log --rev 'not public() and ::.' --template '{node|short}\n'`; do > hg export $node | contrib/check-commit > ${TESTTMP}/check-commit.out > if [ $? -ne 0 ]; then - > echo "Revision $node does not comply to rules" + > echo "Revision $node does not comply with rules" > echo '------------------------------------------------------' > cat ${TESTTMP}/check-commit.out > echo
--- a/tests/test-check-config.t Sun Mar 13 02:29:11 2016 +0100 +++ b/tests/test-check-config.t Tue Mar 15 14:10:46 2016 -0700 @@ -5,4 +5,4 @@ New errors are not allowed. Warnings are strongly discouraged. $ hg files "set:(**.py or **.txt) - tests/**" | sed 's|\\|/|g' | - > xargs python contrib/check-config.py + > python contrib/check-config.py
--- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/tests/test-check-module-imports.t Tue Mar 15 14:10:46 2016 -0700 @@ -0,0 +1,146 @@ +#require test-repo + + $ import_checker="$TESTDIR"/../contrib/import-checker.py + +Run the doctests from the import checker, and make sure +it's working correctly. + $ TERM=dumb + $ export TERM + $ python -m doctest $import_checker + +Run additional tests for the import checker + + $ mkdir testpackage + + $ cat > testpackage/multiple.py << EOF + > from __future__ import absolute_import + > import os, sys + > EOF + + $ cat > testpackage/unsorted.py << EOF + > from __future__ import absolute_import + > import sys + > import os + > EOF + + $ cat > testpackage/stdafterlocal.py << EOF + > from __future__ import absolute_import + > from . import unsorted + > import os + > EOF + + $ cat > testpackage/requirerelative.py << EOF + > from __future__ import absolute_import + > import testpackage.unsorted + > EOF + + $ cat > testpackage/importalias.py << EOF + > from __future__ import absolute_import + > import ui + > EOF + + $ cat > testpackage/relativestdlib.py << EOF + > from __future__ import absolute_import + > from .. import os + > EOF + + $ cat > testpackage/symbolimport.py << EOF + > from __future__ import absolute_import + > from .unsorted import foo + > EOF + + $ cat > testpackage/latesymbolimport.py << EOF + > from __future__ import absolute_import + > from . import unsorted + > from mercurial.node import hex + > EOF + + $ cat > testpackage/multiplegroups.py << EOF + > from __future__ import absolute_import + > from . import unsorted + > from . import more + > EOF + + $ mkdir testpackage/subpackage + $ cat > testpackage/subpackage/levelpriority.py << EOF + > from __future__ import absolute_import + > from . import foo + > from .. import parent + > EOF + + $ touch testpackage/subpackage/foo.py + $ cat > testpackage/subpackage/__init__.py << EOF + > from __future__ import absolute_import + > from . 
import levelpriority # should not cause cycle + > EOF + + $ cat > testpackage/subpackage/localimport.py << EOF + > from __future__ import absolute_import + > from . import foo + > def bar(): + > # should not cause "higher-level import should come first" + > from .. import unsorted + > # but other errors should be detected + > from .. import more + > import testpackage.subpackage.levelpriority + > EOF + + $ cat > testpackage/importmodulefromsub.py << EOF + > from __future__ import absolute_import + > from .subpackage import foo # not a "direct symbol import" + > EOF + + $ cat > testpackage/importsymbolfromsub.py << EOF + > from __future__ import absolute_import + > from .subpackage import foo, nonmodule + > EOF + + $ cat > testpackage/sortedentries.py << EOF + > from __future__ import absolute_import + > from . import ( + > foo, + > bar, + > ) + > EOF + + $ cat > testpackage/importfromalias.py << EOF + > from __future__ import absolute_import + > from . import ui + > EOF + + $ cat > testpackage/importfromrelative.py << EOF + > from __future__ import absolute_import + > from testpackage.unsorted import foo + > EOF + + $ python "$import_checker" testpackage/*.py testpackage/subpackage/*.py + testpackage/importalias.py:2: ui module must be "as" aliased to uimod + testpackage/importfromalias.py:2: ui from testpackage must be "as" aliased to uimod + testpackage/importfromrelative.py:2: import should be relative: testpackage.unsorted + testpackage/importfromrelative.py:2: direct symbol import foo from testpackage.unsorted + testpackage/importsymbolfromsub.py:2: direct symbol import nonmodule from testpackage.subpackage + testpackage/latesymbolimport.py:3: symbol import follows non-symbol import: mercurial.node + testpackage/multiple.py:2: multiple imported names: os, sys + testpackage/multiplegroups.py:3: multiple "from . 
import" statements + testpackage/relativestdlib.py:2: relative import of stdlib module + testpackage/requirerelative.py:2: import should be relative: testpackage.unsorted + testpackage/sortedentries.py:2: imports from testpackage not lexically sorted: bar < foo + testpackage/stdafterlocal.py:3: stdlib import "os" follows local import: testpackage + testpackage/subpackage/levelpriority.py:3: higher-level import should come first: testpackage + testpackage/subpackage/localimport.py:7: multiple "from .. import" statements + testpackage/subpackage/localimport.py:8: import should be relative: testpackage.subpackage.levelpriority + testpackage/symbolimport.py:2: direct symbol import foo from testpackage.unsorted + testpackage/unsorted.py:3: imports not lexically sorted: os < sys + [1] + + $ cd "$TESTDIR"/.. + +There are a handful of cases here that require renaming a module so it +doesn't overlap with a stdlib module name. There are also some cycles +here that we should still endeavor to fix, and some cycles will be +hidden by deduplication algorithm in the cycle detector, so fixing +these may expose other cycles. + + $ hg locate 'mercurial/**.py' 'hgext/**.py' | sed 's-\\-/-g' | python "$import_checker" - + Import cycle: hgext.largefiles.basestore -> hgext.largefiles.localstore -> hgext.largefiles.basestore + [1]
--- a/tests/test-check-py3-compat.t Sun Mar 13 02:29:11 2016 +0100 +++ b/tests/test-check-py3-compat.t Tue Mar 15 14:10:46 2016 -0700 @@ -3,62 +3,25 @@ $ cd "$TESTDIR"/.. $ hg files 'set:(**.py)' | sed 's|\\|/|g' | xargs python contrib/check-py3-compat.py - contrib/casesmash.py not using absolute_import - contrib/check-code.py not using absolute_import - contrib/check-code.py requires print_function - contrib/check-config.py not using absolute_import - contrib/check-config.py requires print_function - contrib/debugcmdserver.py not using absolute_import - contrib/debugcmdserver.py requires print_function - contrib/debugshell.py not using absolute_import - contrib/fixpax.py not using absolute_import - contrib/fixpax.py requires print_function - contrib/hgclient.py not using absolute_import - contrib/hgclient.py requires print_function - contrib/hgfixes/fix_bytes.py not using absolute_import - contrib/hgfixes/fix_bytesmod.py not using absolute_import - contrib/hgfixes/fix_leftover_imports.py not using absolute_import contrib/import-checker.py not using absolute_import contrib/import-checker.py requires print_function - contrib/memory.py not using absolute_import contrib/perf.py not using absolute_import contrib/python-hook-examples.py not using absolute_import contrib/revsetbenchmarks.py not using absolute_import contrib/revsetbenchmarks.py requires print_function - contrib/showstack.py not using absolute_import contrib/synthrepo.py not using absolute_import - contrib/win32/hgwebdir_wsgi.py not using absolute_import doc/check-seclevel.py not using absolute_import doc/gendoc.py not using absolute_import doc/hgmanpage.py not using absolute_import - hgext/__init__.py not using absolute_import - hgext/acl.py not using absolute_import - hgext/blackbox.py not using absolute_import - hgext/bugzilla.py not using absolute_import - hgext/censor.py not using absolute_import - hgext/children.py not using absolute_import - hgext/churn.py not using absolute_import - 
hgext/clonebundles.py not using absolute_import hgext/color.py not using absolute_import - hgext/convert/__init__.py not using absolute_import - hgext/convert/bzr.py not using absolute_import - hgext/convert/common.py not using absolute_import - hgext/convert/convcmd.py not using absolute_import - hgext/convert/cvs.py not using absolute_import - hgext/convert/cvsps.py not using absolute_import - hgext/convert/darcs.py not using absolute_import - hgext/convert/filemap.py not using absolute_import - hgext/convert/git.py not using absolute_import - hgext/convert/gnuarch.py not using absolute_import - hgext/convert/hg.py not using absolute_import - hgext/convert/monotone.py not using absolute_import - hgext/convert/p4.py not using absolute_import - hgext/convert/subversion.py not using absolute_import - hgext/convert/transport.py not using absolute_import hgext/eol.py not using absolute_import hgext/extdiff.py not using absolute_import hgext/factotum.py not using absolute_import hgext/fetch.py not using absolute_import + hgext/fsmonitor/pywatchman/__init__.py not using absolute_import + hgext/fsmonitor/pywatchman/__init__.py requires print_function + hgext/fsmonitor/pywatchman/capabilities.py not using absolute_import + hgext/fsmonitor/pywatchman/pybser.py not using absolute_import hgext/gpg.py not using absolute_import hgext/graphlog.py not using absolute_import hgext/hgcia.py not using absolute_import @@ -66,7 +29,6 @@ hgext/highlight/__init__.py not using absolute_import hgext/highlight/highlight.py not using absolute_import hgext/histedit.py not using absolute_import - hgext/keyword.py not using absolute_import hgext/largefiles/__init__.py not using absolute_import hgext/largefiles/basestore.py not using absolute_import hgext/largefiles/lfcommands.py not using absolute_import @@ -79,27 +41,11 @@ hgext/largefiles/uisetup.py not using absolute_import hgext/largefiles/wirestore.py not using absolute_import hgext/mq.py not using absolute_import - hgext/notify.py not 
using absolute_import - hgext/pager.py not using absolute_import - hgext/patchbomb.py not using absolute_import - hgext/purge.py not using absolute_import hgext/rebase.py not using absolute_import - hgext/record.py not using absolute_import - hgext/relink.py not using absolute_import - hgext/schemes.py not using absolute_import hgext/share.py not using absolute_import - hgext/shelve.py not using absolute_import - hgext/strip.py not using absolute_import - hgext/transplant.py not using absolute_import - hgext/win32mbcs.py not using absolute_import hgext/win32text.py not using absolute_import - hgext/zeroconf/Zeroconf.py not using absolute_import - hgext/zeroconf/Zeroconf.py requires print_function - hgext/zeroconf/__init__.py not using absolute_import i18n/check-translation.py not using absolute_import i18n/polib.py not using absolute_import - mercurial/cmdutil.py not using absolute_import - mercurial/commands.py not using absolute_import setup.py not using absolute_import tests/filterpyflakes.py requires print_function tests/generate-working-copy-states.py requires print_function @@ -132,8 +78,6 @@ tests/test-context.py requires print_function tests/test-demandimport.py not using absolute_import tests/test-demandimport.py requires print_function - tests/test-dispatch.py not using absolute_import - tests/test-dispatch.py requires print_function tests/test-doctest.py not using absolute_import tests/test-duplicateoptions.py not using absolute_import tests/test-duplicateoptions.py requires print_function
--- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/tests/test-chg.t Tue Mar 15 14:10:46 2016 -0700 @@ -0,0 +1,12 @@ +init repo + + $ hg init foo + $ cd foo + +ill-formed config + + $ hg status + $ echo '=brokenconfig' >> $HGRCPATH + $ hg status + hg: parse error at * (glob) + [255]
--- a/tests/test-clone.t Sun Mar 13 02:29:11 2016 +0100 +++ b/tests/test-clone.t Tue Mar 15 14:10:46 2016 -0700 @@ -774,11 +774,11 @@ adding manifests adding file changes added 3 changesets with 3 changes to 1 files - updating working directory - 1 files updated, 0 files merged, 0 files removed, 0 files unresolved searching for changes no changes found adding remote bookmark bookA + updating working directory + 1 files updated, 0 files merged, 0 files removed, 0 files unresolved The shared repo should have been created @@ -804,8 +804,6 @@ $ hg --config share.pool=share clone source1b share-dest1b (sharing from existing pooled repository b5f04eac9d8f7a6a9fcb070243cccea7dc5ea0c1) - updating working directory - 1 files updated, 0 files merged, 0 files removed, 0 files unresolved searching for changes adding changesets adding manifests @@ -813,6 +811,8 @@ added 4 changesets with 4 changes to 1 files (+4 heads) adding remote bookmark head1 adding remote bookmark head2 + updating working directory + 1 files updated, 0 files merged, 0 files removed, 0 files unresolved $ ls share b5f04eac9d8f7a6a9fcb070243cccea7dc5ea0c1 @@ -831,6 +831,17 @@ $ hg -R share-dest1b config paths.default $TESTTMP/source1a (glob) +Checked out revision should be head of default branch + + $ hg -R share-dest1b log -r . 
+ changeset: 4:99f71071f117 + bookmark: head2 + parent: 0:b5f04eac9d8f + user: test + date: Thu Jan 01 00:00:00 1970 +0000 + summary: head2 + + Clone from unrelated repo should result in new share $ hg --config share.pool=share clone source2 share-dest2 @@ -840,10 +851,10 @@ adding manifests adding file changes added 2 changesets with 2 changes to 1 files + searching for changes + no changes found updating working directory 1 files updated, 0 files merged, 0 files removed, 0 files unresolved - searching for changes - no changes found $ ls share 22aeff664783fd44c6d9b435618173c118c3448e @@ -858,11 +869,11 @@ adding manifests adding file changes added 3 changesets with 3 changes to 1 files - updating working directory - 1 files updated, 0 files merged, 0 files removed, 0 files unresolved searching for changes no changes found adding remote bookmark bookA + updating working directory + 1 files updated, 0 files merged, 0 files removed, 0 files unresolved $ ls shareremote 195bb1fcdb595c14a6c13e0269129ed78f6debde @@ -874,12 +885,12 @@ adding manifests adding file changes added 6 changesets with 6 changes to 1 files (+4 heads) - updating working directory - 1 files updated, 0 files merged, 0 files removed, 0 files unresolved searching for changes no changes found adding remote bookmark head1 adding remote bookmark head2 + updating working directory + 1 files updated, 0 files merged, 0 files removed, 0 files unresolved $ ls shareremote 195bb1fcdb595c14a6c13e0269129ed78f6debde @@ -893,10 +904,10 @@ adding manifests adding file changes added 2 changesets with 2 changes to 1 files + no changes found + adding remote bookmark head1 updating working directory 1 files updated, 0 files merged, 0 files removed, 0 files unresolved - no changes found - adding remote bookmark head1 $ hg -R share-1arev log -G @ changeset: 1:4a8dc1ab4c13 @@ -916,8 +927,6 @@ $ hg --config share.pool=sharerevs clone -r 99f71071f117 source1b share-1brev (sharing from existing pooled repository 
b5f04eac9d8f7a6a9fcb070243cccea7dc5ea0c1) - updating working directory - 1 files updated, 0 files merged, 0 files removed, 0 files unresolved searching for changes adding changesets adding manifests @@ -925,9 +934,11 @@ added 1 changesets with 1 changes to 1 files (+1 heads) adding remote bookmark head1 adding remote bookmark head2 + updating working directory + 1 files updated, 0 files merged, 0 files removed, 0 files unresolved $ hg -R share-1brev log -G - o changeset: 2:99f71071f117 + @ changeset: 2:99f71071f117 | bookmark: head2 | tag: tip | parent: 0:b5f04eac9d8f @@ -935,7 +946,7 @@ | date: Thu Jan 01 00:00:00 1970 +0000 | summary: head2 | - | @ changeset: 1:4a8dc1ab4c13 + | o changeset: 1:4a8dc1ab4c13 |/ bookmark: head1 | user: test | date: Thu Jan 01 00:00:00 1970 +0000 @@ -955,9 +966,9 @@ adding manifests adding file changes added 2 changesets with 2 changes to 1 files + no changes found updating working directory 1 files updated, 0 files merged, 0 files removed, 0 files unresolved - no changes found $ hg -R share-1bbranch1 log -G o changeset: 1:5f92a6c1a1b1 @@ -975,13 +986,13 @@ $ hg --config share.pool=sharebranch clone -b branch2 source1b share-1bbranch2 (sharing from existing pooled repository b5f04eac9d8f7a6a9fcb070243cccea7dc5ea0c1) - updating working directory - 1 files updated, 0 files merged, 0 files removed, 0 files unresolved searching for changes adding changesets adding manifests adding file changes added 1 changesets with 1 changes to 1 files (+1 heads) + updating working directory + 1 files updated, 0 files merged, 0 files removed, 0 files unresolved $ hg -R share-1bbranch2 log -G o changeset: 2:6bacf4683960 @@ -1056,18 +1067,18 @@ adding manifests adding file changes added 3 changesets with 3 changes to 1 files - updating working directory - 1 files updated, 0 files merged, 0 files removed, 0 files unresolved searching for changes no changes found adding remote bookmark bookA + updating working directory + 1 files updated, 0 files merged, 0 
files removed, 0 files unresolved $ cat race2.log (sharing from existing pooled repository b5f04eac9d8f7a6a9fcb070243cccea7dc5ea0c1) - updating working directory - 1 files updated, 0 files merged, 0 files removed, 0 files unresolved waiting for lock on repository share-destrace2 held by * (glob) got lock after \d+ seconds (re) searching for changes no changes found adding remote bookmark bookA + updating working directory + 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
--- a/tests/test-clonebundles.t Sun Mar 13 02:29:11 2016 +0100 +++ b/tests/test-clonebundles.t Tue Mar 15 14:10:46 2016 -0700 @@ -52,7 +52,7 @@ $ echo 'http://does.not.exist/bundle.hg' > server/.hg/clonebundles.manifest $ hg clone http://localhost:$HGPORT 404-url applying clone bundle from http://does.not.exist/bundle.hg - error fetching bundle: (.* not known|getaddrinfo failed) (re) + error fetching bundle: (.* not known|getaddrinfo failed|No address associated with hostname) (re) abort: error applying bundle (if this error persists, consider contacting the server operator or disable clone bundles via "--config ui.clonebundles=false") [255]
--- a/tests/test-command-template.t Sun Mar 13 02:29:11 2016 +0100 +++ b/tests/test-command-template.t Tue Mar 15 14:10:46 2016 -0700 @@ -1018,7 +1018,7 @@ $ hg log --style ./t abort: template file ./q: Permission denied [255] - $ rm q + $ rm -f q #endif Include works: @@ -2790,6 +2790,14 @@ $ hg log -R latesttag -r tip --template 'modified files: {file_mods % " {file}\n"}\n' modified files: .hgtags + + $ hg log -R latesttag -r tip -T '{rev % "a"}\n' + hg: parse error: keyword 'rev' is not iterable + [255] + $ hg log -R latesttag -r tip -T '{get(extras, "unknown") % "a"}\n' + hg: parse error: None is not iterable + [255] + Test the sub function of templating for expansion: $ hg log -R latesttag -r 10 --template '{sub("[0-9]", "x", "{rev}")}\n' @@ -3148,6 +3156,9 @@ text.1:be wrapped text.1:desc to be text.1:wrapped (no-eol) + $ hg log -l1 -T '{fill(desc, date, "", "")}\n' + hg: parse error: fill expects an integer width + [255] $ hg log -l 1 --template '{sub(r"[0-9]", "-", author)}' {node|short} (no-eol) @@ -3167,6 +3178,18 @@ $ hg log --color=always -l 1 --template '{label("text.{rev}", "text\n")}' \x1b[0;32mtext\x1b[0m (esc) +color effect can be specified without quoting: + + $ hg log --color=always -l 1 --template '{label(red, "text\n")}' + \x1b[0;31mtext\x1b[0m (esc) + +label should be no-op if color is disabled: + + $ hg log --color=never -l 1 --template '{label(red, "text\n")}' + text + $ hg log --config extensions.color=! 
-l 1 --template '{label(red, "text\n")}' + text + Test branches inside if statement: $ hg log -r 0 --template '{if(branches, "yes", "no")}\n' @@ -3176,6 +3199,8 @@ $ hg log -r 0 --template '{get(extras, "branch")}\n' default + $ hg log -r 0 --template '{get(extras, "br{"anch"}")}\n' + default $ hg log -r 0 --template '{get(files, "should_fail")}\n' hg: parse error: get() expects a dict as first argument [255] @@ -3214,6 +3239,12 @@ $ hg log --template '{node|shortest}\n' -l1 e777 + $ hg log -r 0 -T '{shortest(node, "1{"0"}")}\n' + f7769ec2ab + $ hg log -r 0 -T '{shortest(node, "not an int")}\n' + hg: parse error: shortest() expects an integer minlength + [255] + Test pad function $ hg log --template '{pad(rev, 20)} {author|user}\n' @@ -3239,6 +3270,14 @@ $ hg log -r 0 -T '{pad(r"\{rev}", 10)} {author|user}\n' \{rev} test +Test width argument passed to pad function + + $ hg log -r 0 -T '{pad(rev, "1{"0"}")} {author|user}\n' + 0 test + $ hg log -r 0 -T '{pad(rev, "not an int")}\n' + hg: parse error: pad() expects an integer width + [255] + Test ifcontains function $ hg log --template '{rev} {ifcontains(rev, "2 two 0", "is in the string", "is not")}\n' @@ -3246,11 +3285,21 @@ 1 is not 0 is in the string + $ hg log -T '{rev} {ifcontains(rev, "2 two{" 0"}", "is in the string", "is not")}\n' + 2 is in the string + 1 is not + 0 is in the string + $ hg log --template '{rev} {ifcontains("a", file_adds, "added a", "did not add a")}\n' 2 did not add a 1 did not add a 0 added a + $ hg log --debug -T '{rev}{ifcontains(1, parents, " is parent of 1")}\n' + 2 is parent of 1 + 1 + 0 + Test revset function $ hg log --template '{rev} {ifcontains(rev, revset("."), "current rev", "not current rev")}\n' @@ -3293,13 +3342,21 @@ $ hg log --template '{revset("TIP"|lower)}\n' -l1 2 - a list template is evaluated for each item of revset + $ hg log -T '{revset("%s", "t{"ip"}")}\n' -l1 + 2 + + a list template is evaluated for each item of revset/parents $ hg log -T '{rev} p: {revset("p1(%s)", 
rev) % "{rev}:{node|short}"}\n' 2 p: 1:bcc7ff960b8e 1 p: 0:f7769ec2ab97 0 p: + $ hg log --debug -T '{rev} p:{parents % " {rev}:{node|short}"}\n' + 2 p: 1:bcc7ff960b8e -1:000000000000 + 1 p: 0:f7769ec2ab97 -1:000000000000 + 0 p: -1:000000000000 -1:000000000000 + therefore, 'revcache' should be recreated for each rev $ hg log -T '{rev} {file_adds}\np {revset("p1(%s)", rev) % "{file_adds}"}\n' @@ -3310,6 +3367,21 @@ 0 a p + $ hg log --debug -T '{rev} {file_adds}\np {parents % "{file_adds}"}\n' + 2 aa b + p + 1 + p a + 0 a + p + +a revset item must be evaluated as an integer revision, not an offset from tip + + $ hg log -l 1 -T '{revset("null") % "{rev}:{node|short}"}\n' + -1:000000000000 + $ hg log -l 1 -T '{revset("%s", "null") % "{rev}:{node|short}"}\n' + -1:000000000000 + Test active bookmark templating $ hg book foo @@ -3535,25 +3607,47 @@ hg: parse error: invalid \x escape [255] +json filter should escape HTML tags so that the output can be embedded in hgweb: + + $ hg log -T "{'<foo@example.org>'|json}\n" -R a -l1 + "\u003cfoo@example.org\u003e" + Set up repository for non-ascii encoding tests: $ hg init nonascii $ cd nonascii $ python <<EOF + > open('latin1', 'w').write('\xe9') > open('utf-8', 'w').write('\xc3\xa9') > EOF $ HGENCODING=utf-8 hg branch -q `cat utf-8` - $ HGENCODING=utf-8 hg ci -qAm 'non-ascii branch' utf-8 + $ HGENCODING=utf-8 hg ci -qAm "non-ascii branch: `cat utf-8`" utf-8 json filter should try round-trip conversion to utf-8: $ HGENCODING=ascii hg log -T "{branch|json}\n" -r0 "\u00e9" - -json filter should not abort if it can't decode bytes: -(not sure the current behavior is right; we might want to use utf-8b encoding?) 
+ $ HGENCODING=ascii hg log -T "{desc|json}\n" -r0 + "non-ascii branch: \u00e9" + +json filter takes input as utf-8b: $ HGENCODING=ascii hg log -T "{'`cat utf-8`'|json}\n" -l1 - "\ufffd\ufffd" + "\u00e9" + $ HGENCODING=ascii hg log -T "{'`cat latin1`'|json}\n" -l1 + "\udce9" + +utf8 filter: + + $ HGENCODING=ascii hg log -T "round-trip: {branch|utf8|hex}\n" -r0 + round-trip: c3a9 + $ HGENCODING=latin1 hg log -T "decoded: {'`cat latin1`'|utf8|hex}\n" -l1 + decoded: c3a9 + $ HGENCODING=ascii hg log -T "replaced: {'`cat latin1`'|utf8|hex}\n" -l1 + abort: decoding near * (glob) + [255] + $ hg log -T "invalid type: {rev|utf8}\n" -r0 + abort: template filter 'utf8' is not compatible with keyword 'rev' + [255] $ cd ..
--- a/tests/test-commandserver.t Sun Mar 13 02:29:11 2016 +0100 +++ b/tests/test-commandserver.t Tue Mar 15 14:10:46 2016 -0700 @@ -717,6 +717,35 @@ listening at .hg/server.sock abort: unknown command unknowncommand killed! + $ rm .hg/server.log + + if server crashed before hello, traceback will be sent to 'e' channel as + last ditch: + + $ cat <<EOF >> .hg/hgrc + > [cmdserver] + > log = inexistent/path.log + > EOF + >>> from hgclient import unixserver, readchannel, check + >>> server = unixserver('.hg/server.sock', '.hg/server.log') + >>> def earlycrash(conn): + ... while True: + ... try: + ... ch, data = readchannel(conn) + ... if not data.startswith(' '): + ... print '%c, %r' % (ch, data) + ... except EOFError: + ... break + >>> check(earlycrash, server.connect) + e, 'Traceback (most recent call last):\n' + e, "IOError: *" (glob) + >>> server.shutdown() + + $ cat .hg/server.log | grep -v '^ ' + listening at .hg/server.sock + Traceback (most recent call last): + IOError: * (glob) + killed! #endif #if no-unix-socket @@ -725,3 +754,133 @@ [255] #endif + + $ cd .. + +Test that accessing to invalid changelog cache is avoided at +subsequent operations even if repo object is reused even after failure +of transaction (see 0a7610758c42 also) + +"hg log" after failure of transaction is needed to detect invalid +cache in repoview: this can't detect by "hg verify" only. + +Combination of "finalization" and "empty-ness of changelog" (2 x 2 = +4) are tested, because '00changelog.i' are differently changed in each +cases. 
+ + $ cat > $TESTTMP/failafterfinalize.py <<EOF + > # extension to abort transaction after finalization forcibly + > from mercurial import commands, error, extensions, lock as lockmod + > def fail(tr): + > raise error.Abort('fail after finalization') + > def reposetup(ui, repo): + > class failrepo(repo.__class__): + > def commitctx(self, ctx, error=False): + > if self.ui.configbool('failafterfinalize', 'fail'): + > # 'sorted()' by ASCII code on category names causes + > # invoking 'fail' after finalization of changelog + > # using "'cl-%i' % id(self)" as category name + > self.currenttransaction().addfinalize('zzzzzzzz', fail) + > return super(failrepo, self).commitctx(ctx, error) + > repo.__class__ = failrepo + > EOF + + $ hg init repo3 + $ cd repo3 + + $ cat <<EOF >> $HGRCPATH + > [ui] + > logtemplate = {rev} {desc|firstline} ({files})\n + > + > [extensions] + > failafterfinalize = $TESTTMP/failafterfinalize.py + > EOF + +- test failure with "empty changelog" + + $ echo foo > foo + $ hg add foo + +(failuer before finalization) + + >>> from hgclient import readchannel, runcommand, check + >>> @check + ... def abort(server): + ... readchannel(server) + ... runcommand(server, ['commit', + ... '--config', 'hooks.pretxncommit=false', + ... '-mfoo']) + ... runcommand(server, ['log']) + ... runcommand(server, ['verify', '-q']) + *** runcommand commit --config hooks.pretxncommit=false -mfoo + transaction abort! + rollback completed + abort: pretxncommit hook exited with status 1 + [255] + *** runcommand log + *** runcommand verify -q + +(failuer after finalization) + + >>> from hgclient import readchannel, runcommand, check + >>> @check + ... def abort(server): + ... readchannel(server) + ... runcommand(server, ['commit', + ... '--config', 'failafterfinalize.fail=true', + ... '-mfoo']) + ... runcommand(server, ['log']) + ... runcommand(server, ['verify', '-q']) + *** runcommand commit --config failafterfinalize.fail=true -mfoo + transaction abort! 
+ rollback completed + abort: fail after finalization + [255] + *** runcommand log + *** runcommand verify -q + +- test failure with "not-empty changelog" + + $ echo bar > bar + $ hg add bar + $ hg commit -mbar bar + +(failure before finalization) + + >>> from hgclient import readchannel, runcommand, check + >>> @check + ... def abort(server): + ... readchannel(server) + ... runcommand(server, ['commit', + ... '--config', 'hooks.pretxncommit=false', + ... '-mfoo', 'foo']) + ... runcommand(server, ['log']) + ... runcommand(server, ['verify', '-q']) + *** runcommand commit --config hooks.pretxncommit=false -mfoo foo + transaction abort! + rollback completed + abort: pretxncommit hook exited with status 1 + [255] + *** runcommand log + 0 bar (bar) + *** runcommand verify -q + +(failure after finalization) + + >>> from hgclient import readchannel, runcommand, check + >>> @check + ... def abort(server): + ... readchannel(server) + ... runcommand(server, ['commit', + ... '--config', 'failafterfinalize.fail=true', + ... '-mfoo', 'foo']) + ... runcommand(server, ['log']) + ... runcommand(server, ['verify', '-q']) + *** runcommand commit --config failafterfinalize.fail=true -mfoo foo + transaction abort! + rollback completed + abort: fail after finalization + [255] + *** runcommand log + 0 bar (bar) + *** runcommand verify -q
--- a/tests/test-completion.t Sun Mar 13 02:29:11 2016 +0100 +++ b/tests/test-completion.t Tue Mar 15 14:10:46 2016 -0700 @@ -158,7 +158,7 @@ --config --cwd --daemon - --daemon-pipefds + --daemon-postexec --debug --debugger --encoding @@ -218,7 +218,7 @@ pull: update, force, rev, bookmark, branch, ssh, remotecmd, insecure push: force, rev, bookmark, branch, new-branch, ssh, remotecmd, insecure remove: after, force, subrepos, include, exclude - serve: accesslog, daemon, daemon-pipefds, errorlog, port, address, prefix, name, web-conf, webdir-conf, pid-file, stdio, cmdserver, templates, style, ipv6, certificate + serve: accesslog, daemon, daemon-postexec, errorlog, port, address, prefix, name, web-conf, webdir-conf, pid-file, stdio, cmdserver, templates, style, ipv6, certificate status: all, modified, added, removed, deleted, clean, unknown, ignored, no-status, copies, print0, rev, change, include, exclude, subrepos, template summary: remote update: clean, check, date, rev, tool @@ -254,7 +254,7 @@ debugignore: debugindex: changelog, manifest, dir, format debugindexdot: changelog, manifest, dir - debuginstall: + debuginstall: template debugknown: debuglabelcomplete: debuglocks: force-lock, force-wlock
--- a/tests/test-conflict.t Sun Mar 13 02:29:11 2016 +0100 +++ b/tests/test-conflict.t Tue Mar 15 14:10:46 2016 -0700 @@ -46,16 +46,13 @@ $ cat a Small Mathematical Series. - <<<<<<< local: 618808747361 - test: branch2 1 2 3 + <<<<<<< local: 618808747361 - test: branch2 6 8 ======= - 1 - 2 - 3 4 5 >>>>>>> other: c0c68e4fe667 - test: branch1 @@ -79,16 +76,13 @@ $ cat a Small Mathematical Series. - <<<<<<< local: test 2 1 2 3 + <<<<<<< local: test 2 6 8 ======= - 1 - 2 - 3 4 5 >>>>>>> other: test 1 @@ -108,16 +102,13 @@ $ cat a Small Mathematical Series. - <<<<<<< local: test 2 1 2 3 + <<<<<<< local: test 2 6 8 ======= - 1 - 2 - 3 4 5 >>>>>>> other: test 1 @@ -150,16 +141,13 @@ $ cat a Small Mathematical Series. - <<<<<<< local: 123456789012345678901234567890123456789012345678901234567890\xe3\x81\x82... (esc) 1 2 3 + <<<<<<< local: 123456789012345678901234567890123456789012345678901234567890\xe3\x81\x82... (esc) 6 8 ======= - 1 - 2 - 3 4 5 >>>>>>> other: branch1 @@ -179,16 +167,13 @@ $ cat a Small Mathematical Series. - <<<<<<< local 1 2 3 + <<<<<<< local 6 8 ======= - 1 - 2 - 3 4 5 >>>>>>> other @@ -232,6 +217,7 @@ $ hg up -C 2 files updated, 0 files merged, 0 files removed, 0 files unresolved + 1 other heads for branch "default" $ printf "\n\nEnd of file\n" >> a $ hg ci -m "Add some stuff at the end" $ hg up -r 1 @@ -269,6 +255,7 @@ $ hg up -C 1 files updated, 0 files merged, 1 files removed, 0 files unresolved + 1 other heads for branch "default" $ hg merge --tool :merge-local merging a 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
--- a/tests/test-contrib-check-commit.t Sun Mar 13 02:29:11 2016 +0100 +++ b/tests/test-contrib-check-commit.t Tue Mar 15 14:10:46 2016 -0700 @@ -87,6 +87,10 @@ > @@ -599,7 +599,7 @@ > if opts.get('all'): > + > + > + + > + some = otherjunk + > + > + > + def blah_blah(x): > + pass @@ -102,10 +106,10 @@ This has no topic and ends with a period. 7: don't add trailing period on summary line This has no topic and ends with a period. - 15: adds double empty line - + - 16: adds a function with foo_bar naming - + def blah_blah(x): 19: adds double empty line + + 20: adds a function with foo_bar naming + + def blah_blah(x): + 23: adds double empty line + + [1]
--- a/tests/test-contrib.t Sun Mar 13 02:29:11 2016 +0100 +++ b/tests/test-contrib.t Tue Mar 15 14:10:46 2016 -0700 @@ -148,11 +148,10 @@ base <<<<<<< conflict-local not other - end ======= other + >>>>>>> conflict-other end - >>>>>>> conflict-other [1] 1 label @@ -161,11 +160,10 @@ base <<<<<<< foo not other - end ======= other + >>>>>>> conflict-other end - >>>>>>> conflict-other [1] 2 labels @@ -174,11 +172,10 @@ base <<<<<<< foo not other - end ======= other + >>>>>>> bar end - >>>>>>> bar [1] 3 labels
--- a/tests/test-convert-hg-svn.t Sun Mar 13 02:29:11 2016 +0100 +++ b/tests/test-convert-hg-svn.t Tue Mar 15 14:10:46 2016 -0700 @@ -1,5 +1,9 @@ #require svn svn-bindings + $ filter_svn_output () { + > egrep -v 'Committing|Updating' | sed -e 's/done$//' || true + > } + $ cat <<EOF >> $HGRCPATH > [extensions] > convert = @@ -37,7 +41,7 @@ $ echo a > a $ svn add a A a - $ svn ci -m'added a' a + $ svn ci -m'added a' a | filter_svn_output Adding a Transmitting file data . Committed revision 1.
--- a/tests/test-convert-svn-encoding.t Sun Mar 13 02:29:11 2016 +0100 +++ b/tests/test-convert-svn-encoding.t Tue Mar 15 14:10:46 2016 -0700 @@ -57,55 +57,55 @@ source: svn:afeb9c47-92ff-4c0c-9f72-e1f6eb8ac9af/trunk@2 converting: 1/6 revisions (16.67%) reparent to file://*/svn-repo/trunk (glob) - scanning paths: /trunk/\xc3\xa0 0/3 (0.00%) (esc) - scanning paths: /trunk/\xc3\xa0/e\xcc\x81 1/3 (33.33%) (esc) - scanning paths: /trunk/\xc3\xa9 2/3 (66.67%) (esc) + scanning paths: /trunk/\xc3\xa0 0/3 paths (0.00%) (esc) + scanning paths: /trunk/\xc3\xa0/e\xcc\x81 1/3 paths (33.33%) (esc) + scanning paths: /trunk/\xc3\xa9 2/3 paths (66.67%) (esc) committing files: \xc3\xa0/e\xcc\x81 (esc) - getting files: \xc3\xa0/e\xcc\x81 1/2 (50.00%) (esc) + getting files: \xc3\xa0/e\xcc\x81 1/2 files (50.00%) (esc) \xc3\xa9 (esc) - getting files: \xc3\xa9 2/2 (100.00%) (esc) + getting files: \xc3\xa9 2/2 files (100.00%) (esc) committing manifest committing changelog 3 copy files source: svn:afeb9c47-92ff-4c0c-9f72-e1f6eb8ac9af/trunk@3 converting: 2/6 revisions (33.33%) - scanning paths: /trunk/\xc3\xa0 0/4 (0.00%) (esc) + scanning paths: /trunk/\xc3\xa0 0/4 paths (0.00%) (esc) gone from -1 reparent to file://*/svn-repo (glob) reparent to file://*/svn-repo/trunk (glob) - scanning paths: /trunk/\xc3\xa8 1/4 (25.00%) (esc) + scanning paths: /trunk/\xc3\xa8 1/4 paths (25.00%) (esc) copied to \xc3\xa8 from \xc3\xa9@2 (esc) - scanning paths: /trunk/\xc3\xa9 2/4 (50.00%) (esc) + scanning paths: /trunk/\xc3\xa9 2/4 paths (50.00%) (esc) gone from -1 reparent to file://*/svn-repo (glob) reparent to file://*/svn-repo/trunk (glob) - scanning paths: /trunk/\xc3\xb9 3/4 (75.00%) (esc) + scanning paths: /trunk/\xc3\xb9 3/4 paths (75.00%) (esc) mark /trunk/\xc3\xb9 came from \xc3\xa0:2 (esc) - getting files: \xc3\xa0/e\xcc\x81 1/4 (25.00%) (esc) - getting files: \xc3\xa9 2/4 (50.00%) (esc) + getting files: \xc3\xa0/e\xcc\x81 1/4 files (25.00%) (esc) + getting files: \xc3\xa9 2/4 files (50.00%) 
(esc) committing files: \xc3\xa8 (esc) - getting files: \xc3\xa8 3/4 (75.00%) (esc) + getting files: \xc3\xa8 3/4 files (75.00%) (esc) \xc3\xa8: copy \xc3\xa9:6b67ccefd5ce6de77e7ead4f5292843a0255329f (esc) \xc3\xb9/e\xcc\x81 (esc) - getting files: \xc3\xb9/e\xcc\x81 4/4 (100.00%) (esc) + getting files: \xc3\xb9/e\xcc\x81 4/4 files (100.00%) (esc) \xc3\xb9/e\xcc\x81: copy \xc3\xa0/e\xcc\x81:a9092a3d84a37b9993b5c73576f6de29b7ea50f6 (esc) committing manifest committing changelog 2 remove files source: svn:afeb9c47-92ff-4c0c-9f72-e1f6eb8ac9af/trunk@4 converting: 3/6 revisions (50.00%) - scanning paths: /trunk/\xc3\xa8 0/2 (0.00%) (esc) + scanning paths: /trunk/\xc3\xa8 0/2 paths (0.00%) (esc) gone from -1 reparent to file://*/svn-repo (glob) reparent to file://*/svn-repo/trunk (glob) - scanning paths: /trunk/\xc3\xb9 1/2 (50.00%) (esc) + scanning paths: /trunk/\xc3\xb9 1/2 paths (50.00%) (esc) gone from -1 reparent to file://*/svn-repo (glob) reparent to file://*/svn-repo/trunk (glob) - getting files: \xc3\xa8 1/2 (50.00%) (esc) - getting files: \xc3\xb9/e\xcc\x81 2/2 (100.00%) (esc) + getting files: \xc3\xa8 1/2 files (50.00%) (esc) + getting files: \xc3\xb9/e\xcc\x81 2/2 files (100.00%) (esc) committing files: committing manifest committing changelog @@ -113,13 +113,13 @@ source: svn:afeb9c47-92ff-4c0c-9f72-e1f6eb8ac9af/branches/branch?@5 converting: 4/6 revisions (66.67%) reparent to file://*/svn-repo/branches/branch%C3%A9 (glob) - scanning paths: /branches/branch\xc3\xa9 0/1 (0.00%) (esc) + scanning paths: /branches/branch\xc3\xa9 0/1 paths (0.00%) (esc) committing changelog 0 branch to branch?e source: svn:afeb9c47-92ff-4c0c-9f72-e1f6eb8ac9af/branches/branch?e@6 converting: 5/6 revisions (83.33%) reparent to file://*/svn-repo/branches/branch%C3%A9e (glob) - scanning paths: /branches/branch\xc3\xa9e 0/1 (0.00%) (esc) + scanning paths: /branches/branch\xc3\xa9e 0/1 paths (0.00%) (esc) committing changelog reparent to file://*/svn-repo (glob) reparent to 
file://*/svn-repo/branches/branch%C3%A9e (glob)
--- a/tests/test-convert-svn-source.t Sun Mar 13 02:29:11 2016 +0100 +++ b/tests/test-convert-svn-source.t Tue Mar 15 14:10:46 2016 -0700 @@ -1,5 +1,9 @@ #require svn svn-bindings + $ filter_svn_output () { + > egrep -v 'Committing|Updating' | sed -e 's/done$//' || true + > } + $ cat >> $HGRCPATH <<EOF > [extensions] > convert = @@ -27,37 +31,35 @@ $ mkdir tags $ cd .. - $ svn import -m "init projB" projB "$SVNREPOURL/proj%20B" | sort - + $ svn import -m "init projB" projB "$SVNREPOURL/proj%20B" | filter_svn_output | sort Adding projB/mytrunk (glob) Adding projB/tags (glob) Committed revision 1. Update svn repository - $ svn co "$SVNREPOURL/proj%20B/mytrunk" B + $ svn co "$SVNREPOURL/proj%20B/mytrunk" B | filter_svn_output Checked out revision 1. $ cd B $ echo hello > 'letter .txt' - $ svn add 'letter .txt' + $ svn add 'letter .txt' | filter_svn_output A letter .txt - $ svn ci -m hello + $ svn ci -m hello | filter_svn_output Adding letter .txt Transmitting file data . Committed revision 2. $ svn-safe-append.py world 'letter .txt' - $ svn ci -m world + $ svn ci -m world | filter_svn_output Sending letter .txt Transmitting file data . Committed revision 3. - $ svn copy -m "tag v0.1" "$SVNREPOURL/proj%20B/mytrunk" "$SVNREPOURL/proj%20B/tags/v0.1" - + $ svn copy -m "tag v0.1" "$SVNREPOURL/proj%20B/mytrunk" "$SVNREPOURL/proj%20B/tags/v0.1" | filter_svn_output Committed revision 4. $ svn-safe-append.py 'nice day today!' 'letter .txt' - $ svn ci -m "nice day" + $ svn ci -m "nice day" | filter_svn_output Sending letter .txt Transmitting file data . Committed revision 5. @@ -88,20 +90,19 @@ $ cd B $ svn-safe-append.py "see second letter" 'letter .txt' $ echo "nice to meet you" > letter2.txt - $ svn add letter2.txt + $ svn add letter2.txt | filter_svn_output A letter2.txt - $ svn ci -m "second letter" + $ svn ci -m "second letter" | filter_svn_output Sending letter .txt Adding letter2.txt Transmitting file data .. Committed revision 6. 
- $ svn copy -m "tag v0.2" "$SVNREPOURL/proj%20B/mytrunk" "$SVNREPOURL/proj%20B/tags/v0.2" - + $ svn copy -m "tag v0.2" "$SVNREPOURL/proj%20B/mytrunk" "$SVNREPOURL/proj%20B/tags/v0.2" | filter_svn_output Committed revision 7. $ svn-safe-append.py "blah-blah-blah" letter2.txt - $ svn ci -m "work in progress" + $ svn ci -m "work in progress" | filter_svn_output Sending letter2.txt Transmitting file data . Committed revision 8. @@ -172,7 +173,7 @@ $ cd B $ echo >> "letter .txt" - $ svn ci -m 'nothing' + $ svn ci -m 'nothing' | filter_svn_output Sending letter .txt Transmitting file data . Committed revision 9.
--- a/tests/test-copy-move-merge.t Sun Mar 13 02:29:11 2016 +0100 +++ b/tests/test-copy-move-merge.t Tue Mar 15 14:10:46 2016 -0700 @@ -34,6 +34,7 @@ preserving a for resolve of b preserving a for resolve of c removing a + starting 4 threads for background file closing (?) b: remote moved from a -> m (premerge) picked tool ':merge' for b (binary False symlink False changedelete False) merging a and b to b
--- a/tests/test-devel-warnings.t Sun Mar 13 02:29:11 2016 +0100 +++ b/tests/test-devel-warnings.t Tue Mar 15 14:10:46 2016 -0700 @@ -63,6 +63,8 @@ $ cat << EOF >> $HGRCPATH > [extensions] > buggylocking=$TESTTMP/buggylocking.py + > mock=$TESTDIR/mockblackbox.py + > blackbox= > [devel] > all-warnings=1 > EOF @@ -70,16 +72,16 @@ $ hg init lock-checker $ cd lock-checker $ hg buggylocking - devel-warn: transaction with no lock at: $TESTTMP/buggylocking.py:11 (buggylocking) - devel-warn: "wlock" acquired after "lock" at: $TESTTMP/buggylocking.py:13 (buggylocking) + devel-warn: transaction with no lock at: $TESTTMP/buggylocking.py:* (buggylocking) (glob) + devel-warn: "wlock" acquired after "lock" at: $TESTTMP/buggylocking.py:* (buggylocking) (glob) $ cat << EOF >> $HGRCPATH > [devel] > all=0 > check-locks=1 > EOF $ hg buggylocking - devel-warn: transaction with no lock at: $TESTTMP/buggylocking.py:11 (buggylocking) - devel-warn: "wlock" acquired after "lock" at: $TESTTMP/buggylocking.py:13 (buggylocking) + devel-warn: transaction with no lock at: $TESTTMP/buggylocking.py:* (buggylocking) (glob) + devel-warn: "wlock" acquired after "lock" at: $TESTTMP/buggylocking.py:* (buggylocking) (glob) $ hg buggylocking --traceback devel-warn: transaction with no lock at: */hg:* in * (glob) @@ -112,7 +114,7 @@ $ hg add a $ hg commit -m a $ hg stripintr - saved backup bundle to $TESTTMP/lock-checker/.hg/strip-backup/cb9a9f314b8b-cc5ccb0b-backup.hg (glob) + saved backup bundle to $TESTTMP/lock-checker/.hg/strip-backup/*-backup.hg (glob) abort: programming error: cannot strip from inside a transaction (contact your extension maintainer) [255] @@ -122,7 +124,7 @@ 0 $ hg oldanddeprecated devel-warn: foorbar is deprecated, go shopping - (compatibility will be dropped after Mercurial-42.1337, update your code.) at: $TESTTMP/buggylocking.py:53 (oldanddeprecated) + (compatibility will be dropped after Mercurial-42.1337, update your code.) 
at: $TESTTMP/buggylocking.py:* (oldanddeprecated) (glob) $ hg oldanddeprecated --traceback devel-warn: foorbar is deprecated, go shopping @@ -138,4 +140,27 @@ */mercurial/dispatch.py:* in <lambda> (glob) */mercurial/util.py:* in check (glob) $TESTTMP/buggylocking.py:* in oldanddeprecated (glob) + $ hg blackbox -l 9 + 1970/01/01 00:00:00 bob @cb9a9f314b8b07ba71012fcdbc544b5a4d82ff5b (5000)> devel-warn: revset "oldstyle" use list instead of smartset, (upgrade your code) at: */mercurial/revset.py:* (mfunc) (glob) + 1970/01/01 00:00:00 bob @cb9a9f314b8b07ba71012fcdbc544b5a4d82ff5b (5000)> log -r oldstyle() -T {rev}\n exited 0 after * seconds (glob) + 1970/01/01 00:00:00 bob @cb9a9f314b8b07ba71012fcdbc544b5a4d82ff5b (5000)> oldanddeprecated + 1970/01/01 00:00:00 bob @cb9a9f314b8b07ba71012fcdbc544b5a4d82ff5b (5000)> devel-warn: foorbar is deprecated, go shopping + (compatibility will be dropped after Mercurial-42.1337, update your code.) at: $TESTTMP/buggylocking.py:* (oldanddeprecated) (glob) + 1970/01/01 00:00:00 bob @cb9a9f314b8b07ba71012fcdbc544b5a4d82ff5b (5000)> oldanddeprecated exited 0 after * seconds (glob) + 1970/01/01 00:00:00 bob @cb9a9f314b8b07ba71012fcdbc544b5a4d82ff5b (5000)> oldanddeprecated --traceback + 1970/01/01 00:00:00 bob @cb9a9f314b8b07ba71012fcdbc544b5a4d82ff5b (5000)> devel-warn: foorbar is deprecated, go shopping + (compatibility will be dropped after Mercurial-42.1337, update your code.) 
at: + */hg:* in <module> (glob) + */mercurial/dispatch.py:* in run (glob) + */mercurial/dispatch.py:* in dispatch (glob) + */mercurial/dispatch.py:* in _runcatch (glob) + */mercurial/dispatch.py:* in _dispatch (glob) + */mercurial/dispatch.py:* in runcommand (glob) + */mercurial/dispatch.py:* in _runcommand (glob) + */mercurial/dispatch.py:* in checkargs (glob) + */mercurial/dispatch.py:* in <lambda> (glob) + */mercurial/util.py:* in check (glob) + $TESTTMP/buggylocking.py:* in oldanddeprecated (glob) + 1970/01/01 00:00:00 bob @cb9a9f314b8b07ba71012fcdbc544b5a4d82ff5b (5000)> oldanddeprecated --traceback exited 0 after * seconds (glob) + 1970/01/01 00:00:00 bob @cb9a9f314b8b07ba71012fcdbc544b5a4d82ff5b (5000)> blackbox -l 9 $ cd ..
--- a/tests/test-dispatch.py Sun Mar 13 02:29:11 2016 +0100 +++ b/tests/test-dispatch.py Tue Mar 15 14:10:46 2016 -0700 @@ -1,16 +1,18 @@ +from __future__ import absolute_import, print_function import os -from mercurial import dispatch +from mercurial import ( + dispatch, +) def testdispatch(cmd): """Simple wrapper around dispatch.dispatch() Prints command and result value, but does not handle quoting. """ - print "running: %s" % (cmd,) + print("running: %s" % (cmd,)) req = dispatch.request(cmd.split()) result = dispatch.dispatch(req) - print "result: %r" % (result,) - + print("result: %r" % (result,)) testdispatch("init test1") os.chdir('test1')
--- a/tests/test-double-merge.t Sun Mar 13 02:29:11 2016 +0100 +++ b/tests/test-double-merge.t Tue Mar 15 14:10:46 2016 -0700 @@ -37,6 +37,7 @@ ancestor: e6dc8efe11cc, local: 6a0df1dad128+, remote: 484bf6903104 preserving foo for resolve of bar preserving foo for resolve of foo + starting 4 threads for background file closing (?) bar: remote copied from foo -> m (premerge) picked tool ':merge' for bar (binary False symlink False changedelete False) merging foo and bar to bar
--- a/tests/test-extdiff.t Sun Mar 13 02:29:11 2016 +0100 +++ b/tests/test-extdiff.t Tue Mar 15 14:10:46 2016 -0700 @@ -72,7 +72,7 @@ Specifying an empty revision should abort. - $ hg extdiff --patch --rev 'ancestor()' --rev 1 + $ hg extdiff -p diff --patch --rev 'ancestor()' --rev 1 abort: empty revision on one side of range [255]
--- a/tests/test-extension.t Sun Mar 13 02:29:11 2016 +0100 +++ b/tests/test-extension.t Tue Mar 15 14:10:46 2016 -0700 @@ -1003,7 +1003,7 @@ Enabled extensions: - throw 1.2.3 + throw external 1.2.3 $ echo 'getversion = lambda: "1.twentythree"' >> throw.py $ rm -f throw.pyc throw.pyo $ hg version -v --config extensions.throw=throw.py @@ -1016,7 +1016,7 @@ Enabled extensions: - throw 1.twentythree + throw external 1.twentythree Refuse to load extensions with minimum version requirements
--- a/tests/test-fileset.t Sun Mar 13 02:29:11 2016 +0100 +++ b/tests/test-fileset.t Tue Mar 15 14:10:46 2016 -0700 @@ -351,9 +351,10 @@ Test detection of unintentional 'matchctx.existing()' invocation $ cat > $TESTTMP/existingcaller.py <<EOF - > from mercurial import fileset + > from mercurial import registrar > - > @fileset.predicate('existingcaller()', callexisting=False) + > filesetpredicate = registrar.filesetpredicate() + > @filesetpredicate('existingcaller()', callexisting=False) > def existingcaller(mctx, x): > # this 'mctx.existing()' invocation is unintentional > return [f for f in mctx.existing()]
--- a/tests/test-glog.t Sun Mar 13 02:29:11 2016 +0100 +++ b/tests/test-glog.t Tue Mar 15 14:10:46 2016 -0700 @@ -2407,4 +2407,11 @@ o | 5 null+5 | | +label() should just work in node template: + + $ hg log -Gqr 7 --config extensions.color= --color=debug \ + > --config ui.graphnodetemplate='{label("branch.{branch}", rev)}' + [branch.default|7] [log.node|7:02dbb8e276b8] + | + $ cd ..
--- a/tests/test-graft.t Sun Mar 13 02:29:11 2016 +0100 +++ b/tests/test-graft.t Tue Mar 15 14:10:46 2016 -0700 @@ -1,3 +1,9 @@ + $ cat >> $HGRCPATH <<EOF + > [extdiff] + > # for portability: + > pdiff = sh "$RUNTESTDIR/pdiff" + > EOF + Create a repo with some stuff in it: $ hg init a @@ -40,6 +46,13 @@ | o test@0.public: 0 +Can't continue without starting: + + $ hg rm -q e + $ hg graft --continue + abort: no graft in progress + [255] + $ hg revert -r . -q e Need to specify a rev: @@ -154,6 +167,7 @@ branchmerge: True, force: True, partial: False ancestor: 68795b066622, local: ef0ef43d49e7+, remote: 5d205f8b35b6 preserving b for resolve of b + starting 4 threads for background file closing (?) b: local copied/moved from a -> m (premerge) picked tool ':merge' for b (binary False symlink False changedelete False) merging b and a to b @@ -189,10 +203,10 @@ e: versions differ -> m (premerge) picked tool ':merge' for e (binary False symlink False changedelete False) merging e - my e@1905859650ec+ other e@9c233e8e184d ancestor e@68795b066622 + my e@1905859650ec+ other e@9c233e8e184d ancestor e@4c60f11aa304 e: versions differ -> m (merge) picked tool ':merge' for e (binary False symlink False changedelete False) - my e@1905859650ec+ other e@9c233e8e184d ancestor e@68795b066622 + my e@1905859650ec+ other e@9c233e8e184d ancestor e@4c60f11aa304 warning: conflicts while merging e! 
(edit, then use 'hg resolve --mark') abort: unresolved conflicts, can't continue (use hg resolve and hg graft --continue --log) @@ -342,9 +356,9 @@ skipping already grafted revision 7:ef0ef43d49e7 (was grafted from 2:5c095ad7e90f) [255] - $ hg extdiff --config extensions.extdiff= --patch -r 2 -r 13 - --- */hg-5c095ad7e90f.patch * +0000 (glob) - +++ */hg-7a4785234d87.patch * +0000 (glob) + $ hg pdiff --config extensions.extdiff= --patch -r 2 -r 13 + --- */hg-5c095ad7e90f.patch * (glob) + +++ */hg-7a4785234d87.patch * (glob) @@ -1,18 +1,18 @@ # HG changeset patch -# User test @@ -373,9 +387,9 @@ ++a [1] - $ hg extdiff --config extensions.extdiff= --patch -r 2 -r 13 -X . - --- */hg-5c095ad7e90f.patch * +0000 (glob) - +++ */hg-7a4785234d87.patch * +0000 (glob) + $ hg pdiff --config extensions.extdiff= --patch -r 2 -r 13 -X . + --- */hg-5c095ad7e90f.patch * (glob) + +++ */hg-7a4785234d87.patch * (glob) @@ -1,8 +1,8 @@ # HG changeset patch -# User test
--- a/tests/test-help.t Sun Mar 13 02:29:11 2016 +0100 +++ b/tests/test-help.t Tue Mar 15 14:10:46 2016 -0700 @@ -875,6 +875,7 @@ bundles container for exchange of repository data changegroups representation of revlog data + requirements repository requirements revlogs revision storage mechanism sub-topics can be accessed @@ -2727,6 +2728,13 @@ representation of revlog data </td></tr> <tr><td> + <a href="/help/internals.requirements"> + requirements + </a> + </td><td> + repository requirements + </td></tr> + <tr><td> <a href="/help/internals.revlogs"> revlogs </a>
--- a/tests/test-hgignore.t Sun Mar 13 02:29:11 2016 +0100 +++ b/tests/test-hgignore.t Tue Mar 15 14:10:46 2016 -0700 @@ -286,3 +286,16 @@ $ hg debugignore dir1/file2 dir1/file2 is ignored (ignore rule in dir2/.hgignore, line 1: 'file*2') + +#if windows + +Windows paths are accepted on input + + $ rm dir1/.hgignore + $ echo "dir1/file*" >> .hgignore + $ hg debugignore "dir1\file2" + dir1\file2 is ignored + (ignore rule in $TESTTMP\ignorerepo\.hgignore, line 4: 'dir1/file*') + $ hg up -qC . + +#endif
--- a/tests/test-hgweb-json.t Sun Mar 13 02:29:11 2016 +0100 +++ b/tests/test-hgweb-json.t Tue Mar 15 14:10:46 2016 -0700 @@ -1,4 +1,3 @@ -#require json #require serve $ request() {
--- a/tests/test-hgwebdir.t Sun Mar 13 02:29:11 2016 +0100 +++ b/tests/test-hgwebdir.t Tue Mar 15 14:10:46 2016 -0700 @@ -100,6 +100,23 @@ /a/ /b/ + $ get-with-headers.py localhost:$HGPORT '?style=json' + 200 Script output follows + + { + "entries": [{ + "name": "a", + "description": "unknown", + "contact": "Foo Bar \u003cfoo.bar@example.com\u003e", + "lastchange": [*, *] (glob) + }, { + "name": "b", + "description": "unknown", + "contact": "Foo Bar \u003cfoo.bar@example.com\u003e", + "lastchange": [*, *] (glob) + }] + } (no-eol) + $ get-with-headers.py localhost:$HGPORT 'a/file/tip/a?style=raw' 200 Script output follows
--- a/tests/test-highlight.t Sun Mar 13 02:29:11 2016 +0100 +++ b/tests/test-highlight.t Tue Mar 15 14:10:46 2016 -0700 @@ -10,6 +10,12 @@ $ hg init test $ cd test + $ filterhtml () { + > sed -e "s/class=\"k\"/class=\"kn\"/g" \ + > -e "s/class=\"mf\"/class=\"mi\"/g" \ + > -e "s/class=\"\([cs]\)[h12]\"/class=\"\1\"/g" + > } + create random Python file to exercise Pygments $ cat <<EOF > primes.py @@ -57,8 +63,7 @@ hgweb filerevision, html - $ (get-with-headers.py localhost:$HGPORT 'file/tip/primes.py') \ - > | sed "s/class=\"k\"/class=\"kn\"/g" | sed "s/class=\"mf\"/class=\"mi\"/g" + $ (get-with-headers.py localhost:$HGPORT 'file/tip/primes.py') | filterhtml 200 Script output follows <!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.1//EN" "http://www.w3.org/TR/xhtml11/DTD/xhtml11.dtd"> @@ -190,8 +195,7 @@ hgweb fileannotate, html - $ (get-with-headers.py localhost:$HGPORT 'annotate/tip/primes.py') \ - > | sed "s/class=\"k\"/class=\"kn\"/g" | sed "s/class=\"mi\"/class=\"mf\"/g" + $ (get-with-headers.py localhost:$HGPORT 'annotate/tip/primes.py') | filterhtml 200 Script output follows <!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.1//EN" "http://www.w3.org/TR/xhtml11/DTD/xhtml11.dtd"> @@ -408,7 +412,7 @@ <a href="/annotate/06824edf55d0/primes.py#l18" title="06824edf55d0: a">test@0</a> </td> - <td class="source"><a href="#l18"> 18</a> <span class="n">ns</span> <span class="o">=</span> <span class="n">ifilter</span><span class="p">(</span><span class="kn">lambda</span> <span class="n">n</span><span class="p">:</span> <span class="n">n</span> <span class="o">%</span> <span class="n">p</span> <span class="o">!=</span> <span class="mf">0</span><span class="p">,</span> <span class="n">ns</span><span class="p">)</span></td> + <td class="source"><a href="#l18"> 18</a> <span class="n">ns</span> <span class="o">=</span> <span class="n">ifilter</span><span class="p">(</span><span class="kn">lambda</span> <span class="n">n</span><span class="p">:</span> <span class="n">n</span> <span 
class="o">%</span> <span class="n">p</span> <span class="o">!=</span> <span class="mi">0</span><span class="p">,</span> <span class="n">ns</span><span class="p">)</span></td> </tr> <tr id="l19"> <td class="annotate"> @@ -436,14 +440,14 @@ <a href="/annotate/06824edf55d0/primes.py#l22" title="06824edf55d0: a">test@0</a> </td> - <td class="source"><a href="#l22"> 22</a> <span class="n">odds</span> <span class="o">=</span> <span class="n">ifilter</span><span class="p">(</span><span class="kn">lambda</span> <span class="n">i</span><span class="p">:</span> <span class="n">i</span> <span class="o">%</span> <span class="mf">2</span> <span class="o">==</span> <span class="mf">1</span><span class="p">,</span> <span class="n">count</span><span class="p">())</span></td> + <td class="source"><a href="#l22"> 22</a> <span class="n">odds</span> <span class="o">=</span> <span class="n">ifilter</span><span class="p">(</span><span class="kn">lambda</span> <span class="n">i</span><span class="p">:</span> <span class="n">i</span> <span class="o">%</span> <span class="mi">2</span> <span class="o">==</span> <span class="mi">1</span><span class="p">,</span> <span class="n">count</span><span class="p">())</span></td> </tr> <tr id="l23"> <td class="annotate"> <a href="/annotate/06824edf55d0/primes.py#l23" title="06824edf55d0: a">test@0</a> </td> - <td class="source"><a href="#l23"> 23</a> <span class="kn">return</span> <span class="n">chain</span><span class="p">([</span><span class="mf">2</span><span class="p">],</span> <span class="n">sieve</span><span class="p">(</span><span class="n">dropwhile</span><span class="p">(</span><span class="kn">lambda</span> <span class="n">n</span><span class="p">:</span> <span class="n">n</span> <span class="o"><</span> <span class="mf">3</span><span class="p">,</span> <span class="n">odds</span><span class="p">)))</span></td> + <td class="source"><a href="#l23"> 23</a> <span class="kn">return</span> <span class="n">chain</span><span 
class="p">([</span><span class="mi">2</span><span class="p">],</span> <span class="n">sieve</span><span class="p">(</span><span class="n">dropwhile</span><span class="p">(</span><span class="kn">lambda</span> <span class="n">n</span><span class="p">:</span> <span class="n">n</span> <span class="o"><</span> <span class="mi">3</span><span class="p">,</span> <span class="n">odds</span><span class="p">)))</span></td> </tr> <tr id="l24"> <td class="annotate"> @@ -478,7 +482,7 @@ <a href="/annotate/06824edf55d0/primes.py#l28" title="06824edf55d0: a">test@0</a> </td> - <td class="source"><a href="#l28"> 28</a> <span class="n">n</span> <span class="o">=</span> <span class="nb">int</span><span class="p">(</span><span class="n">sys</span><span class="o">.</span><span class="n">argv</span><span class="p">[</span><span class="mf">1</span><span class="p">])</span></td> + <td class="source"><a href="#l28"> 28</a> <span class="n">n</span> <span class="o">=</span> <span class="nb">int</span><span class="p">(</span><span class="n">sys</span><span class="o">.</span><span class="n">argv</span><span class="p">[</span><span class="mi">1</span><span class="p">])</span></td> </tr> <tr id="l29"> <td class="annotate"> @@ -492,7 +496,7 @@ <a href="/annotate/06824edf55d0/primes.py#l30" title="06824edf55d0: a">test@0</a> </td> - <td class="source"><a href="#l30"> 30</a> <span class="n">n</span> <span class="o">=</span> <span class="mf">10</span></td> + <td class="source"><a href="#l30"> 30</a> <span class="n">n</span> <span class="o">=</span> <span class="mi">10</span></td> </tr> <tr id="l31"> <td class="annotate">
--- a/tests/test-histedit-arguments.t Sun Mar 13 02:29:11 2016 +0100 +++ b/tests/test-histedit-arguments.t Tue Mar 15 14:10:46 2016 -0700 @@ -63,6 +63,8 @@ # # Commits are listed from least to most recent # + # You can reorder changesets by reordering the lines + # # Commands: # # e, edit = use commit, but stop for amending @@ -133,6 +135,11 @@ (hg histedit --continue to resume) [1] + $ hg graft --continue + abort: no graft in progress + (continue: hg histedit --continue) + [255] + $ mv .hg/histedit-state .hg/histedit-state.back $ hg update --quiet --clean 2 $ echo alpha >> alpha @@ -243,9 +250,6 @@ > p c8e68270e35a 3 four > f 08d98a8350f3 4 five > EOF - 1 files updated, 0 files merged, 0 files removed, 0 files unresolved - reverting alpha - 1 files updated, 0 files merged, 0 files removed, 0 files unresolved four *** five @@ -258,7 +262,6 @@ HG: user: test HG: branch 'default' HG: changed alpha - 1 files updated, 0 files merged, 0 files removed, 0 files unresolved saved backup bundle to $TESTTMP/foo/.hg/strip-backup/*-backup.hg (glob) saved backup bundle to $TESTTMP/foo/.hg/strip-backup/*-backup.hg (glob) @@ -294,6 +297,8 @@ # # Commits are listed from least to most recent # + # You can reorder changesets by reordering the lines + # # Commands: # # e, edit = use commit, but stop for amending @@ -449,3 +454,46 @@ > pick 6f2f0241f119 > pick 8cde254db839 > EOF + +commit --amend should abort if histedit is in progress +(issue4800) and markers are not being created. +Eventually, histedit could perhaps look at `source` extra, +in which case this test should be revisited. + + $ hg -q up 8cde254db839 + $ hg histedit 6f2f0241f119 --commands - <<EOF + > pick 8cde254db839 + > edit 6f2f0241f119 + > EOF + 1 files updated, 0 files merged, 0 files removed, 0 files unresolved + merging foo + warning: conflicts while merging foo! 
(edit, then use 'hg resolve --mark') + Fix up the change (pick 8cde254db839) + (hg histedit --continue to resume) + [1] + $ hg resolve -m --all + (no more unresolved files) + continue: hg histedit --continue + $ hg histedit --cont + merging foo + warning: conflicts while merging foo! (edit, then use 'hg resolve --mark') + Editing (6f2f0241f119), you may commit or record as needed now. + (hg histedit --continue to resume) + [1] + $ hg resolve -m --all + (no more unresolved files) + continue: hg histedit --continue + $ hg commit --amend -m 'reject this fold' + abort: histedit in progress + (use 'hg histedit --continue' or 'hg histedit --abort') + [255] + +With markers enabled, histedit does not get confused, and +amend should not be blocked by the ongoing histedit. + + $ cat >>$HGRCPATH <<EOF + > [experimental] + > evolution=createmarkers,allowunstable + > EOF + $ hg commit --amend -m 'allow this fold' + $ hg histedit --continue
--- a/tests/test-histedit-bookmark-motion.t Sun Mar 13 02:29:11 2016 +0100 +++ b/tests/test-histedit-bookmark-motion.t Tue Mar 15 14:10:46 2016 -0700 @@ -69,6 +69,8 @@ # # Commits are listed from least to most recent # + # You can reorder changesets by reordering the lines + # # Commands: # # e, edit = use commit, but stop for amending @@ -130,6 +132,8 @@ # # Commits are listed from least to most recent # + # You can reorder changesets by reordering the lines + # # Commands: # # e, edit = use commit, but stop for amending
--- a/tests/test-histedit-commute.t Sun Mar 13 02:29:11 2016 +0100 +++ b/tests/test-histedit-commute.t Tue Mar 15 14:10:46 2016 -0700 @@ -63,6 +63,8 @@ # # Commits are listed from least to most recent # + # You can reorder changesets by reordering the lines + # # Commands: # # e, edit = use commit, but stop for amending @@ -104,7 +106,6 @@ > pick 055a42cdd887 d > EOF $ HGEDITOR="cat \"$EDITED\" > " hg histedit 177f92b77385 2>&1 | fixbundle - 0 files updated, 0 files merged, 3 files removed, 0 files unresolved log after edit $ hg log --graph @@ -148,7 +149,6 @@ > pick d8249471110a e > pick 8ade9693061e f > EOF - 0 files updated, 0 files merged, 3 files removed, 0 files unresolved $ hg log --graph @ changeset: 5:7eca9b5b1148 @@ -191,7 +191,6 @@ > pick 915da888f2de e > pick 177f92b77385 c > EOF - 0 files updated, 0 files merged, 4 files removed, 0 files unresolved $ hg log --graph @ changeset: 5:38b92f448761 | tag: tip @@ -232,7 +231,6 @@ > pick 38b92f448761 c > pick de71b079d9ce e > EOF - 0 files updated, 0 files merged, 2 files removed, 0 files unresolved $ hg log --graph @ changeset: 7:803ef1c6fcfd | tag: tip @@ -343,6 +341,8 @@ # # Commits are listed from least to most recent # + # You can reorder changesets by reordering the lines + # # Commands: # # e, edit = use commit, but stop for amending @@ -417,11 +417,6 @@ > EOF $ HGEDITOR="sh ./editor.sh" hg histedit 0 - 1 files updated, 0 files merged, 1 files removed, 0 files unresolved - adding another-dir/initial-file (glob) - removing initial-dir/initial-file (glob) - 0 files updated, 0 files merged, 1 files removed, 0 files unresolved - 1 files updated, 0 files merged, 0 files removed, 0 files unresolved saved backup bundle to $TESTTMP/issue4251/.hg/strip-backup/*-backup.hg (glob) saved backup bundle to $TESTTMP/issue4251/.hg/strip-backup/*-backup.hg (glob)
--- a/tests/test-histedit-drop.t Sun Mar 13 02:29:11 2016 +0100 +++ b/tests/test-histedit-drop.t Tue Mar 15 14:10:46 2016 -0700 @@ -59,7 +59,6 @@ > pick 652413bf663e f > pick 055a42cdd887 d > EOF - 0 files updated, 0 files merged, 4 files removed, 0 files unresolved log after edit $ hg log --graph @@ -124,7 +123,6 @@ > pick a4f7421b80f7 f > drop f518305ce889 d > EOF - 0 files updated, 0 files merged, 1 files removed, 0 files unresolved $ hg log --graph @ changeset: 3:a4f7421b80f7 | tag: tip @@ -155,10 +153,13 @@ hg: parse error: missing rules for changeset a4f7421b80f7 (use "drop a4f7421b80f7" to discard, see also: "hg help -e histedit.config") $ hg --config histedit.dropmissing=True histedit cb9a9f314b8b --commands - 2>&1 << EOF | fixbundle + > EOF + hg: parse error: no rules provided + (use strip extension to remove commits) + $ hg --config histedit.dropmissing=True histedit cb9a9f314b8b --commands - 2>&1 << EOF | fixbundle > pick cb9a9f314b8b a > pick ee283cb5f2d5 e > EOF - 0 files updated, 0 files merged, 3 files removed, 0 files unresolved $ hg log --graph @ changeset: 1:e99c679bf03e | tag: tip
--- a/tests/test-histedit-edit.t Sun Mar 13 02:29:11 2016 +0100 +++ b/tests/test-histedit-edit.t Tue Mar 15 14:10:46 2016 -0700 @@ -286,7 +286,6 @@ > mv tmp "\$1" > EOF $ HGEDITOR="sh ../edit.sh" hg histedit tip 2>&1 | fixbundle - 0 files updated, 0 files merged, 1 files removed, 0 files unresolved $ hg status $ hg log --limit 1 changeset: 6:1fd3b2fe7754 @@ -327,7 +326,6 @@ $ HGEDITOR="sh $TESTTMP/editor.sh" hg histedit tip --commands - 2>&1 << EOF | fixbundle > mess 1fd3b2fe7754 f > EOF - 0 files updated, 0 files merged, 1 files removed, 0 files unresolved abort: emulating unexpected abort $ test -f .hg/last-message.txt [1] @@ -354,8 +352,6 @@ $ HGEDITOR="sh $TESTTMP/editor.sh" hg histedit tip --commands - 2>&1 << EOF > mess 1fd3b2fe7754 f > EOF - 0 files updated, 0 files merged, 1 files removed, 0 files unresolved - adding f ==== before editing f @@ -408,7 +404,6 @@ $ hg histedit tip --commands - 2>&1 << EOF | fixbundle > mess 1fd3b2fe7754 f > EOF - 0 files updated, 0 files merged, 1 files removed, 0 files unresolved $ hg status $ hg log --limit 1 changeset: 6:62feedb1200e @@ -468,6 +463,8 @@ # # Commits are listed from least to most recent # + # You can reorder changesets by reordering the lines + # # Commands: # # e, edit = use commit, but stop for amending
--- a/tests/test-histedit-fold-non-commute.t Sun Mar 13 02:29:11 2016 +0100 +++ b/tests/test-histedit-fold-non-commute.t Tue Mar 15 14:10:46 2016 -0700 @@ -97,14 +97,7 @@ $ hg resolve --mark e (no more unresolved files) continue: hg histedit --continue - $ cat > cat.py <<EOF - > import sys - > print open(sys.argv[1]).read() - > print - > print - > EOF - $ HGEDITOR="python cat.py" hg histedit --continue 2>&1 | fixbundle | grep -v '2 files removed' - 2 files updated, 0 files merged, 0 files removed, 0 files unresolved + $ HGEDITOR=cat hg histedit --continue 2>&1 | fixbundle | grep -v '2 files removed' d *** does not commute with e @@ -118,22 +111,20 @@ HG: branch 'default' HG: changed d HG: changed e - - - - 2 files updated, 0 files merged, 0 files removed, 0 files unresolved merging e warning: conflicts while merging e! (edit, then use 'hg resolve --mark') Fix up the change (pick 7b4e2f4b7bcd) (hg histedit --continue to resume) just continue this time +keep the non-commuting change, and thus the pending change will be dropped $ hg revert -r 'p1()' e $ hg resolve --mark e (no more unresolved files) continue: hg histedit --continue + $ hg diff $ hg histedit --continue 2>&1 | fixbundle - 7b4e2f4b7bcd: empty changeset + 7b4e2f4b7bcd: skipping changeset (no changes) log after edit $ hg log --graph @@ -262,8 +253,6 @@ (no more unresolved files) continue: hg histedit --continue $ hg histedit --continue 2>&1 | fixbundle | grep -v '2 files removed' - 2 files updated, 0 files merged, 0 files removed, 0 files unresolved - 2 files updated, 0 files merged, 0 files removed, 0 files unresolved merging e warning: conflicts while merging e! (edit, then use 'hg resolve --mark') Fix up the change (pick 7b4e2f4b7bcd) @@ -275,7 +264,7 @@ (no more unresolved files) continue: hg histedit --continue $ hg histedit --continue 2>&1 | fixbundle - 7b4e2f4b7bcd: empty changeset + 7b4e2f4b7bcd: skipping changeset (no changes) log after edit $ hg log --graph
--- a/tests/test-histedit-fold.t Sun Mar 13 02:29:11 2016 +0100 +++ b/tests/test-histedit-fold.t Tue Mar 15 14:10:46 2016 -0700 @@ -54,9 +54,6 @@ > fold 177f92b77385 c > pick 055a42cdd887 d > EOF - 0 files updated, 0 files merged, 4 files removed, 0 files unresolved - 0 files updated, 0 files merged, 2 files removed, 0 files unresolved - 2 files updated, 0 files merged, 0 files removed, 0 files unresolved log after edit $ hg logt --graph @@ -111,9 +108,6 @@ > pick 6de59d13424a f > pick 9c277da72c9b d > EOF - 0 files updated, 0 files merged, 4 files removed, 0 files unresolved - 0 files updated, 0 files merged, 2 files removed, 0 files unresolved - 2 files updated, 0 files merged, 0 files removed, 0 files unresolved $ HGEDITOR=$OLDHGEDITOR @@ -177,10 +171,7 @@ > pick 8e03a72b6f83 f > fold c4a9eb7989fc d > EOF - 0 files updated, 0 files merged, 1 files removed, 0 files unresolved - adding d allow non-folding commit - 0 files updated, 0 files merged, 3 files removed, 0 files unresolved ==== before editing f *** @@ -242,9 +233,6 @@ > EOF editing: pick e860deea161a 4 e 1/2 changes (50.00%) editing: fold a00ad806cb55 5 f 2/2 changes (100.00%) - 0 files updated, 0 files merged, 1 files removed, 0 files unresolved - 0 files updated, 0 files merged, 2 files removed, 0 files unresolved - 2 files updated, 0 files merged, 0 files removed, 0 files unresolved tip after edit $ hg log --rev . 
@@ -372,7 +360,6 @@ created new head $ echo 6 >> file $ HGEDITOR=cat hg histedit --continue - 1 files updated, 0 files merged, 0 files removed, 0 files unresolved +4 *** +5.2 @@ -387,7 +374,6 @@ HG: user: test HG: branch 'default' HG: changed file - 1 files updated, 0 files merged, 0 files removed, 0 files unresolved saved backup bundle to $TESTTMP/fold-with-dropped/.hg/strip-backup/55c8d8dc79ce-4066cd98-backup.hg (glob) saved backup bundle to $TESTTMP/fold-with-dropped/.hg/strip-backup/617f94f13c0f-a35700fc-backup.hg (glob) $ hg logt -G @@ -443,10 +429,6 @@ > pick 1c4f440a8085 rename > fold e0371e0426bc b > EOF - 1 files updated, 0 files merged, 0 files removed, 0 files unresolved - reverting b.txt - 1 files updated, 0 files merged, 1 files removed, 0 files unresolved - 1 files updated, 0 files merged, 1 files removed, 0 files unresolved $ hg logt --follow b.txt 1:cf858d235c76 rename @@ -489,9 +471,6 @@ > fold a1a953ffb4b0 c > pick 6c795aa153cb a > EOF - 0 files updated, 0 files merged, 3 files removed, 0 files unresolved - 0 files updated, 0 files merged, 2 files removed, 0 files unresolved - 2 files updated, 0 files merged, 0 files removed, 0 files unresolved commit 9599899f62c05f4377548c32bf1c9f1a39634b0c $ hg logt @@ -530,13 +509,6 @@ > fold b7389cc4d66e 3 foo2 > fold 21679ff7675c 4 foo3 > EOF - 1 files updated, 0 files merged, 0 files removed, 0 files unresolved - reverting foo - 0 files updated, 0 files merged, 1 files removed, 0 files unresolved - 1 files updated, 0 files merged, 0 files removed, 0 files unresolved - merging foo - 0 files updated, 0 files merged, 1 files removed, 0 files unresolved - 1 files updated, 0 files merged, 0 files removed, 0 files unresolved $ hg logt 2:e8bedbda72c1 merged foos 1:578c7455730c a
--- a/tests/test-histedit-non-commute-abort.t Sun Mar 13 02:29:11 2016 +0100 +++ b/tests/test-histedit-non-commute-abort.t Tue Mar 15 14:10:46 2016 -0700 @@ -69,7 +69,6 @@ > pick e860deea161a e > pick 652413bf663e f > EOF - 0 files updated, 0 files merged, 2 files removed, 0 files unresolved merging e warning: conflicts while merging e! (edit, then use 'hg resolve --mark') Fix up the change (pick e860deea161a) @@ -82,6 +81,7 @@ local: 8f7551c7e4a2f2efe0bc8c741baf7f227d65d758 other: e860deea161a2f77de56603b340ebbb4536308ae unrecognized entry: x advisory record + file extras: e (ancestorlinknode = 0000000000000000000000000000000000000000) file: e (record type "F", state "u", hash 58e6b3a414a1e090dfc6029add0f3555ccba127f) local path: e (flags "") ancestor path: e (node null) @@ -95,6 +95,7 @@ * version 2 records local: 8f7551c7e4a2f2efe0bc8c741baf7f227d65d758 other: e860deea161a2f77de56603b340ebbb4536308ae + file extras: e (ancestorlinknode = 0000000000000000000000000000000000000000) file: e (record type "F", state "u", hash 58e6b3a414a1e090dfc6029add0f3555ccba127f) local path: e (flags "") ancestor path: e (node null)
--- a/tests/test-histedit-non-commute.t Sun Mar 13 02:29:11 2016 +0100 +++ b/tests/test-histedit-non-commute.t Tue Mar 15 14:10:46 2016 -0700 @@ -171,7 +171,7 @@ (no more unresolved files) continue: hg histedit --continue $ hg histedit --continue 2>&1 | fixbundle - 7b4e2f4b7bcd: empty changeset + 7b4e2f4b7bcd: skipping changeset (no changes) log after edit $ hg log --graph @@ -254,7 +254,7 @@ (no more unresolved files) continue: hg histedit --continue $ hg histedit --continue 2>&1 | fixbundle - 7b4e2f4b7bcd: empty changeset + 7b4e2f4b7bcd: skipping changeset (no changes) post message fix $ hg log --graph
--- a/tests/test-histedit-obsolete.t Sun Mar 13 02:29:11 2016 +0100 +++ b/tests/test-histedit-obsolete.t Tue Mar 15 14:10:46 2016 -0700 @@ -14,6 +14,85 @@ > rebase= > EOF +Test that histedit learns about obsolescence not stored in histedit state + $ hg init boo + $ cd boo + $ echo a > a + $ hg ci -Am a + adding a + $ echo a > b + $ echo a > c + $ echo a > c + $ hg ci -Am b + adding b + adding c + $ echo a > d + $ hg ci -Am c + adding d + $ echo "pick `hg log -r 0 -T '{node|short}'`" > plan + $ echo "pick `hg log -r 2 -T '{node|short}'`" >> plan + $ echo "edit `hg log -r 1 -T '{node|short}'`" >> plan + $ hg histedit -r 'all()' --commands plan + Editing (1b2d564fad96), you may commit or record as needed now. + (hg histedit --continue to resume) + [1] + $ hg st + A b + A c + ? plan + $ hg commit --amend b + $ hg histedit --continue + $ hg log -G + @ 6:46abc7c4d873 b + | + o 5:49d44ab2be1b c + | + o 0:cb9a9f314b8b a + + $ hg debugobsolete + e72d22b19f8ecf4150ab4f91d0973fd9955d3ddf 49d44ab2be1b67a79127568a67c9c99430633b48 0 (*) {'user': 'test'} (glob) + 3e30a45cf2f719e96ab3922dfe039cfd047956ce 0 {e72d22b19f8ecf4150ab4f91d0973fd9955d3ddf} (*) {'user': 'test'} (glob) + 1b2d564fad96311b45362f17c2aa855150efb35f 46abc7c4d8738e8563e577f7889e1b6db3da4199 0 (*) {'user': 'test'} (glob) + 114f4176969ef342759a8a57e6bccefc4234829b 49d44ab2be1b67a79127568a67c9c99430633b48 0 (*) {'user': 'test'} (glob) + +With some node gone missing during the edit. + + $ echo "pick `hg log -r 0 -T '{node|short}'`" > plan + $ echo "pick `hg log -r 6 -T '{node|short}'`" >> plan + $ echo "edit `hg log -r 5 -T '{node|short}'`" >> plan + $ hg histedit -r 'all()' --commands plan + Editing (49d44ab2be1b), you may commit or record as needed now. + (hg histedit --continue to resume) + [1] + $ hg st + A b + A d + ? plan + $ hg commit --amend -X . -m XXXXXX + $ hg commit --amend -X . 
-m b2 + $ hg --hidden --config extensions.strip= strip 'desc(XXXXXX)' --no-backup + $ hg histedit --continue + $ hg log -G + @ 9:273c1f3b8626 c + | + o 8:aba7da937030 b2 + | + o 0:cb9a9f314b8b a + + $ hg debugobsolete + e72d22b19f8ecf4150ab4f91d0973fd9955d3ddf 49d44ab2be1b67a79127568a67c9c99430633b48 0 (*) {'user': 'test'} (glob) + 3e30a45cf2f719e96ab3922dfe039cfd047956ce 0 {e72d22b19f8ecf4150ab4f91d0973fd9955d3ddf} (*) {'user': 'test'} (glob) + 1b2d564fad96311b45362f17c2aa855150efb35f 46abc7c4d8738e8563e577f7889e1b6db3da4199 0 (*) {'user': 'test'} (glob) + 114f4176969ef342759a8a57e6bccefc4234829b 49d44ab2be1b67a79127568a67c9c99430633b48 0 (*) {'user': 'test'} (glob) + 76f72745eac0643d16530e56e2f86e36e40631f1 2ca853e48edbd6453a0674dc0fe28a0974c51b9c 0 (*) {'user': 'test'} (glob) + 2ca853e48edbd6453a0674dc0fe28a0974c51b9c aba7da93703075eec9fb1dbaf143ff2bc1c49d46 0 (*) {'user': 'test'} (glob) + 49d44ab2be1b67a79127568a67c9c99430633b48 273c1f3b86267ed3ec684bb13af1fa4d6ba56e02 0 (*) {'user': 'test'} (glob) + 46abc7c4d8738e8563e577f7889e1b6db3da4199 aba7da93703075eec9fb1dbaf143ff2bc1c49d46 0 (*) {'user': 'test'} (glob) + $ cd .. 
+ +Base setup for the rest of the testing +====================================== + $ hg init base $ cd base @@ -48,6 +127,8 @@ # # Commits are listed from least to most recent # + # You can reorder changesets by reordering the lines + # # Commands: # # e, edit = use commit, but stop for amending @@ -108,7 +189,6 @@ > drop 59d9f330561f 7 d > pick cacdfd884a93 8 f > EOF - 0 files updated, 0 files merged, 3 files removed, 0 files unresolved $ hg log --graph @ 11:c13eb81022ca f | @@ -167,7 +247,6 @@ > pick 40db8afa467b 10 c > drop b449568bf7fc 11 f > EOF - 0 files updated, 0 files merged, 1 files removed, 0 files unresolved $ hg log -G @ 12:40db8afa467b c | @@ -187,7 +266,6 @@ > pick 40db8afa467b 10 c > drop 1b3b05f35ff0 13 h > EOF - 0 files updated, 0 files merged, 3 files removed, 0 files unresolved $ hg log -G @ 17:ee6544123ab8 c | @@ -298,7 +376,7 @@ $ cd .. -New-commit as draft (default) +New-commit as secret (config) $ cp -r base simple-secret $ cd simple-secret @@ -357,7 +435,6 @@ > pick 7395e1ff83bd 13 h > pick ee118ab9fa44 16 k > EOF - 0 files updated, 0 files merged, 5 files removed, 0 files unresolved $ hg log -G @ 23:558246857888 (secret) k | @@ -399,13 +476,6 @@ > pick b605fb7503f2 14 i > fold ee118ab9fa44 16 k > EOF - 0 files updated, 0 files merged, 6 files removed, 0 files unresolved - 0 files updated, 0 files merged, 2 files removed, 0 files unresolved - 2 files updated, 0 files merged, 0 files removed, 0 files unresolved - 0 files updated, 0 files merged, 2 files removed, 0 files unresolved - 2 files updated, 0 files merged, 0 files removed, 0 files unresolved - 0 files updated, 0 files merged, 2 files removed, 0 files unresolved - 2 files updated, 0 files merged, 0 files removed, 0 files unresolved $ hg log -G @ 27:f9daec13fb98 (secret) i |
--- a/tests/test-histedit-outgoing.t Sun Mar 13 02:29:11 2016 +0100 +++ b/tests/test-histedit-outgoing.t Tue Mar 15 14:10:46 2016 -0700 @@ -45,6 +45,8 @@ # # Commits are listed from least to most recent # + # You can reorder changesets by reordering the lines + # # Commands: # # e, edit = use commit, but stop for amending @@ -77,6 +79,8 @@ # # Commits are listed from least to most recent # + # You can reorder changesets by reordering the lines + # # Commands: # # e, edit = use commit, but stop for amending @@ -101,6 +105,8 @@ # # Commits are listed from least to most recent # + # You can reorder changesets by reordering the lines + # # Commands: # # e, edit = use commit, but stop for amending
--- a/tests/test-hook.t Sun Mar 13 02:29:11 2016 +0100 +++ b/tests/test-hook.t Tue Mar 15 14:10:46 2016 -0700 @@ -436,6 +436,10 @@ > unreachable = 1 > EOF + $ cat > syntaxerror.py << EOF + > (foo + > EOF + test python hooks #if windows @@ -480,7 +484,7 @@ $ hg pull ../a pulling from ../a searching for changes - abort: preoutgoing.uncallable hook is invalid ("hooktests.uncallable" is not callable) + abort: preoutgoing.uncallable hook is invalid: "hooktests.uncallable" is not callable [255] $ echo '[hooks]' > ../a/.hg/hgrc @@ -488,7 +492,7 @@ $ hg pull ../a pulling from ../a searching for changes - abort: preoutgoing.nohook hook is invalid ("hooktests.nohook" is not defined) + abort: preoutgoing.nohook hook is invalid: "hooktests.nohook" is not defined [255] $ echo '[hooks]' > ../a/.hg/hgrc @@ -496,7 +500,7 @@ $ hg pull ../a pulling from ../a searching for changes - abort: preoutgoing.nomodule hook is invalid ("nomodule" not in a module) + abort: preoutgoing.nomodule hook is invalid: "nomodule" not in a module [255] $ echo '[hooks]' > ../a/.hg/hgrc @@ -504,7 +508,8 @@ $ hg pull ../a pulling from ../a searching for changes - abort: preoutgoing.badmodule hook is invalid (import of "nomodule" failed) + abort: preoutgoing.badmodule hook is invalid: import of "nomodule" failed + (run with --traceback for stack trace) [255] $ echo '[hooks]' > ../a/.hg/hgrc @@ -512,9 +517,34 @@ $ hg pull ../a pulling from ../a searching for changes - abort: preoutgoing.unreachable hook is invalid (import of "hooktests.container" failed) + abort: preoutgoing.unreachable hook is invalid: import of "hooktests.container" failed + (run with --traceback for stack trace) + [255] + + $ echo '[hooks]' > ../a/.hg/hgrc + $ echo 'preoutgoing.syntaxerror = python:syntaxerror.syntaxerror' >> ../a/.hg/hgrc + $ hg pull ../a + pulling from ../a + searching for changes + abort: preoutgoing.syntaxerror hook is invalid: import of "syntaxerror" failed + (run with --traceback for stack trace) [255] +The second 
egrep is to filter out lines like ' ^', which are slightly +different between Python 2.6 and Python 2.7. + $ hg pull ../a --traceback 2>&1 | egrep -v '^( +File| [_a-zA-Z*(])' | egrep -v '^( )+(\^)?$' + pulling from ../a + searching for changes + exception from first failed import attempt: + Traceback (most recent call last): + SyntaxError: invalid syntax + exception from second failed import attempt: + Traceback (most recent call last): + ImportError: No module named hgext_syntaxerror + Traceback (most recent call last): + HookLoadError: preoutgoing.syntaxerror hook is invalid: import of "syntaxerror" failed + abort: preoutgoing.syntaxerror hook is invalid: import of "syntaxerror" failed + $ echo '[hooks]' > ../a/.hg/hgrc $ echo 'preoutgoing.pass = python:hooktests.passhook' >> ../a/.hg/hgrc $ hg pull ../a @@ -530,6 +560,46 @@ adding remote bookmark quux (run 'hg update' to get a working copy) +post- python hooks that fail to *run* don't cause an abort + $ rm ../a/.hg/hgrc + $ echo '[hooks]' > .hg/hgrc + $ echo 'post-pull.broken = python:hooktests.brokenhook' >> .hg/hgrc + $ hg pull ../a + pulling from ../a + searching for changes + no changes found + error: post-pull.broken hook raised an exception: unsupported operand type(s) for +: 'int' and 'dict' + (run with --traceback for stack trace) + +but post- python hooks that fail to *load* do + $ echo '[hooks]' > .hg/hgrc + $ echo 'post-pull.nomodule = python:nomodule' >> .hg/hgrc + $ hg pull ../a + pulling from ../a + searching for changes + no changes found + abort: post-pull.nomodule hook is invalid: "nomodule" not in a module + [255] + + $ echo '[hooks]' > .hg/hgrc + $ echo 'post-pull.badmodule = python:nomodule.nowhere' >> .hg/hgrc + $ hg pull ../a + pulling from ../a + searching for changes + no changes found + abort: post-pull.badmodule hook is invalid: import of "nomodule" failed + (run with --traceback for stack trace) + [255] + + $ echo '[hooks]' > .hg/hgrc + $ echo 'post-pull.nohook = 
python:hooktests.nohook' >> .hg/hgrc + $ hg pull ../a + pulling from ../a + searching for changes + no changes found + abort: post-pull.nohook hook is invalid: "hooktests.nohook" is not defined + [255] + make sure --traceback works $ echo '[hooks]' > .hg/hgrc @@ -628,8 +698,8 @@ Traceback (most recent call last): ImportError: No module named hgext_importfail Traceback (most recent call last): - HookLoadError: precommit.importfail hook is invalid (import of "importfail" failed) - abort: precommit.importfail hook is invalid (import of "importfail" failed) + HookLoadError: precommit.importfail hook is invalid: import of "importfail" failed + abort: precommit.importfail hook is invalid: import of "importfail" failed Issue1827: Hooks Update & Commit not completely post operation
--- a/tests/test-https.t Sun Mar 13 02:29:11 2016 +0100 +++ b/tests/test-https.t Tue Mar 15 14:10:46 2016 -0700 @@ -290,6 +290,21 @@ $ hg -R copy-pull id https://localhost:$HGPORT/ --config web.cacerts=! 5fed3813f7f5 +- multiple fingerprints specified and first matches + $ hg --config 'hostfingerprints.localhost=914f1aff87249c09b6859b88b1906d30756491ca, deadbeefdeadbeefdeadbeefdeadbeefdeadbeef' -R copy-pull id https://localhost:$HGPORT/ --config web.cacerts=! + 5fed3813f7f5 + +- multiple fingerprints specified and last matches + $ hg --config 'hostfingerprints.localhost=deadbeefdeadbeefdeadbeefdeadbeefdeadbeef, 914f1aff87249c09b6859b88b1906d30756491ca' -R copy-pull id https://localhost:$HGPORT/ --config web.cacerts=! + 5fed3813f7f5 + +- multiple fingerprints specified and none match + + $ hg --config 'hostfingerprints.localhost=deadbeefdeadbeefdeadbeefdeadbeefdeadbeef, aeadbeefdeadbeefdeadbeefdeadbeefdeadbeef' -R copy-pull id https://localhost:$HGPORT/ --config web.cacerts=! + abort: certificate for localhost has unexpected fingerprint 91:4f:1a:ff:87:24:9c:09:b6:85:9b:88:b1:90:6d:30:75:64:91:ca + (check hostfingerprint configuration) + [255] + - fails when cert doesn't match hostname (port is ignored) $ hg -R copy-pull id https://localhost:$HGPORT1/ abort: certificate for localhost has unexpected fingerprint 28:ff:71:bf:65:31:14:23:ad:62:92:b4:0e:31:99:18:fc:83:e3:9b
--- a/tests/test-import-git.t Sun Mar 13 02:29:11 2016 +0100 +++ b/tests/test-import-git.t Tue Mar 15 14:10:46 2016 -0700 @@ -822,4 +822,27 @@ > EOF applying patch from stdin +Test email metadata + + $ hg revert -qa + $ hg --encoding utf-8 import - <<EOF + > From: =?UTF-8?q?Rapha=C3=ABl=20Hertzog?= <hertzog@debian.org> + > Subject: [PATCH] =?UTF-8?q?=C5=A7=E2=82=AC=C3=9F=E1=B9=AA?= + > + > diff --git a/a b/a + > --- a/a + > +++ b/a + > @@ -1,1 +1,2 @@ + > a + > +a + > EOF + applying patch from stdin + $ hg --encoding utf-8 log -r . + changeset: 2:* (glob) + tag: tip + user: Rapha\xc3\xabl Hertzog <hertzog@debian.org> (esc) + date: * (glob) + summary: \xc5\xa7\xe2\x82\xac\xc3\x9f\xe1\xb9\xaa (esc) + + $ cd ..
--- a/tests/test-install.t Sun Mar 13 02:29:11 2016 +0100 +++ b/tests/test-install.t Tue Mar 15 14:10:46 2016 -0700 @@ -6,10 +6,35 @@ checking Python lib (*lib*)... (glob) checking installed modules (*mercurial)... (glob) checking templates (*mercurial?templates)... (glob) - checking commit editor... - checking username... + checking default template (*mercurial?templates?map-cmdline.default) (glob) + checking commit editor... (*python* -c "import sys; sys.exit(0)") (glob) + checking username (test) no problems detected +hg debuginstall JSON + $ hg debuginstall -Tjson + [ + { + "defaulttemplate": "*mercurial?templates?map-cmdline.default", (glob) + "defaulttemplateerror": null, + "defaulttemplatenotfound": "default", + "editor": "*python* -c \"import sys; sys.exit(0)\"", (glob) + "editornotfound": false, + "encoding": "ascii", + "encodingerror": null, + "extensionserror": null, + "hgmodules": "*mercurial", (glob) + "problems": 0, + "pythonexe": "*python", (glob) + "pythonlib": "*python*", (glob) + "pythonver": "*.*.*", (glob) + "templatedirs": "*mercurial?templates", (glob) + "username": "test", + "usernameerror": null, + "vinotfound": false + } + ] + hg debuginstall with no username $ HGUSER= hg debuginstall checking encoding (ascii)... @@ -18,7 +43,8 @@ checking Python lib (*lib*)... (glob) checking installed modules (*mercurial)... (glob) checking templates (*mercurial?templates)... (glob) - checking commit editor... + checking default template (*mercurial?templates?map-cmdline.default) (glob) + checking commit editor... (*python* -c "import sys; sys.exit(0)") (glob) checking username... no username supplied (specify a username in your configuration file) @@ -38,8 +64,9 @@ checking Python lib (*lib*)... (glob) checking installed modules (*mercurial)... (glob) checking templates (*mercurial?templates)... (glob) - checking commit editor... - checking username... 
+ checking default template (*mercurial?templates?map-cmdline.default) (glob) + checking commit editor... (*python* -c "import sys; sys.exit(0)") (glob) + checking username (test) no problems detected #if test-repo
--- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/tests/test-issue1102.t Tue Mar 15 14:10:46 2016 -0700 @@ -0,0 +1,17 @@ + $ rm -rf a + $ hg init a + $ cd a + $ echo a > a + $ hg ci -Am0 + adding a + $ hg tag t1 # 1 + $ hg tag --remove t1 # 2 + + $ hg co 1 + 1 files updated, 0 files merged, 0 files removed, 0 files unresolved + $ hg tag -f -r0 t1 + $ hg tags + tip 3:a49829c4fc11 + t1 0:f7b1eb17ad24 + + $ cd ..
--- a/tests/test-issue1502.t Sun Mar 13 02:29:11 2016 +0100 +++ b/tests/test-issue1502.t Tue Mar 15 14:10:46 2016 -0700 @@ -12,16 +12,14 @@ $ echo "bar" > foo1/a && hg -R foo1 commit -m "edit a in foo1" $ echo "hi" > foo/a && hg -R foo commit -m "edited a foo" - $ hg -R foo1 pull -u + $ hg -R foo1 pull pulling from $TESTTMP/foo (glob) searching for changes adding changesets adding manifests adding file changes added 1 changesets with 1 changes to 1 files (+1 heads) - abort: not updating: not a linear update - (merge or update --check to force update) - [255] + (run 'hg heads' to see heads, 'hg merge' to merge) $ hg -R foo1 book branchy $ hg -R foo1 book
--- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/tests/test-issue1993.t Tue Mar 15 14:10:46 2016 -0700 @@ -0,0 +1,48 @@ + $ hg init a + $ cd a + $ echo a > a + $ hg ci -Am0 + adding a + $ echo b > b + $ hg ci -Am1 + adding b + $ hg tag -r0 default + warning: tag default conflicts with existing branch name + $ hg log + changeset: 2:30a83d1e4a1e + tag: tip + user: test + date: Thu Jan 01 00:00:00 1970 +0000 + summary: Added tag default for changeset f7b1eb17ad24 + + changeset: 1:925d80f479bb + user: test + date: Thu Jan 01 00:00:00 1970 +0000 + summary: 1 + + changeset: 0:f7b1eb17ad24 + tag: default + user: test + date: Thu Jan 01 00:00:00 1970 +0000 + summary: 0 + + $ hg update 'tag(default)' + 0 files updated, 0 files merged, 2 files removed, 0 files unresolved + $ hg parents + changeset: 0:f7b1eb17ad24 + tag: default + user: test + date: Thu Jan 01 00:00:00 1970 +0000 + summary: 0 + + $ hg update 'branch(default)' + 2 files updated, 0 files merged, 0 files removed, 0 files unresolved + $ hg parents + changeset: 2:30a83d1e4a1e + tag: tip + user: test + date: Thu Jan 01 00:00:00 1970 +0000 + summary: Added tag default for changeset f7b1eb17ad24 + + + $ cd ..
--- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/tests/test-issue586.t Tue Mar 15 14:10:46 2016 -0700 @@ -0,0 +1,92 @@ +Issue586: removing remote files after merge appears to corrupt the +dirstate + + $ hg init a + $ cd a + $ echo a > a + $ hg ci -Ama + adding a + + $ hg init ../b + $ cd ../b + $ echo b > b + $ hg ci -Amb + adding b + + $ hg pull -f ../a + pulling from ../a + searching for changes + warning: repository is unrelated + requesting all changes + adding changesets + adding manifests + adding file changes + added 1 changesets with 1 changes to 1 files (+1 heads) + (run 'hg heads' to see heads, 'hg merge' to merge) + $ hg merge + 1 files updated, 0 files merged, 0 files removed, 0 files unresolved + (branch merge, don't forget to commit) + $ hg rm -f a + $ hg ci -Amc + + $ hg st -A + C b + $ cd .. + +Issue1433: Traceback after two unrelated pull, two move, a merge and +a commit (related to issue586) + +create test repos + + $ hg init repoa + $ touch repoa/a + $ hg -R repoa ci -Am adda + adding a + + $ hg init repob + $ touch repob/b + $ hg -R repob ci -Am addb + adding b + + $ hg init repoc + $ cd repoc + $ hg pull ../repoa + pulling from ../repoa + requesting all changes + adding changesets + adding manifests + adding file changes + added 1 changesets with 1 changes to 1 files + (run 'hg update' to get a working copy) + $ hg update + 1 files updated, 0 files merged, 0 files removed, 0 files unresolved + $ mkdir tst + $ hg mv * tst + $ hg ci -m "import a in tst" + $ hg pull -f ../repob + pulling from ../repob + searching for changes + warning: repository is unrelated + requesting all changes + adding changesets + adding manifests + adding file changes + added 1 changesets with 1 changes to 1 files (+1 heads) + (run 'hg heads' to see heads, 'hg merge' to merge) + +merge both repos + + $ hg merge + 1 files updated, 0 files merged, 0 files removed, 0 files unresolved + (branch merge, don't forget to commit) + $ mkdir src + +move b content + + $ hg mv b src 
+ $ hg ci -m "import b in src" + $ hg manifest + src/b + tst/a + + $ cd ..
--- a/tests/test-issue672.t Sun Mar 13 02:29:11 2016 +0100 +++ b/tests/test-issue672.t Tue Mar 15 14:10:46 2016 -0700 @@ -65,10 +65,11 @@ branchmerge: True, force: False, partial: False ancestor: c64f439569a9, local: e327dca35ac8+, remote: 746e9549ea96 preserving 1a for resolve of 1a + starting 4 threads for background file closing (?) 1a: local copied/moved from 1 -> m (premerge) picked tool ':merge' for 1a (binary False symlink False changedelete False) merging 1a and 1 to 1a - my 1a@e327dca35ac8+ other 1@746e9549ea96 ancestor 1@81f4b099af3d + my 1a@e327dca35ac8+ other 1@746e9549ea96 ancestor 1@c64f439569a9 premerge successful 0 files updated, 1 files merged, 0 files removed, 0 files unresolved (branch merge, don't forget to commit) @@ -88,10 +89,11 @@ ancestor: c64f439569a9, local: 746e9549ea96+, remote: e327dca35ac8 preserving 1 for resolve of 1a removing 1 + starting 4 threads for background file closing (?) 1a: remote moved from 1 -> m (premerge) picked tool ':merge' for 1a (binary False symlink False changedelete False) merging 1 and 1a to 1a - my 1a@746e9549ea96+ other 1a@e327dca35ac8 ancestor 1@81f4b099af3d + my 1a@746e9549ea96+ other 1a@e327dca35ac8 ancestor 1@c64f439569a9 premerge successful 0 files updated, 1 files merged, 0 files removed, 0 files unresolved (branch merge, don't forget to commit)
--- a/tests/test-largefiles-cache.t Sun Mar 13 02:29:11 2016 +0100 +++ b/tests/test-largefiles-cache.t Tue Mar 15 14:10:46 2016 -0700 @@ -189,7 +189,7 @@ Inject corruption into the largefiles store and see how update handles that: $ cd src - $ hg up -qC + $ hg up -qC tip $ cat large modified $ rm large @@ -202,6 +202,7 @@ large: data corruption in $TESTTMP/src/.hg/largefiles/e2fb5f2139d086ded2cb600d5a91a196e76bf020 with hash 6a7bb2556144babe3899b25e5428123735bb1e27 (glob) 0 largefiles updated, 0 removed 0 files updated, 0 files merged, 0 files removed, 0 files unresolved + 2 other heads for branch "default" $ hg st ! large ? z
--- a/tests/test-largefiles-misc.t Sun Mar 13 02:29:11 2016 +0100 +++ b/tests/test-largefiles-misc.t Tue Mar 15 14:10:46 2016 -0700 @@ -682,8 +682,8 @@ all remote heads known locally 1:1acbe71ce432 2:6095d0695d70 - finding outgoing largefiles: 0/2 revision (0.00%) - finding outgoing largefiles: 1/2 revision (50.00%) + finding outgoing largefiles: 0/2 revisions (0.00%) + finding outgoing largefiles: 1/2 revisions (50.00%) largefiles to upload (1 entities): b 89e6c98d92887913cadf06b2adb97f26cde4849b @@ -740,11 +740,11 @@ 3:7983dce246cc 4:233f12ada4ae 5:036794ea641c - finding outgoing largefiles: 0/5 revision (0.00%) - finding outgoing largefiles: 1/5 revision (20.00%) - finding outgoing largefiles: 2/5 revision (40.00%) - finding outgoing largefiles: 3/5 revision (60.00%) - finding outgoing largefiles: 4/5 revision (80.00%) + finding outgoing largefiles: 0/5 revisions (0.00%) + finding outgoing largefiles: 1/5 revisions (20.00%) + finding outgoing largefiles: 2/5 revisions (40.00%) + finding outgoing largefiles: 3/5 revisions (60.00%) + finding outgoing largefiles: 4/5 revisions (80.00%) largefiles to upload (3 entities): b 13f9ed0898e315bf59dc2973fec52037b6f441a2 @@ -791,10 +791,10 @@ 3:7983dce246cc 4:233f12ada4ae 5:036794ea641c - finding outgoing largefiles: 0/4 revision (0.00%) - finding outgoing largefiles: 1/4 revision (25.00%) - finding outgoing largefiles: 2/4 revision (50.00%) - finding outgoing largefiles: 3/4 revision (75.00%) + finding outgoing largefiles: 0/4 revisions (0.00%) + finding outgoing largefiles: 1/4 revisions (25.00%) + finding outgoing largefiles: 2/4 revisions (50.00%) + finding outgoing largefiles: 3/4 revisions (75.00%) largefiles to upload (2 entities): b 13f9ed0898e315bf59dc2973fec52037b6f441a2 @@ -1095,7 +1095,7 @@ adding manifests adding file changes added 1 changesets with 1 changes to 1 files - nothing to rebase - working directory parent is already an ancestor of destination bf5e395ced2c + nothing to rebase - updating instead 1 
files updated, 0 files merged, 0 files removed, 0 files unresolved $ cd ..
--- a/tests/test-largefiles-update.t Sun Mar 13 02:29:11 2016 +0100 +++ b/tests/test-largefiles-update.t Tue Mar 15 14:10:46 2016 -0700 @@ -6,6 +6,9 @@ > merge = internal:fail > [extensions] > largefiles = + > [extdiff] + > # for portability: + > pdiff = sh "$RUNTESTDIR/pdiff" > EOF $ hg init repo @@ -20,17 +23,17 @@ $ echo 'large1 in #1' > large1 $ echo 'normal1 in #1' > normal1 $ hg commit -m '#1' - $ hg extdiff -r '.^' --config extensions.extdiff= - diff -Npru repo.0d9d9b8dc9a3/.hglf/large1 repo/.hglf/large1 + $ hg pdiff -r '.^' --config extensions.extdiff= + diff -Nru repo.0d9d9b8dc9a3/.hglf/large1 repo/.hglf/large1 --- repo.0d9d9b8dc9a3/.hglf/large1 * (glob) +++ repo/.hglf/large1 * (glob) - @@ -1 +1 @@ + @@ -1* +1* @@ (glob) -4669e532d5b2c093a78eca010077e708a071bb64 +58e24f733a964da346e2407a2bee99d9001184f5 - diff -Npru repo.0d9d9b8dc9a3/normal1 repo/normal1 + diff -Nru repo.0d9d9b8dc9a3/normal1 repo/normal1 --- repo.0d9d9b8dc9a3/normal1 * (glob) +++ repo/normal1 * (glob) - @@ -1 +1 @@ + @@ -1* +1* @@ (glob) -normal1 +normal1 in #1 [1] @@ -68,6 +71,7 @@ $ hg up 0 files updated, 0 files merged, 0 files removed, 0 files unresolved + 1 other heads for branch "default" $ hg debugdirstate --large --nodate n 644 7 set large1 n 644 13 set large2 @@ -82,6 +86,7 @@ n 644 13 set large2 $ hg up 0 files updated, 0 files merged, 0 files removed, 0 files unresolved + 1 other heads for branch "default" $ hg debugdirstate --large --nodate n 644 7 set large1 n 644 13 set large2 @@ -463,6 +468,7 @@ keep (l)ocal ba94c2efe5b7c5e0af8d189295ce00553b0612b7 or take (o)ther e5bb990443d6a92aaf7223813720f7566c9dd05b? l 2 files updated, 1 files merged, 0 files removed, 0 files unresolved + 1 other heads for branch "default" $ hg status -A large1 M large1 @@ -496,6 +502,7 @@ keep (l)ocal ba94c2efe5b7c5e0af8d189295ce00553b0612b7 or take (o)ther e5bb990443d6a92aaf7223813720f7566c9dd05b? 
l 2 files updated, 1 files merged, 0 files removed, 0 files unresolved + 1 other heads for branch "default" $ hg status -A large1 M large1
--- a/tests/test-largefiles-wireproto.t Sun Mar 13 02:29:11 2016 +0100 +++ b/tests/test-largefiles-wireproto.t Tue Mar 15 14:10:46 2016 -0700 @@ -291,7 +291,7 @@ using http://localhost:$HGPORT2/ sending capabilities command sending batch command - getting largefiles: 0/1 lfile (0.00%) + getting largefiles: 0/1 files (0.00%) getting f1:02a439e5c31c526465ab1a0ca1f431f76b827b90 sending getlfile command found 02a439e5c31c526465ab1a0ca1f431f76b827b90 in store
--- a/tests/test-lock.py Sun Mar 13 02:29:11 2016 +0100 +++ b/tests/test-lock.py Tue Mar 15 14:10:46 2016 -0700 @@ -28,7 +28,7 @@ self._pidoffset = pidoffset super(lockwrapper, self).__init__(*args, **kwargs) def _getpid(self): - return os.getpid() + self._pidoffset + return super(lockwrapper, self)._getpid() + self._pidoffset class teststate(object): def __init__(self, testcase, dir, pidoffset=0):
--- a/tests/test-merge-changedelete.t Sun Mar 13 02:29:11 2016 +0100 +++ b/tests/test-merge-changedelete.t Tue Mar 15 14:10:46 2016 -0700 @@ -77,14 +77,17 @@ * version 2 records local: 13910f48cf7bdb2a0ba6e24b4900e4fdd5739dd4 other: 10f9a0a634e82080907e62f075ab119cbc565ea6 + file extras: file1 (ancestorlinknode = ab57bf49aa276a22d35a473592d4c34b5abc3eff) file: file1 (record type "C", state "u", hash 60b27f004e454aca81b0480209cce5081ec52390) local path: file1 (flags "") ancestor path: file1 (node b8e02f6433738021a065f94175c7cd23db5f05be) other path: file1 (node null) + file extras: file2 (ancestorlinknode = ab57bf49aa276a22d35a473592d4c34b5abc3eff) file: file2 (record type "C", state "u", hash null) local path: file2 (flags "") ancestor path: file2 (node 5d9299349fc01ddd25d0070d149b124d8f10411e) other path: file2 (node e7c1328648519852e723de86c0c0525acd779257) + file extras: file3 (ancestorlinknode = ab57bf49aa276a22d35a473592d4c34b5abc3eff) file: file3 (record type "F", state "u", hash d5b0a58bc47161b1b8a831084b366f757c4f0b11) local path: file3 (flags "") ancestor path: file3 (node 2661d26c649684b482d10f91960cc3db683c38b4) @@ -108,6 +111,7 @@ $ hg co -C 1 files updated, 0 files merged, 1 files removed, 0 files unresolved + 1 other heads for branch "default" $ hg merge --config ui.interactive=true <<EOF > c @@ -136,14 +140,17 @@ * version 2 records local: 13910f48cf7bdb2a0ba6e24b4900e4fdd5739dd4 other: 10f9a0a634e82080907e62f075ab119cbc565ea6 + file extras: file1 (ancestorlinknode = ab57bf49aa276a22d35a473592d4c34b5abc3eff) file: file1 (record type "C", state "r", hash 60b27f004e454aca81b0480209cce5081ec52390) local path: file1 (flags "") ancestor path: file1 (node b8e02f6433738021a065f94175c7cd23db5f05be) other path: file1 (node null) + file extras: file2 (ancestorlinknode = ab57bf49aa276a22d35a473592d4c34b5abc3eff) file: file2 (record type "C", state "r", hash null) local path: file2 (flags "") ancestor path: file2 (node 5d9299349fc01ddd25d0070d149b124d8f10411e) 
other path: file2 (node e7c1328648519852e723de86c0c0525acd779257) + file extras: file3 (ancestorlinknode = ab57bf49aa276a22d35a473592d4c34b5abc3eff) file: file3 (record type "F", state "u", hash d5b0a58bc47161b1b8a831084b366f757c4f0b11) local path: file3 (flags "") ancestor path: file3 (node 2661d26c649684b482d10f91960cc3db683c38b4) @@ -165,6 +172,7 @@ $ hg co -C 1 files updated, 0 files merged, 0 files removed, 0 files unresolved + 1 other heads for branch "default" $ hg merge --config ui.interactive=true <<EOF > foo @@ -205,14 +213,17 @@ * version 2 records local: 13910f48cf7bdb2a0ba6e24b4900e4fdd5739dd4 other: 10f9a0a634e82080907e62f075ab119cbc565ea6 + file extras: file1 (ancestorlinknode = ab57bf49aa276a22d35a473592d4c34b5abc3eff) file: file1 (record type "C", state "r", hash 60b27f004e454aca81b0480209cce5081ec52390) local path: file1 (flags "") ancestor path: file1 (node b8e02f6433738021a065f94175c7cd23db5f05be) other path: file1 (node null) + file extras: file2 (ancestorlinknode = ab57bf49aa276a22d35a473592d4c34b5abc3eff) file: file2 (record type "C", state "r", hash null) local path: file2 (flags "") ancestor path: file2 (node 5d9299349fc01ddd25d0070d149b124d8f10411e) other path: file2 (node e7c1328648519852e723de86c0c0525acd779257) + file extras: file3 (ancestorlinknode = ab57bf49aa276a22d35a473592d4c34b5abc3eff) file: file3 (record type "F", state "u", hash d5b0a58bc47161b1b8a831084b366f757c4f0b11) local path: file3 (flags "") ancestor path: file3 (node 2661d26c649684b482d10f91960cc3db683c38b4) @@ -234,6 +245,7 @@ $ hg co -C 2 files updated, 0 files merged, 1 files removed, 0 files unresolved + 1 other heads for branch "default" $ hg merge --config ui.interactive=true <<EOF > d @@ -261,14 +273,17 @@ * version 2 records local: 13910f48cf7bdb2a0ba6e24b4900e4fdd5739dd4 other: 10f9a0a634e82080907e62f075ab119cbc565ea6 + file extras: file1 (ancestorlinknode = ab57bf49aa276a22d35a473592d4c34b5abc3eff) file: file1 (record type "C", state "r", hash 
60b27f004e454aca81b0480209cce5081ec52390) local path: file1 (flags "") ancestor path: file1 (node b8e02f6433738021a065f94175c7cd23db5f05be) other path: file1 (node null) + file extras: file2 (ancestorlinknode = ab57bf49aa276a22d35a473592d4c34b5abc3eff) file: file2 (record type "C", state "u", hash null) local path: file2 (flags "") ancestor path: file2 (node 5d9299349fc01ddd25d0070d149b124d8f10411e) other path: file2 (node e7c1328648519852e723de86c0c0525acd779257) + file extras: file3 (ancestorlinknode = ab57bf49aa276a22d35a473592d4c34b5abc3eff) file: file3 (record type "F", state "u", hash d5b0a58bc47161b1b8a831084b366f757c4f0b11) local path: file3 (flags "") ancestor path: file3 (node 2661d26c649684b482d10f91960cc3db683c38b4) @@ -289,6 +304,7 @@ $ hg co -C 2 files updated, 0 files merged, 1 files removed, 0 files unresolved + 1 other heads for branch "default" $ hg merge --tool :local 0 files updated, 3 files merged, 0 files removed, 0 files unresolved @@ -306,14 +322,17 @@ * version 2 records local: 13910f48cf7bdb2a0ba6e24b4900e4fdd5739dd4 other: 10f9a0a634e82080907e62f075ab119cbc565ea6 + file extras: file1 (ancestorlinknode = ab57bf49aa276a22d35a473592d4c34b5abc3eff) file: file1 (record type "C", state "r", hash 60b27f004e454aca81b0480209cce5081ec52390) local path: file1 (flags "") ancestor path: file1 (node b8e02f6433738021a065f94175c7cd23db5f05be) other path: file1 (node null) + file extras: file2 (ancestorlinknode = ab57bf49aa276a22d35a473592d4c34b5abc3eff) file: file2 (record type "C", state "r", hash null) local path: file2 (flags "") ancestor path: file2 (node 5d9299349fc01ddd25d0070d149b124d8f10411e) other path: file2 (node e7c1328648519852e723de86c0c0525acd779257) + file extras: file3 (ancestorlinknode = ab57bf49aa276a22d35a473592d4c34b5abc3eff) file: file3 (record type "F", state "r", hash d5b0a58bc47161b1b8a831084b366f757c4f0b11) local path: file3 (flags "") ancestor path: file3 (node 2661d26c649684b482d10f91960cc3db683c38b4) @@ -330,6 +349,7 @@ $ hg 
co -C 1 files updated, 0 files merged, 0 files removed, 0 files unresolved + 1 other heads for branch "default" $ hg merge --tool :other 0 files updated, 2 files merged, 1 files removed, 0 files unresolved @@ -347,14 +367,17 @@ * version 2 records local: 13910f48cf7bdb2a0ba6e24b4900e4fdd5739dd4 other: 10f9a0a634e82080907e62f075ab119cbc565ea6 + file extras: file1 (ancestorlinknode = ab57bf49aa276a22d35a473592d4c34b5abc3eff) file: file1 (record type "C", state "r", hash 60b27f004e454aca81b0480209cce5081ec52390) local path: file1 (flags "") ancestor path: file1 (node b8e02f6433738021a065f94175c7cd23db5f05be) other path: file1 (node null) + file extras: file2 (ancestorlinknode = ab57bf49aa276a22d35a473592d4c34b5abc3eff) file: file2 (record type "C", state "r", hash null) local path: file2 (flags "") ancestor path: file2 (node 5d9299349fc01ddd25d0070d149b124d8f10411e) other path: file2 (node e7c1328648519852e723de86c0c0525acd779257) + file extras: file3 (ancestorlinknode = ab57bf49aa276a22d35a473592d4c34b5abc3eff) file: file3 (record type "F", state "r", hash d5b0a58bc47161b1b8a831084b366f757c4f0b11) local path: file3 (flags "") ancestor path: file3 (node 2661d26c649684b482d10f91960cc3db683c38b4) @@ -371,6 +394,7 @@ $ hg co -C 2 files updated, 0 files merged, 1 files removed, 0 files unresolved + 1 other heads for branch "default" $ hg merge --tool :fail 0 files updated, 0 files merged, 0 files removed, 3 files unresolved @@ -389,14 +413,17 @@ * version 2 records local: 13910f48cf7bdb2a0ba6e24b4900e4fdd5739dd4 other: 10f9a0a634e82080907e62f075ab119cbc565ea6 + file extras: file1 (ancestorlinknode = ab57bf49aa276a22d35a473592d4c34b5abc3eff) file: file1 (record type "C", state "u", hash 60b27f004e454aca81b0480209cce5081ec52390) local path: file1 (flags "") ancestor path: file1 (node b8e02f6433738021a065f94175c7cd23db5f05be) other path: file1 (node null) + file extras: file2 (ancestorlinknode = ab57bf49aa276a22d35a473592d4c34b5abc3eff) file: file2 (record type "C", state 
"u", hash null) local path: file2 (flags "") ancestor path: file2 (node 5d9299349fc01ddd25d0070d149b124d8f10411e) other path: file2 (node e7c1328648519852e723de86c0c0525acd779257) + file extras: file3 (ancestorlinknode = ab57bf49aa276a22d35a473592d4c34b5abc3eff) file: file3 (record type "F", state "u", hash d5b0a58bc47161b1b8a831084b366f757c4f0b11) local path: file3 (flags "") ancestor path: file3 (node 2661d26c649684b482d10f91960cc3db683c38b4) @@ -415,6 +442,7 @@ $ hg co -C 1 files updated, 0 files merged, 1 files removed, 0 files unresolved + 1 other heads for branch "default" $ hg merge --config ui.interactive=True --tool :prompt local changed file1 which remote deleted @@ -439,14 +467,17 @@ * version 2 records local: 13910f48cf7bdb2a0ba6e24b4900e4fdd5739dd4 other: 10f9a0a634e82080907e62f075ab119cbc565ea6 + file extras: file1 (ancestorlinknode = ab57bf49aa276a22d35a473592d4c34b5abc3eff) file: file1 (record type "C", state "u", hash 60b27f004e454aca81b0480209cce5081ec52390) local path: file1 (flags "") ancestor path: file1 (node b8e02f6433738021a065f94175c7cd23db5f05be) other path: file1 (node null) + file extras: file2 (ancestorlinknode = ab57bf49aa276a22d35a473592d4c34b5abc3eff) file: file2 (record type "C", state "u", hash null) local path: file2 (flags "") ancestor path: file2 (node 5d9299349fc01ddd25d0070d149b124d8f10411e) other path: file2 (node e7c1328648519852e723de86c0c0525acd779257) + file extras: file3 (ancestorlinknode = ab57bf49aa276a22d35a473592d4c34b5abc3eff) file: file3 (record type "F", state "u", hash d5b0a58bc47161b1b8a831084b366f757c4f0b11) local path: file3 (flags "") ancestor path: file3 (node 2661d26c649684b482d10f91960cc3db683c38b4) @@ -467,6 +498,7 @@ $ hg co -C 1 files updated, 0 files merged, 1 files removed, 0 files unresolved + 1 other heads for branch "default" $ hg merge --tool :prompt local changed file1 which remote deleted @@ -491,14 +523,17 @@ * version 2 records local: 13910f48cf7bdb2a0ba6e24b4900e4fdd5739dd4 other: 
10f9a0a634e82080907e62f075ab119cbc565ea6 + file extras: file1 (ancestorlinknode = ab57bf49aa276a22d35a473592d4c34b5abc3eff) file: file1 (record type "C", state "u", hash 60b27f004e454aca81b0480209cce5081ec52390) local path: file1 (flags "") ancestor path: file1 (node b8e02f6433738021a065f94175c7cd23db5f05be) other path: file1 (node null) + file extras: file2 (ancestorlinknode = ab57bf49aa276a22d35a473592d4c34b5abc3eff) file: file2 (record type "C", state "u", hash null) local path: file2 (flags "") ancestor path: file2 (node 5d9299349fc01ddd25d0070d149b124d8f10411e) other path: file2 (node e7c1328648519852e723de86c0c0525acd779257) + file extras: file3 (ancestorlinknode = ab57bf49aa276a22d35a473592d4c34b5abc3eff) file: file3 (record type "F", state "u", hash d5b0a58bc47161b1b8a831084b366f757c4f0b11) local path: file3 (flags "") ancestor path: file3 (node 2661d26c649684b482d10f91960cc3db683c38b4) @@ -517,6 +552,7 @@ $ hg co -C 1 files updated, 0 files merged, 1 files removed, 0 files unresolved + 1 other heads for branch "default" $ hg merge --tool :merge3 local changed file1 which remote deleted @@ -541,14 +577,17 @@ * version 2 records local: 13910f48cf7bdb2a0ba6e24b4900e4fdd5739dd4 other: 10f9a0a634e82080907e62f075ab119cbc565ea6 + file extras: file1 (ancestorlinknode = ab57bf49aa276a22d35a473592d4c34b5abc3eff) file: file1 (record type "C", state "u", hash 60b27f004e454aca81b0480209cce5081ec52390) local path: file1 (flags "") ancestor path: file1 (node b8e02f6433738021a065f94175c7cd23db5f05be) other path: file1 (node null) + file extras: file2 (ancestorlinknode = ab57bf49aa276a22d35a473592d4c34b5abc3eff) file: file2 (record type "C", state "u", hash null) local path: file2 (flags "") ancestor path: file2 (node 5d9299349fc01ddd25d0070d149b124d8f10411e) other path: file2 (node e7c1328648519852e723de86c0c0525acd779257) + file extras: file3 (ancestorlinknode = ab57bf49aa276a22d35a473592d4c34b5abc3eff) file: file3 (record type "F", state "u", hash 
d5b0a58bc47161b1b8a831084b366f757c4f0b11) local path: file3 (flags "") ancestor path: file3 (node 2661d26c649684b482d10f91960cc3db683c38b4) @@ -697,10 +736,12 @@ * version 2 records local: ab57bf49aa276a22d35a473592d4c34b5abc3eff other: 10f9a0a634e82080907e62f075ab119cbc565ea6 + file extras: file1 (ancestorlinknode = ab57bf49aa276a22d35a473592d4c34b5abc3eff) file: file1 (record type "C", state "u", hash 60b27f004e454aca81b0480209cce5081ec52390) local path: file1 (flags "") ancestor path: file1 (node b8e02f6433738021a065f94175c7cd23db5f05be) other path: file1 (node null) + file extras: file2 (ancestorlinknode = ab57bf49aa276a22d35a473592d4c34b5abc3eff) file: file2 (record type "C", state "u", hash null) local path: file2 (flags "") ancestor path: file2 (node 5d9299349fc01ddd25d0070d149b124d8f10411e) @@ -735,10 +776,12 @@ * version 2 records local: ab57bf49aa276a22d35a473592d4c34b5abc3eff other: 10f9a0a634e82080907e62f075ab119cbc565ea6 + file extras: file1 (ancestorlinknode = ab57bf49aa276a22d35a473592d4c34b5abc3eff) file: file1 (record type "C", state "r", hash 60b27f004e454aca81b0480209cce5081ec52390) local path: file1 (flags "") ancestor path: file1 (node b8e02f6433738021a065f94175c7cd23db5f05be) other path: file1 (node null) + file extras: file2 (ancestorlinknode = ab57bf49aa276a22d35a473592d4c34b5abc3eff) file: file2 (record type "C", state "r", hash null) local path: file2 (flags "") ancestor path: file2 (node 5d9299349fc01ddd25d0070d149b124d8f10411e) @@ -771,10 +814,12 @@ * version 2 records local: ab57bf49aa276a22d35a473592d4c34b5abc3eff other: 10f9a0a634e82080907e62f075ab119cbc565ea6 + file extras: file1 (ancestorlinknode = ab57bf49aa276a22d35a473592d4c34b5abc3eff) file: file1 (record type "C", state "r", hash 60b27f004e454aca81b0480209cce5081ec52390) local path: file1 (flags "") ancestor path: file1 (node b8e02f6433738021a065f94175c7cd23db5f05be) other path: file1 (node null) + file extras: file2 (ancestorlinknode = ab57bf49aa276a22d35a473592d4c34b5abc3eff) 
file: file2 (record type "C", state "r", hash null) local path: file2 (flags "") ancestor path: file2 (node 5d9299349fc01ddd25d0070d149b124d8f10411e) @@ -809,10 +854,12 @@ * version 2 records local: ab57bf49aa276a22d35a473592d4c34b5abc3eff other: 10f9a0a634e82080907e62f075ab119cbc565ea6 + file extras: file1 (ancestorlinknode = ab57bf49aa276a22d35a473592d4c34b5abc3eff) file: file1 (record type "C", state "u", hash 60b27f004e454aca81b0480209cce5081ec52390) local path: file1 (flags "") ancestor path: file1 (node b8e02f6433738021a065f94175c7cd23db5f05be) other path: file1 (node null) + file extras: file2 (ancestorlinknode = ab57bf49aa276a22d35a473592d4c34b5abc3eff) file: file2 (record type "C", state "u", hash null) local path: file2 (flags "") ancestor path: file2 (node 5d9299349fc01ddd25d0070d149b124d8f10411e) @@ -853,10 +900,12 @@ * version 2 records local: ab57bf49aa276a22d35a473592d4c34b5abc3eff other: 10f9a0a634e82080907e62f075ab119cbc565ea6 + file extras: file1 (ancestorlinknode = ab57bf49aa276a22d35a473592d4c34b5abc3eff) file: file1 (record type "C", state "u", hash 60b27f004e454aca81b0480209cce5081ec52390) local path: file1 (flags "") ancestor path: file1 (node b8e02f6433738021a065f94175c7cd23db5f05be) other path: file1 (node null) + file extras: file2 (ancestorlinknode = ab57bf49aa276a22d35a473592d4c34b5abc3eff) file: file2 (record type "C", state "u", hash null) local path: file2 (flags "") ancestor path: file2 (node 5d9299349fc01ddd25d0070d149b124d8f10411e) @@ -898,10 +947,12 @@ * version 2 records local: ab57bf49aa276a22d35a473592d4c34b5abc3eff other: 10f9a0a634e82080907e62f075ab119cbc565ea6 + file extras: file1 (ancestorlinknode = ab57bf49aa276a22d35a473592d4c34b5abc3eff) file: file1 (record type "C", state "u", hash 60b27f004e454aca81b0480209cce5081ec52390) local path: file1 (flags "") ancestor path: file1 (node b8e02f6433738021a065f94175c7cd23db5f05be) other path: file1 (node null) + file extras: file2 (ancestorlinknode = 
ab57bf49aa276a22d35a473592d4c34b5abc3eff) file: file2 (record type "C", state "u", hash null) local path: file2 (flags "") ancestor path: file2 (node 5d9299349fc01ddd25d0070d149b124d8f10411e)
--- a/tests/test-merge-commit.t Sun Mar 13 02:29:11 2016 +0100 +++ b/tests/test-merge-commit.t Tue Mar 15 14:10:46 2016 -0700 @@ -72,6 +72,7 @@ branchmerge: True, force: False, partial: False ancestor: 0f2ff26688b9, local: 2263c1be0967+, remote: 0555950ead28 preserving bar for resolve of bar + starting 4 threads for background file closing (?) bar: versions differ -> m (premerge) picked tool ':merge' for bar (binary False symlink False changedelete False) merging bar @@ -158,6 +159,7 @@ branchmerge: True, force: False, partial: False ancestor: 0f2ff26688b9, local: 2263c1be0967+, remote: 3ffa6b9e35f0 preserving bar for resolve of bar + starting 4 threads for background file closing (?) bar: versions differ -> m (premerge) picked tool ':merge' for bar (binary False symlink False changedelete False) merging bar
--- a/tests/test-merge-criss-cross.t Sun Mar 13 02:29:11 2016 +0100 +++ b/tests/test-merge-criss-cross.t Tue Mar 15 14:10:46 2016 -0700 @@ -85,10 +85,10 @@ f2: versions differ -> m (premerge) picked tool ':dump' for f2 (binary False symlink False changedelete False) merging f2 - my f2@3b08d01b0ab5+ other f2@adfe50279922 ancestor f2@40494bf2444c + my f2@3b08d01b0ab5+ other f2@adfe50279922 ancestor f2@0f6b37dbe527 f2: versions differ -> m (merge) picked tool ':dump' for f2 (binary False symlink False changedelete False) - my f2@3b08d01b0ab5+ other f2@adfe50279922 ancestor f2@40494bf2444c + my f2@3b08d01b0ab5+ other f2@adfe50279922 ancestor f2@0f6b37dbe527 1 files updated, 0 files merged, 0 files removed, 1 files unresolved use 'hg resolve' to retry unresolved file merges or 'hg update -C .' to abandon [1] @@ -212,7 +212,7 @@ 1 files updated, 0 files merged, 0 files removed, 0 files unresolved (branch merge, don't forget to commit) - $ hg up -qC + $ hg up -qC tip $ hg merge -v note: merging 3b08d01b0ab5+ and adfe50279922 using bids from ancestors 0f6b37dbe527 and 40663881a6dd
--- a/tests/test-merge-default.t Sun Mar 13 02:29:11 2016 +0100 +++ b/tests/test-merge-default.t Tue Mar 15 14:10:46 2016 -0700 @@ -27,12 +27,13 @@ Should fail because not at a head: $ hg merge - abort: branch 'default' has 3 heads - please merge with an explicit rev - (run 'hg heads .' to see heads) + abort: working directory not at a head revision + (use 'hg update' or merge with an explicit revision) [255] $ hg up 1 files updated, 0 files merged, 0 files removed, 0 files unresolved + 2 other heads for branch "default" Should fail because > 2 heads: @@ -115,3 +116,36 @@ (run 'hg heads' to see all heads) [255] +(on a branch with a two heads) + + $ hg up 5 + 1 files updated, 0 files merged, 0 files removed, 0 files unresolved + $ echo f >> a + $ hg commit -mf + created new head + $ hg log -r '_destmerge()' + changeset: 6:e88e33f3bf62 + parent: 5:a431fabd6039 + parent: 3:ea9ff125ff88 + user: test + date: Thu Jan 01 00:00:00 1970 +0000 + summary: m2 + + +(from the other head) + + $ hg log -r '_destmerge(e88e33f3bf62)' + changeset: 8:b613918999e2 + tag: tip + parent: 5:a431fabd6039 + user: test + date: Thu Jan 01 00:00:00 1970 +0000 + summary: f + + +(from unrelated branch) + + $ hg log -r '_destmerge(foobranch)' + abort: branch 'foobranch' has one head - please merge with an explicit rev + (run 'hg heads' to see all heads) + [255]
--- a/tests/test-merge-force.t Sun Mar 13 02:29:11 2016 +0100 +++ b/tests/test-merge-force.t Tue Mar 15 14:10:46 2016 -0700 @@ -141,7 +141,7 @@ # - local and remote changed content1_content2_*_content2-untracked # in the same way, so it could potentially be left alone - $ hg merge -f --tool internal:merge3 'desc("remote")' + $ hg merge -f --tool internal:merge3 'desc("remote")' 2>&1 | tee $TESTTMP/merge-output-1 local changed content1_missing_content1_content4-tracked which remote deleted use (c)hanged version, (d)elete, or leave (u)nresolved? u local changed content1_missing_content3_content3-tracked which remote deleted @@ -217,7 +217,6 @@ warning: conflicts while merging missing_content2_missing_content4-untracked! (edit, then use 'hg resolve --mark') 18 files updated, 3 files merged, 8 files removed, 35 files unresolved use 'hg resolve' to retry unresolved file merges or 'hg update -C .' to abandon - [1] Check which files need to be resolved (should correspond to the output above). This should be the files for which the base (1st filename segment), the remote @@ -780,3 +779,17 @@ [1] $ checkstatus > $TESTTMP/status2 2>&1 $ cmp $TESTTMP/status1 $TESTTMP/status2 || diff -U8 $TESTTMP/status1 $TESTTMP/status2 + +Set up working directory again + + $ hg -q update --clean 2 + $ hg --config extensions.purge= purge + $ python $TESTDIR/generate-working-copy-states.py state 3 wc + $ hg addremove -q --similarity 0 + $ hg forget *_*_*_*-untracked + $ rm *_*_*_missing-* + +Merge with checkunknown = warn, see that behavior is the same as before + $ hg merge -f --tool internal:merge3 'desc("remote")' --config merge.checkunknown=warn > $TESTTMP/merge-output-2 2>&1 + [1] + $ cmp $TESTTMP/merge-output-1 $TESTTMP/merge-output-2 || diff -U8 $TESTTMP/merge-output-1 $TESTTMP/merge-output-2
--- a/tests/test-merge-types.t Sun Mar 13 02:29:11 2016 +0100 +++ b/tests/test-merge-types.t Tue Mar 15 14:10:46 2016 -0700 @@ -155,6 +155,7 @@ 1 files updated, 0 files merged, 0 files removed, 0 files unresolved $ hg up 1 files updated, 0 files merged, 0 files removed, 0 files unresolved + 1 other heads for branch "default" $ hg st ? a.orig @@ -175,6 +176,7 @@ keep (l)ocal, take (o)ther, or leave (u)nresolved? u 0 files updated, 0 files merged, 0 files removed, 1 files unresolved use 'hg resolve' to retry unresolved file merges + 1 other heads for branch "default" [1] $ hg diff --git diff --git a/a b/a
--- a/tests/test-merge5.t Sun Mar 13 02:29:11 2016 +0100 +++ b/tests/test-merge5.t Tue Mar 15 14:10:46 2016 -0700 @@ -13,16 +13,12 @@ created new head $ hg update 1 1 files updated, 0 files merged, 0 files removed, 0 files unresolved - $ hg update - abort: not a linear update - (merge or update --check to force update) - [255] $ rm b - $ hg update -c + $ hg update -c 2 abort: uncommitted changes [255] $ hg revert b - $ hg update -c + $ hg update -c 2 0 files updated, 0 files merged, 1 files removed, 0 files unresolved $ mv a c
--- a/tests/test-merge7.t Sun Mar 13 02:29:11 2016 +0100 +++ b/tests/test-merge7.t Tue Mar 15 14:10:46 2016 -0700 @@ -84,6 +84,7 @@ branchmerge: True, force: False, partial: False ancestor: 96b70246a118, local: 50c3a7e29886+, remote: 40d11a4173a8 preserving test.txt for resolve of test.txt + starting 4 threads for background file closing (?) test.txt: versions differ -> m (premerge) picked tool ':merge' for test.txt (binary False symlink False changedelete False) merging test.txt
--- a/tests/test-module-imports.t Sun Mar 13 02:29:11 2016 +0100 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 @@ -1,146 +0,0 @@ -#require test-repo - - $ import_checker="$TESTDIR"/../contrib/import-checker.py - -Run the doctests from the import checker, and make sure -it's working correctly. - $ TERM=dumb - $ export TERM - $ python -m doctest $import_checker - -Run additional tests for the import checker - - $ mkdir testpackage - - $ cat > testpackage/multiple.py << EOF - > from __future__ import absolute_import - > import os, sys - > EOF - - $ cat > testpackage/unsorted.py << EOF - > from __future__ import absolute_import - > import sys - > import os - > EOF - - $ cat > testpackage/stdafterlocal.py << EOF - > from __future__ import absolute_import - > from . import unsorted - > import os - > EOF - - $ cat > testpackage/requirerelative.py << EOF - > from __future__ import absolute_import - > import testpackage.unsorted - > EOF - - $ cat > testpackage/importalias.py << EOF - > from __future__ import absolute_import - > import ui - > EOF - - $ cat > testpackage/relativestdlib.py << EOF - > from __future__ import absolute_import - > from .. import os - > EOF - - $ cat > testpackage/symbolimport.py << EOF - > from __future__ import absolute_import - > from .unsorted import foo - > EOF - - $ cat > testpackage/latesymbolimport.py << EOF - > from __future__ import absolute_import - > from . import unsorted - > from mercurial.node import hex - > EOF - - $ cat > testpackage/multiplegroups.py << EOF - > from __future__ import absolute_import - > from . import unsorted - > from . import more - > EOF - - $ mkdir testpackage/subpackage - $ cat > testpackage/subpackage/levelpriority.py << EOF - > from __future__ import absolute_import - > from . import foo - > from .. import parent - > EOF - - $ touch testpackage/subpackage/foo.py - $ cat > testpackage/subpackage/__init__.py << EOF - > from __future__ import absolute_import - > from . 
import levelpriority # should not cause cycle - > EOF - - $ cat > testpackage/subpackage/localimport.py << EOF - > from __future__ import absolute_import - > from . import foo - > def bar(): - > # should not cause "higher-level import should come first" - > from .. import unsorted - > # but other errors should be detected - > from .. import more - > import testpackage.subpackage.levelpriority - > EOF - - $ cat > testpackage/importmodulefromsub.py << EOF - > from __future__ import absolute_import - > from .subpackage import foo # not a "direct symbol import" - > EOF - - $ cat > testpackage/importsymbolfromsub.py << EOF - > from __future__ import absolute_import - > from .subpackage import foo, nonmodule - > EOF - - $ cat > testpackage/sortedentries.py << EOF - > from __future__ import absolute_import - > from . import ( - > foo, - > bar, - > ) - > EOF - - $ cat > testpackage/importfromalias.py << EOF - > from __future__ import absolute_import - > from . import ui - > EOF - - $ cat > testpackage/importfromrelative.py << EOF - > from __future__ import absolute_import - > from testpackage.unsorted import foo - > EOF - - $ python "$import_checker" testpackage/*.py testpackage/subpackage/*.py - testpackage/importalias.py:2: ui module must be "as" aliased to uimod - testpackage/importfromalias.py:2: ui from testpackage must be "as" aliased to uimod - testpackage/importfromrelative.py:2: import should be relative: testpackage.unsorted - testpackage/importfromrelative.py:2: direct symbol import foo from testpackage.unsorted - testpackage/importsymbolfromsub.py:2: direct symbol import nonmodule from testpackage.subpackage - testpackage/latesymbolimport.py:3: symbol import follows non-symbol import: mercurial.node - testpackage/multiple.py:2: multiple imported names: os, sys - testpackage/multiplegroups.py:3: multiple "from . 
import" statements - testpackage/relativestdlib.py:2: relative import of stdlib module - testpackage/requirerelative.py:2: import should be relative: testpackage.unsorted - testpackage/sortedentries.py:2: imports from testpackage not lexically sorted: bar < foo - testpackage/stdafterlocal.py:3: stdlib import follows local import: os - testpackage/subpackage/levelpriority.py:3: higher-level import should come first: testpackage - testpackage/subpackage/localimport.py:7: multiple "from .. import" statements - testpackage/subpackage/localimport.py:8: import should be relative: testpackage.subpackage.levelpriority - testpackage/symbolimport.py:2: direct symbol import foo from testpackage.unsorted - testpackage/unsorted.py:3: imports not lexically sorted: os < sys - [1] - - $ cd "$TESTDIR"/.. - -There are a handful of cases here that require renaming a module so it -doesn't overlap with a stdlib module name. There are also some cycles -here that we should still endeavor to fix, and some cycles will be -hidden by deduplication algorithm in the cycle detector, so fixing -these may expose other cycles. - - $ hg locate 'mercurial/**.py' 'hgext/**.py' | sed 's-\\-/-g' | python "$import_checker" - - Import cycle: hgext.largefiles.basestore -> hgext.largefiles.localstore -> hgext.largefiles.basestore - [1]
--- a/tests/test-obsolete-tag-cache.t Sun Mar 13 02:29:11 2016 +0100 +++ b/tests/test-obsolete-tag-cache.t Tue Mar 15 14:10:46 2016 -0700 @@ -67,11 +67,12 @@ 042eb6bfcc4909bad84a1cbf6eb1ddf0ab587d41 head2 55482a6fb4b1881fa8f746fd52cf6f096bb21c89 test1 - $ hg blackbox -l 4 - 1970/01/01 00:00:00 bob (*)> tags (glob) - 1970/01/01 00:00:00 bob (*)> 2/2 cache hits/lookups in * seconds (glob) - 1970/01/01 00:00:00 bob (*)> writing .hg/cache/tags2-visible with 2 tags (glob) - 1970/01/01 00:00:00 bob (*)> tags exited 0 after * seconds (glob) + $ hg blackbox -l 5 + 1970/01/01 00:00:00 bob @2942a772f72a444bef4bef13874d515f50fa27b6 (5000)> tags + 1970/01/01 00:00:00 bob @2942a772f72a444bef4bef13874d515f50fa27b6 (5000)> 2/2 cache hits/lookups in * seconds (glob) + 1970/01/01 00:00:00 bob @2942a772f72a444bef4bef13874d515f50fa27b6 (5000)> writing .hg/cache/tags2-visible with 2 tags + 1970/01/01 00:00:00 bob @2942a772f72a444bef4bef13874d515f50fa27b6 (5000)> tags exited 0 after * seconds (glob) + 1970/01/01 00:00:00 bob @2942a772f72a444bef4bef13874d515f50fa27b6 (5000)> blackbox -l 5 Hiding another changeset should cause the filtered hash to change @@ -86,11 +87,12 @@ 5 2942a772f72a444bef4bef13874d515f50fa27b6 2fce1eec33263d08a4d04293960fc73a555230e4 042eb6bfcc4909bad84a1cbf6eb1ddf0ab587d41 head2 - $ hg blackbox -l 4 - 1970/01/01 00:00:00 bob (*)> tags (glob) - 1970/01/01 00:00:00 bob (*)> 1/1 cache hits/lookups in * seconds (glob) - 1970/01/01 00:00:00 bob (*)> writing .hg/cache/tags2-visible with 1 tags (glob) - 1970/01/01 00:00:00 bob (*)> tags exited 0 after * seconds (glob) + $ hg blackbox -l 5 + 1970/01/01 00:00:00 bob @2942a772f72a444bef4bef13874d515f50fa27b6 (5000)> tags + 1970/01/01 00:00:00 bob @2942a772f72a444bef4bef13874d515f50fa27b6 (5000)> 1/1 cache hits/lookups in * seconds (glob) + 1970/01/01 00:00:00 bob @2942a772f72a444bef4bef13874d515f50fa27b6 (5000)> writing .hg/cache/tags2-visible with 1 tags + 1970/01/01 00:00:00 bob @2942a772f72a444bef4bef13874d515f50fa27b6 
(5000)> tags exited 0 after * seconds (glob) + 1970/01/01 00:00:00 bob @2942a772f72a444bef4bef13874d515f50fa27b6 (5000)> blackbox -l 5 Resolving tags on an unfiltered repo writes a separate tags cache @@ -106,8 +108,9 @@ 55482a6fb4b1881fa8f746fd52cf6f096bb21c89 test1 d75775ffbc6bca1794d300f5571272879bd280da test2 - $ hg blackbox -l 4 - 1970/01/01 00:00:00 bob (*)> --hidden tags (glob) - 1970/01/01 00:00:00 bob (*)> 2/2 cache hits/lookups in * seconds (glob) - 1970/01/01 00:00:00 bob (*)> writing .hg/cache/tags2 with 3 tags (glob) - 1970/01/01 00:00:00 bob (*)> --hidden tags exited 0 after * seconds (glob) + $ hg blackbox -l 5 + 1970/01/01 00:00:00 bob @2942a772f72a444bef4bef13874d515f50fa27b6 (5000)> --hidden tags + 1970/01/01 00:00:00 bob @2942a772f72a444bef4bef13874d515f50fa27b6 (5000)> 2/2 cache hits/lookups in * seconds (glob) + 1970/01/01 00:00:00 bob @2942a772f72a444bef4bef13874d515f50fa27b6 (5000)> writing .hg/cache/tags2 with 3 tags + 1970/01/01 00:00:00 bob @2942a772f72a444bef4bef13874d515f50fa27b6 (5000)> --hidden tags exited 0 after * seconds (glob) + 1970/01/01 00:00:00 bob @2942a772f72a444bef4bef13874d515f50fa27b6 (5000)> blackbox -l 5
--- a/tests/test-obsolete.t Sun Mar 13 02:29:11 2016 +0100 +++ b/tests/test-obsolete.t Tue Mar 15 14:10:46 2016 -0700 @@ -960,6 +960,7 @@ $ hg log -r . -T '{node}' --debug 8fd96dfc63e51ed5a8af1bec18eb4b19dbf83812 (no-eol) +#if unix-permissions Check that wrong hidden cache permission does not crash $ chmod 000 .hg/cache/hidden @@ -967,6 +968,7 @@ cannot read hidden cache error writing hidden changesets cache 8fd96dfc63e51ed5a8af1bec18eb4b19dbf83812 (no-eol) +#endif Test cache consistency for the visible filter 1) We want to make sure that the cached filtered revs are invalidated when
--- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/tests/test-pager.t Tue Mar 15 14:10:46 2016 -0700 @@ -0,0 +1,179 @@ + $ cat >> fakepager.py <<EOF + > import sys + > for line in sys.stdin: + > sys.stdout.write('paged! %r\n' % line) + > EOF + +Enable ui.formatted because pager won't fire without it, and set up +pager and tell it to use our fake pager that lets us see when the +pager was running. + $ cat >> $HGRCPATH <<EOF + > [ui] + > formatted = yes + > [extensions] + > pager= + > [pager] + > pager = python $TESTTMP/fakepager.py + > EOF + + $ hg init repo + $ cd repo + $ echo a >> a + $ hg add a + $ hg ci -m 'add a' + $ for x in `python $TESTDIR/seq.py 1 10`; do + > echo a $x >> a + > hg ci -m "modify a $x" + > done + +By default diff and log are paged, but summary is not: + + $ hg diff -c 2 --pager=yes + paged! 'diff -r f4be7687d414 -r bce265549556 a\n' + paged! '--- a/a\tThu Jan 01 00:00:00 1970 +0000\n' + paged! '+++ b/a\tThu Jan 01 00:00:00 1970 +0000\n' + paged! '@@ -1,2 +1,3 @@\n' + paged! ' a\n' + paged! ' a 1\n' + paged! '+a 2\n' + + $ hg log --limit 2 + paged! 'changeset: 10:46106edeeb38\n' + paged! 'tag: tip\n' + paged! 'user: test\n' + paged! 'date: Thu Jan 01 00:00:00 1970 +0000\n' + paged! 'summary: modify a 10\n' + paged! '\n' + paged! 'changeset: 9:6dd8ea7dd621\n' + paged! 'user: test\n' + paged! 'date: Thu Jan 01 00:00:00 1970 +0000\n' + paged! 'summary: modify a 9\n' + paged! '\n' + + $ hg summary + parent: 10:46106edeeb38 tip + modify a 10 + branch: default + commit: (clean) + update: (current) + phases: 11 draft + +We can enable the pager on summary: + + $ hg --config pager.attend-summary=yes summary + paged! 'parent: 10:46106edeeb38 tip\n' + paged! ' modify a 10\n' + paged! 'branch: default\n' + paged! 'commit: (clean)\n' + paged! 'update: (current)\n' + paged! 
'phases: 11 draft\n' + +If we completely change the attend list that's respected: + + $ hg --config pager.attend-diff=no diff -c 2 + diff -r f4be7687d414 -r bce265549556 a + --- a/a Thu Jan 01 00:00:00 1970 +0000 + +++ b/a Thu Jan 01 00:00:00 1970 +0000 + @@ -1,2 +1,3 @@ + a + a 1 + +a 2 + + $ hg --config pager.attend=summary diff -c 2 + diff -r f4be7687d414 -r bce265549556 a + --- a/a Thu Jan 01 00:00:00 1970 +0000 + +++ b/a Thu Jan 01 00:00:00 1970 +0000 + @@ -1,2 +1,3 @@ + a + a 1 + +a 2 + +If 'log' is in attend, then 'history' should also be paged: + $ hg history --limit 2 --config pager.attend=log + paged! 'changeset: 10:46106edeeb38\n' + paged! 'tag: tip\n' + paged! 'user: test\n' + paged! 'date: Thu Jan 01 00:00:00 1970 +0000\n' + paged! 'summary: modify a 10\n' + paged! '\n' + paged! 'changeset: 9:6dd8ea7dd621\n' + paged! 'user: test\n' + paged! 'date: Thu Jan 01 00:00:00 1970 +0000\n' + paged! 'summary: modify a 9\n' + paged! '\n' + +Possible bug: history is explicitly ignored in pager config, but +because log is in the attend list it still gets pager treatment. + + $ hg history --limit 2 --config pager.attend=log \ + > --config pager.ignore=history + paged! 'changeset: 10:46106edeeb38\n' + paged! 'tag: tip\n' + paged! 'user: test\n' + paged! 'date: Thu Jan 01 00:00:00 1970 +0000\n' + paged! 'summary: modify a 10\n' + paged! '\n' + paged! 'changeset: 9:6dd8ea7dd621\n' + paged! 'user: test\n' + paged! 'date: Thu Jan 01 00:00:00 1970 +0000\n' + paged! 'summary: modify a 9\n' + paged! '\n' + +Possible bug: history is explicitly marked as attend-history=no, but +it doesn't fail to get paged because log is still in the attend list. + + $ hg history --limit 2 --config pager.attend-history=no + paged! 'changeset: 10:46106edeeb38\n' + paged! 'tag: tip\n' + paged! 'user: test\n' + paged! 'date: Thu Jan 01 00:00:00 1970 +0000\n' + paged! 'summary: modify a 10\n' + paged! '\n' + paged! 'changeset: 9:6dd8ea7dd621\n' + paged! 'user: test\n' + paged! 
'date: Thu Jan 01 00:00:00 1970 +0000\n' + paged! 'summary: modify a 9\n' + paged! '\n' + +Possible bug: disabling pager for log but enabling it for history +doesn't result in history being paged. + + $ hg history --limit 2 --config pager.attend-log=no \ + > --config pager.attend-history=yes + changeset: 10:46106edeeb38 + tag: tip + user: test + date: Thu Jan 01 00:00:00 1970 +0000 + summary: modify a 10 + + changeset: 9:6dd8ea7dd621 + user: test + date: Thu Jan 01 00:00:00 1970 +0000 + summary: modify a 9 + + +Pager with color enabled allows colors to come through by default, +even though stdout is no longer a tty. + $ cat >> $HGRCPATH <<EOF + > [extensions] + > color= + > [color] + > mode = ansi + > EOF + $ hg log --limit 3 + paged! '\x1b[0;33mchangeset: 10:46106edeeb38\x1b[0m\n' + paged! 'tag: tip\n' + paged! 'user: test\n' + paged! 'date: Thu Jan 01 00:00:00 1970 +0000\n' + paged! 'summary: modify a 10\n' + paged! '\n' + paged! '\x1b[0;33mchangeset: 9:6dd8ea7dd621\x1b[0m\n' + paged! 'user: test\n' + paged! 'date: Thu Jan 01 00:00:00 1970 +0000\n' + paged! 'summary: modify a 9\n' + paged! '\n' + paged! '\x1b[0;33mchangeset: 8:cff05a6312fe\x1b[0m\n' + paged! 'user: test\n' + paged! 'date: Thu Jan 01 00:00:00 1970 +0000\n' + paged! 'summary: modify a 8\n' + paged! '\n'
--- a/tests/test-parse-date.t Sun Mar 13 02:29:11 2016 +0100 +++ b/tests/test-parse-date.t Tue Mar 15 14:10:46 2016 -0700 @@ -28,10 +28,12 @@ Check with local timezone other than GMT and with DST - $ TZ="PST+8PDT" + $ TZ="PST+8PDT+7,M4.1.0/02:00:00,M10.5.0/02:00:00" $ export TZ PST=UTC-8 / PDT=UTC-7 +Summer time begins on April's first Sunday at 2:00am, +and ends on October's last Sunday at 2:00am. $ hg debugrebuildstate $ echo "a" > a
--- a/tests/test-patchbomb.t Sun Mar 13 02:29:11 2016 +0100 +++ b/tests/test-patchbomb.t Tue Mar 15 14:10:46 2016 -0700 @@ -28,6 +28,9 @@ $ echo "[extensions]" >> $HGRCPATH $ echo "patchbomb=" >> $HGRCPATH +Ensure hg email output is sent to stdout + $ unset PAGER + $ hg init t $ cd t $ echo a > a
--- a/tests/test-paths.t Sun Mar 13 02:29:11 2016 +0100 +++ b/tests/test-paths.t Tue Mar 15 14:10:46 2016 -0700 @@ -59,24 +59,24 @@ formatter output with paths: $ echo 'dupe:pushurl = https://example.com/dupe' >> .hg/hgrc - $ hg paths -Tjson + $ hg paths -Tjson | sed 's|\\\\|\\|g' [ { "name": "dupe", "pushurl": "https://example.com/dupe", - "url": "$TESTTMP/b#tip" + "url": "$TESTTMP/b#tip" (glob) }, { "name": "expand", - "url": "$TESTTMP/a/$SOMETHING/bar" + "url": "$TESTTMP/a/$SOMETHING/bar" (glob) } ] - $ hg paths -Tjson dupe + $ hg paths -Tjson dupe | sed 's|\\\\|\\|g' [ { "name": "dupe", "pushurl": "https://example.com/dupe", - "url": "$TESTTMP/b#tip" + "url": "$TESTTMP/b#tip" (glob) } ] $ hg paths -Tjson -q unknown
--- a/tests/test-progress.t Sun Mar 13 02:29:11 2016 +0100 +++ b/tests/test-progress.t Tue Mar 15 14:10:46 2016 -0700 @@ -79,6 +79,12 @@ no progress with --quiet $ hg -y loop 3 --quiet +test plain mode exception + $ HGPLAINEXCEPT=progress hg -y loop 1 + \r (no-eol) (esc) + loop [ ] 0/1\r (no-eol) (esc) + \r (no-eol) (esc) + test nested short-lived topics (which shouldn't display with nestdelay): $ hg -y loop 3 --nested
--- a/tests/test-pull-branch.t Sun Mar 13 02:29:11 2016 +0100 +++ b/tests/test-pull-branch.t Tue Mar 15 14:10:46 2016 -0700 @@ -133,6 +133,7 @@ adding file changes added 4 changesets with 4 changes to 1 files (+1 heads) 1 files updated, 0 files merged, 0 files removed, 0 files unresolved + 1 other heads for branch "branchA" Make changes on new branch on tt
--- a/tests/test-pull-update.t Sun Mar 13 02:29:11 2016 +0100 +++ b/tests/test-pull-update.t Tue Mar 15 14:10:46 2016 -0700 @@ -16,7 +16,7 @@ $ echo 1.2 > foo $ hg ci -Am m -Should not update: +Should not update to the other topological branch: $ hg pull -u ../tt pulling from ../tt @@ -25,13 +25,12 @@ adding manifests adding file changes added 1 changesets with 1 changes to 1 files (+1 heads) - abort: not updating: not a linear update - (merge or update --check to force update) - [255] + 0 files updated, 0 files merged, 0 files removed, 0 files unresolved + 1 other heads for branch "default" $ cd ../tt -Should not update: +Should not update to the other branch: $ hg pull -u ../t pulling from ../t @@ -40,9 +39,8 @@ adding manifests adding file changes added 1 changesets with 1 changes to 1 files (+1 heads) - abort: not updating: not a linear update - (merge or update --check to force update) - [255] + 0 files updated, 0 files merged, 0 files removed, 0 files unresolved + 1 other heads for branch "default" $ HGMERGE=true hg merge merging foo @@ -63,4 +61,154 @@ added 1 changesets with 1 changes to 1 files (-1 heads) 1 files updated, 0 files merged, 0 files removed, 0 files unresolved +Similarity between "hg update" and "hg pull -u" in handling bookmark +==================================================================== + +Test that updating activates the bookmark, which matches with the +explicit destination of the update. 
+ + $ echo 4 >> foo + $ hg commit -m "#4" + $ hg bookmark active-after-pull + $ cd ../tt + +(1) activating by --rev BOOKMARK + + $ hg bookmark -f active-before-pull + $ hg bookmarks + * active-before-pull 3:483b76ad4309 + + $ hg pull -u -r active-after-pull + pulling from $TESTTMP/t (glob) + searching for changes + adding changesets + adding manifests + adding file changes + added 1 changesets with 1 changes to 1 files + adding remote bookmark active-after-pull + 1 files updated, 0 files merged, 0 files removed, 0 files unresolved + (activating bookmark active-after-pull) + + $ hg parents -q + 4:f815b3da6163 + $ hg bookmarks + * active-after-pull 4:f815b3da6163 + active-before-pull 3:483b76ad4309 + +(discard pulled changes) + + $ hg update -q 483b76ad4309 + $ hg rollback -q + +(2) activating by URL#BOOKMARK + + $ hg bookmark -f active-before-pull + $ hg bookmarks + * active-before-pull 3:483b76ad4309 + + $ hg pull -u $TESTTMP/t#active-after-pull + pulling from $TESTTMP/t (glob) + searching for changes + adding changesets + adding manifests + adding file changes + added 1 changesets with 1 changes to 1 files + adding remote bookmark active-after-pull + 1 files updated, 0 files merged, 0 files removed, 0 files unresolved + (activating bookmark active-after-pull) + + $ hg parents -q + 4:f815b3da6163 + $ hg bookmarks + * active-after-pull 4:f815b3da6163 + active-before-pull 3:483b76ad4309 + +(discard pulled changes) + + $ hg update -q 483b76ad4309 + $ hg rollback -q + +Test that updating deactivates current active bookmark, if the +destination of the update is explicitly specified, and it doesn't +match with the name of any existing bookmarks. 
+ + $ cd ../t + $ hg bookmark -d active-after-pull + $ hg branch bar -q + $ hg commit -m "#5 (bar #1)" + $ cd ../tt + +(1) deactivating by --rev REV + + $ hg bookmark -f active-before-pull + $ hg bookmarks + * active-before-pull 3:483b76ad4309 + + $ hg pull -u -r b5e4babfaaa7 + pulling from $TESTTMP/t (glob) + searching for changes + adding changesets + adding manifests + adding file changes + added 2 changesets with 1 changes to 1 files + 1 files updated, 0 files merged, 0 files removed, 0 files unresolved + (leaving bookmark active-before-pull) + + $ hg parents -q + 5:b5e4babfaaa7 + $ hg bookmarks + active-before-pull 3:483b76ad4309 + +(discard pulled changes) + + $ hg update -q 483b76ad4309 + $ hg rollback -q + +(2) deactivating by --branch BRANCH + + $ hg bookmark -f active-before-pull + $ hg bookmarks + * active-before-pull 3:483b76ad4309 + + $ hg pull -u -b bar + pulling from $TESTTMP/t (glob) + searching for changes + adding changesets + adding manifests + adding file changes + added 2 changesets with 1 changes to 1 files + 1 files updated, 0 files merged, 0 files removed, 0 files unresolved + (leaving bookmark active-before-pull) + + $ hg parents -q + 5:b5e4babfaaa7 + $ hg bookmarks + active-before-pull 3:483b76ad4309 + +(discard pulled changes) + + $ hg update -q 483b76ad4309 + $ hg rollback -q + +(3) deactivating by URL#ANOTHER-BRANCH + + $ hg bookmark -f active-before-pull + $ hg bookmarks + * active-before-pull 3:483b76ad4309 + + $ hg pull -u $TESTTMP/t#bar + pulling from $TESTTMP/t (glob) + searching for changes + adding changesets + adding manifests + adding file changes + added 2 changesets with 1 changes to 1 files + 1 files updated, 0 files merged, 0 files removed, 0 files unresolved + (leaving bookmark active-before-pull) + + $ hg parents -q + 5:b5e4babfaaa7 + $ hg bookmarks + active-before-pull 3:483b76ad4309 + $ cd ..
--- a/tests/test-push-validation.t Sun Mar 13 02:29:11 2016 +0100 +++ b/tests/test-push-validation.t Tue Mar 15 14:10:46 2016 -0700 @@ -72,7 +72,7 @@ checking manifests crosschecking files in changesets and manifests checking files - beta@1: dddc47b3ba30 in manifests not found + beta@1: manifest refers to unknown revision dddc47b3ba30 2 files, 2 changesets, 2 total revisions 1 integrity errors encountered! (first damaged changeset appears to be 1)
--- a/tests/test-rebase-abort.t Sun Mar 13 02:29:11 2016 +0100 +++ b/tests/test-rebase-abort.t Tue Mar 15 14:10:46 2016 -0700 @@ -76,6 +76,7 @@ local: 3e046f2ecedb793b97ed32108086edd1a162f8bc other: 46f0b057b5c061d276b91491c22151f78698abd2 unrecognized entry: x advisory record + file extras: common (ancestorlinknode = 3163e20567cc93074fbb7a53c8b93312e59dbf2c) file: common (record type "F", state "u", hash 94c8c21d08740f5da9eaa38d1f175c592692f0d1) local path: common (flags "") ancestor path: common (node de0a666fdd9c1a0b0698b90d85064d8bd34f74b6) @@ -90,6 +91,7 @@ * version 2 records local: 3e046f2ecedb793b97ed32108086edd1a162f8bc other: 46f0b057b5c061d276b91491c22151f78698abd2 + file extras: common (ancestorlinknode = 3163e20567cc93074fbb7a53c8b93312e59dbf2c) file: common (record type "F", state "u", hash 94c8c21d08740f5da9eaa38d1f175c592692f0d1) local path: common (flags "") ancestor path: common (node de0a666fdd9c1a0b0698b90d85064d8bd34f74b6) @@ -428,6 +430,7 @@ commit: (clean) update: 1 new changesets, 2 branch heads (merge) phases: 4 draft + $ cd .. test aborting a rebase succeeds after rebasing with skipped commits onto a public changeset (issue4896) @@ -461,4 +464,5 @@ [1] $ hg rebase --abort rebase aborted + $ cd ..
--- a/tests/test-rebase-bookmarks.t Sun Mar 13 02:29:11 2016 +0100 +++ b/tests/test-rebase-bookmarks.t Tue Mar 15 14:10:46 2016 -0700 @@ -167,7 +167,7 @@ created new head $ hg up 3 1 files updated, 0 files merged, 0 files removed, 0 files unresolved - $ hg rebase + $ hg rebase --dest 4 rebasing 3:3d5fa227f4b5 "C" (Y Z) merging c warning: conflicts while merging c! (edit, then use 'hg resolve --mark')
--- a/tests/test-rebase-collapse.t Sun Mar 13 02:29:11 2016 +0100 +++ b/tests/test-rebase-collapse.t Tue Mar 15 14:10:46 2016 -0700 @@ -58,7 +58,7 @@ > echo "====" > echo "edited manually" >> \$1 > EOF - $ HGEDITOR="sh $TESTTMP/editor.sh" hg rebase --collapse --keepbranches -e + $ HGEDITOR="sh $TESTTMP/editor.sh" hg rebase --collapse --keepbranches -e --dest 7 rebasing 1:42ccdea3bb16 "B" rebasing 2:5fddd98957c8 "C" rebasing 3:32af7686d403 "D" @@ -115,7 +115,7 @@ $ cd a2 $ hg phase --force --secret 6 - $ hg rebase --source 4 --collapse + $ hg rebase --source 4 --collapse --dest 7 rebasing 4:9520eea781bc "E" rebasing 6:eea13746799a "G" saved backup bundle to $TESTTMP/a2/.hg/strip-backup/9520eea781bc-fcd8edd4-backup.hg (glob) @@ -157,7 +157,7 @@ > env | grep HGEDITFORM > true > EOF - $ HGEDITOR="sh $TESTTMP/checkeditform.sh" hg rebase --source 4 --collapse -m 'custom message' -e + $ HGEDITOR="sh $TESTTMP/checkeditform.sh" hg rebase --source 4 --collapse -m 'custom message' -e --dest 7 rebasing 4:9520eea781bc "E" rebasing 6:eea13746799a "G" HGEDITFORM=rebase.collapse @@ -261,13 +261,13 @@ $ hg clone -q -u . b b1 $ cd b1 - $ hg rebase -s 2 --collapse + $ hg rebase -s 2 --dest 7 --collapse abort: unable to collapse on top of 7, there is more than one external parent: 1, 5 [255] Rebase and collapse - E onto H: - $ hg rebase -s 4 --collapse # root (4) is not a merge + $ hg rebase -s 4 --dest 7 --collapse # root (4) is not a merge rebasing 4:8a5212ebc852 "E" rebasing 5:7f219660301f "F" rebasing 6:c772a8b2dc17 "G" @@ -418,7 +418,7 @@ $ hg clone -q -u . c c1 $ cd c1 - $ hg rebase -s 4 --collapse # root (4) is not a merge + $ hg rebase -s 4 --dest 8 --collapse # root (4) is not a merge rebasing 4:8a5212ebc852 "E" rebasing 5:dca5924bb570 "F" merging E @@ -512,7 +512,7 @@ $ hg clone -q -u . 
d d1 $ cd d1 - $ hg rebase -s 1 --collapse + $ hg rebase -s 1 --collapse --dest 5 rebasing 1:27547f69f254 "B" rebasing 2:f838bfaca5c7 "C" rebasing 3:7bbcd6078bcc "D" @@ -804,3 +804,52 @@ base $ cd .. + +Test that rebase --collapse will remember message after +running into merge conflict and invoking rebase --continue. + + $ hg init collapse_remember_message + $ cd collapse_remember_message + $ touch a + $ hg add a + $ hg commit -m "a" + $ echo "a-default" > a + $ hg commit -m "a-default" + $ hg update -r 0 + 1 files updated, 0 files merged, 0 files removed, 0 files unresolved + $ hg branch dev + marked working directory as branch dev + (branches are permanent and global, did you want a bookmark?) + $ echo "a-dev" > a + $ hg commit -m "a-dev" + $ hg rebase --collapse -m "a-default-dev" -d 1 + rebasing 2:b8d8db2b242d "a-dev" (tip) + merging a + warning: conflicts while merging a! (edit, then use 'hg resolve --mark') + unresolved conflicts (see hg resolve, then hg rebase --continue) + [1] + $ rm a.orig + $ hg resolve --mark a + (no more unresolved files) + continue: hg rebase --continue + $ hg rebase --continue + rebasing 2:b8d8db2b242d "a-dev" (tip) + saved backup bundle to $TESTTMP/collapse_remember_message/.hg/strip-backup/b8d8db2b242d-f474c19a-backup.hg (glob) + $ hg log + changeset: 2:12bb766dceb1 + tag: tip + user: test + date: Thu Jan 01 00:00:00 1970 +0000 + summary: a-default-dev + + changeset: 1:3c8db56a44bc + user: test + date: Thu Jan 01 00:00:00 1970 +0000 + summary: a-default + + changeset: 0:3903775176ed + user: test + date: Thu Jan 01 00:00:00 1970 +0000 + summary: a + + $ cd ..
--- a/tests/test-rebase-conflicts.t Sun Mar 13 02:29:11 2016 +0100 +++ b/tests/test-rebase-conflicts.t Tue Mar 15 14:10:46 2016 -0700 @@ -305,3 +305,55 @@ rebase completed updating the branch cache truncating cache/rbc-revs-v1 to 72 + +Test minimization of merge conflicts + $ hg up -q null + $ echo a > a + $ hg add a + $ hg commit -q -m 'a' + $ echo b >> a + $ hg commit -q -m 'ab' + $ hg bookmark ab + $ hg up -q '.^' + $ echo b >> a + $ echo c >> a + $ hg commit -q -m 'abc' + $ hg rebase -s 7bc217434fc1 -d ab --keep + rebasing 13:7bc217434fc1 "abc" (tip) + merging a + warning: conflicts while merging a! (edit, then use 'hg resolve --mark') + unresolved conflicts (see hg resolve, then hg rebase --continue) + [1] + $ hg diff + diff -r 328e4ab1f7cc a + --- a/a Thu Jan 01 00:00:00 1970 +0000 + +++ b/a * (glob) + @@ -1,2 +1,6 @@ + a + b + +<<<<<<< dest: 328e4ab1f7cc ab - test: ab + +======= + +c + +>>>>>>> source: 7bc217434fc1 - test: abc + $ hg rebase --abort + rebase aborted + $ hg up -q -C 7bc217434fc1 + $ hg rebase -s . -d ab --keep -t internal:merge3 + rebasing 13:7bc217434fc1 "abc" (tip) + merging a + warning: conflicts while merging a! (edit, then use 'hg resolve --mark') + unresolved conflicts (see hg resolve, then hg rebase --continue) + [1] + $ hg diff + diff -r 328e4ab1f7cc a + --- a/a Thu Jan 01 00:00:00 1970 +0000 + +++ b/a * (glob) + @@ -1,2 +1,8 @@ + a + +<<<<<<< dest: 328e4ab1f7cc ab - test: ab + b + +||||||| base + +======= + +b + +c + +>>>>>>> source: 7bc217434fc1 - test: abc
--- a/tests/test-rebase-named-branches.t Sun Mar 13 02:29:11 2016 +0100 +++ b/tests/test-rebase-named-branches.t Tue Mar 15 14:10:46 2016 -0700 @@ -327,14 +327,13 @@ $ hg up -qr 2 $ hg rebase - nothing to rebase - working directory parent is also destination - [1] + rebasing 2:792845bb77ee "b2" + note: rebase of 2:792845bb77ee created no changes to commit + saved backup bundle to $TESTTMP/case1/.hg/strip-backup/792845bb77ee-627120ee-backup.hg (glob) $ hg tglog - o 3: 'c1' c + o 2: 'c1' c | - | @ 2: 'b2' b - |/ - | o 1: 'b1' b + | @ 1: 'b1' b |/ o 0: '0' @@ -373,8 +372,9 @@ o 0: '0' $ hg rebase - nothing to rebase - working directory parent is also destination - [1] + abort: branch 'c' has one head - please rebase to an explicit rev + (run 'hg heads' to see all heads) + [255] $ hg tglog _ 4: 'c2 closed' c |
--- a/tests/test-rebase-obsolete.t Sun Mar 13 02:29:11 2016 +0100 +++ b/tests/test-rebase-obsolete.t Tue Mar 15 14:10:46 2016 -0700 @@ -527,7 +527,7 @@ $ hg commit -m J $ hg debugobsolete `hg log --rev . -T '{node}'` - $ hg rebase --rev .~1::. --dest 'max(desc(D))' --traceback + $ hg rebase --rev .~1::. --dest 'max(desc(D))' --traceback --config experimental.rebaseskipobsolete=off rebasing 9:4bde274eefcf "I" rebasing 13:06edfc82198f "J" (tip) $ hg log -G @@ -771,8 +771,8 @@ phases: 8 draft unstable: 1 changesets $ hg rebase -s 10 -d 12 - abort: this rebase will cause divergence - (to force the rebase please set rebase.allowdivergence=True) + abort: this rebase will cause divergences from: 121d9e3bc4c6 + (to force the rebase please set experimental.allowdivergence=True) [255] $ hg log -G @ 15:73568ab6879d bar foo @@ -791,9 +791,9 @@ | o 0:4a2df7238c3b A -With rebase.allowdivergence=True, rebase can create divergence +With experimental.allowdivergence=True, rebase can create divergence - $ hg rebase -s 10 -d 12 --config rebase.allowdivergence=True + $ hg rebase -s 10 -d 12 --config experimental.allowdivergence=True rebasing 10:121d9e3bc4c6 "P" rebasing 15:73568ab6879d "bar foo" (tip) $ hg summary
--- a/tests/test-rebase-parameters.t Sun Mar 13 02:29:11 2016 +0100 +++ b/tests/test-rebase-parameters.t Tue Mar 15 14:10:46 2016 -0700 @@ -46,11 +46,14 @@ $ cd .. +Version with only two heads (to allow default destination to work) + + $ hg clone -q -u . a a2heads -r 3 -r 8 These fail: - $ hg clone -q -u . a a1 - $ cd a1 + $ hg clone -q -u . a a0 + $ cd a0 $ hg rebase -s 8 -d 7 nothing to rebase @@ -79,33 +82,38 @@ abort: cannot specify both a revision and a base [255] - $ hg rebase --rev '1 & !1' + $ hg rebase --base 6 + abort: branch 'default' has 3 heads - please rebase to an explicit rev + (run 'hg heads .' to see heads) + [255] + + $ hg rebase --rev '1 & !1' --dest 8 empty "rev" revision set - nothing to rebase [1] - $ hg rebase --source '1 & !1' + $ hg rebase --source '1 & !1' --dest 8 empty "source" revision set - nothing to rebase [1] - $ hg rebase --base '1 & !1' + $ hg rebase --base '1 & !1' --dest 8 empty "base" revision set - can't compute rebase set [1] - $ hg rebase + $ hg rebase --dest 8 nothing to rebase - working directory parent is also destination [1] - $ hg rebase -b. + $ hg rebase -b . --dest 8 nothing to rebase - e7ec4e813ba6 is both "base" and destination [1] $ hg up -q 7 - $ hg rebase --traceback + $ hg rebase --dest 8 --traceback nothing to rebase - working directory parent is already an ancestor of destination e7ec4e813ba6 [1] - $ hg rebase -b. + $ hg rebase --dest 8 -b. nothing to rebase - "base" 02de42196ebe is already an ancestor of destination e7ec4e813ba6 [1] @@ -117,6 +125,9 @@ Rebase with no arguments (from 3 onto 8): + $ cd .. + $ hg clone -q -u . 
a2heads a1 + $ cd a1 $ hg up -q -C 3 $ hg rebase @@ -126,22 +137,18 @@ saved backup bundle to $TESTTMP/a1/.hg/strip-backup/42ccdea3bb16-3cb021d3-backup.hg (glob) $ hg tglog - @ 8: 'D' - | - o 7: 'C' + @ 6: 'D' | - o 6: 'B' + o 5: 'C' | - o 5: 'I' + o 4: 'B' | - o 4: 'H' + o 3: 'I' | - | o 3: 'G' - |/| - o | 2: 'F' - | | - | o 1: 'E' - |/ + o 2: 'H' + | + o 1: 'F' + | o 0: 'A' Try to rollback after a rebase (fail): @@ -154,7 +161,7 @@ Rebase with base == '.' => same as no arguments (from 3 onto 8): - $ hg clone -q -u 3 a a2 + $ hg clone -q -u 3 a2heads a2 $ cd a2 $ hg rebase --base . @@ -164,22 +171,18 @@ saved backup bundle to $TESTTMP/a2/.hg/strip-backup/42ccdea3bb16-3cb021d3-backup.hg (glob) $ hg tglog - @ 8: 'D' - | - o 7: 'C' + @ 6: 'D' | - o 6: 'B' + o 5: 'C' | - o 5: 'I' + o 4: 'B' | - o 4: 'H' + o 3: 'I' | - | o 3: 'G' - |/| - o | 2: 'F' - | | - | o 1: 'E' - |/ + o 2: 'H' + | + o 1: 'F' + | o 0: 'A' $ cd .. @@ -220,7 +223,7 @@ Specify only source (from 2 onto 8): - $ hg clone -q -u . a a4 + $ hg clone -q -u . a2heads a4 $ cd a4 $ hg rebase --source 'desc("C")' @@ -229,20 +232,16 @@ saved backup bundle to $TESTTMP/a4/.hg/strip-backup/5fddd98957c8-f9244fa1-backup.hg (glob) $ hg tglog - o 8: 'D' + o 6: 'D' | - o 7: 'C' - | - @ 6: 'I' + o 5: 'C' | - o 5: 'H' + @ 4: 'I' | - | o 4: 'G' - |/| - o | 3: 'F' - | | - | o 2: 'E' - |/ + o 3: 'H' + | + o 2: 'F' + | | o 1: 'B' |/ o 0: 'A' @@ -285,7 +284,7 @@ Specify only base (from 1 onto 8): - $ hg clone -q -u . a a6 + $ hg clone -q -u . a2heads a6 $ cd a6 $ hg rebase --base 'desc("D")' @@ -295,22 +294,18 @@ saved backup bundle to $TESTTMP/a6/.hg/strip-backup/42ccdea3bb16-3cb021d3-backup.hg (glob) $ hg tglog - o 8: 'D' - | - o 7: 'C' + o 6: 'D' | - o 6: 'B' + o 5: 'C' | - @ 5: 'I' + o 4: 'B' | - o 4: 'H' + @ 3: 'I' | - | o 3: 'G' - |/| - o | 2: 'F' - | | - | o 1: 'E' - |/ + o 2: 'H' + | + o 1: 'F' + | o 0: 'A' $ cd .. @@ -383,7 +378,7 @@ Specify only revs (from 2 onto 8) - $ hg clone -q -u . a a9 + $ hg clone -q -u . 
a2heads a9 $ cd a9 $ hg rebase --rev 'desc("C")::' @@ -392,20 +387,16 @@ saved backup bundle to $TESTTMP/a9/.hg/strip-backup/5fddd98957c8-f9244fa1-backup.hg (glob) $ hg tglog - o 8: 'D' + o 6: 'D' | - o 7: 'C' - | - @ 6: 'I' + o 5: 'C' | - o 5: 'H' + @ 4: 'I' | - | o 4: 'G' - |/| - o | 3: 'F' - | | - | o 2: 'E' - |/ + o 3: 'H' + | + o 2: 'F' + | | o 1: 'B' |/ o 0: 'A' @@ -416,7 +407,7 @@ $ hg clone -q -u . a aX $ cd aX - $ hg rebase -r 3 -r 6 + $ hg rebase -r 3 -r 6 --dest 8 rebasing 3:32af7686d403 "D" rebasing 6:eea13746799a "G" saved backup bundle to $TESTTMP/aX/.hg/strip-backup/eea13746799a-ad273fd6-backup.hg (glob) @@ -495,6 +486,10 @@ $ hg resolve -m c2 (no more unresolved files) continue: hg rebase --continue + $ hg graft --continue + abort: no graft in progress + (continue: hg rebase --continue) + [255] $ hg rebase -c --tool internal:fail rebasing 2:e4e3f3546619 "c2b" (tip) note: rebase of 2:e4e3f3546619 created no changes to commit
--- a/tests/test-rebase-pull.t Sun Mar 13 02:29:11 2016 +0100 +++ b/tests/test-rebase-pull.t Tue Mar 15 14:10:46 2016 -0700 @@ -85,7 +85,7 @@ adding manifests adding file changes added 1 changesets with 1 changes to 1 files - nothing to rebase - working directory parent is already an ancestor of destination 77ae9631bcca + nothing to rebase - updating instead 1 files updated, 0 files merged, 0 files removed, 0 files unresolved updating bookmark norebase @@ -209,3 +209,103 @@ | o 0: 'C1' + +pull --rebase only update if there is nothing to rebase + + $ cd ../a + $ echo R5 > R5 + $ hg ci -Am R5 + adding R5 + $ hg tglog + @ 6: 'R5' + | + o 5: 'R4' + | + o 4: 'R3' + | + o 3: 'R2' + | + o 2: 'R1' + | + o 1: 'C2' + | + o 0: 'C1' + + $ cd ../c + $ echo L2 > L2 + $ hg ci -Am L2 + adding L2 + $ hg up 'desc(L1)' + 0 files updated, 0 files merged, 1 files removed, 0 files unresolved + $ hg pull --rebase + pulling from $TESTTMP/a (glob) + searching for changes + adding changesets + adding manifests + adding file changes + added 1 changesets with 1 changes to 1 files (+1 heads) + rebasing 6:0d0727eb7ce0 "L1" + rebasing 7:c1f58876e3bf "L2" + saved backup bundle to $TESTTMP/c/.hg/strip-backup/0d0727eb7ce0-ef61ccb2-backup.hg (glob) + $ hg tglog + o 8: 'L2' + | + @ 7: 'L1' + | + o 6: 'R5' + | + o 5: 'R4' + | + o 4: 'R3' + | + o 3: 'R2' + | + o 2: 'R1' + | + o 1: 'C2' + | + o 0: 'C1' + + +pull --rebase update (no rebase) use proper update: + +- warn about other head. 
+ + $ cd ../a + $ echo R6 > R6 + $ hg ci -Am R6 + adding R6 + $ cd ../c + $ hg up 'desc(R5)' + 0 files updated, 0 files merged, 1 files removed, 0 files unresolved + $ hg pull --rebase + pulling from $TESTTMP/a (glob) + searching for changes + adding changesets + adding manifests + adding file changes + added 1 changesets with 1 changes to 1 files (+1 heads) + nothing to rebase - updating instead + 1 files updated, 0 files merged, 0 files removed, 0 files unresolved + 1 other heads for branch "default" + $ hg tglog + @ 9: 'R6' + | + | o 8: 'L2' + | | + | o 7: 'L1' + |/ + o 6: 'R5' + | + o 5: 'R4' + | + o 4: 'R3' + | + o 3: 'R2' + | + o 2: 'R1' + | + o 1: 'C2' + | + o 0: 'C1' +
--- a/tests/test-rebase-scenario-global.t Sun Mar 13 02:29:11 2016 +0100 +++ b/tests/test-rebase-scenario-global.t Tue Mar 15 14:10:46 2016 -0700 @@ -25,7 +25,8 @@ Rebasing D onto H - simple rebase: -(this also tests that editor is invoked if '--edit' is specified) +(this also tests that editor is invoked if '--edit' is specified, and that we +can abort or warn for colliding untracked files) $ hg clone -q -u . a a1 $ cd a1 @@ -50,8 +51,10 @@ $ hg status --rev "3^1" --rev 3 A D - $ HGEDITOR=cat hg rebase -s 3 -d 7 --edit + $ echo collide > D + $ HGEDITOR=cat hg rebase -s 3 -d 7 --edit --config merge.checkunknown=warn rebasing 3:32af7686d403 "D" + D: replacing untracked file D @@ -62,6 +65,9 @@ HG: branch 'default' HG: added D saved backup bundle to $TESTTMP/a1/.hg/strip-backup/32af7686d403-6f7dface-backup.hg (glob) + $ cat D.orig + collide + $ rm D.orig $ hg tglog o 7: 'D' @@ -84,14 +90,19 @@ D onto F - intermediate point: -(this also tests that editor is not invoked if '--edit' is not specified) +(this also tests that editor is not invoked if '--edit' is not specified, and +that we can ignore for colliding untracked files) $ hg clone -q -u . a a2 $ cd a2 + $ echo collide > D - $ HGEDITOR=cat hg rebase -s 3 -d 5 + $ HGEDITOR=cat hg rebase -s 3 -d 5 --config merge.checkunknown=ignore rebasing 3:32af7686d403 "D" saved backup bundle to $TESTTMP/a2/.hg/strip-backup/32af7686d403-6f7dface-backup.hg (glob) + $ cat D.orig + collide + $ rm D.orig $ hg tglog o 7: 'D' @@ -114,15 +125,21 @@ E onto H - skip of G: +(this also tests that we can overwrite untracked files and don't create backups +if they have the same contents) $ hg clone -q -u . 
a a3 $ cd a3 + $ hg cat -r 4 E | tee E + E $ hg rebase -s 4 -d 7 rebasing 4:9520eea781bc "E" rebasing 6:eea13746799a "G" note: rebase of 6:eea13746799a created no changes to commit saved backup bundle to $TESTTMP/a3/.hg/strip-backup/9520eea781bc-fcd8edd4-backup.hg (glob) + $ f E.orig + E.orig: file not found $ hg tglog o 6: 'E' @@ -745,12 +762,73 @@ saved backup bundle to $TESTTMP/cwd-vanish/.hg/strip-backup/779a07b1b7a0-853e0073-backup.hg (glob) Test experimental revset +======================== $ cd .. + +Make the repo a bit more interresting + + $ hg up 1 + 0 files updated, 0 files merged, 2 files removed, 0 files unresolved + $ echo aaa > aaa + $ hg add aaa + $ hg commit -m aaa + created new head + $ hg log -G + @ changeset: 4:5f7bc9025ed2 + | tag: tip + | parent: 1:58d79cc1cf43 + | user: test + | date: Thu Jan 01 00:00:00 1970 +0000 + | summary: aaa + | + | o changeset: 3:1910d5ff34ea + | | user: test + | | date: Thu Jan 01 00:00:00 1970 +0000 + | | summary: second source with subdir + | | + | o changeset: 2:82901330b6ef + |/ user: test + | date: Thu Jan 01 00:00:00 1970 +0000 + | summary: first source commit + | + o changeset: 1:58d79cc1cf43 + | user: test + | date: Thu Jan 01 00:00:00 1970 +0000 + | summary: dest commit + | + o changeset: 0:e94b687f7da3 + user: test + date: Thu Jan 01 00:00:00 1970 +0000 + summary: initial commit + + +Testing from lower head + + $ hg up 3 + 2 files updated, 0 files merged, 1 files removed, 0 files unresolved $ hg log -r '_destrebase()' + changeset: 4:5f7bc9025ed2 + tag: tip + parent: 1:58d79cc1cf43 + user: test + date: Thu Jan 01 00:00:00 1970 +0000 + summary: aaa + + +Testing from upper head + + $ hg log -r '_destrebase(4)' changeset: 3:1910d5ff34ea - tag: tip user: test date: Thu Jan 01 00:00:00 1970 +0000 summary: second source with subdir + $ hg up 4 + 1 files updated, 0 files merged, 2 files removed, 0 files unresolved + $ hg log -r '_destrebase()' + changeset: 3:1910d5ff34ea + user: test + date: Thu Jan 01 00:00:00 
1970 +0000 + summary: second source with subdir +
--- a/tests/test-rename-dir-merge.t Sun Mar 13 02:29:11 2016 +0100 +++ b/tests/test-rename-dir-merge.t Tue Mar 15 14:10:46 2016 -0700 @@ -85,6 +85,7 @@ resolving manifests branchmerge: True, force: False, partial: False ancestor: f9b20c0d4c51, local: 397f8b00a740+, remote: ce36d17b18fb + starting 4 threads for background file closing (?) b/c: local directory rename - get from a/c -> dg getting a/c to b/c 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
--- a/tests/test-rename-merge2.t Sun Mar 13 02:29:11 2016 +0100 +++ b/tests/test-rename-merge2.t Tue Mar 15 14:10:46 2016 -0700 @@ -88,6 +88,7 @@ ancestor: 924404dff337, local: e300d1c794ec+, remote: 4ce40f5aca24 preserving a for resolve of b preserving rev for resolve of rev + starting 4 threads for background file closing (?) a: remote unchanged -> k b: remote copied from a -> m (premerge) picked tool 'python ../merge' for b (binary False symlink False changedelete False) @@ -168,6 +169,7 @@ preserving a for resolve of b preserving rev for resolve of rev removing a + starting 4 threads for background file closing (?) b: remote moved from a -> m (premerge) picked tool 'python ../merge' for b (binary False symlink False changedelete False) merging a and b to b @@ -205,6 +207,7 @@ ancestor: 924404dff337, local: 02963e448370+, remote: f4db7e329e71 preserving b for resolve of b preserving rev for resolve of rev + starting 4 threads for background file closing (?) b: local copied/moved from a -> m (premerge) picked tool 'python ../merge' for b (binary False symlink False changedelete False) merging b and a to b @@ -274,6 +277,7 @@ branchmerge: True, force: False, partial: False ancestor: 924404dff337, local: 86a2aa42fc76+, remote: 97c705ade336 preserving rev for resolve of rev + starting 4 threads for background file closing (?) rev: versions differ -> m (premerge) picked tool 'python ../merge' for rev (binary False symlink False changedelete False) merging rev @@ -339,6 +343,7 @@ branchmerge: True, force: False, partial: False ancestor: 924404dff337, local: 02963e448370+, remote: 97c705ade336 preserving rev for resolve of rev + starting 4 threads for background file closing (?) 
rev: versions differ -> m (premerge) picked tool 'python ../merge' for rev (binary False symlink False changedelete False) merging rev @@ -367,6 +372,7 @@ ancestor: 924404dff337, local: 62e7bf090eba+, remote: 49b6d8032493 preserving b for resolve of b preserving rev for resolve of rev + starting 4 threads for background file closing (?) b: both renamed from a -> m (premerge) picked tool 'python ../merge' for b (binary False symlink False changedelete False) merging b @@ -446,6 +452,7 @@ ancestor: 924404dff337, local: 86a2aa42fc76+, remote: af30c7647fc7 preserving b for resolve of b preserving rev for resolve of rev + starting 4 threads for background file closing (?) b: both created -> m (premerge) picked tool 'python ../merge' for b (binary False symlink False changedelete False) merging b @@ -486,6 +493,7 @@ preserving rev for resolve of rev a: other deleted -> r removing a + starting 4 threads for background file closing (?) b: both created -> m (premerge) picked tool 'python ../merge' for b (binary False symlink False changedelete False) merging b @@ -565,6 +573,7 @@ preserving rev for resolve of rev a: other deleted -> r removing a + starting 4 threads for background file closing (?) b: both created -> m (premerge) picked tool 'python ../merge' for b (binary False symlink False changedelete False) merging b @@ -642,6 +651,7 @@ ancestor: 924404dff337, local: 0b76e65c8289+, remote: 4ce40f5aca24 preserving b for resolve of b preserving rev for resolve of rev + starting 4 threads for background file closing (?) a: remote unchanged -> k b: both created -> m (premerge) picked tool 'python ../merge' for b (binary False symlink False changedelete False) @@ -681,6 +691,7 @@ ancestor: 924404dff337, local: 02963e448370+, remote: 8dbce441892a preserving b for resolve of b preserving rev for resolve of rev + starting 4 threads for background file closing (?) 
a: prompt deleted/changed -> m (premerge) picked tool ':prompt' for a (binary False symlink False changedelete True) remote changed a which local deleted @@ -725,6 +736,7 @@ preserving a for resolve of a preserving b for resolve of b preserving rev for resolve of rev + starting 4 threads for background file closing (?) a: prompt changed/deleted -> m (premerge) picked tool ':prompt' for a (binary False symlink False changedelete True) local changed a which remote deleted @@ -772,6 +784,7 @@ preserving a for resolve of b preserving rev for resolve of rev removing a + starting 4 threads for background file closing (?) b: remote moved from a -> m (premerge) picked tool 'python ../merge' for b (binary False symlink False changedelete False) merging a and b to b @@ -813,6 +826,7 @@ ancestor: 924404dff337, local: 62e7bf090eba+, remote: f4db7e329e71 preserving b for resolve of b preserving rev for resolve of rev + starting 4 threads for background file closing (?) b: local copied/moved from a -> m (premerge) picked tool 'python ../merge' for b (binary False symlink False changedelete False) merging b and a to b
--- a/tests/test-repair-strip.t Sun Mar 13 02:29:11 2016 +0100 +++ b/tests/test-repair-strip.t Tue Mar 15 14:10:46 2016 -0700 @@ -1,5 +1,12 @@ #require unix-permissions no-root + $ cat > $TESTTMP/dumpjournal.py <<EOF + > import sys + > for entry in sys.stdin.read().split('\n'): + > if entry: + > print entry.split('\x00')[0] + > EOF + $ echo "[extensions]" >> $HGRCPATH $ echo "mq=">> $HGRCPATH @@ -14,7 +21,7 @@ > hg verify > echo % journal contents > if [ -f .hg/store/journal ]; then - > sed -e 's/\.i[^\n]*/\.i/' .hg/store/journal + > cat .hg/store/journal | python $TESTTMP/dumpjournal.py > else > echo "(no journal)" > fi
--- a/tests/test-resolve.t Sun Mar 13 02:29:11 2016 +0100 +++ b/tests/test-resolve.t Tue Mar 15 14:10:46 2016 -0700 @@ -53,6 +53,34 @@ arguments do not match paths that need resolving $ hg resolve -l does-not-exist +tell users how they could have used resolve + + $ mkdir nested + $ cd nested + $ hg resolve -m file1 + arguments do not match paths that need resolving + (try: hg resolve -m path:file1) + $ hg resolve -m file1 filez + arguments do not match paths that need resolving + (try: hg resolve -m path:file1 path:filez) + $ hg resolve -m path:file1 path:filez + $ hg resolve -l + R file1 + U file2 + $ hg resolve -m filez file2 + arguments do not match paths that need resolving + (try: hg resolve -m path:filez path:file2) + $ hg resolve -m path:filez path:file2 + (no more unresolved files) + $ hg resolve -l + R file1 + R file2 + +cleanup + $ hg resolve -u + $ cd .. + $ rmdir nested + don't allow marking or unmarking driver-resolved files $ cat > $TESTTMP/markdriver.py << EOF @@ -263,10 +291,12 @@ local: 57653b9f834a4493f7240b0681efcb9ae7cab745 other: dc77451844e37f03f5c559e3b8529b2b48d381d1 unrecognized entry: x advisory record + file extras: file1 (ancestorlinknode = 99726c03216e233810a2564cbc0adfe395007eac) file: file1 (record type "F", state "r", hash 60b27f004e454aca81b0480209cce5081ec52390) local path: file1 (flags "") ancestor path: file1 (node 2ed2a3912a0b24502043eae84ee4b279c18b90dd) other path: file1 (node 6f4310b00b9a147241b071a60c28a650827fb03d) + file extras: file2 (ancestorlinknode = 99726c03216e233810a2564cbc0adfe395007eac) file: file2 (record type "F", state "u", hash cb99b709a1978bd205ab9dfd4c5aaa1fc91c7523) local path: file2 (flags "") ancestor path: file2 (node 2ed2a3912a0b24502043eae84ee4b279c18b90dd) @@ -282,10 +312,12 @@ * version 2 records local: 57653b9f834a4493f7240b0681efcb9ae7cab745 other: dc77451844e37f03f5c559e3b8529b2b48d381d1 + file extras: file1 (ancestorlinknode = 99726c03216e233810a2564cbc0adfe395007eac) file: file1 (record type 
"F", state "r", hash 60b27f004e454aca81b0480209cce5081ec52390) local path: file1 (flags "") ancestor path: file1 (node 2ed2a3912a0b24502043eae84ee4b279c18b90dd) other path: file1 (node 6f4310b00b9a147241b071a60c28a650827fb03d) + file extras: file2 (ancestorlinknode = 99726c03216e233810a2564cbc0adfe395007eac) file: file2 (record type "F", state "u", hash cb99b709a1978bd205ab9dfd4c5aaa1fc91c7523) local path: file2 (flags "") ancestor path: file2 (node 2ed2a3912a0b24502043eae84ee4b279c18b90dd)
--- a/tests/test-revert-interactive.t Sun Mar 13 02:29:11 2016 +0100 +++ b/tests/test-revert-interactive.t Tue Mar 15 14:10:46 2016 -0700 @@ -15,6 +15,7 @@ > interactive = true > [extensions] > record = + > purge = > EOF @@ -377,3 +378,26 @@ 5 d +lastline + + $ hg update -C . + 1 files updated, 0 files merged, 0 files removed, 0 files unresolved + $ hg purge + $ touch newfile + $ hg add newfile + $ hg status + A newfile + $ hg revert -i <<EOF + > n + > EOF + forgetting newfile + forget added file newfile (yn)? n + $ hg status + A newfile + $ hg revert -i <<EOF + > y + > EOF + forgetting newfile + forget added file newfile (yn)? y + $ hg status + ? newfile +
--- a/tests/test-revset.t Sun Mar 13 02:29:11 2016 +0100 +++ b/tests/test-revset.t Tue Mar 15 14:10:46 2016 -0700 @@ -169,7 +169,9 @@ ('symbol', 'a')) * set: <filteredset - <baseset [1]>> + <baseset [1]>, + <not + <baseset [0]>>> 1 $ try _a_b_c_ ('symbol', '_a_b_c_') @@ -182,7 +184,9 @@ ('symbol', 'a')) * set: <filteredset - <baseset [6]>> + <baseset [6]>, + <not + <baseset [0]>>> 6 $ try .a.b.c. ('symbol', '.a.b.c.') @@ -195,7 +199,9 @@ ('symbol', 'a')) * set: <filteredset - <baseset [7]>> + <baseset [7]>, + <not + <baseset [0]>>> 7 names that should be caught by fallback mechanism @@ -278,7 +284,9 @@ ('symbol', 'a')) * set: <filteredset - <baseset [4]>> + <baseset [4]>, + <not + <baseset [0]>>> 4 $ log '1 or 2' @@ -537,14 +545,16 @@ ('string', '\x08issue\\d+')) * set: <filteredset - <fullreposet+ 0:9>> + <fullreposet+ 0:9>, + <grep '\x08issue\\d+'>> $ try 'grep(r"\bissue\d+")' (func ('symbol', 'grep') ('string', '\\bissue\\d+')) * set: <filteredset - <fullreposet+ 0:9>> + <fullreposet+ 0:9>, + <grep '\\bissue\\d+'>> 6 $ try 'grep(r"\")' hg: parse error at 7: unterminated string @@ -693,12 +703,11 @@ * optimized: (func ('symbol', 'only') - (and + (difference (range ('symbol', '8') ('symbol', '9')) - (not - ('symbol', '8')))) + ('symbol', '8'))) * set: <baseset+ [8, 9]> 8 @@ -1230,7 +1239,7 @@ test that chained `or` operations never eat up stack (issue4624) (uses `0:1` instead of `0` to avoid future optimization of trivial revisions) - $ hg log -T '{rev}\n' -r "`python -c "print '|'.join(['0:1'] * 500)"`" + $ hg log -T '{rev}\n' -r `python -c "print '+'.join(['0:1'] * 500)"` 0 1 @@ -1586,7 +1595,8 @@ None) * set: <filteredset - <fullreposet+ 0:9>> + <fullreposet+ 0:9>, + <merge>> 6 $ HGPLAIN=1 @@ -1605,7 +1615,8 @@ None) * set: <filteredset - <fullreposet+ 0:9>> + <fullreposet+ 0:9>, + <merge>> 6 $ unset HGPLAIN @@ -1659,7 +1670,8 @@ * set: <addset+ <filteredset - <fullreposet+ 0:9>>, + <fullreposet+ 0:9>, + <merge>>, <generatorset+>> 6 7 @@ -1715,7 +1727,10 @@ 
('symbol', '2') ('symbol', '5'))) * set: - <baseset [5]> + <baseset + <max + <fullreposet+ 0:9>, + <spanset+ 2:5>>> 5 test chained `or` operations are flattened at parsing phase @@ -1823,7 +1838,8 @@ <addset <baseset [9]>, <filteredset - <fullreposet+ 0:9>>> + <fullreposet+ 0:9>, + <desc '$1'>>> 9 $ try 'd(2:5)' @@ -1971,11 +1987,80 @@ issue2549 - correct optimizations - $ log 'limit(1 or 2 or 3, 2) and not 2' + $ try 'limit(1 or 2 or 3, 2) and not 2' + (and + (func + ('symbol', 'limit') + (list + (or + ('symbol', '1') + ('symbol', '2') + ('symbol', '3')) + ('symbol', '2'))) + (not + ('symbol', '2'))) + * set: + <filteredset + <baseset + <limit n=2, offset=0, + <fullreposet+ 0:9>, + <baseset [1, 2, 3]>>>, + <not + <baseset [2]>>> 1 - $ log 'max(1 or 2) and not 2' - $ log 'min(1 or 2) and not 1' - $ log 'last(1 or 2, 1) and not 2' + $ try 'max(1 or 2) and not 2' + (and + (func + ('symbol', 'max') + (or + ('symbol', '1') + ('symbol', '2'))) + (not + ('symbol', '2'))) + * set: + <filteredset + <baseset + <max + <fullreposet+ 0:9>, + <baseset [1, 2]>>>, + <not + <baseset [2]>>> + $ try 'min(1 or 2) and not 1' + (and + (func + ('symbol', 'min') + (or + ('symbol', '1') + ('symbol', '2'))) + (not + ('symbol', '1'))) + * set: + <filteredset + <baseset + <min + <fullreposet+ 0:9>, + <baseset [1, 2]>>>, + <not + <baseset [1]>>> + $ try 'last(1 or 2, 1) and not 2' + (and + (func + ('symbol', 'last') + (list + (or + ('symbol', '1') + ('symbol', '2')) + ('symbol', '1'))) + (not + ('symbol', '2'))) + * set: + <filteredset + <baseset + <last n=1, + <fullreposet+ 0:9>, + <baseset [2, 1]>>>, + <not + <baseset [2]>>> issue4289 - ordering of built-ins $ hg log -M -q -r 3:2 @@ -2192,28 +2277,21 @@ $ cd .. -Test registrar.delayregistrar via revset.extpredicate - -'extpredicate' decorator shouldn't register any functions until -'setup()' on it. 
+Test that revset predicate of extension isn't loaded at failure of +loading it $ cd repo $ cat <<EOF > $TESTTMP/custompredicate.py - > from mercurial import revset + > from mercurial import error, registrar, revset > - > revsetpredicate = revset.extpredicate() + > revsetpredicate = registrar.revsetpredicate() > > @revsetpredicate('custom1()') > def custom1(repo, subset, x): > return revset.baseset([1]) - > @revsetpredicate('custom2()') - > def custom2(repo, subset, x): - > return revset.baseset([2]) > - > def uisetup(ui): - > if ui.configbool('custompredicate', 'enabled'): - > revsetpredicate.setup() + > raise error.Abort('intentional failure of loading extension') > EOF $ cat <<EOF > .hg/hgrc > [extensions] @@ -2221,13 +2299,8 @@ > EOF $ hg debugrevspec "custom1()" + *** failed to import extension custompredicate from $TESTTMP/custompredicate.py: intentional failure of loading extension hg: parse error: unknown identifier: custom1 [255] - $ hg debugrevspec "custom2()" - hg: parse error: unknown identifier: custom2 - [255] - $ hg debugrevspec "custom1() or custom2()" --config custompredicate.enabled=true - 1 - 2 $ cd ..
--- a/tests/test-run-tests.t Sun Mar 13 02:29:11 2016 +0100 +++ b/tests/test-run-tests.t Tue Mar 15 14:10:46 2016 -0700 @@ -21,6 +21,27 @@ > run-tests.py --with-hg=`which hg` "$@" > } +error paths + +#if symlink + $ ln -s `which true` hg + $ run-tests.py --with-hg=./hg + warning: --with-hg should specify an hg script + + # Ran 0 tests, 0 skipped, 0 warned, 0 failed. + $ rm hg +#endif + +#if execbit + $ touch hg + $ run-tests.py --with-hg=./hg + Usage: run-tests.py [options] [tests] + + run-tests.py: error: --with-hg must specify an executable hg script + [2] + $ rm hg +#endif + a succesful test ======================= @@ -31,6 +52,12 @@ > never happens (?) > xyzzy > nor this (?) + > $ printf 'abc\ndef\nxyz\n' + > 123 (?) + > abc + > def (?) + > 456 (?) + > xyz > EOF $ rt @@ -45,6 +72,8 @@ > rataxes > This is a noop statement so that > this test is still more bytes than success. + > pad pad pad pad............................................................ + > pad pad pad pad............................................................ > EOF >>> fh = open('test-failure-unicode.t', 'wb') @@ -55,12 +84,13 @@ --- $TESTTMP/test-failure.t +++ $TESTTMP/test-failure.t.err - @@ -1,4 +1,4 @@ + @@ -1,5 +1,5 @@ $ echo babar - rataxes + babar This is a noop statement so that this test is still more bytes than success. + pad pad pad pad............................................................ ERROR: test-failure.t output changed !. @@ -84,12 +114,13 @@ --- $TESTTMP/test-failure.t +++ $TESTTMP/test-failure.t.err - @@ -1,4 +1,4 @@ + @@ -1,5 +1,5 @@ $ echo babar - rataxes + babar This is a noop statement so that this test is still more bytes than success. + pad pad pad pad............................................................ ERROR: test-failure.t output changed !. 
@@ -122,12 +153,13 @@ <testcase name="test-failure.t" time="*"> (glob) <![CDATA[--- $TESTTMP/test-failure.t +++ $TESTTMP/test-failure.t.err - @@ -1,4 +1,4 @@ + @@ -1,5 +1,5 @@ $ echo babar - rataxes + babar This is a noop statement so that this test is still more bytes than success. + pad pad pad pad............................................................ ]]> </testcase> </testsuite> @@ -140,12 +172,13 @@ --- $TESTTMP/test-failure.t +++ $TESTTMP/test-failure.t.err - @@ -1,4 +1,4 @@ + @@ -1,5 +1,5 @@ $ echo babar - rataxes + babar This is a noop statement so that this test is still more bytes than success. + pad pad pad pad............................................................ ERROR: test-failure.t output changed ! @@ -174,12 +207,13 @@ --- $TESTTMP/test-failure.t +++ $TESTTMP/test-failure.t.err - @@ -1,4 +1,4 @@ + @@ -1,5 +1,5 @@ $ echo babar - rataxes + babar This is a noop statement so that this test is still more bytes than success. + pad pad pad pad............................................................ ERROR: test-failure.t output changed ! @@ -193,12 +227,13 @@ --- $TESTTMP/test-failure.t +++ $TESTTMP/test-failure.t.err - @@ -1,4 +1,4 @@ + @@ -1,5 +1,5 @@ $ echo babar - rataxes + babar This is a noop statement so that this test is still more bytes than success. + pad pad pad pad............................................................ ERROR: test-failure.t output changed ! @@ -245,8 +280,8 @@ *SALT* 0 0 (glob) + echo babar babar - + echo *SALT* 4 0 (glob) - *SALT* 4 0 (glob) + + echo *SALT* 6 0 (glob) + *SALT* 6 0 (glob) *+ echo *SALT* 0 0 (glob) *SALT* 0 0 (glob) + echo babar @@ -257,6 +292,12 @@ xyzzy + echo *SALT* 6 0 (glob) *SALT* 6 0 (glob) + + printf *abc\ndef\nxyz\n* (glob) + abc + def + xyz + + echo *SALT* 12 0 (glob) + *SALT* 12 0 (glob) . # Ran 2 tests, 0 skipped, 0 warned, 0 failed. 
@@ -282,12 +323,13 @@ --- $TESTTMP/test-failure*.t (glob) +++ $TESTTMP/test-failure*.t.err (glob) - @@ -1,4 +1,4 @@ + @@ -1,5 +1,5 @@ $ echo babar - rataxes + babar This is a noop statement so that this test is still more bytes than success. + pad pad pad pad............................................................ Failed test-failure*.t: output changed (glob) Failed test-nothing.t: output changed @@ -312,12 +354,13 @@ --- $TESTTMP/test-failure.t +++ $TESTTMP/test-failure.t.err - @@ -1,4 +1,4 @@ + @@ -1,5 +1,5 @@ $ echo babar - rataxes + babar This is a noop statement so that this test is still more bytes than success. + pad pad pad pad............................................................ Accept this change? [n] ERROR: test-failure.t output changed !. @@ -331,6 +374,8 @@ rataxes This is a noop statement so that this test is still more bytes than success. + pad pad pad pad............................................................ + pad pad pad pad............................................................ Interactive with custom view @@ -368,12 +413,14 @@ --- $TESTTMP/test-failure.t +++ $TESTTMP/test-failure.t.err - @@ -1,9 +1,9 @@ + @@ -1,11 +1,11 @@ $ echo babar - rataxes + babar This is a noop statement so that this test is still more bytes than success. + pad pad pad pad............................................................ + pad pad pad pad............................................................ $ echo 'saved backup bundle to $TESTTMP/foo.hg' - saved backup bundle to $TESTTMP/foo.hg + saved backup bundle to $TESTTMP/foo.hg* (glob) @@ -388,6 +435,8 @@ babar This is a noop statement so that this test is still more bytes than success. + pad pad pad pad............................................................ + pad pad pad pad............................................................ 
$ echo 'saved backup bundle to $TESTTMP/foo.hg' saved backup bundle to $TESTTMP/foo.hg (glob)< $ echo 'saved backup bundle to $TESTTMP/foo.hg' @@ -509,8 +558,6 @@ "result": "skip" } } (no-eol) -#if json - test for --json ================== @@ -518,12 +565,13 @@ --- $TESTTMP/test-failure.t +++ $TESTTMP/test-failure.t.err - @@ -1,4 +1,4 @@ + @@ -1,5 +1,5 @@ $ echo babar - rataxes + babar This is a noop statement so that this test is still more bytes than success. + pad pad pad pad............................................................ ERROR: test-failure.t output changed !.s @@ -571,12 +619,13 @@ --- $TESTTMP/test-failure.t +++ $TESTTMP/test-failure.t.err - @@ -1,4 +1,4 @@ + @@ -1,5 +1,5 @@ $ echo babar - rataxes + babar This is a noop statement so that this test is still more bytes than success. + pad pad pad pad............................................................ Accept this change? [n] ..s Skipped test-skip.t: missing feature: nail clipper # Ran 2 tests, 1 skipped, 0 warned, 0 failed. @@ -613,8 +662,6 @@ } (no-eol) $ mv backup test-failure.t -#endif - backslash on end of line with glob matching is handled properly $ cat > test-glob-backslash.t << EOF @@ -701,3 +748,13 @@ $ rt $HGTEST_RUN_TESTS_PURE --allow-slow-tests test-very-slow-test.t . # Ran 1 tests, 0 skipped, 0 warned, 0 failed. + +support for running a test outside the current directory + $ mkdir nonlocal + $ cat > nonlocal/test-is-not-here.t << EOF + > $ echo pass + > pass + > EOF + $ rt nonlocal/test-is-not-here.t + . + # Ran 1 tests, 0 skipped, 0 warned, 0 failed.
--- a/tests/test-schemes.t Sun Mar 13 02:29:11 2016 +0100 +++ b/tests/test-schemes.t Tue Mar 15 14:10:46 2016 -0700 @@ -52,6 +52,21 @@ no changes found [1] +check that debugexpandscheme outputs the canonical form + + $ hg debugexpandscheme bb://user/repo + https://bitbucket.org/user/repo + +expanding an unknown scheme emits the input + + $ hg debugexpandscheme foobar://this/that + foobar://this/that + +expanding a canonical URL emits the input + + $ hg debugexpandscheme https://bitbucket.org/user/repo + https://bitbucket.org/user/repo + errors $ cat errors.log
--- a/tests/test-shelve.t Sun Mar 13 02:29:11 2016 +0100 +++ b/tests/test-shelve.t Tue Mar 15 14:10:46 2016 -0700 @@ -373,7 +373,7 @@ try to continue with no unshelve underway $ hg unshelve -c - abort: no unshelve operation underway + abort: no unshelve in progress [255] $ hg status A foo/foo @@ -403,6 +403,10 @@ (use 'hg unshelve --continue' or 'hg unshelve --abort') [255] + $ hg graft --continue + abort: no graft in progress + (continue: hg unshelve --continue) + [255] $ hg unshelve -c rebasing 5:32c69314e062 "changes to: [mq]: second.patch" (tip) unshelve of 'default' complete
--- a/tests/test-status-color.t Sun Mar 13 02:29:11 2016 +0100 +++ b/tests/test-status-color.t Tue Mar 15 14:10:46 2016 -0700 @@ -30,6 +30,15 @@ [status.unknown|? ][status.unknown|b/in_b] [status.unknown|? ][status.unknown|in_root] +hg status with template + $ hg status -T "{label('red', path)}\n" --color=debug + [red|a/1/in_a_1] + [red|a/in_a] + [red|b/1/in_b_1] + [red|b/2/in_b_2] + [red|b/in_b] + [red|in_root] + hg status . in repo root: $ hg status --color=always .
--- a/tests/test-strip.t Sun Mar 13 02:29:11 2016 +0100 +++ b/tests/test-strip.t Tue Mar 15 14:10:46 2016 -0700 @@ -287,6 +287,7 @@ $ hg up 1 files updated, 0 files merged, 0 files removed, 0 files unresolved + 1 other heads for branch "default" $ hg log -G @ changeset: 4:264128213d29 | tag: tip @@ -791,6 +792,7 @@ removing c d: other deleted -> r removing d + starting 4 threads for background file closing (?) 0 files updated, 0 files merged, 2 files removed, 0 files unresolved 2 changesets found list of changesets:
--- a/tests/test-subrepo-deep-nested-change.t Sun Mar 13 02:29:11 2016 +0100 +++ b/tests/test-subrepo-deep-nested-change.t Tue Mar 15 14:10:46 2016 -0700 @@ -1,3 +1,9 @@ + $ cat >> $HGRCPATH <<EOF + > [extdiff] + > # for portability: + > pdiff = sh "$RUNTESTDIR/pdiff" + > EOF + Preparing the subrepository 'sub2' $ hg init sub2 @@ -717,92 +723,92 @@ Interaction with extdiff, largefiles and subrepos - $ hg --config extensions.extdiff= extdiff -S + $ hg --config extensions.extdiff= pdiff -S - $ hg --config extensions.extdiff= extdiff -r '.^' -S - diff -Npru cloned.*/.hgsub cloned/.hgsub (glob) - --- cloned.*/.hgsub * +0000 (glob) - +++ cloned/.hgsub * +0000 (glob) - @@ -1,2 +1 @@ + $ hg --config extensions.extdiff= pdiff -r '.^' -S + diff -Nru cloned.*/.hgsub cloned/.hgsub (glob) + --- cloned.*/.hgsub * (glob) + +++ cloned/.hgsub * (glob) + @@ -1,2 +1* @@ (glob) sub1 = ../sub1 -sub3 = sub3 - diff -Npru cloned.*/.hgsubstate cloned/.hgsubstate (glob) - --- cloned.*/.hgsubstate * +0000 (glob) - +++ cloned/.hgsubstate * +0000 (glob) - @@ -1,2 +1 @@ + diff -Nru cloned.*/.hgsubstate cloned/.hgsubstate (glob) + --- cloned.*/.hgsubstate * (glob) + +++ cloned/.hgsubstate * (glob) + @@ -1,2 +1* @@ (glob) 7a36fa02b66e61f27f3d4a822809f159479b8ab2 sub1 -b1a26de6f2a045a9f079323693614ee322f1ff7e sub3 [1] - $ hg --config extensions.extdiff= extdiff -r 0 -r '.^' -S - diff -Npru cloned.*/.hglf/b.dat cloned.*/.hglf/b.dat (glob) + $ hg --config extensions.extdiff= pdiff -r 0 -r '.^' -S + diff -Nru cloned.*/.hglf/b.dat cloned.*/.hglf/b.dat (glob) --- cloned.*/.hglf/b.dat * (glob) +++ cloned.*/.hglf/b.dat * (glob) - @@ -0,0 +1 @@ + @@ -*,0 +1* @@ (glob) +da39a3ee5e6b4b0d3255bfef95601890afd80709 - diff -Npru cloned.*/.hglf/foo/bar/large.dat cloned.*/.hglf/foo/bar/large.dat (glob) + diff -Nru cloned.*/.hglf/foo/bar/large.dat cloned.*/.hglf/foo/bar/large.dat (glob) --- cloned.*/.hglf/foo/bar/large.dat * (glob) +++ cloned.*/.hglf/foo/bar/large.dat * (glob) - @@ -0,0 +1 @@ + @@ -*,0 +1* @@ 
(glob) +2f6933b5ee0f5fdd823d9717d8729f3c2523811b - diff -Npru cloned.*/.hglf/large.bin cloned.*/.hglf/large.bin (glob) + diff -Nru cloned.*/.hglf/large.bin cloned.*/.hglf/large.bin (glob) --- cloned.*/.hglf/large.bin * (glob) +++ cloned.*/.hglf/large.bin * (glob) - @@ -0,0 +1 @@ + @@ -*,0 +1* @@ (glob) +7f7097b041ccf68cc5561e9600da4655d21c6d18 - diff -Npru cloned.*/.hgsub cloned.*/.hgsub (glob) + diff -Nru cloned.*/.hgsub cloned.*/.hgsub (glob) --- cloned.*/.hgsub * (glob) +++ cloned.*/.hgsub * (glob) - @@ -1 +1,2 @@ + @@ -1* +1,2 @@ (glob) sub1 = ../sub1 +sub3 = sub3 - diff -Npru cloned.*/.hgsubstate cloned.*/.hgsubstate (glob) + diff -Nru cloned.*/.hgsubstate cloned.*/.hgsubstate (glob) --- cloned.*/.hgsubstate * (glob) +++ cloned.*/.hgsubstate * (glob) - @@ -1 +1,2 @@ + @@ -1* +1,2 @@ (glob) -fc3b4ce2696f7741438c79207583768f2ce6b0dd sub1 +7a36fa02b66e61f27f3d4a822809f159479b8ab2 sub1 +b1a26de6f2a045a9f079323693614ee322f1ff7e sub3 - diff -Npru cloned.*/foo/bar/def cloned.*/foo/bar/def (glob) + diff -Nru cloned.*/foo/bar/def cloned.*/foo/bar/def (glob) --- cloned.*/foo/bar/def * (glob) +++ cloned.*/foo/bar/def * (glob) - @@ -0,0 +1 @@ + @@ -*,0 +1* @@ (glob) +changed - diff -Npru cloned.*/main cloned.*/main (glob) + diff -Nru cloned.*/main cloned.*/main (glob) --- cloned.*/main * (glob) +++ cloned.*/main * (glob) - @@ -1 +1 @@ + @@ -1* +1* @@ (glob) -main +foo - diff -Npru cloned.*/sub1/.hgsubstate cloned.*/sub1/.hgsubstate (glob) + diff -Nru cloned.*/sub1/.hgsubstate cloned.*/sub1/.hgsubstate (glob) --- cloned.*/sub1/.hgsubstate * (glob) +++ cloned.*/sub1/.hgsubstate * (glob) - @@ -1 +1 @@ + @@ -1* +1* @@ (glob) -c57a0840e3badd667ef3c3ef65471609acb2ba3c sub2 +c77908c81ccea3794a896c79e98b0e004aee2e9e sub2 - diff -Npru cloned.*/sub1/sub2/folder/test.txt cloned.*/sub1/sub2/folder/test.txt (glob) + diff -Nru cloned.*/sub1/sub2/folder/test.txt cloned.*/sub1/sub2/folder/test.txt (glob) --- cloned.*/sub1/sub2/folder/test.txt * (glob) +++ 
cloned.*/sub1/sub2/folder/test.txt * (glob) - @@ -0,0 +1 @@ + @@ -*,0 +1* @@ (glob) +subfolder - diff -Npru cloned.*/sub1/sub2/sub2 cloned.*/sub1/sub2/sub2 (glob) + diff -Nru cloned.*/sub1/sub2/sub2 cloned.*/sub1/sub2/sub2 (glob) --- cloned.*/sub1/sub2/sub2 * (glob) +++ cloned.*/sub1/sub2/sub2 * (glob) - @@ -1 +1 @@ + @@ -1* +1* @@ (glob) -sub2 +modified - diff -Npru cloned.*/sub3/a.txt cloned.*/sub3/a.txt (glob) + diff -Nru cloned.*/sub3/a.txt cloned.*/sub3/a.txt (glob) --- cloned.*/sub3/a.txt * (glob) +++ cloned.*/sub3/a.txt * (glob) - @@ -0,0 +1 @@ + @@ -*,0 +1* @@ (glob) +xyz [1] $ echo mod > sub1/sub2/sub2 - $ hg --config extensions.extdiff= extdiff -S + $ hg --config extensions.extdiff= pdiff -S --- */cloned.*/sub1/sub2/sub2 * (glob) +++ */cloned/sub1/sub2/sub2 * (glob) - @@ -1 +1 @@ + @@ -1* +1* @@ (glob) -modified +mod [1]
--- a/tests/test-subrepo-git.t Sun Mar 13 02:29:11 2016 +0100 +++ b/tests/test-subrepo-git.t Tue Mar 15 14:10:46 2016 -0700 @@ -785,7 +785,7 @@ index 0000000..257cc56 --- /dev/null +++ b/s/barfoo - @@ -0,0 +1 @@ + @@ -0,0 +1* @@ (glob) +foo $ hg diff --subrepos s/foobar diff --git a/s/foobar b/s/foobar @@ -827,7 +827,7 @@ index 0000000..257cc56 --- /dev/null +++ b/s/barfoo - @@ -0,0 +1 @@ + @@ -0,0 +1* @@ (glob) +foo moving a file should show a removal and an add
--- a/tests/test-subrepo.t Sun Mar 13 02:29:11 2016 +0100 +++ b/tests/test-subrepo.t Tue Mar 15 14:10:46 2016 -0700 @@ -259,6 +259,7 @@ resolving manifests branchmerge: True, force: False, partial: False ancestor: 1f14a2e2d3ec, local: f0d2028bf86d+, remote: 1831e14459c4 + starting 4 threads for background file closing (?) .hgsubstate: versions differ -> m (premerge) subrepo merge f0d2028bf86d+ 1831e14459c4 1f14a2e2d3ec subrepo t: other changed, get t:6747d179aa9a688023c4b0cad32e4c92bb7f34ad:hg @@ -285,6 +286,7 @@ resolving manifests branchmerge: True, force: False, partial: False ancestor: 1831e14459c4, local: e45c8b14af55+, remote: f94576341bcf + starting 4 threads for background file closing (?) .hgsubstate: versions differ -> m (premerge) subrepo merge e45c8b14af55+ f94576341bcf 1831e14459c4 subrepo t: both sides changed @@ -296,6 +298,7 @@ branchmerge: True, force: False, partial: False ancestor: 6747d179aa9a, local: 20a0db6fbf6c+, remote: 7af322bc1198 preserving t for resolve of t + starting 4 threads for background file closing (?) t: versions differ -> m (premerge) picked tool ':merge' for t (binary False symlink False changedelete False) merging t @@ -664,6 +667,7 @@ $ cd ../t $ hg up -C # discard our earlier merge 1 files updated, 0 files merged, 0 files removed, 0 files unresolved + 2 other heads for branch "default" $ echo blah > t/t $ hg ci -m13 committing subrepository t @@ -677,6 +681,7 @@ $ hg up -C # discard changes 1 files updated, 0 files merged, 0 files removed, 0 files unresolved + 2 other heads for branch "default" pull @@ -718,6 +723,7 @@ adding file changes added 1 changesets with 1 changes to 1 files 0 files updated, 0 files merged, 0 files removed, 0 files unresolved + 2 other heads for branch "default" $ cat t/t blah @@ -1185,6 +1191,7 @@ ? s/c $ hg update -C 1 files updated, 0 files merged, 0 files removed, 0 files unresolved + 2 other heads for branch "default" $ hg status -S ? s/b ? s/c
--- a/tests/test-tags.t Sun Mar 13 02:29:11 2016 +0100 +++ b/tests/test-tags.t Tue Mar 15 14:10:46 2016 -0700 @@ -136,12 +136,13 @@ $ rm -f .hg/cache/tags2-visible .hg/cache/hgtagsfnodes1 $ hg identify b9154636be93 tip - $ hg blackbox -l 5 - 1970/01/01 00:00:00 bob (*)> identify (glob) - 1970/01/01 00:00:00 bob (*)> writing 48 bytes to cache/hgtagsfnodes1 (glob) - 1970/01/01 00:00:00 bob (*)> 0/1 cache hits/lookups in * seconds (glob) - 1970/01/01 00:00:00 bob (*)> writing .hg/cache/tags2-visible with 1 tags (glob) - 1970/01/01 00:00:00 bob (*)> identify exited 0 after ?.?? seconds (glob) + $ hg blackbox -l 6 + 1970/01/01 00:00:00 bob @b9154636be938d3d431e75a7c906504a079bfe07 (5000)> identify + 1970/01/01 00:00:00 bob @b9154636be938d3d431e75a7c906504a079bfe07 (5000)> writing 48 bytes to cache/hgtagsfnodes1 + 1970/01/01 00:00:00 bob @b9154636be938d3d431e75a7c906504a079bfe07 (5000)> 0/1 cache hits/lookups in * seconds (glob) + 1970/01/01 00:00:00 bob @b9154636be938d3d431e75a7c906504a079bfe07 (5000)> writing .hg/cache/tags2-visible with 1 tags + 1970/01/01 00:00:00 bob @b9154636be938d3d431e75a7c906504a079bfe07 (5000)> identify exited 0 after * seconds (glob) + 1970/01/01 00:00:00 bob @b9154636be938d3d431e75a7c906504a079bfe07 (5000)> blackbox -l 6 Failure to acquire lock results in no write @@ -149,12 +150,13 @@ $ echo 'foo:1' > .hg/wlock $ hg identify b9154636be93 tip - $ hg blackbox -l 5 - 1970/01/01 00:00:00 bob (*)> identify (glob) - 1970/01/01 00:00:00 bob (*)> not writing .hg/cache/hgtagsfnodes1 because lock cannot be acquired (glob) - 1970/01/01 00:00:00 bob (*)> 0/1 cache hits/lookups in * seconds (glob) - 1970/01/01 00:00:00 bob (*)> writing .hg/cache/tags2-visible with 1 tags (glob) - 1970/01/01 00:00:00 bob (*)> identify exited 0 after * seconds (glob) + $ hg blackbox -l 6 + 1970/01/01 00:00:00 bob @b9154636be938d3d431e75a7c906504a079bfe07 (5000)> identify + 1970/01/01 00:00:00 bob @b9154636be938d3d431e75a7c906504a079bfe07 (5000)> not writing 
.hg/cache/hgtagsfnodes1 because lock cannot be acquired + 1970/01/01 00:00:00 bob @b9154636be938d3d431e75a7c906504a079bfe07 (5000)> 0/1 cache hits/lookups in * seconds (glob) + 1970/01/01 00:00:00 bob @b9154636be938d3d431e75a7c906504a079bfe07 (5000)> writing .hg/cache/tags2-visible with 1 tags + 1970/01/01 00:00:00 bob @b9154636be938d3d431e75a7c906504a079bfe07 (5000)> identify exited 0 after * seconds (glob) + 1970/01/01 00:00:00 bob @b9154636be938d3d431e75a7c906504a079bfe07 (5000)> blackbox -l 6 $ fnodescacheexists no fnodes cache @@ -214,6 +216,10 @@ $ hg merge 1 1 files updated, 0 files merged, 0 files removed, 0 files unresolved (branch merge, don't forget to commit) + $ hg blackbox -l3 + 1970/01/01 00:00:00 bob @c8edf04160c7f731e4589d66ab3ab3486a64ac28 (5000)> merge 1 + 1970/01/01 00:00:00 bob @c8edf04160c7f731e4589d66ab3ab3486a64ac28+b9154636be938d3d431e75a7c906504a079bfe07 (5000)> merge 1 exited 0 after * seconds (glob) + 1970/01/01 00:00:00 bob @c8edf04160c7f731e4589d66ab3ab3486a64ac28+b9154636be938d3d431e75a7c906504a079bfe07 (5000)> blackbox -l3 $ hg id c8edf04160c7+b9154636be93+ tip $ hg status @@ -348,12 +354,13 @@ tip 5:8dbfe60eff30 bar 1:78391a272241 - $ hg blackbox -l 5 - 1970/01/01 00:00:00 bob (*)> tags (glob) - 1970/01/01 00:00:00 bob (*)> writing 24 bytes to cache/hgtagsfnodes1 (glob) - 1970/01/01 00:00:00 bob (*)> 2/3 cache hits/lookups in * seconds (glob) - 1970/01/01 00:00:00 bob (*)> writing .hg/cache/tags2-visible with 1 tags (glob) - 1970/01/01 00:00:00 bob (*)> tags exited 0 after * seconds (glob) + $ hg blackbox -l 6 + 1970/01/01 00:00:00 bob @8dbfe60eff306a54259cfe007db9e330e7ecf866 (5000)> tags + 1970/01/01 00:00:00 bob @8dbfe60eff306a54259cfe007db9e330e7ecf866 (5000)> writing 24 bytes to cache/hgtagsfnodes1 + 1970/01/01 00:00:00 bob @8dbfe60eff306a54259cfe007db9e330e7ecf866 (5000)> 2/3 cache hits/lookups in * seconds (glob) + 1970/01/01 00:00:00 bob @8dbfe60eff306a54259cfe007db9e330e7ecf866 (5000)> writing .hg/cache/tags2-visible with 1 
tags + 1970/01/01 00:00:00 bob @8dbfe60eff306a54259cfe007db9e330e7ecf866 (5000)> tags exited 0 after * seconds (glob) + 1970/01/01 00:00:00 bob @8dbfe60eff306a54259cfe007db9e330e7ecf866 (5000)> blackbox -l 6 #if unix-permissions no-root Errors writing to .hgtags fnodes cache are silently ignored @@ -368,12 +375,13 @@ tip 6:b968051b5cf3 bar 1:78391a272241 - $ hg blackbox -l 5 - 1970/01/01 00:00:00 bob (*)> tags (glob) - 1970/01/01 00:00:00 bob (*)> couldn't write cache/hgtagsfnodes1: [Errno 13] Permission denied: '$TESTTMP/t2/.hg/cache/hgtagsfnodes1' (glob) - 1970/01/01 00:00:00 bob (*)> 2/3 cache hits/lookups in * seconds (glob) - 1970/01/01 00:00:00 bob (*)> writing .hg/cache/tags2-visible with 1 tags (glob) - 1970/01/01 00:00:00 bob (*)> tags exited 0 after * seconds (glob) + $ hg blackbox -l 6 + 1970/01/01 00:00:00 bob @b968051b5cf3f624b771779c6d5f84f1d4c3fb5d (5000)> tags + 1970/01/01 00:00:00 bob @b968051b5cf3f624b771779c6d5f84f1d4c3fb5d (5000)> couldn't write cache/hgtagsfnodes1: [Errno 13] Permission denied: '$TESTTMP/t2/.hg/cache/hgtagsfnodes1' + 1970/01/01 00:00:00 bob @b968051b5cf3f624b771779c6d5f84f1d4c3fb5d (5000)> 2/3 cache hits/lookups in * seconds (glob) + 1970/01/01 00:00:00 bob @b968051b5cf3f624b771779c6d5f84f1d4c3fb5d (5000)> writing .hg/cache/tags2-visible with 1 tags + 1970/01/01 00:00:00 bob @b968051b5cf3f624b771779c6d5f84f1d4c3fb5d (5000)> tags exited 0 after * seconds (glob) + 1970/01/01 00:00:00 bob @b968051b5cf3f624b771779c6d5f84f1d4c3fb5d (5000)> blackbox -l 6 $ chmod a+w .hg/cache/hgtagsfnodes1 @@ -382,12 +390,13 @@ tip 6:b968051b5cf3 bar 1:78391a272241 - $ hg blackbox -l 5 - 1970/01/01 00:00:00 bob (*)> tags (glob) - 1970/01/01 00:00:00 bob (*)> writing 24 bytes to cache/hgtagsfnodes1 (glob) - 1970/01/01 00:00:00 bob (*)> 2/3 cache hits/lookups in * seconds (glob) - 1970/01/01 00:00:00 bob (*)> writing .hg/cache/tags2-visible with 1 tags (glob) - 1970/01/01 00:00:00 bob (*)> tags exited 0 after * seconds (glob) + $ hg blackbox -l 6 + 
1970/01/01 00:00:00 bob @b968051b5cf3f624b771779c6d5f84f1d4c3fb5d (5000)> tags + 1970/01/01 00:00:00 bob @b968051b5cf3f624b771779c6d5f84f1d4c3fb5d (5000)> writing 24 bytes to cache/hgtagsfnodes1 + 1970/01/01 00:00:00 bob @b968051b5cf3f624b771779c6d5f84f1d4c3fb5d (5000)> 2/3 cache hits/lookups in * seconds (glob) + 1970/01/01 00:00:00 bob @b968051b5cf3f624b771779c6d5f84f1d4c3fb5d (5000)> writing .hg/cache/tags2-visible with 1 tags + 1970/01/01 00:00:00 bob @b968051b5cf3f624b771779c6d5f84f1d4c3fb5d (5000)> tags exited 0 after * seconds (glob) + 1970/01/01 00:00:00 bob @b968051b5cf3f624b771779c6d5f84f1d4c3fb5d (5000)> blackbox -l 6 $ f --size .hg/cache/hgtagsfnodes1 .hg/cache/hgtagsfnodes1: size=168 @@ -410,11 +419,12 @@ tip 4:0c192d7d5e6b bar 1:78391a272241 - $ hg blackbox -l 4 - 1970/01/01 00:00:00 bob (*)> writing 24 bytes to cache/hgtagsfnodes1 (glob) - 1970/01/01 00:00:00 bob (*)> 2/3 cache hits/lookups in * seconds (glob) - 1970/01/01 00:00:00 bob (*)> writing .hg/cache/tags2-visible with 1 tags (glob) - 1970/01/01 00:00:00 bob (*)> tags exited 0 after * seconds (glob) + $ hg blackbox -l 5 + 1970/01/01 00:00:00 bob @0c192d7d5e6b78a714de54a2e9627952a877e25a (5000)> writing 24 bytes to cache/hgtagsfnodes1 + 1970/01/01 00:00:00 bob @0c192d7d5e6b78a714de54a2e9627952a877e25a (5000)> 2/3 cache hits/lookups in * seconds (glob) + 1970/01/01 00:00:00 bob @0c192d7d5e6b78a714de54a2e9627952a877e25a (5000)> writing .hg/cache/tags2-visible with 1 tags + 1970/01/01 00:00:00 bob @0c192d7d5e6b78a714de54a2e9627952a877e25a (5000)> tags exited 0 after * seconds (glob) + 1970/01/01 00:00:00 bob @0c192d7d5e6b78a714de54a2e9627952a877e25a (5000)> blackbox -l 5 $ f --size .hg/cache/hgtagsfnodes1 .hg/cache/hgtagsfnodes1: size=120 @@ -426,12 +436,13 @@ tip 5:035f65efb448 bar 1:78391a272241 - $ hg blackbox -l 5 - 1970/01/01 00:00:00 bob (*)> tags (glob) - 1970/01/01 00:00:00 bob (*)> writing 24 bytes to cache/hgtagsfnodes1 (glob) - 1970/01/01 00:00:00 bob (*)> 2/3 cache hits/lookups in * 
seconds (glob) - 1970/01/01 00:00:00 bob (*)> writing .hg/cache/tags2-visible with 1 tags (glob) - 1970/01/01 00:00:00 bob (*)> tags exited 0 after * seconds (glob) + $ hg blackbox -l 6 + 1970/01/01 00:00:00 bob @035f65efb448350f4772141702a81ab1df48c465 (5000)> tags + 1970/01/01 00:00:00 bob @035f65efb448350f4772141702a81ab1df48c465 (5000)> writing 24 bytes to cache/hgtagsfnodes1 + 1970/01/01 00:00:00 bob @035f65efb448350f4772141702a81ab1df48c465 (5000)> 2/3 cache hits/lookups in * seconds (glob) + 1970/01/01 00:00:00 bob @035f65efb448350f4772141702a81ab1df48c465 (5000)> writing .hg/cache/tags2-visible with 1 tags + 1970/01/01 00:00:00 bob @035f65efb448350f4772141702a81ab1df48c465 (5000)> tags exited 0 after * seconds (glob) + 1970/01/01 00:00:00 bob @035f65efb448350f4772141702a81ab1df48c465 (5000)> blackbox -l 6 $ f --size .hg/cache/hgtagsfnodes1 .hg/cache/hgtagsfnodes1: size=144
--- a/tests/test-template-engine.t Sun Mar 13 02:29:11 2016 +0100 +++ b/tests/test-template-engine.t Tue Mar 15 14:10:46 2016 -0700 @@ -44,17 +44,4 @@ 0 97e5f848f0936960273bbf75be6388cd0350a32b -1 0000000000000000000000000000000000000000 -1 0000000000000000000000000000000000000000 -1 0000000000000000000000000000000000000000 -Fuzzing the unicode escaper to ensure it produces valid data - -#if hypothesis - - >>> from hypothesishelpers import * - >>> import mercurial.templatefilters as tf - >>> import json - >>> @check(st.text().map(lambda s: s.encode('utf-8'))) - ... def testtfescapeproducesvalidjson(text): - ... json.loads('"' + tf.jsonescape(text) + '"') - -#endif - $ cd ..
--- a/tests/test-transplant.t Sun Mar 13 02:29:11 2016 +0100 +++ b/tests/test-transplant.t Tue Mar 15 14:10:46 2016 -0700 @@ -409,6 +409,7 @@ $ hg up -C 1 files updated, 0 files merged, 0 files removed, 0 files unresolved + 1 other heads for branch "default" $ rm added $ hg transplant --continue abort: no transplant to continue
--- a/tests/test-treemanifest.t Sun Mar 13 02:29:11 2016 +0100 +++ b/tests/test-treemanifest.t Tue Mar 15 14:10:46 2016 -0700 @@ -1,3 +1,5 @@ +#require killdaemons + $ cat << EOF >> $HGRCPATH > [format] > usegeneraldelta=yes @@ -361,13 +363,20 @@ added 11 changesets with 15 changes to 10 files (+3 heads) $ grep treemanifest clone/.hg/requires treemanifest + $ hg -R clone verify + checking changesets + checking manifests + checking directory manifests + crosschecking files in changesets and manifests + checking files + 10 files, 11 changesets, 15 total revisions Create deeper repo with tree manifests. $ hg --config experimental.treemanifest=True init deeprepo $ cd deeprepo - $ mkdir a + $ mkdir .A $ mkdir b $ mkdir b/bar $ mkdir b/bar/orange @@ -376,8 +385,8 @@ $ mkdir b/foo/apple $ mkdir b/foo/apple/bees - $ touch a/one.txt - $ touch a/two.txt + $ touch .A/one.txt + $ touch .A/two.txt $ touch b/bar/fruits.txt $ touch b/bar/orange/fly/gnat.py $ touch b/bar/orange/fly/housefly.txt @@ -393,8 +402,8 @@ Test files from the root. $ hg files -r . - a/one.txt (glob) - a/two.txt (glob) + .A/one.txt (glob) + .A/two.txt (glob) b/bar/fruits.txt (glob) b/bar/orange/fly/gnat.py (glob) b/bar/orange/fly/housefly.txt (glob) @@ -408,61 +417,56 @@ b/bar/fruits.txt (glob) b/bar/orange/fly/gnat.py (glob) b/bar/orange/fly/housefly.txt (glob) + $ cp -r .hg/store .hg/store-copy Test files for a subdirectory. - $ mv .hg/store/meta/a oldmf + $ rm -r .hg/store/meta/~2e_a $ hg files -r . b b/bar/fruits.txt (glob) b/bar/orange/fly/gnat.py (glob) b/bar/orange/fly/housefly.txt (glob) b/foo/apple/bees/flower.py (glob) - $ mv oldmf .hg/store/meta/a + $ cp -r .hg/store-copy/. .hg/store Test files with just includes and excludes. - $ mv .hg/store/meta/a oldmf - $ mv .hg/store/meta/b/bar/orange/fly oldmf2 - $ mv .hg/store/meta/b/foo/apple/bees oldmf3 + $ rm -r .hg/store/meta/~2e_a + $ rm -r .hg/store/meta/b/bar/orange/fly + $ rm -r .hg/store/meta/b/foo/apple/bees $ hg files -r . 
-I path:b/bar -X path:b/bar/orange/fly -I path:b/foo -X path:b/foo/apple/bees b/bar/fruits.txt (glob) - $ mv oldmf .hg/store/meta/a - $ mv oldmf2 .hg/store/meta/b/bar/orange/fly - $ mv oldmf3 .hg/store/meta/b/foo/apple/bees + $ cp -r .hg/store-copy/. .hg/store Test files for a subdirectory, excluding a directory within it. - $ mv .hg/store/meta/a oldmf - $ mv .hg/store/meta/b/foo oldmf2 + $ rm -r .hg/store/meta/~2e_a + $ rm -r .hg/store/meta/b/foo $ hg files -r . -X path:b/foo b b/bar/fruits.txt (glob) b/bar/orange/fly/gnat.py (glob) b/bar/orange/fly/housefly.txt (glob) - $ mv oldmf .hg/store/meta/a - $ mv oldmf2 .hg/store/meta/b/foo + $ cp -r .hg/store-copy/. .hg/store Test files for a sub directory, including only a directory within it, and including an unrelated directory. - $ mv .hg/store/meta/a oldmf - $ mv .hg/store/meta/b/foo oldmf2 + $ rm -r .hg/store/meta/~2e_a + $ rm -r .hg/store/meta/b/foo $ hg files -r . -I path:b/bar/orange -I path:a b b/bar/orange/fly/gnat.py (glob) b/bar/orange/fly/housefly.txt (glob) - $ mv oldmf .hg/store/meta/a - $ mv oldmf2 .hg/store/meta/b/foo + $ cp -r .hg/store-copy/. .hg/store Test files for a pattern, including a directory, and excluding a directory within that. - $ mv .hg/store/meta/a oldmf - $ mv .hg/store/meta/b/foo oldmf2 - $ mv .hg/store/meta/b/bar/orange oldmf3 + $ rm -r .hg/store/meta/~2e_a + $ rm -r .hg/store/meta/b/foo + $ rm -r .hg/store/meta/b/bar/orange $ hg files -r . glob:**.txt -I path:b/bar -X path:b/bar/orange b/bar/fruits.txt (glob) - $ mv oldmf .hg/store/meta/a - $ mv oldmf2 .hg/store/meta/b/foo - $ mv oldmf3 .hg/store/meta/b/bar/orange + $ cp -r .hg/store-copy/. 
.hg/store Add some more changes to the deep repo $ echo narf >> b/bar/fruits.txt @@ -470,14 +474,108 @@ $ echo troz >> b/bar/orange/fly/gnat.py $ hg ci -m troz +Verify works + $ hg verify + checking changesets + checking manifests + checking directory manifests + crosschecking files in changesets and manifests + checking files + 8 files, 3 changesets, 10 total revisions + +Dirlogs are included in fncache + $ grep meta/.A/00manifest.i .hg/store/fncache + meta/.A/00manifest.i + +Rebuilt fncache includes dirlogs + $ rm .hg/store/fncache + $ hg debugrebuildfncache + adding data/.A/one.txt.i + adding data/.A/two.txt.i + adding data/b/bar/fruits.txt.i + adding data/b/bar/orange/fly/gnat.py.i + adding data/b/bar/orange/fly/housefly.txt.i + adding data/b/foo/apple/bees/flower.py.i + adding data/c.txt.i + adding data/d.py.i + adding meta/.A/00manifest.i + adding meta/b/00manifest.i + adding meta/b/bar/00manifest.i + adding meta/b/bar/orange/00manifest.i + adding meta/b/bar/orange/fly/00manifest.i + adding meta/b/foo/00manifest.i + adding meta/b/foo/apple/00manifest.i + adding meta/b/foo/apple/bees/00manifest.i + 16 items added, 0 removed from fncache + +Finish first server + $ killdaemons.py + +Back up the recently added revlogs + $ cp -r .hg/store .hg/store-newcopy + +Verify reports missing dirlog + $ rm .hg/store/meta/b/00manifest.* + $ hg verify + checking changesets + checking manifests + checking directory manifests + 0: empty or missing b/ + b/@0: parent-directory manifest refers to unknown revision 67688a370455 + b/@1: parent-directory manifest refers to unknown revision f38e85d334c5 + b/@2: parent-directory manifest refers to unknown revision 99c9792fd4b0 + warning: orphan revlog 'meta/b/bar/00manifest.i' + warning: orphan revlog 'meta/b/bar/orange/00manifest.i' + warning: orphan revlog 'meta/b/bar/orange/fly/00manifest.i' + warning: orphan revlog 'meta/b/foo/00manifest.i' + warning: orphan revlog 'meta/b/foo/apple/00manifest.i' + warning: orphan revlog 
'meta/b/foo/apple/bees/00manifest.i' + crosschecking files in changesets and manifests + b/bar/fruits.txt@0: in changeset but not in manifest + b/bar/orange/fly/gnat.py@0: in changeset but not in manifest + b/bar/orange/fly/housefly.txt@0: in changeset but not in manifest + b/foo/apple/bees/flower.py@0: in changeset but not in manifest + checking files + 8 files, 3 changesets, 10 total revisions + 6 warnings encountered! + 8 integrity errors encountered! + (first damaged changeset appears to be 0) + [1] + $ cp -r .hg/store-newcopy/. .hg/store + +Verify reports missing dirlog entry + $ mv -f .hg/store-copy/meta/b/00manifest.* .hg/store/meta/b/ + $ hg verify + checking changesets + checking manifests + checking directory manifests + b/@1: parent-directory manifest refers to unknown revision f38e85d334c5 + b/@2: parent-directory manifest refers to unknown revision 99c9792fd4b0 + b/bar/@?: rev 1 points to unexpected changeset 1 + b/bar/@?: 5e03c4ee5e4a not in parent-directory manifest + b/bar/@?: rev 2 points to unexpected changeset 2 + b/bar/@?: 1b16940d66d6 not in parent-directory manifest + b/bar/orange/@?: rev 1 points to unexpected changeset 2 + (expected None) + b/bar/orange/fly/@?: rev 1 points to unexpected changeset 2 + (expected None) + crosschecking files in changesets and manifests + checking files + 8 files, 3 changesets, 10 total revisions + 2 warnings encountered! + 8 integrity errors encountered! + (first damaged changeset appears to be 1) + [1] + $ cp -r .hg/store-newcopy/. .hg/store + Test cloning a treemanifest repo over http. - $ hg serve -p $HGPORT2 -d --pid-file=hg.pid --errorlog=errors.log + $ hg serve -p $HGPORT -d --pid-file=hg.pid --errorlog=errors.log $ cat hg.pid >> $DAEMON_PIDS $ cd .. We can clone even with the knob turned off and we'll get a treemanifest repo. 
$ hg clone --config experimental.treemanifest=False \ > --config experimental.changegroup3=True \ - > http://localhost:$HGPORT2 deepclone + > http://localhost:$HGPORT deepclone requesting all changes adding changesets adding manifests @@ -493,8 +591,6 @@ Tree manifest revlogs exist. $ find deepclone/.hg/store/meta | sort deepclone/.hg/store/meta - deepclone/.hg/store/meta/a - deepclone/.hg/store/meta/a/00manifest.i deepclone/.hg/store/meta/b deepclone/.hg/store/meta/b/00manifest.i deepclone/.hg/store/meta/b/bar @@ -509,12 +605,134 @@ deepclone/.hg/store/meta/b/foo/apple/00manifest.i deepclone/.hg/store/meta/b/foo/apple/bees deepclone/.hg/store/meta/b/foo/apple/bees/00manifest.i + deepclone/.hg/store/meta/~2e_a + deepclone/.hg/store/meta/~2e_a/00manifest.i Verify passes. $ cd deepclone $ hg verify checking changesets checking manifests + checking directory manifests crosschecking files in changesets and manifests checking files 8 files, 3 changesets, 10 total revisions $ cd .. + +Create clones using old repo formats to use in later tests + $ hg clone --config format.usestore=False \ + > --config experimental.changegroup3=True \ + > http://localhost:$HGPORT deeprepo-basicstore + requesting all changes + adding changesets + adding manifests + adding file changes + added 3 changesets with 10 changes to 8 files + updating to branch default + 8 files updated, 0 files merged, 0 files removed, 0 files unresolved + $ cd deeprepo-basicstore + $ grep store .hg/requires + [1] + $ hg serve -p $HGPORT1 -d --pid-file=hg.pid --errorlog=errors.log + $ cat hg.pid >> $DAEMON_PIDS + $ cd .. 
+ $ hg clone --config format.usefncache=False \ + > --config experimental.changegroup3=True \ + > http://localhost:$HGPORT deeprepo-encodedstore + requesting all changes + adding changesets + adding manifests + adding file changes + added 3 changesets with 10 changes to 8 files + updating to branch default + 8 files updated, 0 files merged, 0 files removed, 0 files unresolved + $ cd deeprepo-encodedstore + $ grep fncache .hg/requires + [1] + $ hg serve -p $HGPORT2 -d --pid-file=hg.pid --errorlog=errors.log + $ cat hg.pid >> $DAEMON_PIDS + $ cd .. + +Local clone with basicstore + $ hg clone -U deeprepo-basicstore local-clone-basicstore + $ hg -R local-clone-basicstore verify + checking changesets + checking manifests + checking directory manifests + crosschecking files in changesets and manifests + checking files + 8 files, 3 changesets, 10 total revisions + +Local clone with encodedstore + $ hg clone -U deeprepo-encodedstore local-clone-encodedstore + $ hg -R local-clone-encodedstore verify + checking changesets + checking manifests + checking directory manifests + crosschecking files in changesets and manifests + checking files + 8 files, 3 changesets, 10 total revisions + +Local clone with fncachestore + $ hg clone -U deeprepo local-clone-fncachestore + $ hg -R local-clone-fncachestore verify + checking changesets + checking manifests + checking directory manifests + crosschecking files in changesets and manifests + checking files + 8 files, 3 changesets, 10 total revisions + +Stream clone with basicstore + $ hg clone --config experimental.changegroup3=True --uncompressed -U \ + > http://localhost:$HGPORT1 stream-clone-basicstore + streaming all changes + 18 files to transfer, * of data (glob) + transferred * in * seconds (*) (glob) + searching for changes + no changes found + $ hg -R stream-clone-basicstore verify + checking changesets + checking manifests + checking directory manifests + crosschecking files in changesets and manifests + checking files + 8 
files, 3 changesets, 10 total revisions + +Stream clone with encodedstore + $ hg clone --config experimental.changegroup3=True --uncompressed -U \ + > http://localhost:$HGPORT2 stream-clone-encodedstore + streaming all changes + 18 files to transfer, * of data (glob) + transferred * in * seconds (*) (glob) + searching for changes + no changes found + $ hg -R stream-clone-encodedstore verify + checking changesets + checking manifests + checking directory manifests + crosschecking files in changesets and manifests + checking files + 8 files, 3 changesets, 10 total revisions + +Stream clone with fncachestore + $ hg clone --config experimental.changegroup3=True --uncompressed -U \ + > http://localhost:$HGPORT stream-clone-fncachestore + streaming all changes + 18 files to transfer, * of data (glob) + transferred * in * seconds (*) (glob) + searching for changes + no changes found + $ hg -R stream-clone-fncachestore verify + checking changesets + checking manifests + checking directory manifests + crosschecking files in changesets and manifests + checking files + 8 files, 3 changesets, 10 total revisions + +Packed bundle + $ hg -R deeprepo debugcreatestreamclonebundle repo-packed.hg + writing 3349 bytes for 18 files + bundle requirements: generaldelta, revlogv1, treemanifest + $ hg debugbundle --spec repo-packed.hg + none-packed1;requirements%3Dgeneraldelta%2Crevlogv1%2Ctreemanifest
--- a/tests/test-ui-config.py.out Sun Mar 13 02:29:11 2016 +0100 +++ b/tests/test-ui-config.py.out Tue Mar 15 14:10:46 2016 -0700 @@ -1,5 +1,5 @@ [('string', 'string value'), ('bool1', 'true'), ('bool2', 'false'), ('boolinvalid', 'foo'), ('int1', '42'), ('int2', '-42'), ('intinvalid', 'foo')] -[('list1', 'foo'), ('list2', 'foo bar baz'), ('list3', 'alice, bob'), ('list4', 'foo bar baz alice, bob'), ('list5', 'abc d"ef"g "hij def"'), ('list6', '"hello world", "how are you?"'), ('list7', 'Do"Not"Separate'), ('list8', '"Do"Separate'), ('list9', '"Do\\"NotSeparate"'), ('list10', 'string "with extraneous" quotation mark"'), ('list11', 'x, y'), ('list12', '"x", "y"'), ('list13', '""" key = "x", "y" """'), ('list14', ',,,, '), ('list15', '" just with starting quotation'), ('list16', '"longer quotation" with "no ending quotation'), ('list17', 'this is \\" "not a quotation mark"'), ('list18', '\n \n\nding\ndong')] +[('list1', 'foo'), ('list2', 'foo bar baz'), ('list3', 'alice, bob'), ('list4', 'foo bar baz alice, bob'), ('list5', 'abc d"ef"g "hij def"'), ('list6', '"hello world", "how are you?"'), ('list7', 'Do"Not"Separate'), ('list8', '"Do"Separate'), ('list9', '"Do\\"NotSeparate"'), ('list10', 'string "with extraneous" quotation mark"'), ('list11', 'x, y'), ('list12', '"x", "y"'), ('list13', '""" key = "x", "y" """'), ('list14', ',,,,'), ('list15', '" just with starting quotation'), ('list16', '"longer quotation" with "no ending quotation'), ('list17', 'this is \\" "not a quotation mark"'), ('list18', 'ding\ndong')] --- 'string value' 'true'
--- a/tests/test-up-local-change.t Sun Mar 13 02:29:11 2016 +0100 +++ b/tests/test-up-local-change.t Tue Mar 15 14:10:46 2016 -0700 @@ -73,6 +73,7 @@ preserving a for resolve of a b: other deleted -> r removing b + starting 4 threads for background file closing (?) a: versions differ -> m (premerge) picked tool 'true' for a (binary False symlink False changedelete False) merging a @@ -172,9 +173,8 @@ summary: 2 $ hg --debug up - abort: uncommitted changes - (commit and merge, or update --clean to discard changes) - [255] + 0 files updated, 0 files merged, 0 files removed, 0 files unresolved + 1 other heads for branch "default" test conflicting untracked files
--- a/tests/test-update-branches.t Sun Mar 13 02:29:11 2016 +0100 +++ b/tests/test-update-branches.t Tue Mar 15 14:10:46 2016 -0700 @@ -93,8 +93,8 @@ parent=5 $ norevtest 'none clean same' clean 2 - abort: not a linear update - (merge or update --check to force update) + 0 files updated, 0 files merged, 0 files removed, 0 files unresolved + 1 other heads for branch "default" parent=2 @@ -140,8 +140,8 @@ M foo $ norevtest 'none dirty cross' dirty 2 - abort: uncommitted changes - (commit and merge, or update --clean to discard changes) + 0 files updated, 0 files merged, 0 files removed, 0 files unresolved + 1 other heads for branch "default" parent=2 M foo @@ -166,14 +166,133 @@ M sub/suba $ norevtest '-c clean same' clean 2 -c - 1 files updated, 0 files merged, 0 files removed, 0 files unresolved - parent=3 + 0 files updated, 0 files merged, 0 files removed, 0 files unresolved + 1 other heads for branch "default" + parent=2 $ revtest '-cC dirty linear' dirty 1 2 -cC abort: cannot specify both -c/--check and -C/--clean parent=1 M foo + $ cd .. + +Test updating with closed head +--------------------------------------------------------------------- + + $ hg clone -U -q b1 closed-heads + $ cd closed-heads + +Test updating if at least one non-closed branch head exists + +if on the closed branch head: +- update to "." +- "updated to a closed branch head ...." message is displayed +- "N other heads for ...." message is displayed + + $ hg update -q -C 3 + $ hg commit --close-branch -m 6 + $ norevtest "on closed branch head" clean 6 + 0 files updated, 0 files merged, 0 files removed, 0 files unresolved + updated to a closed branch head, because all descendant heads are closed. + beware of re-opening closed head by subsequent commit here. + 1 other heads for branch "default" + parent=6 + +if descendant non-closed branch head exists, and it is only one branch head: +- update to it, even if its revision is less than closed one +- "N other heads for ...." 
message isn't displayed + + $ norevtest "non-closed 2 should be chosen" clean 1 + 1 files updated, 0 files merged, 0 files removed, 0 files unresolved + parent=2 + +if all descendant branch heads are closed, but there is another branch head: +- update to the tipmost descendant head +- "updated to a closed branch head ...." message is displayed +- "N other heads for ...." message is displayed + + $ norevtest "all descendant branch heads are closed" clean 3 + 0 files updated, 0 files merged, 0 files removed, 0 files unresolved + updated to a closed branch head, because all descendant heads are closed. + beware of re-opening closed head by subsequent commit here. + 1 other heads for branch "default" + parent=6 + +Test updating if all branch heads are closed + +if on the closed branch head: +- update to "." +- "updated to a closed branch head ...." message is displayed +- "all heads of branch ...." message is displayed + + $ hg update -q -C 2 + $ hg commit --close-branch -m 7 + $ norevtest "all heads of branch default are closed" clean 6 + 0 files updated, 0 files merged, 0 files removed, 0 files unresolved + updated to a closed branch head, because all descendant heads are closed. + beware of re-opening closed head by subsequent commit here. + all heads for branch "default" are closed. + parent=6 + +if not on the closed branch head: +- update to the tipmost descendant (closed) head +- "updated to a closed branch head ...." message is displayed +- "all heads of branch ...." message is displayed + + $ norevtest "all heads of branch default are closed" clean 1 + 1 files updated, 0 files merged, 0 files removed, 0 files unresolved + updated to a closed branch head, because all descendant heads are closed. + beware of re-opening closed head by subsequent commit here. + all heads for branch "default" are closed. + parent=7 + + $ cd .. 
+ +Test updating if "default" branch doesn't exist and no revision is +checked out (= "default" is used as current branch) + + $ hg init no-default-branch + $ cd no-default-branch + + $ hg branch foobar + marked working directory as branch foobar + (branches are permanent and global, did you want a bookmark?) + $ echo a > a + $ hg commit -m "#0" -A + adding a + $ echo 1 >> a + $ hg commit -m "#1" + $ hg update -q 0 + $ echo 3 >> a + $ hg commit -m "#2" + created new head + $ hg commit --close-branch -m "#3" + +if there is at least one non-closed branch head: +- update to the tipmost branch head + + $ norevtest "non-closed 1 should be chosen" clean null + 1 files updated, 0 files merged, 0 files removed, 0 files unresolved + parent=1 + +if all branch heads are closed +- update to "tip" +- "updated to a closed branch head ...." message is displayed +- "all heads for branch "XXXX" are closed" message is displayed + + $ hg update -q -C 1 + $ hg commit --close-branch -m "#4" + + $ norevtest "all branches are closed" clean null + 1 files updated, 0 files merged, 0 files removed, 0 files unresolved + updated to a closed branch head, because all descendant heads are closed. + beware of re-opening closed head by subsequent commit here. + all heads for branch "foobar" are closed. + parent=4 + + $ cd ../b1 + Test obsolescence behavior ---------------------------------------------------------------------
--- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/tests/test-verify-repo-operations.py Tue Mar 15 14:10:46 2016 -0700 @@ -0,0 +1,603 @@ +from __future__ import print_function, absolute_import + +"""Fuzz testing for operations against a Mercurial repository + +This uses Hypothesis's stateful testing to generate random repository +operations and test Mercurial using them, both to see if there are any +unexpected errors and to compare different versions of it.""" + +import os +import subprocess +import sys + +# Only run if slow tests are allowed +if subprocess.call(['python', '%s/hghave' % os.environ['TESTDIR'], + 'slow']): + sys.exit(80) + +# These tests require Hypothesis and pytz to be installed. +# Running 'pip install hypothesis pytz' will achieve that. +# Note: This won't work if you're running Python < 2.7. +try: + from hypothesis.extra.datetime import datetimes +except ImportError: + sys.stderr.write("skipped: hypothesis or pytz not installed" + os.linesep) + sys.exit(80) + +# If you are running an old version of pip you may find that the enum34 +# backport is not installed automatically. If so 'pip install enum34' will +# fix this problem. 
+try:
+    import enum
+    assert enum # Silence pyflakes
+except ImportError:
+    sys.stderr.write("skipped: enum34 not installed" + os.linesep)
+    sys.exit(80)
+
+import binascii
+from contextlib import contextmanager
+import errno
+import pipes
+import shutil
+import silenttestrunner
+import subprocess
+
+from hypothesis.errors import HypothesisException
+from hypothesis.stateful import (
+    rule, RuleBasedStateMachine, Bundle, precondition)
+from hypothesis import settings, note, strategies as st
+from hypothesis.configuration import set_hypothesis_home_dir
+from hypothesis.database import ExampleDatabase
+
+testdir = os.path.abspath(os.environ["TESTDIR"])
+
+# We store Hypothesis examples here rather than in the temporary test
+# directory so that when rerunning a failing test this always results in
+# refinding the previous failure. This directory is in .hgignore and should
+# not be checked in but is useful to have for development.
+set_hypothesis_home_dir(os.path.join(testdir, ".hypothesis"))
+
+runtests = os.path.join(os.environ["RUNTESTDIR"], "run-tests.py")
+testtmp = os.environ["TESTTMP"]
+assert os.path.isdir(testtmp)
+
+generatedtests = os.path.join(testdir, "hypothesis-generated")
+
+try:
+    os.makedirs(generatedtests)
+except OSError:
+    pass
+
+# We write out generated .t files to a file in order to ease debugging and to
+# give a starting point for turning failures Hypothesis finds into normal
+# tests. In order to ensure that multiple copies of this test can be run in
+# parallel we use atomic file create to ensure that we always get a unique
+# name.
+file_index = 0 +while True: + file_index += 1 + savefile = os.path.join(generatedtests, "test-generated-%d.t" % ( + file_index, + )) + try: + os.close(os.open(savefile, os.O_CREAT | os.O_EXCL | os.O_WRONLY)) + break + except OSError as e: + if e.errno != errno.EEXIST: + raise +assert os.path.exists(savefile) + +hgrc = os.path.join(".hg", "hgrc") + +filecharacters = ( + "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789" + "[]^_`;=@{}~ !#$%&'()+,-" +) + +files = st.text(filecharacters, min_size=1).map(lambda x: x.strip()).filter( + bool).map(lambda s: s.encode('ascii')) + +safetext = st.text(st.characters( + min_codepoint=1, max_codepoint=127, + blacklist_categories=('Cc', 'Cs')), min_size=1).map( + lambda s: s.encode('utf-8') +) + +extensions = st.sampled_from(('shelve', 'mq', 'blackbox',)) + +@contextmanager +def acceptableerrors(*args): + """Sometimes we know an operation we're about to perform might fail, and + we're OK with some of the failures. In those cases this may be used as a + context manager and will swallow expected failures, as identified by + substrings of the error message Mercurial emits.""" + try: + yield + except subprocess.CalledProcessError as e: + if not any(a in e.output for a in args): + note(e.output) + raise + +reponames = st.text("abcdefghijklmnopqrstuvwxyz01234556789", min_size=1).map( + lambda s: s.encode('ascii') +) + +class verifyingstatemachine(RuleBasedStateMachine): + """This defines the set of acceptable operations on a Mercurial repository + using Hypothesis's RuleBasedStateMachine. + + The general concept is that we manage multiple repositories inside a + repos/ directory in our temporary test location. Some of these are freshly + inited, some are clones of the others. Our current working directory is + always inside one of these repositories while the tests are running. 
+
+    Hypothesis then performs a series of operations against these repositories,
+    including hg commands, generating contents and editing the .hgrc file.
+    If these operations fail in unexpected ways or behave differently in
+    different configurations of Mercurial, the test will fail and a minimized
+    .t test file will be written to the hypothesis-generated directory to
+    exhibit that failure.
+
+    Operations are defined as methods with @rule() decorators. See the
+    Hypothesis documentation at
+    http://hypothesis.readthedocs.org/en/release/stateful.html for more
+    details."""
+
+    # A bundle is a reusable collection of previously generated data which may
+    # be provided as arguments to future operations.
+    repos = Bundle('repos')
+    paths = Bundle('paths')
+    contents = Bundle('contents')
+    branches = Bundle('branches')
+    committimes = Bundle('committimes')
+
+    def __init__(self):
+        super(verifyingstatemachine, self).__init__()
+        self.repodir = os.path.join(testtmp, "repos")
+        if os.path.exists(self.repodir):
+            shutil.rmtree(self.repodir)
+        os.chdir(testtmp)
+        self.log = []
+        self.failed = False
+        self.configperrepo = {}
+        self.all_extensions = set()
+        self.non_skippable_extensions = set()
+
+        self.mkdirp("repos")
+        self.cd("repos")
+        self.mkdirp("repo1")
+        self.cd("repo1")
+        self.hg("init")
+
+    def teardown(self):
+        """On teardown we clean up after ourselves as usual, but we also
+        do some additional testing: We generate a .t file based on our test
+        run using run-tests.py -i to get the correct output.
+ + We then test it in a number of other configurations, verifying that + each passes the same test.""" + super(verifyingstatemachine, self).teardown() + try: + shutil.rmtree(self.repodir) + except OSError: + pass + ttest = os.linesep.join(" " + l for l in self.log) + os.chdir(testtmp) + path = os.path.join(testtmp, "test-generated.t") + with open(path, 'w') as o: + o.write(ttest + os.linesep) + with open(os.devnull, "w") as devnull: + rewriter = subprocess.Popen( + [runtests, "--local", "-i", path], stdin=subprocess.PIPE, + stdout=devnull, stderr=devnull, + ) + rewriter.communicate("yes") + with open(path, 'r') as i: + ttest = i.read() + + e = None + if not self.failed: + try: + output = subprocess.check_output([ + runtests, path, "--local", "--pure" + ], stderr=subprocess.STDOUT) + assert "Ran 1 test" in output, output + for ext in ( + self.all_extensions - self.non_skippable_extensions + ): + tf = os.path.join(testtmp, "test-generated-no-%s.t" % ( + ext, + )) + with open(tf, 'w') as o: + for l in ttest.splitlines(): + if l.startswith(" $ hg"): + l = l.replace( + "--config %s=" % ( + extensionconfigkey(ext),), "") + o.write(l + os.linesep) + with open(tf, 'r') as r: + t = r.read() + assert ext not in t, t + output = subprocess.check_output([ + runtests, tf, "--local", + ], stderr=subprocess.STDOUT) + assert "Ran 1 test" in output, output + except subprocess.CalledProcessError as e: + note(e.output) + if self.failed or e is not None: + with open(savefile, "wb") as o: + o.write(ttest) + if e is not None: + raise e + + def execute_step(self, step): + try: + return super(verifyingstatemachine, self).execute_step(step) + except (HypothesisException, KeyboardInterrupt): + raise + except Exception: + self.failed = True + raise + + # Section: Basic commands. 
+ def mkdirp(self, path): + if os.path.exists(path): + return + self.log.append( + "$ mkdir -p -- %s" % (pipes.quote(os.path.relpath(path)),)) + os.makedirs(path) + + def cd(self, path): + path = os.path.relpath(path) + if path == ".": + return + os.chdir(path) + self.log.append("$ cd -- %s" % (pipes.quote(path),)) + + def hg(self, *args): + extra_flags = [] + for key, value in self.config.items(): + extra_flags.append("--config") + extra_flags.append("%s=%s" % (key, value)) + self.command("hg", *(tuple(extra_flags) + args)) + + def command(self, *args): + self.log.append("$ " + ' '.join(map(pipes.quote, args))) + subprocess.check_output(args, stderr=subprocess.STDOUT) + + # Section: Set up basic data + # This section has no side effects but generates data that we will want + # to use later. + @rule( + target=paths, + source=st.lists(files, min_size=1).map(lambda l: os.path.join(*l))) + def genpath(self, source): + return source + + @rule( + target=committimes, + when=datetimes(min_year=1970, max_year=2038) | st.none()) + def gentime(self, when): + return when + + @rule( + target=contents, + content=st.one_of( + st.binary(), + st.text().map(lambda x: x.encode('utf-8')) + )) + def gencontent(self, content): + return content + + @rule( + target=branches, + name=safetext, + ) + def genbranch(self, name): + return name + + @rule(target=paths, source=paths) + def lowerpath(self, source): + return source.lower() + + @rule(target=paths, source=paths) + def upperpath(self, source): + return source.upper() + + # Section: Basic path operations + @rule(path=paths, content=contents) + def writecontent(self, path, content): + self.unadded_changes = True + if os.path.isdir(path): + return + parent = os.path.dirname(path) + if parent: + try: + self.mkdirp(parent) + except OSError: + # It may be the case that there is a regular file that has + # previously been created that has the same name as an ancestor + # of the current path. 
This will cause mkdirp to fail with this + # error. We just turn this into a no-op in that case. + return + with open(path, 'wb') as o: + o.write(content) + self.log.append(( + "$ python -c 'import binascii; " + "print(binascii.unhexlify(\"%s\"))' > %s") % ( + binascii.hexlify(content), + pipes.quote(path), + )) + + @rule(path=paths) + def addpath(self, path): + if os.path.exists(path): + self.hg("add", "--", path) + + @rule(path=paths) + def forgetpath(self, path): + if os.path.exists(path): + with acceptableerrors( + "file is already untracked", + ): + self.hg("forget", "--", path) + + @rule(s=st.none() | st.integers(0, 100)) + def addremove(self, s): + args = ["addremove"] + if s is not None: + args.extend(["-s", str(s)]) + self.hg(*args) + + @rule(path=paths) + def removepath(self, path): + if os.path.exists(path): + with acceptableerrors( + 'file is untracked', + 'file has been marked for add', + 'file is modified', + ): + self.hg("remove", "--", path) + + @rule( + message=safetext, + amend=st.booleans(), + when=committimes, + addremove=st.booleans(), + secret=st.booleans(), + close_branch=st.booleans(), + ) + def maybecommit( + self, message, amend, when, addremove, secret, close_branch + ): + command = ["commit"] + errors = ["nothing changed"] + if amend: + errors.append("cannot amend public changesets") + command.append("--amend") + command.append("-m" + pipes.quote(message)) + if secret: + command.append("--secret") + if close_branch: + command.append("--close-branch") + errors.append("can only close branch heads") + if addremove: + command.append("--addremove") + if when is not None: + if when.year == 1970: + errors.append('negative date value') + if when.year == 2038: + errors.append('exceeds 32 bits') + command.append("--date=%s" % ( + when.strftime('%Y-%m-%d %H:%M:%S %z'),)) + + with acceptableerrors(*errors): + self.hg(*command) + + # Section: Repository management + @property + def currentrepo(self): + return os.path.basename(os.getcwd()) + + 
@property + def config(self): + return self.configperrepo.setdefault(self.currentrepo, {}) + + @rule( + target=repos, + source=repos, + name=reponames, + ) + def clone(self, source, name): + if not os.path.exists(os.path.join("..", name)): + self.cd("..") + self.hg("clone", source, name) + self.cd(name) + return name + + @rule( + target=repos, + name=reponames, + ) + def fresh(self, name): + if not os.path.exists(os.path.join("..", name)): + self.cd("..") + self.mkdirp(name) + self.cd(name) + self.hg("init") + return name + + @rule(name=repos) + def switch(self, name): + self.cd(os.path.join("..", name)) + assert self.currentrepo == name + assert os.path.exists(".hg") + + @rule(target=repos) + def origin(self): + return "repo1" + + @rule() + def pull(self, repo=repos): + with acceptableerrors( + "repository default not found", + "repository is unrelated", + ): + self.hg("pull") + + @rule(newbranch=st.booleans()) + def push(self, newbranch): + with acceptableerrors( + "default repository not configured", + "no changes found", + ): + if newbranch: + self.hg("push", "--new-branch") + else: + with acceptableerrors( + "creates new branches" + ): + self.hg("push") + + # Section: Simple side effect free "check" operations + @rule() + def log(self): + self.hg("log") + + @rule() + def verify(self): + self.hg("verify") + + @rule() + def diff(self): + self.hg("diff", "--nodates") + + @rule() + def status(self): + self.hg("status") + + @rule() + def export(self): + self.hg("export") + + # Section: Branch management + @rule() + def checkbranch(self): + self.hg("branch") + + @rule(branch=branches) + def switchbranch(self, branch): + with acceptableerrors( + 'cannot use an integer as a name', + 'cannot be used in a name', + 'a branch of the same name already exists', + 'is reserved', + ): + self.hg("branch", "--", branch) + + @rule(branch=branches, clean=st.booleans()) + def update(self, branch, clean): + with acceptableerrors( + 'unknown revision', + 'parse error', + ): + if 
clean: + self.hg("update", "-C", "--", branch) + else: + self.hg("update", "--", branch) + + # Section: Extension management + def hasextension(self, extension): + return extensionconfigkey(extension) in self.config + + def commandused(self, extension): + assert extension in self.all_extensions + self.non_skippable_extensions.add(extension) + + @rule(extension=extensions) + def addextension(self, extension): + self.all_extensions.add(extension) + self.config[extensionconfigkey(extension)] = "" + + @rule(extension=extensions) + def removeextension(self, extension): + self.config.pop(extensionconfigkey(extension), None) + + # Section: Commands from the shelve extension + @rule() + @precondition(lambda self: self.hasextension("shelve")) + def shelve(self): + self.commandused("shelve") + with acceptableerrors("nothing changed"): + self.hg("shelve") + + @rule() + @precondition(lambda self: self.hasextension("shelve")) + def unshelve(self): + self.commandused("shelve") + with acceptableerrors("no shelved changes to apply"): + self.hg("unshelve") + +class writeonlydatabase(ExampleDatabase): + def __init__(self, underlying): + super(ExampleDatabase, self).__init__() + self.underlying = underlying + + def fetch(self, key): + return () + + def save(self, key, value): + self.underlying.save(key, value) + + def delete(self, key, value): + self.underlying.delete(key, value) + + def close(self): + self.underlying.close() + +def extensionconfigkey(extension): + return "extensions." 
+ extension + +settings.register_profile( + 'default', settings( + timeout=300, + stateful_step_count=50, + max_examples=10, + ) +) + +settings.register_profile( + 'fast', settings( + timeout=10, + stateful_step_count=20, + max_examples=5, + min_satisfying_examples=1, + max_shrinks=0, + ) +) + +settings.register_profile( + 'continuous', settings( + timeout=-1, + stateful_step_count=1000, + max_examples=10 ** 8, + max_iterations=10 ** 8, + database=writeonlydatabase(settings.default.database) + ) +) + +settings.load_profile(os.getenv('HYPOTHESIS_PROFILE', 'default')) + +verifyingtest = verifyingstatemachine.TestCase + +verifyingtest.settings = settings.default + +if __name__ == '__main__': + try: + silenttestrunner.main(__name__) + finally: + # So as to prevent proliferation of useless test files, if we never + # actually wrote a failing test we clean up after ourselves and delete + # the file for doing so that we owned. + if os.path.exists(savefile) and os.path.getsize(savefile) == 0: + os.unlink(savefile)
--- a/tests/test-verify.t Sun Mar 13 02:29:11 2016 +0100 +++ b/tests/test-verify.t Tue Mar 15 14:10:46 2016 -0700 @@ -46,13 +46,13 @@ checking files warning: revlog 'data/FOO.txt.i' not in fncache! 0: empty or missing FOO.txt - FOO.txt@0: f62022d3d590 in manifests not found + FOO.txt@0: manifest refers to unknown revision f62022d3d590 warning: revlog 'data/QUICK.txt.i' not in fncache! 0: empty or missing QUICK.txt - QUICK.txt@0: 88b857db8eba in manifests not found + QUICK.txt@0: manifest refers to unknown revision 88b857db8eba warning: revlog 'data/bar.txt.i' not in fncache! 0: empty or missing bar.txt - bar.txt@0: 256559129457 in manifests not found + bar.txt@0: manifest refers to unknown revision 256559129457 3 files, 1 changesets, 0 total revisions 3 warnings encountered! hint: run "hg debugrebuildfncache" to recover from corrupt fncache @@ -63,6 +63,208 @@ $ cd ../../.. $ cd .. +Set up a repo for testing missing revlog entries + + $ hg init missing-entries + $ cd missing-entries + $ echo 0 > file + $ hg ci -Aqm0 + $ cp -r .hg/store .hg/store-partial + $ echo 1 > file + $ hg ci -Aqm1 + $ cp -r .hg/store .hg/store-full + +Entire changelog missing + + $ rm .hg/store/00changelog.* + $ hg verify -q + 0: empty or missing changelog + manifest@0: d0b6632564d4 not in changesets + manifest@1: 941fc4534185 not in changesets + 3 integrity errors encountered! + (first damaged changeset appears to be 0) + [1] + $ cp -r .hg/store-full/. .hg/store + +Entire manifest log missing + + $ rm .hg/store/00manifest.* + $ hg verify -q + 0: empty or missing manifest + 1 integrity errors encountered! + (first damaged changeset appears to be 0) + [1] + $ cp -r .hg/store-full/. .hg/store + +Entire filelog missing + + $ rm .hg/store/data/file.* + $ hg verify -q + warning: revlog 'data/file.i' not in fncache! + 0: empty or missing file + file@0: manifest refers to unknown revision 362fef284ce2 + file@1: manifest refers to unknown revision c10f2164107d + 1 warnings encountered! 
+ hint: run "hg debugrebuildfncache" to recover from corrupt fncache + 3 integrity errors encountered! + (first damaged changeset appears to be 0) + [1] + $ cp -r .hg/store-full/. .hg/store + +Entire changelog and manifest log missing + + $ rm .hg/store/00changelog.* + $ rm .hg/store/00manifest.* + $ hg verify -q + warning: orphan revlog 'data/file.i' + 1 warnings encountered! + $ cp -r .hg/store-full/. .hg/store + +Entire changelog and filelog missing + + $ rm .hg/store/00changelog.* + $ rm .hg/store/data/file.* + $ hg verify -q + 0: empty or missing changelog + manifest@0: d0b6632564d4 not in changesets + manifest@1: 941fc4534185 not in changesets + warning: revlog 'data/file.i' not in fncache! + ?: empty or missing file + file@0: manifest refers to unknown revision 362fef284ce2 + file@1: manifest refers to unknown revision c10f2164107d + 1 warnings encountered! + hint: run "hg debugrebuildfncache" to recover from corrupt fncache + 6 integrity errors encountered! + (first damaged changeset appears to be 0) + [1] + $ cp -r .hg/store-full/. .hg/store + +Entire manifest log and filelog missing + + $ rm .hg/store/00manifest.* + $ rm .hg/store/data/file.* + $ hg verify -q + 0: empty or missing manifest + warning: revlog 'data/file.i' not in fncache! + 0: empty or missing file + 1 warnings encountered! + hint: run "hg debugrebuildfncache" to recover from corrupt fncache + 2 integrity errors encountered! + (first damaged changeset appears to be 0) + [1] + $ cp -r .hg/store-full/. .hg/store + +Changelog missing entry + + $ cp -f .hg/store-partial/00changelog.* .hg/store + $ hg verify -q + manifest@?: rev 1 points to nonexistent changeset 1 + manifest@?: 941fc4534185 not in changesets + file@?: rev 1 points to nonexistent changeset 1 + (expected 0) + 1 warnings encountered! + 3 integrity errors encountered! + [1] + $ cp -r .hg/store-full/. 
.hg/store + +Manifest log missing entry + + $ cp -f .hg/store-partial/00manifest.* .hg/store + $ hg verify -q + manifest@1: changeset refers to unknown revision 941fc4534185 + file@1: c10f2164107d not in manifests + 2 integrity errors encountered! + (first damaged changeset appears to be 1) + [1] + $ cp -r .hg/store-full/. .hg/store + +Filelog missing entry + + $ cp -f .hg/store-partial/data/file.* .hg/store/data + $ hg verify -q + file@1: manifest refers to unknown revision c10f2164107d + 1 integrity errors encountered! + (first damaged changeset appears to be 1) + [1] + $ cp -r .hg/store-full/. .hg/store + +Changelog and manifest log missing entry + + $ cp -f .hg/store-partial/00changelog.* .hg/store + $ cp -f .hg/store-partial/00manifest.* .hg/store + $ hg verify -q + file@?: rev 1 points to nonexistent changeset 1 + (expected 0) + file@?: c10f2164107d not in manifests + 1 warnings encountered! + 2 integrity errors encountered! + [1] + $ cp -r .hg/store-full/. .hg/store + +Changelog and filelog missing entry + + $ cp -f .hg/store-partial/00changelog.* .hg/store + $ cp -f .hg/store-partial/data/file.* .hg/store/data + $ hg verify -q + manifest@?: rev 1 points to nonexistent changeset 1 + manifest@?: 941fc4534185 not in changesets + file@?: manifest refers to unknown revision c10f2164107d + 3 integrity errors encountered! + [1] + $ cp -r .hg/store-full/. .hg/store + +Manifest and filelog missing entry + + $ cp -f .hg/store-partial/00manifest.* .hg/store + $ cp -f .hg/store-partial/data/file.* .hg/store/data + $ hg verify -q + manifest@1: changeset refers to unknown revision 941fc4534185 + 1 integrity errors encountered! + (first damaged changeset appears to be 1) + [1] + $ cp -r .hg/store-full/. 
.hg/store + +Corrupt changelog base node to cause failure to read revision + + $ printf abcd | dd conv=notrunc of=.hg/store/00changelog.i bs=1 seek=16 \ + > 2> /dev/null + $ hg verify -q + 0: unpacking changeset 08b1860757c2: * (glob) + manifest@?: rev 0 points to unexpected changeset 0 + manifest@?: d0b6632564d4 not in changesets + file@?: rev 0 points to unexpected changeset 0 + (expected 1) + 1 warnings encountered! + 4 integrity errors encountered! + (first damaged changeset appears to be 0) + [1] + $ cp -r .hg/store-full/. .hg/store + +Corrupt manifest log base node to cause failure to read revision + + $ printf abcd | dd conv=notrunc of=.hg/store/00manifest.i bs=1 seek=16 \ + > 2> /dev/null + $ hg verify -q + manifest@0: reading delta d0b6632564d4: * (glob) + file@0: 362fef284ce2 not in manifests + 2 integrity errors encountered! + (first damaged changeset appears to be 0) + [1] + $ cp -r .hg/store-full/. .hg/store + +Corrupt filelog base node to cause failure to read revision + + $ printf abcd | dd conv=notrunc of=.hg/store/data/file.i bs=1 seek=16 \ + > 2> /dev/null + $ hg verify -q + file@0: unpacking 362fef284ce2: * (glob) + 1 integrity errors encountered! + (first damaged changeset appears to be 0) + [1] + $ cp -r .hg/store-full/. .hg/store + + $ cd .. + test changelog without a manifest $ hg init b
--- a/tests/test-wireproto.py Sun Mar 13 02:29:11 2016 +0100 +++ b/tests/test-wireproto.py Tue Mar 15 14:10:46 2016 -0700 @@ -1,5 +1,7 @@ from __future__ import absolute_import +import StringIO + from mercurial import wireproto class proto(object): @@ -21,6 +23,9 @@ def _call(self, cmd, **args): return wireproto.dispatch(self.serverrepo, proto(args), cmd) + def _callstream(self, cmd, **args): + return StringIO.StringIO(self._call(cmd, **args)) + @wireproto.batchable def greet(self, name): f = wireproto.future()
--- a/tests/test-wireproto.t Sun Mar 13 02:29:11 2016 +0100 +++ b/tests/test-wireproto.t Tue Mar 15 14:10:46 2016 -0700 @@ -19,7 +19,9 @@ HTTP: - $ hg serve -R repo -p $HGPORT -d --pid-file=hg1.pid -E error.log -A access.log + $ hg serve -R repo -p $HGPORT -d --pid-file=hg1.pid \ + > -E error.log -A access.log \ + > --config experimental.httppostargs=yes $ cat hg1.pid >> $DAEMON_PIDS $ hg debugwireargs http://localhost:$HGPORT/ un deux trois quatre @@ -37,6 +39,61 @@ $ cat error.log $ cat access.log * - - [*] "GET /?cmd=capabilities HTTP/1.1" 200 - (glob) + * - - [*] "POST /?cmd=debugwireargs HTTP/1.1" 200 - x-hgargs-post:39 (glob) + * - - [*] "POST /?cmd=debugwireargs HTTP/1.1" 200 - x-hgargs-post:39 (glob) + * - - [*] "GET /?cmd=capabilities HTTP/1.1" 200 - (glob) + * - - [*] "POST /?cmd=debugwireargs HTTP/1.1" 200 - x-hgargs-post:43 (glob) + * - - [*] "POST /?cmd=debugwireargs HTTP/1.1" 200 - x-hgargs-post:43 (glob) + * - - [*] "GET /?cmd=capabilities HTTP/1.1" 200 - (glob) + * - - [*] "POST /?cmd=debugwireargs HTTP/1.1" 200 - x-hgargs-post:27 (glob) + * - - [*] "POST /?cmd=debugwireargs HTTP/1.1" 200 - x-hgargs-post:27 (glob) + * - - [*] "GET /?cmd=capabilities HTTP/1.1" 200 - (glob) + * - - [*] "POST /?cmd=debugwireargs HTTP/1.1" 200 - x-hgargs-post:17 (glob) + * - - [*] "POST /?cmd=debugwireargs HTTP/1.1" 200 - x-hgargs-post:17 (glob) + * - - [*] "GET /?cmd=capabilities HTTP/1.1" 200 - (glob) + * - - [*] "POST /?cmd=debugwireargs HTTP/1.1" 200 - x-hgargs-post:17 (glob) + * - - [*] "POST /?cmd=debugwireargs HTTP/1.1" 200 - x-hgargs-post:17 (glob) + * - - [*] "GET /?cmd=capabilities HTTP/1.1" 200 - (glob) + * - - [*] "POST /?cmd=debugwireargs HTTP/1.1" 200 - x-hgargs-post:1033 (glob) + * - - [*] "POST /?cmd=debugwireargs HTTP/1.1" 200 - x-hgargs-post:1033 (glob) + +HTTP without args-in-POST: + $ hg serve -R repo -p $HGPORT1 -d --pid-file=hg1.pid -E error.log -A access.log + $ cat hg1.pid >> $DAEMON_PIDS + + $ hg debugwireargs http://localhost:$HGPORT1/ un deux 
trois quatre + un deux trois quatre None + $ hg debugwireargs http://localhost:$HGPORT1/ \ un deux trois\ qu\ \ atre + un deux trois qu atre None + $ hg debugwireargs http://localhost:$HGPORT1/ eins zwei --four vier + eins zwei None vier None + $ hg debugwireargs http://localhost:$HGPORT1/ eins zwei + eins zwei None None None + $ hg debugwireargs http://localhost:$HGPORT1/ eins zwei --five fuenf + eins zwei None None None + $ hg debugwireargs http://localhost:$HGPORT1/ un deux trois onethousandcharactersxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx + un deux trois 
onethousandcharactersxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx None + $ cat error.log + $ cat access.log + * - - [*] "GET /?cmd=capabilities HTTP/1.1" 200 - (glob) + * - - [*] "POST /?cmd=debugwireargs HTTP/1.1" 200 - x-hgargs-post:39 (glob) + * - - [*] "POST /?cmd=debugwireargs HTTP/1.1" 200 - x-hgargs-post:39 (glob) + * - - [*] "GET /?cmd=capabilities HTTP/1.1" 200 - (glob) + * - - [*] "POST /?cmd=debugwireargs HTTP/1.1" 200 - x-hgargs-post:43 (glob) + * - - [*] "POST /?cmd=debugwireargs HTTP/1.1" 200 - x-hgargs-post:43 (glob) + * - - [*] "GET /?cmd=capabilities HTTP/1.1" 200 - (glob) + * - - [*] "POST /?cmd=debugwireargs HTTP/1.1" 200 - x-hgargs-post:27 (glob) + * - - [*] "POST /?cmd=debugwireargs HTTP/1.1" 200 - x-hgargs-post:27 (glob) + * - - [*] "GET /?cmd=capabilities HTTP/1.1" 200 - (glob) + * - - [*] "POST /?cmd=debugwireargs HTTP/1.1" 200 - x-hgargs-post:17 (glob) + * - - [*] "POST /?cmd=debugwireargs HTTP/1.1" 200 - x-hgargs-post:17 (glob) + * - - [*] "GET /?cmd=capabilities HTTP/1.1" 200 - (glob) + * - - [*] "POST /?cmd=debugwireargs 
HTTP/1.1" 200 - x-hgargs-post:17 (glob) + * - - [*] "POST /?cmd=debugwireargs HTTP/1.1" 200 - x-hgargs-post:17 (glob) + * - - [*] "GET /?cmd=capabilities HTTP/1.1" 200 - (glob) + * - - [*] "POST /?cmd=debugwireargs HTTP/1.1" 200 - x-hgargs-post:1033 (glob) + * - - [*] "POST /?cmd=debugwireargs HTTP/1.1" 200 - x-hgargs-post:1033 (glob) + * - - [*] "GET /?cmd=capabilities HTTP/1.1" 200 - (glob) * - - [*] "GET /?cmd=debugwireargs HTTP/1.1" 200 - x-hgarg-1:four=quatre&one=un&three=trois&two=deux (glob) * - - [*] "GET /?cmd=debugwireargs HTTP/1.1" 200 - x-hgarg-1:four=quatre&one=un&three=trois&two=deux (glob) * - - [*] "GET /?cmd=capabilities HTTP/1.1" 200 - (glob)