# HG changeset patch
# User Augie Fackler
# Date 1516661582 18000
# Node ID 27b6df1b5adbdf647cf5c6675b40575e1b197c60
# Parent 87676e8ee05692bda0144e29b0478f2cc339aa4d# Parent 4fb2bb61597cb34c69c4af7a2d1fb0bb43145eb1
merge with stable to begin 4.5 freeze
# no-check-commit because it's a clean merge
diff -r 87676e8ee056 -r 27b6df1b5adb .hgignore
--- a/.hgignore Mon Jan 08 16:07:51 2018 -0800
+++ b/.hgignore Mon Jan 22 17:53:02 2018 -0500
@@ -24,6 +24,7 @@
tests/.hypothesis
tests/hypothesis-generated
tests/annotated
+tests/exceptions
tests/*.err
tests/htmlcov
build
@@ -55,6 +56,8 @@
locale/*/LC_MESSAGES/hg.mo
hgext/__index__.py
+rust/target/
+
# Generated wheels
wheelhouse/
diff -r 87676e8ee056 -r 27b6df1b5adb .jshintrc
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/.jshintrc Mon Jan 22 17:53:02 2018 -0500
@@ -0,0 +1,11 @@
+{
+ // Enforcing
+ "eqeqeq" : true, // true: Require triple equals (===) for comparison
+ "forin" : true, // true: Require filtering for..in loops with obj.hasOwnProperty()
+ "freeze" : true, // true: prohibits overwriting prototypes of native objects such as Array, Date etc.
+ "nonbsp" : true, // true: Prohibit "non-breaking whitespace" characters.
+ "undef" : true, // true: Require all non-global variables to be declared (prevents global leaks)
+
+ // Environments
+ "browser" : true // Web Browser (window, document, etc)
+}
diff -r 87676e8ee056 -r 27b6df1b5adb Makefile
--- a/Makefile Mon Jan 08 16:07:51 2018 -0800
+++ b/Makefile Mon Jan 22 17:53:02 2018 -0500
@@ -124,7 +124,7 @@
format-c:
clang-format --style file -i \
- `hg files 'set:(**.c or **.h) and not "listfile:contrib/clang-format-blacklist"'`
+ `hg files 'set:(**.c or **.cc or **.h) and not "listfile:contrib/clang-format-blacklist"'`
update-pot: i18n/hg.pot
diff -r 87676e8ee056 -r 27b6df1b5adb contrib/bash_completion
--- a/contrib/bash_completion Mon Jan 08 16:07:51 2018 -0800
+++ b/contrib/bash_completion Mon Jan 22 17:53:02 2018 -0500
@@ -296,7 +296,7 @@
merge)
_hg_labels
;;
- commit|ci|record)
+ commit|ci|record|amend)
_hg_status "mar"
;;
remove|rm)
@@ -309,7 +309,7 @@
_hg_status "mar"
;;
revert)
- _hg_debugpathcomplete
+ _hg_status "mard"
;;
clone)
local count=$(_hg_count_non_option)
diff -r 87676e8ee056 -r 27b6df1b5adb contrib/check-code.py
--- a/contrib/check-code.py Mon Jan 08 16:07:51 2018 -0800
+++ b/contrib/check-code.py Mon Jan 22 17:53:02 2018 -0500
@@ -135,7 +135,6 @@
(r'if\s*!', "don't use '!' to negate exit status"),
(r'/dev/u?random', "don't use entropy, use /dev/zero"),
(r'do\s*true;\s*done', "don't use true as loop body, use sleep 0"),
- (r'^( *)\t', "don't use tabs to indent"),
(r'sed (-e )?\'(\d+|/[^/]*/)i(?!\\\n)',
"put a backslash-escaped newline after sed 'i' command"),
(r'^diff *-\w*[uU].*$\n(^ \$ |^$)', "prefix diff -u/-U with cmp"),
@@ -148,7 +147,9 @@
(r'\bsed\b.*[^\\]\\n', "don't use 'sed ... \\n', use a \\ and a newline"),
(r'env.*-u', "don't use 'env -u VAR', use 'unset VAR'"),
(r'cp.* -r ', "don't use 'cp -r', use 'cp -R'"),
- (r'grep.* -[ABC] ', "don't use grep's context flags"),
+ (r'grep.* -[ABC]', "don't use grep's context flags"),
+ (r'find.*-printf',
+ "don't use 'find -printf', it doesn't exist on BSD find(1)"),
],
# warnings
[
@@ -165,7 +166,6 @@
(r"<<(\S+)((.|\n)*?\n\1)", rephere),
]
-winglobmsg = "use (glob) to match Windows paths too"
uprefix = r"^ \$ "
utestpats = [
[
@@ -181,25 +181,11 @@
(uprefix + r'.*:\.\S*/', "x:.y in a path does not work on msys, rewrite "
"as x://.y, or see `hg log -k msys` for alternatives", r'-\S+:\.|' #-Rxxx
'# no-msys'), # in test-pull.t which is skipped on windows
- (r'^ saved backup bundle to \$TESTTMP.*\.hg$', winglobmsg),
- (r'^ changeset .* references (corrupted|missing) \$TESTTMP/.*[^)]$',
- winglobmsg),
- (r'^ pulling from \$TESTTMP/.*[^)]$', winglobmsg,
- '\$TESTTMP/unix-repo$'), # in test-issue1802.t which skipped on windows
- (r'^ reverting (?!subrepo ).*/.*[^)]$', winglobmsg),
- (r'^ cloning subrepo \S+/.*[^)]$', winglobmsg),
- (r'^ pushing to \$TESTTMP/.*[^)]$', winglobmsg),
- (r'^ pushing subrepo \S+/\S+ to.*[^)]$', winglobmsg),
- (r'^ moving \S+/.*[^)]$', winglobmsg),
- (r'^ no changes made to subrepo since.*/.*[^)]$', winglobmsg),
- (r'^ .*: largefile \S+ not available from file:.*/.*[^)]$', winglobmsg),
- (r'^ .*file://\$TESTTMP',
- 'write "file:/*/$TESTTMP" + (glob) to match on windows too'),
(r'^ [^$>].*27\.0\.0\.1',
'use $LOCALIP not an explicit loopback address'),
- (r'^ [^$>].*\$LOCALIP.*[^)]$',
+ (r'^ (?![>$] ).*\$LOCALIP.*[^)]$',
'mark $LOCALIP output lines with (glob) to help tests in BSD jails'),
- (r'^ (cat|find): .*: No such file or directory',
+ (r'^ (cat|find): .*: \$ENOENT\$',
'use test -f to test for file existence'),
(r'^ diff -[^ -]*p',
"don't use (external) diff with -p for portability"),
@@ -223,6 +209,7 @@
]
]
+# transform plain test rules to unified test's
for i in [0, 1]:
for tp in testpats[i]:
p = tp[0]
@@ -233,6 +220,11 @@
p = r"^ [$>] .*(%s)" % p
utestpats[i].append((p, m) + tp[2:])
+# don't transform the following rules:
+# " > \t" and " \t" should be allowed in unified tests
+testpats[0].append((r'^( *)\t', "don't use tabs to indent"))
+utestpats[0].append((r'^( ?)\t', "don't use tabs to indent"))
+
utestfilters = [
(r"<<(\S+)((.|\n)*?\n > \1)", rephere),
(r"( +)(#([^!][^\n]*\S)?)", repcomment),
diff -r 87676e8ee056 -r 27b6df1b5adb contrib/debian/copyright
--- a/contrib/debian/copyright Mon Jan 08 16:07:51 2018 -0800
+++ b/contrib/debian/copyright Mon Jan 22 17:53:02 2018 -0500
@@ -3,7 +3,7 @@
Source: https://www.mercurial-scm.org/
Files: *
-Copyright: 2005-2017, Matt Mackall and others.
+Copyright: 2005-2018, Matt Mackall and others.
License: GPL-2+
This program is free software; you can redistribute it
and/or modify it under the terms of the GNU General Public
diff -r 87676e8ee056 -r 27b6df1b5adb contrib/fuzz/Makefile
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/contrib/fuzz/Makefile Mon Jan 22 17:53:02 2018 -0500
@@ -0,0 +1,20 @@
+bdiff.o: ../../mercurial/bdiff.c
+ clang -g -O1 -fsanitize=fuzzer-no-link,address -c -o bdiff.o \
+ ../../mercurial/bdiff.c
+
+bdiff: bdiff.cc bdiff.o
+ clang -DHG_FUZZER_INCLUDE_MAIN=1 -g -O1 -fsanitize=fuzzer-no-link,address \
+ -I../../mercurial bdiff.cc bdiff.o -o bdiff
+
+bdiff-oss-fuzz.o: ../../mercurial/bdiff.c
+ $$CC $$CFLAGS -c -o bdiff-oss-fuzz.o ../../mercurial/bdiff.c
+
+bdiff_fuzzer: bdiff.cc bdiff-oss-fuzz.o
+ $$CXX $$CXXFLAGS -std=c++11 -I../../mercurial bdiff.cc \
+ bdiff-oss-fuzz.o -lFuzzingEngine -o $$OUT/bdiff_fuzzer
+
+all: bdiff
+
+oss-fuzz: bdiff_fuzzer
+
+.PHONY: all oss-fuzz
diff -r 87676e8ee056 -r 27b6df1b5adb contrib/fuzz/bdiff.cc
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/contrib/fuzz/bdiff.cc Mon Jan 22 17:53:02 2018 -0500
@@ -0,0 +1,49 @@
+/*
+ * bdiff.cc - fuzzer harness for bdiff.c
+ *
+ * Copyright 2018, Google Inc.
+ *
+ * This software may be used and distributed according to the terms of
+ * the GNU General Public License, incorporated herein by reference.
+ */
+#include
+
+extern "C" {
+#include "bdiff.h"
+
+int LLVMFuzzerTestOneInput(const uint8_t *Data, size_t Size)
+{
+ if (!Size) {
+ return 0;
+ }
+ // figure out a random point in [0, Size] to split our input.
+ size_t split = Data[0] / 255.0 * Size;
+
+ // left input to diff is data[1:split]
+ const uint8_t *left = Data + 1;
+ // which has len split-1
+ size_t left_size = split - 1;
+ // right starts at the next byte after left ends
+ const uint8_t *right = left + left_size;
+ size_t right_size = Size - split;
+
+ struct bdiff_line *a, *b;
+ int an = bdiff_splitlines((const char *)left, split - 1, &a);
+ int bn = bdiff_splitlines((const char *)right, right_size, &b);
+ struct bdiff_hunk l;
+ bdiff_diff(a, an, b, bn, &l);
+ free(a);
+ free(b);
+ bdiff_freehunks(l.next);
+ return 0; // Non-zero return values are reserved for future use.
+}
+
+#ifdef HG_FUZZER_INCLUDE_MAIN
+int main(int argc, char **argv)
+{
+ const char data[] = "asdf";
+ return LLVMFuzzerTestOneInput((const uint8_t *)data, 4);
+}
+#endif
+
+} // extern "C"
diff -r 87676e8ee056 -r 27b6df1b5adb contrib/perf.py
--- a/contrib/perf.py Mon Jan 08 16:07:51 2018 -0800
+++ b/contrib/perf.py Mon Jan 22 17:53:02 2018 -0500
@@ -25,6 +25,7 @@
import random
import struct
import sys
+import threading
import time
from mercurial import (
changegroup,
@@ -488,6 +489,122 @@
timer(d)
fm.end()
+@command('perfbundleread', formatteropts, 'BUNDLE')
+def perfbundleread(ui, repo, bundlepath, **opts):
+ """Benchmark reading of bundle files.
+
+ This command is meant to isolate the I/O part of bundle reading as
+ much as possible.
+ """
+ from mercurial import (
+ bundle2,
+ exchange,
+ streamclone,
+ )
+
+ def makebench(fn):
+ def run():
+ with open(bundlepath, 'rb') as fh:
+ bundle = exchange.readbundle(ui, fh, bundlepath)
+ fn(bundle)
+
+ return run
+
+ def makereadnbytes(size):
+ def run():
+ with open(bundlepath, 'rb') as fh:
+ bundle = exchange.readbundle(ui, fh, bundlepath)
+ while bundle.read(size):
+ pass
+
+ return run
+
+ def makestdioread(size):
+ def run():
+ with open(bundlepath, 'rb') as fh:
+ while fh.read(size):
+ pass
+
+ return run
+
+ # bundle1
+
+ def deltaiter(bundle):
+ for delta in bundle.deltaiter():
+ pass
+
+ def iterchunks(bundle):
+ for chunk in bundle.getchunks():
+ pass
+
+ # bundle2
+
+ def forwardchunks(bundle):
+ for chunk in bundle._forwardchunks():
+ pass
+
+ def iterparts(bundle):
+ for part in bundle.iterparts():
+ pass
+
+ def iterpartsseekable(bundle):
+ for part in bundle.iterparts(seekable=True):
+ pass
+
+ def seek(bundle):
+ for part in bundle.iterparts(seekable=True):
+ part.seek(0, os.SEEK_END)
+
+ def makepartreadnbytes(size):
+ def run():
+ with open(bundlepath, 'rb') as fh:
+ bundle = exchange.readbundle(ui, fh, bundlepath)
+ for part in bundle.iterparts():
+ while part.read(size):
+ pass
+
+ return run
+
+ benches = [
+ (makestdioread(8192), 'read(8k)'),
+ (makestdioread(16384), 'read(16k)'),
+ (makestdioread(32768), 'read(32k)'),
+ (makestdioread(131072), 'read(128k)'),
+ ]
+
+ with open(bundlepath, 'rb') as fh:
+ bundle = exchange.readbundle(ui, fh, bundlepath)
+
+ if isinstance(bundle, changegroup.cg1unpacker):
+ benches.extend([
+ (makebench(deltaiter), 'cg1 deltaiter()'),
+ (makebench(iterchunks), 'cg1 getchunks()'),
+ (makereadnbytes(8192), 'cg1 read(8k)'),
+ (makereadnbytes(16384), 'cg1 read(16k)'),
+ (makereadnbytes(32768), 'cg1 read(32k)'),
+ (makereadnbytes(131072), 'cg1 read(128k)'),
+ ])
+ elif isinstance(bundle, bundle2.unbundle20):
+ benches.extend([
+ (makebench(forwardchunks), 'bundle2 forwardchunks()'),
+ (makebench(iterparts), 'bundle2 iterparts()'),
+ (makebench(iterpartsseekable), 'bundle2 iterparts() seekable'),
+ (makebench(seek), 'bundle2 part seek()'),
+ (makepartreadnbytes(8192), 'bundle2 part read(8k)'),
+ (makepartreadnbytes(16384), 'bundle2 part read(16k)'),
+ (makepartreadnbytes(32768), 'bundle2 part read(32k)'),
+ (makepartreadnbytes(131072), 'bundle2 part read(128k)'),
+ ])
+ elif isinstance(bundle, streamclone.streamcloneapplier):
+ raise error.Abort('stream clone bundles not supported')
+ else:
+ raise error.Abort('unhandled bundle type: %s' % type(bundle))
+
+ for fn, title in benches:
+ timer, fm = gettimer(ui, opts)
+ timer(fn, title=title)
+ fm.end()
+
@command('perfchangegroupchangelog', formatteropts +
[('', 'version', '02', 'changegroup version'),
('r', 'rev', '', 'revisions to add to changegroup')])
@@ -525,8 +642,8 @@
dirstate = repo.dirstate
'a' in dirstate
def d():
- dirstate.dirs()
- del dirstate._map.dirs
+ dirstate.hasdir('a')
+ del dirstate._map._dirs
timer(d)
fm.end()
@@ -545,8 +662,8 @@
timer, fm = gettimer(ui, opts)
"a" in repo.dirstate
def d():
- "a" in repo.dirstate._map.dirs
- del repo.dirstate._map.dirs
+ repo.dirstate.hasdir("a")
+ del repo.dirstate._map._dirs
timer(d)
fm.end()
@@ -569,7 +686,7 @@
def d():
dirstate._map.dirfoldmap.get('a')
del dirstate._map.dirfoldmap
- del dirstate._map.dirs
+ del dirstate._map._dirs
timer(d)
fm.end()
@@ -817,11 +934,25 @@
timer(d)
fm.end()
+def _bdiffworker(q, ready, done):
+ while not done.is_set():
+ pair = q.get()
+ while pair is not None:
+ mdiff.textdiff(*pair)
+ q.task_done()
+ pair = q.get()
+ q.task_done() # for the None one
+ with ready:
+ ready.wait()
+
@command('perfbdiff', revlogopts + formatteropts + [
('', 'count', 1, 'number of revisions to test (when using --startrev)'),
- ('', 'alldata', False, 'test bdiffs for all associated revisions')],
+ ('', 'alldata', False, 'test bdiffs for all associated revisions'),
+ ('', 'threads', 0, 'number of thread to use (disable with 0)'),
+ ],
+
'-c|-m|FILE REV')
-def perfbdiff(ui, repo, file_, rev=None, count=None, **opts):
+def perfbdiff(ui, repo, file_, rev=None, count=None, threads=0, **opts):
"""benchmark a bdiff between revisions
By default, benchmark a bdiff between its delta parent and itself.
@@ -867,14 +998,39 @@
dp = r.deltaparent(rev)
textpairs.append((r.revision(dp), r.revision(rev)))
- def d():
- for pair in textpairs:
- mdiff.textdiff(*pair)
-
+ withthreads = threads > 0
+ if not withthreads:
+ def d():
+ for pair in textpairs:
+ mdiff.textdiff(*pair)
+ else:
+ q = util.queue()
+ for i in xrange(threads):
+ q.put(None)
+ ready = threading.Condition()
+ done = threading.Event()
+ for i in xrange(threads):
+ threading.Thread(target=_bdiffworker, args=(q, ready, done)).start()
+ q.join()
+ def d():
+ for pair in textpairs:
+ q.put(pair)
+ for i in xrange(threads):
+ q.put(None)
+ with ready:
+ ready.notify_all()
+ q.join()
timer, fm = gettimer(ui, opts)
timer(d)
fm.end()
+ if withthreads:
+ done.set()
+ for i in xrange(threads):
+ q.put(None)
+ with ready:
+ ready.notify_all()
+
@command('perfdiffwd', formatteropts)
def perfdiffwd(ui, repo, **opts):
"""Profile diff of working directory changes"""
diff -r 87676e8ee056 -r 27b6df1b5adb contrib/phabricator.py
--- a/contrib/phabricator.py Mon Jan 08 16:07:51 2018 -0800
+++ b/contrib/phabricator.py Mon Jan 22 17:53:02 2018 -0500
@@ -166,7 +166,7 @@
_differentialrevisiontagre = re.compile('\AD([1-9][0-9]*)\Z')
_differentialrevisiondescre = re.compile(
- '^Differential Revision:\s*(?:.*)D([1-9][0-9]*)$', re.M)
+ '^Differential Revision:\s*(?P(?:.*)D(?P[1-9][0-9]*))$', re.M)
def getoldnodedrevmap(repo, nodelist):
"""find previous nodes that has been sent to Phabricator
@@ -207,7 +207,7 @@
# Check commit message
m = _differentialrevisiondescre.search(ctx.description())
if m:
- toconfirm[node] = (1, set(precnodes), int(m.group(1)))
+ toconfirm[node] = (1, set(precnodes), int(m.group('id')))
# Double check if tags are genuine by collecting all old nodes from
# Phabricator, and expect precursors overlap with it.
@@ -442,7 +442,7 @@
# Create a local tag to note the association, if commit message
# does not have it already
m = _differentialrevisiondescre.search(ctx.description())
- if not m or int(m.group(1)) != newrevid:
+ if not m or int(m.group('id')) != newrevid:
tagname = 'D%d' % newrevid
tags.tag(repo, tagname, ctx.node(), message=None, user=None,
date=None, local=True)
@@ -865,3 +865,17 @@
params = {'objectIdentifier': drev[r'phid'],
'transactions': actions}
callconduit(repo, 'differential.revision.edit', params)
+
+templatekeyword = registrar.templatekeyword()
+
+@templatekeyword('phabreview')
+def template_review(repo, ctx, revcache, **args):
+ """:phabreview: Object describing the review for this changeset.
+ Has attributes `url` and `id`.
+ """
+ m = _differentialrevisiondescre.search(ctx.description())
+ if m:
+ return {
+ 'url': m.group('url'),
+ 'id': "D{}".format(m.group('id')),
+ }
diff -r 87676e8ee056 -r 27b6df1b5adb contrib/python3-whitelist
--- a/contrib/python3-whitelist Mon Jan 08 16:07:51 2018 -0800
+++ b/contrib/python3-whitelist Mon Jan 22 17:53:02 2018 -0500
@@ -1,5 +1,8 @@
+test-add.t
+test-addremove-similar.t
test-addremove.t
test-ancestor.py
+test-automv.t
test-backwards-remove.t
test-bheads.t
test-bisect2.t
@@ -7,6 +10,7 @@
test-bookmarks-strip.t
test-branch-tag-confict.t
test-casecollision.t
+test-cat.t
test-changelog-exec.t
test-check-commit.t
test-check-execute.t
@@ -14,7 +18,9 @@
test-check-pyflakes.t
test-check-pylint.t
test-check-shbang.t
+test-children.t
test-commit-unresolved.t
+test-completion.t
test-contrib-check-code.t
test-contrib-check-commit.t
test-debugrename.t
@@ -24,6 +30,8 @@
test-diff-newlines.t
test-diff-reverse.t
test-diff-subdir.t
+test-diffdir.t
+test-directaccess.t
test-dirstate-nonnormalset.t
test-doctest.py
test-double-merge.t
@@ -33,11 +41,17 @@
test-empty.t
test-encoding-func.py
test-excessive-merge.t
+test-execute-bit.t
+test-gpg.t
test-hghave.t
test-imports-checker.t
test-issue1089.t
+test-issue1175.t
+test-issue1502.t
+test-issue1802.t
test-issue1877.t
test-issue1993.t
+test-issue522.t
test-issue612.t
test-issue619.t
test-issue672.t
@@ -46,30 +60,72 @@
test-locate.t
test-lrucachedict.py
test-manifest.py
+test-manifest-merging.t
test-match.py
test-merge-default.t
+test-merge-internal-tools-pattern.t
+test-merge-remove.t
+test-merge-revert.t
+test-merge-revert2.t
+test-merge-subrepos.t
+test-merge10.t
test-merge2.t
test-merge4.t
test-merge5.t
+test-merge6.t
+test-merge7.t
+test-merge8.t
+test-mq-qimport-fail-cleanup.t
+test-obshistory.t
test-permissions.t
+test-push-checkheads-partial-C1.t
+test-push-checkheads-partial-C2.t
+test-push-checkheads-partial-C3.t
+test-push-checkheads-partial-C4.t
test-push-checkheads-pruned-B1.t
+test-push-checkheads-pruned-B2.t
+test-push-checkheads-pruned-B3.t
+test-push-checkheads-pruned-B4.t
+test-push-checkheads-pruned-B5.t
test-push-checkheads-pruned-B6.t
test-push-checkheads-pruned-B7.t
+test-push-checkheads-pruned-B8.t
test-push-checkheads-superceed-A1.t
+test-push-checkheads-superceed-A2.t
+test-push-checkheads-superceed-A3.t
test-push-checkheads-superceed-A4.t
test-push-checkheads-superceed-A5.t
+test-push-checkheads-superceed-A6.t
+test-push-checkheads-superceed-A7.t
test-push-checkheads-superceed-A8.t
test-push-checkheads-unpushed-D1.t
+test-push-checkheads-unpushed-D2.t
+test-push-checkheads-unpushed-D3.t
+test-push-checkheads-unpushed-D4.t
+test-push-checkheads-unpushed-D5.t
test-push-checkheads-unpushed-D6.t
test-push-checkheads-unpushed-D7.t
+test-record.t
+test-rename-dir-merge.t
test-rename-merge1.t
test-rename.t
+test-revert-flags.t
+test-revert-unknown.t
+test-revlog-group-emptyiter.t
+test-revlog-mmapindex.t
test-revlog-packentry.t
test-run-tests.py
test-show-stack.t
+test-simple-update.t
+test-sparse-clear.t
+test-sparse-merges.t
+test-sparse-requirement.t
+test-sparse-verbose-json.t
test-status-terse.t
-test-terse-status.t
+test-uncommit.t
test-unified-test.t
+test-unrelated-pull.t
test-update-issue1456.t
+test-update-names.t
test-update-reverse.t
test-xdg.t
diff -r 87676e8ee056 -r 27b6df1b5adb contrib/showstack.py
--- a/contrib/showstack.py Mon Jan 08 16:07:51 2018 -0800
+++ b/contrib/showstack.py Mon Jan 22 17:53:02 2018 -0500
@@ -1,6 +1,8 @@
# showstack.py - extension to dump a Python stack trace on signal
#
# binds to both SIGQUIT (Ctrl-\) and SIGINFO (Ctrl-T on BSDs)
+"""dump stack trace when receiving SIGQUIT (Ctrl-\) and SIGINFO (Ctrl-T on BSDs)
+"""
from __future__ import absolute_import
import signal
diff -r 87676e8ee056 -r 27b6df1b5adb contrib/synthrepo.py
--- a/contrib/synthrepo.py Mon Jan 08 16:07:51 2018 -0800
+++ b/contrib/synthrepo.py Mon Jan 22 17:53:02 2018 -0500
@@ -369,14 +369,14 @@
while not validpath(path):
path = pickpath()
data = '%s contents\n' % path
- files[path] = context.memfilectx(repo, path, data)
+ files[path] = data
dir = os.path.dirname(path)
while dir and dir not in dirs:
dirs.add(dir)
dir = os.path.dirname(dir)
def filectxfn(repo, memctx, path):
- return files[path]
+ return context.memfilectx(repo, memctx, path, files[path])
ui.progress(_synthesizing, None)
message = 'synthesized wide repo with %d files' % (len(files),)
@@ -444,14 +444,12 @@
for __ in xrange(add):
lines.insert(random.randint(0, len(lines)), makeline())
path = fctx.path()
- changes[path] = context.memfilectx(repo, path,
- '\n'.join(lines) + '\n')
+ changes[path] = '\n'.join(lines) + '\n'
for __ in xrange(pick(filesremoved)):
path = random.choice(mfk)
for __ in xrange(10):
path = random.choice(mfk)
if path not in changes:
- changes[path] = None
break
if filesadded:
dirs = list(pctx.dirs())
@@ -466,9 +464,11 @@
pathstr = '/'.join(filter(None, path))
data = '\n'.join(makeline()
for __ in xrange(pick(linesinfilesadded))) + '\n'
- changes[pathstr] = context.memfilectx(repo, pathstr, data)
+ changes[pathstr] = data
def filectxfn(repo, memctx, path):
- return changes[path]
+ if path not in changes:
+ return None
+ return context.memfilectx(repo, memctx, path, changes[path])
if not changes:
continue
if revs:
diff -r 87676e8ee056 -r 27b6df1b5adb contrib/win32/ReadMe.html
--- a/contrib/win32/ReadMe.html Mon Jan 08 16:07:51 2018 -0800
+++ b/contrib/win32/ReadMe.html Mon Jan 22 17:53:02 2018 -0500
@@ -140,7 +140,7 @@
- Mercurial is Copyright 2005-2017 Matt Mackall and others. See
+ Mercurial is Copyright 2005-2018 Matt Mackall and others. See
the Contributors.txt file for a list of contributors.
diff -r 87676e8ee056 -r 27b6df1b5adb contrib/win32/mercurial.iss
--- a/contrib/win32/mercurial.iss Mon Jan 08 16:07:51 2018 -0800
+++ b/contrib/win32/mercurial.iss Mon Jan 22 17:53:02 2018 -0500
@@ -21,7 +21,7 @@
#endif
[Setup]
-AppCopyright=Copyright 2005-2017 Matt Mackall and others
+AppCopyright=Copyright 2005-2018 Matt Mackall and others
AppName=Mercurial
AppVersion={#VERSION}
#if ARCH == "x64"
@@ -45,7 +45,7 @@
DefaultDirName={pf}\Mercurial
SourceDir=..\..
VersionInfoDescription=Mercurial distributed SCM (version {#VERSION})
-VersionInfoCopyright=Copyright 2005-2017 Matt Mackall and others
+VersionInfoCopyright=Copyright 2005-2018 Matt Mackall and others
VersionInfoCompany=Matt Mackall and others
InternalCompressLevel=max
SolidCompression=true
diff -r 87676e8ee056 -r 27b6df1b5adb contrib/wix/COPYING.rtf
Binary file contrib/wix/COPYING.rtf has changed
diff -r 87676e8ee056 -r 27b6df1b5adb contrib/wix/help.wxs
--- a/contrib/wix/help.wxs Mon Jan 08 16:07:51 2018 -0800
+++ b/contrib/wix/help.wxs Mon Jan 22 17:53:02 2018 -0500
@@ -23,6 +23,7 @@
+
diff -r 87676e8ee056 -r 27b6df1b5adb contrib/wix/templates.wxs
--- a/contrib/wix/templates.wxs Mon Jan 08 16:07:51 2018 -0800
+++ b/contrib/wix/templates.wxs Mon Jan 22 17:53:02 2018 -0500
@@ -42,6 +42,7 @@
+
@@ -85,6 +86,7 @@
+
@@ -114,6 +116,7 @@
+
@@ -143,6 +146,7 @@
+
@@ -208,6 +212,7 @@
+
@@ -225,7 +230,6 @@
-
diff -r 87676e8ee056 -r 27b6df1b5adb hgdemandimport/demandimportpy3.py
--- a/hgdemandimport/demandimportpy3.py Mon Jan 08 16:07:51 2018 -0800
+++ b/hgdemandimport/demandimportpy3.py Mon Jan 22 17:53:02 2018 -0500
@@ -46,7 +46,7 @@
super().exec_module(module)
# This is 3.6+ because with Python 3.5 it isn't possible to lazily load
-# extensions. See the discussion in https://python.org/sf/26186 for more.
+# extensions. See the discussion in https://bugs.python.org/issue26186 for more.
_extensions_loader = _lazyloaderex.factory(
importlib.machinery.ExtensionFileLoader)
_bytecode_loader = _lazyloaderex.factory(
diff -r 87676e8ee056 -r 27b6df1b5adb hgext/amend.py
--- a/hgext/amend.py Mon Jan 08 16:07:51 2018 -0800
+++ b/hgext/amend.py Mon Jan 22 17:53:02 2018 -0500
@@ -17,6 +17,7 @@
cmdutil,
commands,
error,
+ pycompat,
registrar,
)
@@ -46,10 +47,11 @@
See :hg:`help commit` for more details.
"""
+ opts = pycompat.byteskwargs(opts)
if len(opts['note']) > 255:
raise error.Abort(_("cannot store a note of more than 255 bytes"))
with repo.wlock(), repo.lock():
if not opts.get('logfile'):
opts['message'] = opts.get('message') or repo['.'].description()
opts['amend'] = True
- return commands._docommit(ui, repo, *pats, **opts)
+ return commands._docommit(ui, repo, *pats, **pycompat.strkwargs(opts))
diff -r 87676e8ee056 -r 27b6df1b5adb hgext/automv.py
--- a/hgext/automv.py Mon Jan 08 16:07:51 2018 -0800
+++ b/hgext/automv.py Mon Jan 22 17:53:02 2018 -0500
@@ -32,6 +32,7 @@
copies,
error,
extensions,
+ pycompat,
registrar,
scmutil,
similar
@@ -53,6 +54,7 @@
def mvcheck(orig, ui, repo, *pats, **opts):
"""Hook to check for moves at commit time"""
+ opts = pycompat.byteskwargs(opts)
renames = None
disabled = opts.pop('no_automv', False)
if not disabled:
@@ -68,7 +70,7 @@
with repo.wlock():
if renames is not None:
scmutil._markchanges(repo, (), (), renames)
- return orig(ui, repo, *pats, **opts)
+ return orig(ui, repo, *pats, **pycompat.strkwargs(opts))
def _interestingfiles(repo, matcher):
"""Find what files were added or removed in this commit.
diff -r 87676e8ee056 -r 27b6df1b5adb hgext/blackbox.py
--- a/hgext/blackbox.py Mon Jan 08 16:07:51 2018 -0800
+++ b/hgext/blackbox.py Mon Jan 22 17:53:02 2018 -0500
@@ -44,6 +44,7 @@
from mercurial.node import hex
from mercurial import (
+ encoding,
registrar,
ui as uimod,
util,
@@ -129,6 +130,11 @@
def track(self):
return self.configlist('blackbox', 'track')
+ def debug(self, *msg, **opts):
+ super(blackboxui, self).debug(*msg, **opts)
+ if self.debugflag:
+ self.log('debug', '%s', ''.join(msg))
+
def log(self, event, *msg, **opts):
global lastui
super(blackboxui, self).log(event, *msg, **opts)
@@ -182,7 +188,7 @@
fp.write(fmt % args)
except (IOError, OSError) as err:
self.debug('warning: cannot write to blackbox.log: %s\n' %
- err.strerror)
+ encoding.strtolocal(err.strerror))
# do not restore _bbinlog intentionally to avoid failed
# logging again
else:
@@ -226,7 +232,7 @@
if not repo.vfs.exists('blackbox.log'):
return
- limit = opts.get('limit')
+ limit = opts.get(r'limit')
fp = repo.vfs('blackbox.log', 'r')
lines = fp.read().split('\n')
diff -r 87676e8ee056 -r 27b6df1b5adb hgext/bugzilla.py
--- a/hgext/bugzilla.py Mon Jan 08 16:07:51 2018 -0800
+++ b/hgext/bugzilla.py Mon Jan 22 17:53:02 2018 -0500
@@ -580,7 +580,7 @@
self.ui.warn(_("Bugzilla/MySQL cannot update bug state\n"))
(user, userid) = self.get_bugzilla_user(committer)
- now = time.strftime('%Y-%m-%d %H:%M:%S')
+ now = time.strftime(r'%Y-%m-%d %H:%M:%S')
self.run('''insert into longdescs
(bug_id, who, bug_when, thetext)
values (%s, %s, %s, %s)''',
diff -r 87676e8ee056 -r 27b6df1b5adb hgext/children.py
--- a/hgext/children.py Mon Jan 08 16:07:51 2018 -0800
+++ b/hgext/children.py Mon Jan 22 17:53:02 2018 -0500
@@ -19,6 +19,7 @@
from mercurial.i18n import _
from mercurial import (
cmdutil,
+ pycompat,
registrar,
)
@@ -55,6 +56,7 @@
See :hg:`help log` and :hg:`help revsets.children`.
"""
+ opts = pycompat.byteskwargs(opts)
rev = opts.get('rev')
if file_:
fctx = repo.filectx(file_, changeid=rev)
diff -r 87676e8ee056 -r 27b6df1b5adb hgext/churn.py
--- a/hgext/churn.py Mon Jan 08 16:07:51 2018 -0800
+++ b/hgext/churn.py Mon Jan 22 17:53:02 2018 -0500
@@ -19,6 +19,7 @@
cmdutil,
encoding,
patch,
+ pycompat,
registrar,
scmutil,
util,
@@ -45,6 +46,7 @@
def countrate(ui, repo, amap, *pats, **opts):
"""Calculate stats"""
+ opts = pycompat.byteskwargs(opts)
if opts.get('dateformat'):
def getkey(ctx):
t, tz = ctx.date()
@@ -154,7 +156,7 @@
return s + " " * (l - encoding.colwidth(s))
amap = {}
- aliases = opts.get('aliases')
+ aliases = opts.get(r'aliases')
if not aliases and os.path.exists(repo.wjoin('.hgchurn')):
aliases = repo.wjoin('.hgchurn')
if aliases:
@@ -172,7 +174,7 @@
if not rate:
return
- if opts.get('sort'):
+ if opts.get(r'sort'):
rate.sort()
else:
rate.sort(key=lambda x: (-sum(x[1]), x))
@@ -185,7 +187,7 @@
ui.debug("assuming %i character terminal\n" % ttywidth)
width = ttywidth - maxname - 2 - 2 - 2
- if opts.get('diffstat'):
+ if opts.get(r'diffstat'):
width -= 15
def format(name, diffstat):
added, removed = diffstat
diff -r 87676e8ee056 -r 27b6df1b5adb hgext/commitextras.py
--- a/hgext/commitextras.py Mon Jan 08 16:07:51 2018 -0800
+++ b/hgext/commitextras.py Mon Jan 22 17:53:02 2018 -0500
@@ -46,7 +46,7 @@
origcommit = repo.commit
try:
def _wrappedcommit(*innerpats, **inneropts):
- extras = opts.get('extra')
+ extras = opts.get(r'extra')
if extras:
for raw in extras:
if '=' not in raw:
@@ -65,7 +65,7 @@
msg = _("key '%s' is used internally, can't be set "
"manually")
raise error.Abort(msg % k)
- inneropts['extra'][k] = v
+ inneropts[r'extra'][k] = v
return origcommit(*innerpats, **inneropts)
# This __dict__ logic is needed because the normal
diff -r 87676e8ee056 -r 27b6df1b5adb hgext/convert/bzr.py
--- a/hgext/convert/bzr.py Mon Jan 08 16:07:51 2018 -0800
+++ b/hgext/convert/bzr.py Mon Jan 22 17:53:02 2018 -0500
@@ -44,8 +44,8 @@
class bzr_source(common.converter_source):
"""Reads Bazaar repositories by using the Bazaar Python libraries"""
- def __init__(self, ui, path, revs=None):
- super(bzr_source, self).__init__(ui, path, revs=revs)
+ def __init__(self, ui, repotype, path, revs=None):
+ super(bzr_source, self).__init__(ui, repotype, path, revs=revs)
if not os.path.exists(os.path.join(path, '.bzr')):
raise common.NoRepo(_('%s does not look like a Bazaar repository')
diff -r 87676e8ee056 -r 27b6df1b5adb hgext/convert/common.py
--- a/hgext/convert/common.py Mon Jan 08 16:07:51 2018 -0800
+++ b/hgext/convert/common.py Mon Jan 22 17:53:02 2018 -0500
@@ -73,12 +73,13 @@
class converter_source(object):
"""Conversion source interface"""
- def __init__(self, ui, path=None, revs=None):
+ def __init__(self, ui, repotype, path=None, revs=None):
"""Initialize conversion source (or raise NoRepo("message")
exception if path is not a valid repository)"""
self.ui = ui
self.path = path
self.revs = revs
+ self.repotype = repotype
self.encoding = 'utf-8'
@@ -218,7 +219,7 @@
class converter_sink(object):
"""Conversion sink (target) interface"""
- def __init__(self, ui, path):
+ def __init__(self, ui, repotype, path):
"""Initialize conversion sink (or raise NoRepo("message")
exception if path is not a valid repository)
@@ -227,6 +228,7 @@
self.ui = ui
self.path = path
self.created = []
+ self.repotype = repotype
def revmapfile(self):
"""Path to a file that will contain lines
diff -r 87676e8ee056 -r 27b6df1b5adb hgext/convert/convcmd.py
--- a/hgext/convert/convcmd.py Mon Jan 08 16:07:51 2018 -0800
+++ b/hgext/convert/convcmd.py Mon Jan 22 17:53:02 2018 -0500
@@ -6,6 +6,7 @@
# GNU General Public License version 2 or any later version.
from __future__ import absolute_import
+import collections
import os
import shlex
import shutil
@@ -15,6 +16,7 @@
encoding,
error,
hg,
+ scmutil,
util,
)
@@ -114,7 +116,7 @@
for name, source, sortmode in source_converters:
try:
if not type or name == type:
- return source(ui, path, revs), sortmode
+ return source(ui, name, path, revs), sortmode
except (NoRepo, MissingTool) as inst:
exceptions.append(inst)
if not ui.quiet:
@@ -128,7 +130,7 @@
for name, sink in sink_converters:
try:
if not type or name == type:
- return sink(ui, path)
+ return sink(ui, name, path)
except NoRepo as inst:
ui.note(_("convert: %s\n") % inst)
except MissingTool as inst:
@@ -289,13 +291,13 @@
revisions without parents. 'parents' must be a mapping of revision
identifier to its parents ones.
"""
- visit = sorted(parents)
+ visit = collections.deque(sorted(parents))
seen = set()
children = {}
roots = []
while visit:
- n = visit.pop(0)
+ n = visit.popleft()
if n in seen:
continue
seen.add(n)
@@ -449,7 +451,7 @@
commit = self.commitcache[rev]
full = self.opts.get('full')
changes = self.source.getchanges(rev, full)
- if isinstance(changes, basestring):
+ if isinstance(changes, bytes):
if changes == SKIPREV:
dest = SKIPREV
else:
@@ -575,6 +577,7 @@
ui.status(_("assuming destination %s\n") % dest)
destc = convertsink(ui, dest, opts.get('dest_type'))
+ destc = scmutil.wrapconvertsink(destc)
try:
srcc, defaultsort = convertsource(ui, src, opts.get('source_type'),
diff -r 87676e8ee056 -r 27b6df1b5adb hgext/convert/cvs.py
--- a/hgext/convert/cvs.py Mon Jan 08 16:07:51 2018 -0800
+++ b/hgext/convert/cvs.py Mon Jan 22 17:53:02 2018 -0500
@@ -32,8 +32,8 @@
NoRepo = common.NoRepo
class convert_cvs(converter_source):
- def __init__(self, ui, path, revs=None):
- super(convert_cvs, self).__init__(ui, path, revs=revs)
+ def __init__(self, ui, repotype, path, revs=None):
+ super(convert_cvs, self).__init__(ui, repotype, path, revs=revs)
cvs = os.path.join(path, "CVS")
if not os.path.exists(cvs):
diff -r 87676e8ee056 -r 27b6df1b5adb hgext/convert/darcs.py
--- a/hgext/convert/darcs.py Mon Jan 08 16:07:51 2018 -0800
+++ b/hgext/convert/darcs.py Mon Jan 22 17:53:02 2018 -0500
@@ -40,8 +40,8 @@
pass
class darcs_source(common.converter_source, common.commandline):
- def __init__(self, ui, path, revs=None):
- common.converter_source.__init__(self, ui, path, revs=revs)
+ def __init__(self, ui, repotype, path, revs=None):
+ common.converter_source.__init__(self, ui, repotype, path, revs=revs)
common.commandline.__init__(self, ui, 'darcs')
# check for _darcs, ElementTree so that we can easily skip
diff -r 87676e8ee056 -r 27b6df1b5adb hgext/convert/filemap.py
--- a/hgext/convert/filemap.py Mon Jan 08 16:07:51 2018 -0800
+++ b/hgext/convert/filemap.py Mon Jan 22 17:53:02 2018 -0500
@@ -172,7 +172,7 @@
class filemap_source(common.converter_source):
def __init__(self, ui, baseconverter, filemap):
- super(filemap_source, self).__init__(ui)
+ super(filemap_source, self).__init__(ui, baseconverter.repotype)
self.base = baseconverter
self.filemapper = filemapper(ui, filemap)
self.commits = {}
diff -r 87676e8ee056 -r 27b6df1b5adb hgext/convert/git.py
--- a/hgext/convert/git.py Mon Jan 08 16:07:51 2018 -0800
+++ b/hgext/convert/git.py Mon Jan 22 17:53:02 2018 -0500
@@ -66,8 +66,8 @@
def gitpipe(self, *args, **kwargs):
return self._gitcmd(self._run3, *args, **kwargs)
- def __init__(self, ui, path, revs=None):
- super(convert_git, self).__init__(ui, path, revs=revs)
+ def __init__(self, ui, repotype, path, revs=None):
+ super(convert_git, self).__init__(ui, repotype, path, revs=revs)
common.commandline.__init__(self, ui, 'git')
# Pass an absolute path to git to prevent from ever being interpreted
@@ -342,13 +342,15 @@
p = v.split()
tm, tz = p[-2:]
author = " ".join(p[:-2])
- if author[0] == "<": author = author[1:-1]
+ if author[0] == "<":
+ author = author[1:-1]
author = self.recode(author)
if n == "committer":
p = v.split()
tm, tz = p[-2:]
committer = " ".join(p[:-2])
- if committer[0] == "<": committer = committer[1:-1]
+ if committer[0] == "<":
+ committer = committer[1:-1]
committer = self.recode(committer)
if n == "parent":
parents.append(v)
diff -r 87676e8ee056 -r 27b6df1b5adb hgext/convert/gnuarch.py
--- a/hgext/convert/gnuarch.py Mon Jan 08 16:07:51 2018 -0800
+++ b/hgext/convert/gnuarch.py Mon Jan 22 17:53:02 2018 -0500
@@ -7,7 +7,7 @@
# GNU General Public License version 2 or any later version.
from __future__ import absolute_import
-import email
+import email.parser as emailparser
import os
import shutil
import stat
@@ -36,8 +36,8 @@
self.ren_files = {}
self.ren_dirs = {}
- def __init__(self, ui, path, revs=None):
- super(gnuarch_source, self).__init__(ui, path, revs=revs)
+ def __init__(self, ui, repotype, path, revs=None):
+ super(gnuarch_source, self).__init__(ui, repotype, path, revs=revs)
if not os.path.exists(os.path.join(path, '{arch}')):
raise common.NoRepo(_("%s does not look like a GNU Arch repository")
@@ -63,7 +63,7 @@
self.changes = {}
self.parents = {}
self.tags = {}
- self.catlogparser = email.Parser.Parser()
+ self.catlogparser = emailparser.Parser()
self.encoding = encoding.encoding
self.archives = []
diff -r 87676e8ee056 -r 27b6df1b5adb hgext/convert/hg.py
--- a/hgext/convert/hg.py Mon Jan 08 16:07:51 2018 -0800
+++ b/hgext/convert/hg.py Mon Jan 22 17:53:02 2018 -0500
@@ -45,8 +45,8 @@
sha1re = re.compile(r'\b[0-9a-f]{12,40}\b')
class mercurial_sink(common.converter_sink):
- def __init__(self, ui, path):
- common.converter_sink.__init__(self, ui, path)
+ def __init__(self, ui, repotype, path):
+ common.converter_sink.__init__(self, ui, repotype, path)
self.branchnames = ui.configbool('convert', 'hg.usebranchnames')
self.clonebranches = ui.configbool('convert', 'hg.clonebranches')
self.tagsbranch = ui.config('convert', 'hg.tagsbranch')
@@ -253,7 +253,7 @@
data = self._rewritetags(source, revmap, data)
if f == '.hgsubstate':
data = self._rewritesubstate(source, data)
- return context.memfilectx(self.repo, f, data, 'l' in mode,
+ return context.memfilectx(self.repo, memctx, f, data, 'l' in mode,
'x' in mode, copies.get(f))
pl = []
@@ -401,7 +401,7 @@
data = "".join(newlines)
def getfilectx(repo, memctx, f):
- return context.memfilectx(repo, f, data, False, False, None)
+ return context.memfilectx(repo, memctx, f, data, False, False, None)
self.ui.status(_("updating tags\n"))
date = "%s 0" % int(time.mktime(time.gmtime()))
@@ -444,8 +444,8 @@
return rev in self.repo
class mercurial_source(common.converter_source):
- def __init__(self, ui, path, revs=None):
- common.converter_source.__init__(self, ui, path, revs)
+ def __init__(self, ui, repotype, path, revs=None):
+ common.converter_source.__init__(self, ui, repotype, path, revs)
self.ignoreerrors = ui.configbool('convert', 'hg.ignoreerrors')
self.ignored = set()
self.saverev = ui.configbool('convert', 'hg.saverev')
diff -r 87676e8ee056 -r 27b6df1b5adb hgext/convert/monotone.py
--- a/hgext/convert/monotone.py Mon Jan 08 16:07:51 2018 -0800
+++ b/hgext/convert/monotone.py Mon Jan 22 17:53:02 2018 -0500
@@ -19,8 +19,8 @@
from . import common
class monotone_source(common.converter_source, common.commandline):
- def __init__(self, ui, path=None, revs=None):
- common.converter_source.__init__(self, ui, path, revs)
+ def __init__(self, ui, repotype, path=None, revs=None):
+ common.converter_source.__init__(self, ui, repotype, path, revs)
if revs and len(revs) > 1:
raise error.Abort(_('monotone source does not support specifying '
'multiple revs'))
diff -r 87676e8ee056 -r 27b6df1b5adb hgext/convert/p4.py
--- a/hgext/convert/p4.py Mon Jan 08 16:07:51 2018 -0800
+++ b/hgext/convert/p4.py Mon Jan 22 17:53:02 2018 -0500
@@ -43,11 +43,11 @@
return filename
class p4_source(common.converter_source):
- def __init__(self, ui, path, revs=None):
+ def __init__(self, ui, repotype, path, revs=None):
# avoid import cycle
from . import convcmd
- super(p4_source, self).__init__(ui, path, revs=revs)
+ super(p4_source, self).__init__(ui, repotype, path, revs=revs)
if "/" in path and not path.startswith('//'):
raise common.NoRepo(_('%s does not look like a P4 repository') %
diff -r 87676e8ee056 -r 27b6df1b5adb hgext/convert/subversion.py
--- a/hgext/convert/subversion.py Mon Jan 08 16:07:51 2018 -0800
+++ b/hgext/convert/subversion.py Mon Jan 22 17:53:02 2018 -0500
@@ -285,8 +285,8 @@
# the parent module. A revision has at most one parent.
#
class svn_source(converter_source):
- def __init__(self, ui, url, revs=None):
- super(svn_source, self).__init__(ui, url, revs=revs)
+ def __init__(self, ui, repotype, url, revs=None):
+ super(svn_source, self).__init__(ui, repotype, url, revs=revs)
if not (url.startswith('svn://') or url.startswith('svn+ssh://') or
(os.path.exists(url) and
@@ -1112,9 +1112,9 @@
def authorfile(self):
return self.join('hg-authormap')
- def __init__(self, ui, path):
+ def __init__(self, ui, repotype, path):
- converter_sink.__init__(self, ui, path)
+ converter_sink.__init__(self, ui, repotype, path)
commandline.__init__(self, ui, 'svn')
self.delete = []
self.setexec = []
diff -r 87676e8ee056 -r 27b6df1b5adb hgext/extdiff.py
--- a/hgext/extdiff.py Mon Jan 08 16:07:51 2018 -0800
+++ b/hgext/extdiff.py Mon Jan 22 17:53:02 2018 -0500
@@ -338,6 +338,7 @@
that revision is compared to the working directory, and, when no
revisions are specified, the working directory files are compared
to its parent.'''
+ opts = pycompat.byteskwargs(opts)
program = opts.get('program')
option = opts.get('option')
if not program:
@@ -369,6 +370,7 @@
self._cmdline = cmdline
def __call__(self, ui, repo, *pats, **opts):
+ opts = pycompat.byteskwargs(opts)
options = ' '.join(map(util.shellquote, opts['option']))
if options:
options = ' ' + options
diff -r 87676e8ee056 -r 27b6df1b5adb hgext/fetch.py
--- a/hgext/fetch.py Mon Jan 08 16:07:51 2018 -0800
+++ b/hgext/fetch.py Mon Jan 22 17:53:02 2018 -0500
@@ -19,6 +19,7 @@
exchange,
hg,
lock,
+ pycompat,
registrar,
util,
)
@@ -60,6 +61,7 @@
Returns 0 on success.
'''
+ opts = pycompat.byteskwargs(opts)
date = opts.get('date')
if date:
opts['date'] = util.parsedate(date)
diff -r 87676e8ee056 -r 27b6df1b5adb hgext/fsmonitor/__init__.py
--- a/hgext/fsmonitor/__init__.py Mon Jan 08 16:07:51 2018 -0800
+++ b/hgext/fsmonitor/__init__.py Mon Jan 22 17:53:02 2018 -0500
@@ -117,7 +117,6 @@
from mercurial.i18n import _
from mercurial.node import (
hex,
- nullid,
)
from mercurial import (
@@ -165,9 +164,6 @@
configitem('experimental', 'fsmonitor.transaction_notify',
default=False,
)
-configitem('experimental', 'fsmonitor.wc_change_notify',
- default=False,
-)
# This extension is incompatible with the following blacklisted extensions
# and will disable itself when encountering one of these:
@@ -224,16 +220,21 @@
Whenever full is False, ignored is False, and the Watchman client is
available, use Watchman combined with saved state to possibly return only a
subset of files.'''
- def bail():
+ def bail(reason):
+ self._ui.debug('fsmonitor: fallback to core status, %s\n' % reason)
return orig(match, subrepos, unknown, ignored, full=True)
- if full or ignored or not self._watchmanclient.available():
- return bail()
+ if full:
+ return bail('full rewalk requested')
+ if ignored:
+ return bail('listing ignored files')
+ if not self._watchmanclient.available():
+ return bail('client unavailable')
state = self._fsmonitorstate
clock, ignorehash, notefiles = state.get()
if not clock:
if state.walk_on_invalidate:
- return bail()
+ return bail('no clock')
# Initial NULL clock value, see
# https://facebook.github.io/watchman/docs/clockspec.html
clock = 'c:0:0'
@@ -263,7 +264,7 @@
if _hashignore(ignore) != ignorehash and clock != 'c:0:0':
# ignore list changed -- can't rely on Watchman state any more
if state.walk_on_invalidate:
- return bail()
+ return bail('ignore rules changed')
notefiles = []
clock = 'c:0:0'
else:
@@ -273,7 +274,11 @@
matchfn = match.matchfn
matchalways = match.always()
- dmap = self._map._map
+ dmap = self._map
+ if util.safehasattr(dmap, '_map'):
+ # for better performance, directly access the inner dirstate map if the
+ # standard dirstate implementation is in use.
+ dmap = dmap._map
nonnormalset = self._map.nonnormalset
copymap = self._map.copymap
@@ -334,7 +339,7 @@
except Exception as ex:
_handleunavailable(self._ui, state, ex)
self._watchmanclient.clearconnection()
- return bail()
+ return bail('exception during run')
else:
# We need to propagate the last observed clock up so that we
# can use it for our next query
@@ -342,7 +347,7 @@
if result['is_fresh_instance']:
if state.walk_on_invalidate:
state.invalidate()
- return bail()
+ return bail('fresh instance')
fresh_instance = True
# Ignore any prior noteable files from the state info
notefiles = []
@@ -600,14 +605,6 @@
self._fsmonitorstate.invalidate()
return super(fsmonitordirstate, self).invalidate(*args, **kwargs)
- if dirstate._ui.configbool(
- "experimental", "fsmonitor.wc_change_notify"):
- def setparents(self, p1, p2=nullid):
- with state_update(self._repo, name="hg.wc_change",
- oldnode=self._pl[0], newnode=p1,
- partial=False):
- return super(fsmonitordirstate, self).setparents(p1, p2)
-
dirstate.__class__ = fsmonitordirstate
dirstate._fsmonitorinit(repo)
@@ -662,14 +659,18 @@
self.enter()
def enter(self):
- # We explicitly need to take a lock here, before we proceed to update
- # watchman about the update operation, so that we don't race with
- # some other actor. merge.update is going to take the wlock almost
- # immediately anyway, so this is effectively extending the lock
- # around a couple of short sanity checks.
+ # Make sure we have a wlock prior to sending notifications to watchman.
+ # We don't want to race with other actors. In the update case,
+ # merge.update is going to take the wlock almost immediately. We are
+ # effectively extending the lock around several short sanity checks.
if self.oldnode is None:
self.oldnode = self.repo['.'].node()
- self._lock = self.repo.wlock()
+
+ if self.repo.currentwlock() is None:
+ if util.safehasattr(self.repo, 'wlocknostateupdate'):
+ self._lock = self.repo.wlocknostateupdate()
+ else:
+ self._lock = self.repo.wlock()
self.need_leave = self._state(
'state-enter',
hex(self.oldnode))
@@ -790,32 +791,34 @@
orig = super(fsmonitorrepo, self).status
return overridestatus(orig, self, *args, **kwargs)
- if ui.configbool("experimental", "fsmonitor.transaction_notify"):
- def transaction(self, *args, **kwargs):
- tr = super(fsmonitorrepo, self).transaction(
- *args, **kwargs)
- if tr.count != 1:
- return tr
- stateupdate = state_update(self, name="hg.transaction")
- stateupdate.enter()
+ def wlocknostateupdate(self, *args, **kwargs):
+ return super(fsmonitorrepo, self).wlock(*args, **kwargs)
+
+ def wlock(self, *args, **kwargs):
+ l = super(fsmonitorrepo, self).wlock(*args, **kwargs)
+ if not ui.configbool(
+ "experimental", "fsmonitor.transaction_notify"):
+ return l
+ if l.held != 1:
+ return l
+ origrelease = l.releasefn
- class fsmonitortrans(tr.__class__):
- def _abort(self):
- try:
- result = super(fsmonitortrans, self)._abort()
- finally:
- stateupdate.exit(abort=True)
- return result
+ def staterelease():
+ if origrelease:
+ origrelease()
+ if l.stateupdate:
+ l.stateupdate.exit()
+ l.stateupdate = None
- def close(self):
- try:
- result = super(fsmonitortrans, self).close()
- finally:
- if self.count == 0:
- stateupdate.exit()
- return result
-
- tr.__class__ = fsmonitortrans
- return tr
+ try:
+ l.stateupdate = None
+ l.stateupdate = state_update(self, name="hg.transaction")
+ l.stateupdate.enter()
+ l.releasefn = staterelease
+ except Exception as e:
+ # Swallow any errors; fire and forget
+ self.ui.log(
+ 'watchman', 'Exception in state update %s\n', e)
+ return l
repo.__class__ = fsmonitorrepo
diff -r 87676e8ee056 -r 27b6df1b5adb hgext/githelp.py
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/hgext/githelp.py Mon Jan 22 17:53:02 2018 -0500
@@ -0,0 +1,1073 @@
+# githelp.py - Try to map Git commands to Mercurial equivalents.
+#
+# Copyright 2013 Facebook, Inc.
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2 or any later version.
+"""try mapping git commands to Mercurial commands
+
+Tries to map a given git command to a Mercurial command:
+
+ $ hg githelp -- git checkout master
+ hg update master
+
+If an unknown command or parameter combination is detected, an error is
+produced.
+"""
+
+from __future__ import absolute_import
+
+import getopt
+import re
+
+from mercurial.i18n import _
+from mercurial import (
+ error,
+ fancyopts,
+ registrar,
+ util,
+)
+
+# Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for
+# extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
+# be specifying the version(s) of Mercurial they are tested with, or
+# leave the attribute unspecified.
+testedwith = 'ships-with-hg-core'
+
+cmdtable = {}
+command = registrar.command(cmdtable)
+
+def convert(s):
+ if s.startswith("origin/"):
+ return s[7:]
+ if 'HEAD' in s:
+ s = s.replace('HEAD', '.')
+ # HEAD~ in git is .~1 in mercurial
+ s = re.sub('~$', '~1', s)
+ return s
+
+@command('^githelp|git', [
+ ], _('hg githelp'))
+def githelp(ui, repo, *args, **kwargs):
+ '''suggests the Mercurial equivalent of the given git command
+
+ Usage: hg githelp --
+ '''
+
+ if len(args) == 0 or (len(args) == 1 and args[0] =='git'):
+ raise error.Abort(_('missing git command - '
+ 'usage: hg githelp -- '))
+
+ if args[0] == 'git':
+ args = args[1:]
+
+ cmd = args[0]
+ if not cmd in gitcommands:
+ raise error.Abort("error: unknown git command %s" % (cmd))
+
+ ui.pager('githelp')
+ args = args[1:]
+ return gitcommands[cmd](ui, repo, *args, **kwargs)
+
+def parseoptions(ui, cmdoptions, args):
+ cmdoptions = list(cmdoptions)
+ opts = {}
+ args = list(args)
+ while True:
+ try:
+ args = fancyopts.fancyopts(list(args), cmdoptions, opts, True)
+ break
+ except getopt.GetoptError as ex:
+ flag = None
+ if "requires argument" in ex.msg:
+ raise
+ if ('--' + ex.opt) in ex.msg:
+ flag = '--' + ex.opt
+ elif ('-' + ex.opt) in ex.msg:
+ flag = '-' + ex.opt
+ else:
+ raise error.Abort("unknown option %s" % ex.opt)
+ try:
+ args.remove(flag)
+ except Exception:
+ raise error.Abort(
+ "unknown option {0} packed with other options\n"
+ "Please try passing the option as it's own flag: -{0}" \
+ .format(ex.opt))
+
+ ui.warn(_("ignoring unknown option %s\n") % flag)
+
+ args = list([convert(x) for x in args])
+ opts = dict([(k, convert(v)) if isinstance(v, str) else (k, v)
+ for k, v in opts.iteritems()])
+
+ return args, opts
+
+class Command(object):
+ def __init__(self, name):
+ self.name = name
+ self.args = []
+ self.opts = {}
+
+ def __str__(self):
+ cmd = "hg " + self.name
+ if self.opts:
+ for k, values in sorted(self.opts.iteritems()):
+ for v in values:
+ if v:
+ cmd += " %s %s" % (k, v)
+ else:
+ cmd += " %s" % (k,)
+ if self.args:
+ cmd += " "
+ cmd += " ".join(self.args)
+ return cmd
+
+ def append(self, value):
+ self.args.append(value)
+
+ def extend(self, values):
+ self.args.extend(values)
+
+ def __setitem__(self, key, value):
+ values = self.opts.setdefault(key, [])
+ values.append(value)
+
+ def __and__(self, other):
+ return AndCommand(self, other)
+
+class AndCommand(object):
+ def __init__(self, left, right):
+ self.left = left
+ self.right = right
+
+ def __str__(self):
+ return "%s && %s" % (self.left, self.right)
+
+ def __and__(self, other):
+ return AndCommand(self, other)
+
+def add(ui, repo, *args, **kwargs):
+ cmdoptions = [
+ ('A', 'all', None, ''),
+ ('p', 'patch', None, ''),
+ ]
+ args, opts = parseoptions(ui, cmdoptions, args)
+
+ if (opts.get('patch')):
+ ui.status(_("note: Mercurial will commit when complete, "
+ "as there is no staging area in Mercurial\n\n"))
+ cmd = Command('commit --interactive')
+ else:
+ cmd = Command("add")
+
+ if not opts.get('all'):
+ cmd.extend(args)
+ else:
+ ui.status(_("note: use hg addremove to remove files that have "
+ "been deleted.\n\n"))
+
+ ui.status((str(cmd)), "\n")
+
+def am(ui, repo, *args, **kwargs):
+ cmdoptions=[
+ ]
+ args, opts = parseoptions(ui, cmdoptions, args)
+ cmd = Command('import')
+ ui.status(str(cmd), "\n")
+
+def apply(ui, repo, *args, **kwargs):
+ cmdoptions = [
+ ('p', 'p', int, ''),
+ ]
+ args, opts = parseoptions(ui, cmdoptions, args)
+
+ cmd = Command('import --no-commit')
+ if (opts.get('p')):
+ cmd['-p'] = opts.get('p')
+ cmd.extend(args)
+
+ ui.status((str(cmd)), "\n")
+
+def bisect(ui, repo, *args, **kwargs):
+ ui.status(_("See 'hg help bisect' for how to use bisect.\n\n"))
+
+def blame(ui, repo, *args, **kwargs):
+ cmdoptions = [
+ ]
+ args, opts = parseoptions(ui, cmdoptions, args)
+ cmd = Command('annotate -udl')
+ cmd.extend([convert(v) for v in args])
+ ui.status((str(cmd)), "\n")
+
+def branch(ui, repo, *args, **kwargs):
+ cmdoptions = [
+ ('', 'set-upstream', None, ''),
+ ('', 'set-upstream-to', '', ''),
+ ('d', 'delete', None, ''),
+ ('D', 'delete', None, ''),
+ ('m', 'move', None, ''),
+ ('M', 'move', None, ''),
+ ]
+ args, opts = parseoptions(ui, cmdoptions, args)
+
+ cmd = Command("bookmark")
+
+ if opts.get('set_upstream') or opts.get('set_upstream_to'):
+ ui.status(_("Mercurial has no concept of upstream branches\n"))
+ return
+ elif opts.get('delete'):
+ cmd = Command("strip")
+ for branch in args:
+ cmd['-B'] = branch
+ else:
+ cmd['-B'] = None
+ elif opts.get('move'):
+ if len(args) > 0:
+ if len(args) > 1:
+ old = args.pop(0)
+ else:
+ # shell command to output the active bookmark for the active
+ # revision
+ old = '`hg log -T"{activebookmark}" -r .`'
+ new = args[0]
+ cmd['-m'] = old
+ cmd.append(new)
+ else:
+ if len(args) > 1:
+ cmd['-r'] = args[1]
+ cmd.append(args[0])
+ elif len(args) == 1:
+ cmd.append(args[0])
+ ui.status((str(cmd)), "\n")
+
+def ispath(repo, string):
+ """
+ The first argument to git checkout can either be a revision or a path. Let's
+ generally assume it's a revision, unless it's obviously a path. There are
+ too many ways to spell revisions in git for us to reasonably catch all of
+ them, so let's be conservative.
+ """
+ if string in repo:
+ # if it's definitely a revision let's not even check if a file of the
+ # same name exists.
+ return False
+
+ cwd = repo.getcwd()
+ if cwd == '':
+ repopath = string
+ else:
+ repopath = cwd + '/' + string
+
+ exists = repo.wvfs.exists(repopath)
+ if exists:
+ return True
+
+ manifest = repo['.'].manifest()
+
+ didexist = (repopath in manifest) or manifest.hasdir(repopath)
+
+ return didexist
+
+def checkout(ui, repo, *args, **kwargs):
+ cmdoptions = [
+ ('b', 'branch', '', ''),
+ ('B', 'branch', '', ''),
+ ('f', 'force', None, ''),
+ ('p', 'patch', None, ''),
+ ]
+ paths = []
+ if '--' in args:
+ sepindex = args.index('--')
+ paths.extend(args[sepindex + 1:])
+ args = args[:sepindex]
+
+ args, opts = parseoptions(ui, cmdoptions, args)
+
+ rev = None
+ if args and ispath(repo, args[0]):
+ paths = args + paths
+ elif args:
+ rev = args[0]
+ paths = args[1:] + paths
+
+ cmd = Command('update')
+
+ if opts.get('force'):
+ if paths or rev:
+ cmd['-C'] = None
+
+ if opts.get('patch'):
+ cmd = Command('revert')
+ cmd['-i'] = None
+
+ if opts.get('branch'):
+ if len(args) == 0:
+ cmd = Command('bookmark')
+ cmd.append(opts.get('branch'))
+ else:
+ cmd.append(args[0])
+ bookcmd = Command('bookmark')
+ bookcmd.append(opts.get('branch'))
+ cmd = cmd & bookcmd
+ # if there is any path argument supplied, use revert instead of update
+ elif len(paths) > 0:
+ ui.status(_("note: use --no-backup to avoid creating .orig files\n\n"))
+ cmd = Command('revert')
+ if opts.get('patch'):
+ cmd['-i'] = None
+ if rev:
+ cmd['-r'] = rev
+ cmd.extend(paths)
+ elif rev:
+ if opts.get('patch'):
+ cmd['-r'] = rev
+ else:
+ cmd.append(rev)
+ elif opts.get('force'):
+ cmd = Command('revert')
+ cmd['--all'] = None
+ else:
+ raise error.Abort("a commit must be specified")
+
+ ui.status((str(cmd)), "\n")
+
+def cherrypick(ui, repo, *args, **kwargs):
+ cmdoptions = [
+ ('', 'continue', None, ''),
+ ('', 'abort', None, ''),
+ ('e', 'edit', None, ''),
+ ]
+ args, opts = parseoptions(ui, cmdoptions, args)
+
+ cmd = Command('graft')
+
+ if opts.get('edit'):
+ cmd['--edit'] = None
+ if opts.get('continue'):
+ cmd['--continue'] = None
+ elif opts.get('abort'):
+ ui.status(_("note: hg graft does not have --abort.\n\n"))
+ return
+ else:
+ cmd.extend(args)
+
+ ui.status((str(cmd)), "\n")
+
+def clean(ui, repo, *args, **kwargs):
+ cmdoptions = [
+ ('d', 'd', None, ''),
+ ('f', 'force', None, ''),
+ ('x', 'x', None, ''),
+ ]
+ args, opts = parseoptions(ui, cmdoptions, args)
+
+ cmd = Command('purge')
+ if opts.get('x'):
+ cmd['--all'] = None
+ cmd.extend(args)
+
+ ui.status((str(cmd)), "\n")
+
+def clone(ui, repo, *args, **kwargs):
+ cmdoptions = [
+ ('', 'bare', None, ''),
+ ('n', 'no-checkout', None, ''),
+ ('b', 'branch', '', ''),
+ ]
+ args, opts = parseoptions(ui, cmdoptions, args)
+
+ if len(args) == 0:
+ raise error.Abort("a repository to clone must be specified")
+
+ cmd = Command('clone')
+ cmd.append(args[0])
+ if len(args) > 1:
+ cmd.append(args[1])
+
+ if opts.get('bare'):
+ cmd['-U'] = None
+ ui.status(_("note: Mercurial does not have bare clones. " +
+ "-U will clone the repo without checking out a commit\n\n"))
+ elif opts.get('no_checkout'):
+ cmd['-U'] = None
+
+ if opts.get('branch'):
+ cocmd = Command("update")
+ cocmd.append(opts.get('branch'))
+ cmd = cmd & cocmd
+
+ ui.status((str(cmd)), "\n")
+
+def commit(ui, repo, *args, **kwargs):
+ cmdoptions = [
+ ('a', 'all', None, ''),
+ ('m', 'message', '', ''),
+ ('p', 'patch', None, ''),
+ ('C', 'reuse-message', '', ''),
+ ('F', 'file', '', ''),
+ ('', 'author', '', ''),
+ ('', 'date', '', ''),
+ ('', 'amend', None, ''),
+ ('', 'no-edit', None, ''),
+ ]
+ args, opts = parseoptions(ui, cmdoptions, args)
+
+ cmd = Command('commit')
+ if opts.get('patch'):
+ cmd = Command('commit --interactive')
+
+ if opts.get('amend'):
+ if opts.get('no_edit'):
+ cmd = Command('amend')
+ else:
+ cmd['--amend'] = None
+
+ if opts.get('reuse_message'):
+ cmd['-M'] = opts.get('reuse_message')
+
+ if opts.get('message'):
+ cmd['-m'] = "'%s'" % (opts.get('message'),)
+
+ if opts.get('all'):
+ ui.status(_("note: Mercurial doesn't have a staging area, " +
+ "so there is no --all. -A will add and remove files " +
+ "for you though.\n\n"))
+
+ if opts.get('file'):
+ cmd['-l'] = opts.get('file')
+
+ if opts.get('author'):
+ cmd['-u'] = opts.get('author')
+
+ if opts.get('date'):
+ cmd['-d'] = opts.get('date')
+
+ cmd.extend(args)
+
+ ui.status((str(cmd)), "\n")
+
+def deprecated(ui, repo, *args, **kwargs):
+ ui.warn(_('This command has been deprecated in the git project, ' +
+ 'thus isn\'t supported by this tool.\n\n'))
+
+def diff(ui, repo, *args, **kwargs):
+ cmdoptions = [
+ ('a', 'all', None, ''),
+ ('', 'cached', None, ''),
+ ('R', 'reverse', None, ''),
+ ]
+ args, opts = parseoptions(ui, cmdoptions, args)
+
+ cmd = Command('diff')
+
+ if opts.get('cached'):
+ ui.status(_('note: Mercurial has no concept of a staging area, ' +
+ 'so --cached does nothing.\n\n'))
+
+ if opts.get('reverse'):
+ cmd['--reverse'] = None
+
+ for a in list(args):
+ args.remove(a)
+ try:
+ repo.revs(a)
+ cmd['-r'] = a
+ except Exception:
+ cmd.append(a)
+
+ ui.status((str(cmd)), "\n")
+
+def difftool(ui, repo, *args, **kwargs):
+ ui.status(_('Mercurial does not enable external difftool by default. You '
+ 'need to enable the extdiff extension in your .hgrc file by adding\n'
+ 'extdiff =\n'
+ 'to the [extensions] section and then running\n\n'
+ 'hg extdiff -p \n\n'
+ 'See \'hg help extdiff\' and \'hg help -e extdiff\' for more '
+ 'information.\n'))
+
+def fetch(ui, repo, *args, **kwargs):
+ cmdoptions = [
+ ('', 'all', None, ''),
+ ('f', 'force', None, ''),
+ ]
+ args, opts = parseoptions(ui, cmdoptions, args)
+
+ cmd = Command('pull')
+
+ if len(args) > 0:
+ cmd.append(args[0])
+ if len(args) > 1:
+ ui.status(_("note: Mercurial doesn't have refspecs. " +
+ "-r can be used to specify which commits you want to pull. " +
+ "-B can be used to specify which bookmark you want to pull." +
+ "\n\n"))
+ for v in args[1:]:
+ if v in repo._bookmarks:
+ cmd['-B'] = v
+ else:
+ cmd['-r'] = v
+
+ ui.status((str(cmd)), "\n")
+
+def grep(ui, repo, *args, **kwargs):
+ cmdoptions = [
+ ]
+ args, opts = parseoptions(ui, cmdoptions, args)
+
+ cmd = Command('grep')
+
+ # For basic usage, git grep and hg grep are the same. They both have the
+ # pattern first, followed by paths.
+ cmd.extend(args)
+
+ ui.status((str(cmd)), "\n")
+
+def init(ui, repo, *args, **kwargs):
+ cmdoptions = [
+ ]
+ args, opts = parseoptions(ui, cmdoptions, args)
+
+ cmd = Command('init')
+
+ if len(args) > 0:
+ cmd.append(args[0])
+
+ ui.status((str(cmd)), "\n")
+
+def log(ui, repo, *args, **kwargs):
+ cmdoptions = [
+ ('', 'follow', None, ''),
+ ('', 'decorate', None, ''),
+ ('n', 'number', '', ''),
+ ('1', '1', None, ''),
+ ('', 'pretty', '', ''),
+ ('', 'format', '', ''),
+ ('', 'oneline', None, ''),
+ ('', 'stat', None, ''),
+ ('', 'graph', None, ''),
+ ('p', 'patch', None, ''),
+ ]
+ args, opts = parseoptions(ui, cmdoptions, args)
+ ui.status(_('note: -v prints the entire commit message like Git does. To ' +
+ 'print just the first line, drop the -v.\n\n'))
+ ui.status(_("note: see hg help revset for information on how to filter " +
+ "log output.\n\n"))
+
+ cmd = Command('log')
+ cmd['-v'] = None
+
+ if opts.get('number'):
+ cmd['-l'] = opts.get('number')
+ if opts.get('1'):
+ cmd['-l'] = '1'
+ if opts.get('stat'):
+ cmd['--stat'] = None
+ if opts.get('graph'):
+ cmd['-G'] = None
+ if opts.get('patch'):
+ cmd['-p'] = None
+
+ if opts.get('pretty') or opts.get('format') or opts.get('oneline'):
+ format = opts.get('format', '')
+ if 'format:' in format:
+ ui.status(_("note: --format format:??? equates to Mercurial's " +
+ "--template. See hg help templates for more info.\n\n"))
+ cmd['--template'] = '???'
+ else:
+ ui.status(_("note: --pretty/format/oneline equate to Mercurial's " +
+ "--style or --template. See hg help templates for more info." +
+ "\n\n"))
+ cmd['--style'] = '???'
+
+ if len(args) > 0:
+ if '..' in args[0]:
+ since, until = args[0].split('..')
+ cmd['-r'] = "'%s::%s'" % (since, until)
+ del args[0]
+ cmd.extend(args)
+
+ ui.status((str(cmd)), "\n")
+
+def lsfiles(ui, repo, *args, **kwargs):
+ cmdoptions = [
+ ('c', 'cached', None, ''),
+ ('d', 'deleted', None, ''),
+ ('m', 'modified', None, ''),
+ ('o', 'others', None, ''),
+ ('i', 'ignored', None, ''),
+ ('s', 'stage', None, ''),
+ ('z', '_zero', None, ''),
+ ]
+ args, opts = parseoptions(ui, cmdoptions, args)
+
+ if (opts.get('modified') or opts.get('deleted')
+ or opts.get('others') or opts.get('ignored')):
+ cmd = Command('status')
+ if opts.get('deleted'):
+ cmd['-d'] = None
+ if opts.get('modified'):
+ cmd['-m'] = None
+ if opts.get('others'):
+ cmd['-o'] = None
+ if opts.get('ignored'):
+ cmd['-i'] = None
+ else:
+ cmd = Command('files')
+ if opts.get('stage'):
+ ui.status(_("note: Mercurial doesn't have a staging area, ignoring "
+ "--stage\n"))
+ if opts.get('_zero'):
+ cmd['-0'] = None
+ cmd.append('.')
+ for include in args:
+ cmd['-I'] = util.shellquote(include)
+
+ ui.status((str(cmd)), "\n")
+
+def merge(ui, repo, *args, **kwargs):
+ cmdoptions = [
+ ]
+ args, opts = parseoptions(ui, cmdoptions, args)
+
+ cmd = Command('merge')
+
+ if len(args) > 0:
+ cmd.append(args[len(args) - 1])
+
+ ui.status((str(cmd)), "\n")
+
+def mergebase(ui, repo, *args, **kwargs):
+ cmdoptions = []
+ args, opts = parseoptions(ui, cmdoptions, args)
+
+ if len(args) != 2:
+ args = ['A', 'B']
+
+ cmd = Command("log -T '{node}\\n' -r 'ancestor(%s,%s)'"
+ % (args[0], args[1]))
+
+ ui.status(_('NOTE: ancestors() is part of the revset language.\n'),
+ _("Learn more about revsets with 'hg help revsets'\n\n"))
+ ui.status((str(cmd)), "\n")
+
+def mergetool(ui, repo, *args, **kwargs):
+ cmdoptions = []
+ args, opts = parseoptions(ui, cmdoptions, args)
+
+ cmd = Command("resolve")
+
+ if len(args) == 0:
+ cmd['--all'] = None
+ cmd.extend(args)
+ ui.status((str(cmd)), "\n")
+
+def mv(ui, repo, *args, **kwargs):
+ cmdoptions = [
+ ('f', 'force', None, ''),
+ ]
+ args, opts = parseoptions(ui, cmdoptions, args)
+
+ cmd = Command('mv')
+ cmd.extend(args)
+
+ if opts.get('force'):
+ cmd['-f'] = None
+
+ ui.status((str(cmd)), "\n")
+
+def pull(ui, repo, *args, **kwargs):
+ cmdoptions = [
+ ('', 'all', None, ''),
+ ('f', 'force', None, ''),
+ ('r', 'rebase', None, ''),
+ ]
+ args, opts = parseoptions(ui, cmdoptions, args)
+
+ cmd = Command('pull')
+ cmd['--rebase'] = None
+
+ if len(args) > 0:
+ cmd.append(args[0])
+ if len(args) > 1:
+ ui.status(_("note: Mercurial doesn't have refspecs. " +
+ "-r can be used to specify which commits you want to pull. " +
+ "-B can be used to specify which bookmark you want to pull." +
+ "\n\n"))
+ for v in args[1:]:
+ if v in repo._bookmarks:
+ cmd['-B'] = v
+ else:
+ cmd['-r'] = v
+
+ ui.status((str(cmd)), "\n")
+
+def push(ui, repo, *args, **kwargs):
+ cmdoptions = [
+ ('', 'all', None, ''),
+ ('f', 'force', None, ''),
+ ]
+ args, opts = parseoptions(ui, cmdoptions, args)
+
+ cmd = Command('push')
+
+ if len(args) > 0:
+ cmd.append(args[0])
+ if len(args) > 1:
+ ui.status(_("note: Mercurial doesn't have refspecs. " +
+ "-r can be used to specify which commits you want to push. " +
+ "-B can be used to specify which bookmark you want to push." +
+ "\n\n"))
+ for v in args[1:]:
+ if v in repo._bookmarks:
+ cmd['-B'] = v
+ else:
+ cmd['-r'] = v
+
+ if opts.get('force'):
+ cmd['-f'] = None
+
+ ui.status((str(cmd)), "\n")
+
+def rebase(ui, repo, *args, **kwargs):
+ cmdoptions = [
+ ('', 'all', None, ''),
+ ('i', 'interactive', None, ''),
+ ('', 'onto', '', ''),
+ ('', 'abort', None, ''),
+ ('', 'continue', None, ''),
+ ('', 'skip', None, ''),
+ ]
+ args, opts = parseoptions(ui, cmdoptions, args)
+
+ if opts.get('interactive'):
+ ui.status(_("note: hg histedit does not perform a rebase. " +
+ "It just edits history.\n\n"))
+ cmd = Command('histedit')
+ if len(args) > 0:
+ ui.status(_("also note: 'hg histedit' will automatically detect"
+ " your stack, so no second argument is necessary.\n\n"))
+ ui.status((str(cmd)), "\n")
+ return
+
+ if opts.get('skip'):
+ cmd = Command('revert --all -r .')
+ ui.status((str(cmd)), "\n")
+
+ cmd = Command('rebase')
+
+ if opts.get('continue') or opts.get('skip'):
+ cmd['--continue'] = None
+ if opts.get('abort'):
+ cmd['--abort'] = None
+
+ if opts.get('onto'):
+ ui.status(_("note: if you're trying to lift a commit off one branch, " +
+ "try hg rebase -d <destination commit> -s <commit to be lifted> " +
+ "\n\n"))
+ cmd['-d'] = convert(opts.get('onto'))
+ if len(args) < 2:
+ raise error.Abort("Expected format: git rebase --onto X Y Z")
+ cmd['-s'] = "'::%s - ::%s'" % (convert(args[1]), convert(args[0]))
+ else:
+ if len(args) == 1:
+ cmd['-d'] = convert(args[0])
+ elif len(args) == 2:
+ cmd['-d'] = convert(args[0])
+ cmd['-b'] = convert(args[1])
+
+ ui.status((str(cmd)), "\n")
+
+def reflog(ui, repo, *args, **kwargs):
+ cmdoptions = [
+ ('', 'all', None, ''),
+ ]
+ args, opts = parseoptions(ui, cmdoptions, args)
+
+ cmd = Command('journal')
+ if opts.get('all'):
+ cmd['--all'] = None
+ if len(args) > 0:
+ cmd.append(args[0])
+
+ ui.status(str(cmd), "\n\n")
+ ui.status(_("note: in hg commits can be deleted from repo but we always"
+ " have backups.\n"))
+
+def reset(ui, repo, *args, **kwargs):
+ cmdoptions = [
+ ('', 'soft', None, ''),
+ ('', 'hard', None, ''),
+ ('', 'mixed', None, ''),
+ ]
+ args, opts = parseoptions(ui, cmdoptions, args)
+
+ commit = convert(args[0] if len(args) > 0 else '.')
+ hard = opts.get('hard')
+
+ if opts.get('mixed'):
+ ui.status(_('NOTE: --mixed has no meaning since Mercurial has no '
+ 'staging area\n\n'))
+ if opts.get('soft'):
+ ui.status(_('NOTE: --soft has no meaning since Mercurial has no '
+ 'staging area\n\n'))
+
+ cmd = Command('update')
+ if hard:
+ cmd.append('--clean')
+
+ cmd.append(commit)
+
+ ui.status((str(cmd)), "\n")
+
+def revert(ui, repo, *args, **kwargs):
+ cmdoptions = [
+ ]
+ args, opts = parseoptions(ui, cmdoptions, args)
+
+ if len(args) > 1:
+ ui.status(_("note: hg backout doesn't support multiple commits at " +
+ "once\n\n"))
+
+ cmd = Command('backout')
+ if args:
+ cmd.append(args[0])
+
+ ui.status((str(cmd)), "\n")
+
+def revparse(ui, repo, *args, **kwargs):
+ cmdoptions = [
+ ('', 'show-cdup', None, ''),
+ ('', 'show-toplevel', None, ''),
+ ]
+ args, opts = parseoptions(ui, cmdoptions, args)
+
+ if opts.get('show_cdup') or opts.get('show_toplevel'):
+ cmd = Command('root')
+ if opts.get('show_cdup'):
+ ui.status(_("note: hg root prints the root of the repository\n\n"))
+ ui.status((str(cmd)), "\n")
+ else:
+ ui.status(_("note: see hg help revset for how to refer to commits\n"))
+
+def rm(ui, repo, *args, **kwargs):
+ cmdoptions = [
+ ('f', 'force', None, ''),
+ ('n', 'dry-run', None, ''),
+ ]
+ args, opts = parseoptions(ui, cmdoptions, args)
+
+ cmd = Command('rm')
+ cmd.extend(args)
+
+ if opts.get('force'):
+ cmd['-f'] = None
+ if opts.get('dry_run'):
+ cmd['-n'] = None
+
+ ui.status((str(cmd)), "\n")
+
+def show(ui, repo, *args, **kwargs):
+ cmdoptions = [
+ ('', 'name-status', None, ''),
+ ('', 'pretty', '', ''),
+ ('U', 'unified', int, ''),
+ ]
+ args, opts = parseoptions(ui, cmdoptions, args)
+
+ if opts.get('name_status'):
+ if opts.get('pretty') == 'format:':
+ cmd = Command('status')
+ cmd['--change'] = '.'
+ else:
+ cmd = Command('log')
+ cmd.append('--style status')
+ cmd.append('-r .')
+ elif len(args) > 0:
+ if ispath(repo, args[0]):
+ cmd = Command('cat')
+ else:
+ cmd = Command('export')
+ cmd.extend(args)
+ if opts.get('unified'):
+ cmd.append('--config diff.unified=%d' % (opts['unified'],))
+ elif opts.get('unified'):
+ cmd = Command('export')
+ cmd.append('--config diff.unified=%d' % (opts['unified'],))
+ else:
+ cmd = Command('export')
+
+ ui.status((str(cmd)), "\n")
+
+def stash(ui, repo, *args, **kwargs):
+ cmdoptions = [
+ ]
+ args, opts = parseoptions(ui, cmdoptions, args)
+
+ cmd = Command('shelve')
+ action = args[0] if len(args) > 0 else None
+
+ if action == 'list':
+ cmd['-l'] = None
+ elif action == 'drop':
+ cmd['-d'] = None
+ if len(args) > 1:
+ cmd.append(args[1])
+ else:
+ cmd.append('')
+ elif action == 'pop' or action == 'apply':
+ cmd = Command('unshelve')
+ if len(args) > 1:
+ cmd.append(args[1])
+ if action == 'apply':
+ cmd['--keep'] = None
+ elif (action == 'branch' or action == 'show' or action == 'clear'
+ or action == 'create'):
+ ui.status(_("note: Mercurial doesn't have equivalents to the " +
+ "git stash branch, show, clear, or create actions.\n\n"))
+ return
+ else:
+ if len(args) > 0:
+ if args[0] != 'save':
+ cmd['--name'] = args[0]
+ elif len(args) > 1:
+ cmd['--name'] = args[1]
+
+ ui.status((str(cmd)), "\n")
+
+def status(ui, repo, *args, **kwargs):
+ cmdoptions = [
+ ('', 'ignored', None, ''),
+ ]
+ args, opts = parseoptions(ui, cmdoptions, args)
+
+ cmd = Command('status')
+ cmd.extend(args)
+
+ if opts.get('ignored'):
+ cmd['-i'] = None
+
+ ui.status((str(cmd)), "\n")
+
+def svn(ui, repo, *args, **kwargs):
+ svncmd = args[0]
+ if not svncmd in gitsvncommands:
+ ui.warn(_("error: unknown git svn command %s\n") % (svncmd))
+
+ args = args[1:]
+ return gitsvncommands[svncmd](ui, repo, *args, **kwargs)
+
+def svndcommit(ui, repo, *args, **kwargs):
+ cmdoptions = [
+ ]
+ args, opts = parseoptions(ui, cmdoptions, args)
+
+ cmd = Command('push')
+
+ ui.status((str(cmd)), "\n")
+
+def svnfetch(ui, repo, *args, **kwargs):
+ cmdoptions = [
+ ]
+ args, opts = parseoptions(ui, cmdoptions, args)
+
+ cmd = Command('pull')
+ cmd.append('default-push')
+
+ ui.status((str(cmd)), "\n")
+
+def svnfindrev(ui, repo, *args, **kwargs):
+ cmdoptions = [
+ ]
+ args, opts = parseoptions(ui, cmdoptions, args)
+
+ cmd = Command('log')
+ cmd['-r'] = args[0]
+
+ ui.status((str(cmd)), "\n")
+
+def svnrebase(ui, repo, *args, **kwargs):
+ cmdoptions = [
+ ('l', 'local', None, ''),
+ ]
+ args, opts = parseoptions(ui, cmdoptions, args)
+
+ pullcmd = Command('pull')
+ pullcmd.append('default-push')
+ rebasecmd = Command('rebase')
+ rebasecmd.append('tip')
+
+ cmd = pullcmd & rebasecmd
+
+ ui.status((str(cmd)), "\n")
+
+def tag(ui, repo, *args, **kwargs):
+ cmdoptions = [
+ ('f', 'force', None, ''),
+ ('l', 'list', None, ''),
+ ('d', 'delete', None, ''),
+ ]
+ args, opts = parseoptions(ui, cmdoptions, args)
+
+ if opts.get('list'):
+ cmd = Command('tags')
+ else:
+ cmd = Command('tag')
+ cmd.append(args[0])
+ if len(args) > 1:
+ cmd['-r'] = args[1]
+
+ if opts.get('delete'):
+ cmd['--remove'] = None
+
+ if opts.get('force'):
+ cmd['-f'] = None
+
+ ui.status((str(cmd)), "\n")
+
+gitcommands = {
+ 'add': add,
+ 'am': am,
+ 'apply': apply,
+ 'bisect': bisect,
+ 'blame': blame,
+ 'branch': branch,
+ 'checkout': checkout,
+ 'cherry-pick': cherrypick,
+ 'clean': clean,
+ 'clone': clone,
+ 'commit': commit,
+ 'diff': diff,
+ 'difftool': difftool,
+ 'fetch': fetch,
+ 'grep': grep,
+ 'init': init,
+ 'log': log,
+ 'ls-files': lsfiles,
+ 'merge': merge,
+ 'merge-base': mergebase,
+ 'mergetool': mergetool,
+ 'mv': mv,
+ 'pull': pull,
+ 'push': push,
+ 'rebase': rebase,
+ 'reflog': reflog,
+ 'reset': reset,
+ 'revert': revert,
+ 'rev-parse': revparse,
+ 'rm': rm,
+ 'show': show,
+ 'stash': stash,
+ 'status': status,
+ 'svn': svn,
+ 'tag': tag,
+ 'whatchanged': deprecated,
+}
+
+gitsvncommands = {
+ 'dcommit': svndcommit,
+ 'fetch': svnfetch,
+ 'find-rev': svnfindrev,
+ 'rebase': svnrebase,
+}
diff -r 87676e8ee056 -r 27b6df1b5adb hgext/gpg.py
--- a/hgext/gpg.py Mon Jan 08 16:07:51 2018 -0800
+++ b/hgext/gpg.py Mon Jan 22 17:53:02 2018 -0500
@@ -106,7 +106,7 @@
def newgpg(ui, **opts):
"""create a new gpg instance"""
gpgpath = ui.config("gpg", "cmd")
- gpgkey = opts.get('key')
+ gpgkey = opts.get(r'key')
if not gpgkey:
gpgkey = ui.config("gpg", "key")
return gpg(gpgpath, gpgkey)
@@ -253,6 +253,7 @@
def _dosign(ui, repo, *revs, **opts):
mygpg = newgpg(ui, **opts)
+ opts = pycompat.byteskwargs(opts)
sigver = "0"
sigmessage = ""
@@ -312,7 +313,8 @@
% hgnode.short(n)
for n in nodes])
try:
- editor = cmdutil.getcommiteditor(editform='gpg.sign', **opts)
+ editor = cmdutil.getcommiteditor(editform='gpg.sign',
+ **pycompat.strkwargs(opts))
repo.commit(message, opts['user'], opts['date'], match=msigs,
editor=editor)
except ValueError as inst:
diff -r 87676e8ee056 -r 27b6df1b5adb hgext/graphlog.py
--- a/hgext/graphlog.py Mon Jan 08 16:07:51 2018 -0800
+++ b/hgext/graphlog.py Mon Jan 22 17:53:02 2018 -0500
@@ -66,5 +66,5 @@
This is an alias to :hg:`log -G`.
"""
- opts['graph'] = True
+ opts[r'graph'] = True
return commands.log(ui, repo, *pats, **opts)
diff -r 87676e8ee056 -r 27b6df1b5adb hgext/hgk.py
--- a/hgext/hgk.py Mon Jan 08 16:07:51 2018 -0800
+++ b/hgext/hgk.py Mon Jan 22 17:53:02 2018 -0500
@@ -48,6 +48,7 @@
commands,
obsolete,
patch,
+ pycompat,
registrar,
scmutil,
util,
@@ -79,6 +80,7 @@
inferrepo=True)
def difftree(ui, repo, node1=None, node2=None, *files, **opts):
"""diff trees from two commits"""
+
def __difftree(repo, node1, node2, files=None):
assert node2 is not None
if files is None:
@@ -102,7 +104,7 @@
##
while True:
- if opts['stdin']:
+ if opts[r'stdin']:
try:
line = util.bytesinput(ui.fin, ui.fout).split(' ')
node1 = line[0]
@@ -118,8 +120,8 @@
else:
node2 = node1
node1 = repo.changelog.parents(node1)[0]
- if opts['patch']:
- if opts['pretty']:
+ if opts[r'patch']:
+ if opts[r'pretty']:
catcommit(ui, repo, node2, "")
m = scmutil.match(repo[node1], files)
diffopts = patch.difffeatureopts(ui)
@@ -130,7 +132,7 @@
ui.write(chunk)
else:
__difftree(repo, node1, node2, files=files)
- if not opts['stdin']:
+ if not opts[r'stdin']:
break
def catcommit(ui, repo, n, prefix, ctx=None):
@@ -183,7 +185,7 @@
# strings
#
prefix = ""
- if opts['stdin']:
+ if opts[r'stdin']:
try:
(type, r) = util.bytesinput(ui.fin, ui.fout).split(' ')
prefix = " "
@@ -201,7 +203,7 @@
return 1
n = repo.lookup(r)
catcommit(ui, repo, n, prefix)
- if opts['stdin']:
+ if opts[r'stdin']:
try:
(type, r) = util.bytesinput(ui.fin, ui.fout).split(' ')
except EOFError:
@@ -340,7 +342,7 @@
else:
full = None
copy = [x for x in revs]
- revtree(ui, copy, repo, full, opts['max_count'], opts['parents'])
+ revtree(ui, copy, repo, full, opts[r'max_count'], opts[r'parents'])
@command('view',
[('l', 'limit', '',
@@ -348,6 +350,7 @@
_('[-l LIMIT] [REVRANGE]'))
def view(ui, repo, *etc, **opts):
"start interactive history viewer"
+ opts = pycompat.byteskwargs(opts)
os.chdir(repo.root)
optstr = ' '.join(['--%s %s' % (k, v) for k, v in opts.iteritems() if v])
if repo.filtername is None:
diff -r 87676e8ee056 -r 27b6df1b5adb hgext/highlight/highlight.py
--- a/hgext/highlight/highlight.py Mon Jan 08 16:07:51 2018 -0800
+++ b/hgext/highlight/highlight.py Mon Jan 22 17:53:02 2018 -0500
@@ -22,8 +22,12 @@
import pygments
import pygments.formatters
import pygments.lexers
+ import pygments.plugin
import pygments.util
+ for unused in pygments.plugin.find_plugin_lexers():
+ pass
+
highlight = pygments.highlight
ClassNotFound = pygments.util.ClassNotFound
guess_lexer = pygments.lexers.guess_lexer
diff -r 87676e8ee056 -r 27b6df1b5adb hgext/histedit.py
--- a/hgext/histedit.py Mon Jan 08 16:07:51 2018 -0800
+++ b/hgext/histedit.py Mon Jan 22 17:53:02 2018 -0500
@@ -203,6 +203,7 @@
mergeutil,
node,
obsolete,
+ pycompat,
registrar,
repair,
scmutil,
@@ -542,9 +543,9 @@
def commitfunc(**kwargs):
overrides = {('phases', 'new-commit'): phasemin}
with repo.ui.configoverride(overrides, 'histedit'):
- extra = kwargs.get('extra', {}).copy()
+ extra = kwargs.get(r'extra', {}).copy()
extra['histedit_source'] = src.hex()
- kwargs['extra'] = extra
+ kwargs[r'extra'] = extra
return repo.commit(**kwargs)
return commitfunc
@@ -602,7 +603,7 @@
if path in headmf:
fctx = last[path]
flags = fctx.flags()
- mctx = context.memfilectx(repo,
+ mctx = context.memfilectx(repo, ctx,
fctx.path(), fctx.data(),
islink='l' in flags,
isexec='x' in flags,
@@ -917,7 +918,8 @@
('o', 'outgoing', False, _('changesets not found in destination')),
('f', 'force', False,
_('force outgoing even for unrelated repositories')),
- ('r', 'rev', [], _('first revision to be edited'), _('REV'))],
+ ('r', 'rev', [], _('first revision to be edited'), _('REV'))] +
+ cmdutil.formatteropts,
_("[OPTIONS] ([ANCESTOR] | --outgoing [URL])"))
def histedit(ui, repo, *freeargs, **opts):
"""interactively edit changeset history
@@ -1094,6 +1096,9 @@
_('histedit requires exactly one ancestor revision'))
def _histedit(ui, repo, state, *freeargs, **opts):
+ opts = pycompat.byteskwargs(opts)
+ fm = ui.formatter('histedit', opts)
+ fm.startitem()
goal = _getgoal(opts)
revs = opts.get('rev', [])
rules = opts.get('commands', '')
@@ -1116,7 +1121,8 @@
_newhistedit(ui, repo, state, revs, freeargs, opts)
_continuehistedit(ui, repo, state)
- _finishhistedit(ui, repo, state)
+ _finishhistedit(ui, repo, state, fm)
+ fm.end()
def _continuehistedit(ui, repo, state):
"""This function runs after either:
@@ -1163,7 +1169,7 @@
state.write()
ui.progress(_("editing"), None)
-def _finishhistedit(ui, repo, state):
+def _finishhistedit(ui, repo, state, fm):
"""This action runs when histedit is finishing its session"""
repo.ui.pushbuffer()
hg.update(repo, state.parentctxnode, quietempty=True)
@@ -1197,6 +1203,13 @@
mapping = {k: v for k, v in mapping.items()
if k in nodemap and all(n in nodemap for n in v)}
scmutil.cleanupnodes(repo, mapping, 'histedit')
+ hf = fm.hexfunc
+ fl = fm.formatlist
+ fd = fm.formatdict
+ nodechanges = fd({hf(oldn): fl([hf(n) for n in newn], name='node')
+ for oldn, newn in mapping.iteritems()},
+ key="oldnode", value="newnodes")
+ fm.data(nodechanges=nodechanges)
state.clear()
if os.path.exists(repo.sjoin('undo')):
@@ -1297,6 +1310,9 @@
state.topmost = topmost
state.replacements = []
+ ui.log("histedit", "%d actions to histedit", len(actions),
+ histedit_num_actions=len(actions))
+
# Create a backup so we can always abort completely.
backupfile = None
if not obsolete.isenabled(repo, obsolete.createmarkersopt):
diff -r 87676e8ee056 -r 27b6df1b5adb hgext/journal.py
--- a/hgext/journal.py Mon Jan 08 16:07:51 2018 -0800
+++ b/hgext/journal.py Mon Jan 22 17:53:02 2018 -0500
@@ -30,6 +30,7 @@
localrepo,
lock,
node,
+ pycompat,
registrar,
util,
)
@@ -133,7 +134,7 @@
Note that by default entries go from most recent to oldest.
"""
- order = kwargs.pop('order', max)
+ order = kwargs.pop(r'order', max)
iterables = [iter(it) for it in iterables]
# this tracks still active iterables; iterables are deleted as they are
# exhausted, which is why this is a dictionary and why each entry also
@@ -303,7 +304,7 @@
# default to 600 seconds timeout
l = lock.lock(
vfs, 'namejournal.lock',
- int(self.ui.config("ui", "timeout")), desc=desc)
+ self.ui.configint("ui", "timeout"), desc=desc)
self.ui.warn(_("got lock after %s seconds\n") % l.delay)
self._lockref = weakref.ref(l)
return l
@@ -458,6 +459,7 @@
`hg journal -T json` can be used to produce machine readable output.
"""
+ opts = pycompat.byteskwargs(opts)
name = '.'
if opts.get('all'):
if args:
@@ -478,6 +480,7 @@
limit = cmdutil.loglimit(opts)
entry = None
+ ui.pager('journal')
for count, entry in enumerate(repo.journal.filtered(name=name)):
if count == limit:
break
diff -r 87676e8ee056 -r 27b6df1b5adb hgext/keyword.py
--- a/hgext/keyword.py Mon Jan 08 16:07:51 2018 -0800
+++ b/hgext/keyword.py Mon Jan 22 17:53:02 2018 -0500
@@ -104,6 +104,7 @@
match,
patch,
pathutil,
+ pycompat,
registrar,
scmutil,
templatefilters,
@@ -380,6 +381,7 @@
'''Bails out if [keyword] configuration is not active.
Returns status of working directory.'''
if kwt:
+ opts = pycompat.byteskwargs(opts)
return repo.status(match=scmutil.match(wctx, pats, opts), clean=True,
unknown=opts.get('unknown') or opts.get('all'))
if ui.configitems('keyword'):
@@ -436,16 +438,16 @@
ui.setconfig('keywordset', 'svn', svn, 'keyword')
uikwmaps = ui.configitems('keywordmaps')
- if args or opts.get('rcfile'):
+ if args or opts.get(r'rcfile'):
ui.status(_('\n\tconfiguration using custom keyword template maps\n'))
if uikwmaps:
ui.status(_('\textending current template maps\n'))
- if opts.get('default') or not uikwmaps:
+ if opts.get(r'default') or not uikwmaps:
if svn:
ui.status(_('\toverriding default svn keywordset\n'))
else:
ui.status(_('\toverriding default cvs keywordset\n'))
- if opts.get('rcfile'):
+ if opts.get(r'rcfile'):
ui.readconfig(opts.get('rcfile'))
if args:
# simulate hgrc parsing
@@ -453,7 +455,7 @@
repo.vfs.write('hgrc', rcmaps)
ui.readconfig(repo.vfs.join('hgrc'))
kwmaps = dict(ui.configitems('keywordmaps'))
- elif opts.get('default'):
+ elif opts.get(r'default'):
if svn:
ui.status(_('\n\tconfiguration using default svn keywordset\n'))
else:
@@ -543,6 +545,7 @@
else:
cwd = ''
files = []
+ opts = pycompat.byteskwargs(opts)
if not opts.get('unknown') or opts.get('all'):
files = sorted(status.modified + status.added + status.clean)
kwfiles = kwt.iskwfile(files, wctx)
diff -r 87676e8ee056 -r 27b6df1b5adb hgext/largefiles/lfcommands.py
--- a/hgext/largefiles/lfcommands.py Mon Jan 08 16:07:51 2018 -0800
+++ b/hgext/largefiles/lfcommands.py Mon Jan 22 17:53:02 2018 -0500
@@ -24,6 +24,7 @@
lock,
match as matchmod,
node,
+ pycompat,
registrar,
scmutil,
util,
@@ -74,6 +75,7 @@
Use --to-normal to convert largefiles back to normal files; after
this, the DEST repository can be used without largefiles at all.'''
+ opts = pycompat.byteskwargs(opts)
if opts['to_normal']:
tolfile = False
else:
@@ -177,7 +179,7 @@
convcmd.converter = converter
try:
- convcmd.convert(ui, src, dest)
+ convcmd.convert(ui, src, dest, source_type='hg', dest_type='hg')
finally:
convcmd.converter = orig
success = True
@@ -259,7 +261,8 @@
# doesn't change after rename or copy
renamed = lfutil.standin(renamed[0])
- return context.memfilectx(repo, f, lfiletohash[srcfname] + '\n',
+ return context.memfilectx(repo, memctx, f,
+ lfiletohash[srcfname] + '\n',
'l' in fctx.flags(), 'x' in fctx.flags(),
renamed)
else:
@@ -311,7 +314,7 @@
data = fctx.data()
if f == '.hgtags':
data = _converttags (repo.ui, revmap, data)
- return context.memfilectx(repo, f, data, 'l' in fctx.flags(),
+ return context.memfilectx(repo, ctx, f, data, 'l' in fctx.flags(),
'x' in fctx.flags(), renamed)
# Remap tag data using a revision map
@@ -579,7 +582,7 @@
"""
repo.lfpullsource = source
- revs = opts.get('rev', [])
+ revs = opts.get(r'rev', [])
if not revs:
raise error.Abort(_('no revisions specified'))
revs = scmutil.revrange(repo, revs)
@@ -590,3 +593,12 @@
(cached, missing) = cachelfiles(ui, repo, rev)
numcached += len(cached)
ui.status(_("%d largefiles cached\n") % numcached)
+
+@command('debuglfput',
+ [] + cmdutil.remoteopts,
+ _('FILE'))
+def debuglfput(ui, repo, filepath, **kwargs):
+ hash = lfutil.hashfile(filepath)
+ storefactory.openstore(repo).put(filepath, hash)
+ ui.write('%s\n' % hash)
+ return 0
diff -r 87676e8ee056 -r 27b6df1b5adb hgext/largefiles/lfutil.py
--- a/hgext/largefiles/lfutil.py Mon Jan 08 16:07:51 2018 -0800
+++ b/hgext/largefiles/lfutil.py Mon Jan 22 17:53:02 2018 -0500
@@ -69,31 +69,31 @@
to preserve download bandwidth and storage space.'''
return os.path.join(_usercachedir(ui), hash)
-def _usercachedir(ui):
+def _usercachedir(ui, name=longname):
'''Return the location of the "global" largefiles cache.'''
- path = ui.configpath(longname, 'usercache')
+ path = ui.configpath(name, 'usercache')
if path:
return path
if pycompat.iswindows:
appdata = encoding.environ.get('LOCALAPPDATA',\
encoding.environ.get('APPDATA'))
if appdata:
- return os.path.join(appdata, longname)
+ return os.path.join(appdata, name)
elif pycompat.isdarwin:
home = encoding.environ.get('HOME')
if home:
- return os.path.join(home, 'Library', 'Caches', longname)
+ return os.path.join(home, 'Library', 'Caches', name)
elif pycompat.isposix:
path = encoding.environ.get('XDG_CACHE_HOME')
if path:
- return os.path.join(path, longname)
+ return os.path.join(path, name)
home = encoding.environ.get('HOME')
if home:
- return os.path.join(home, '.cache', longname)
+ return os.path.join(home, '.cache', name)
else:
raise error.Abort(_('unknown operating system: %s\n')
% pycompat.osname)
- raise error.Abort(_('unknown %s usercache location') % longname)
+ raise error.Abort(_('unknown %s usercache location') % name)
def inusercache(ui, hash):
path = usercachepath(ui, hash)
diff -r 87676e8ee056 -r 27b6df1b5adb hgext/largefiles/overrides.py
--- a/hgext/largefiles/overrides.py Mon Jan 08 16:07:51 2018 -0800
+++ b/hgext/largefiles/overrides.py Mon Jan 22 17:53:02 2018 -0500
@@ -21,6 +21,7 @@
hg,
match as matchmod,
pathutil,
+ pycompat,
registrar,
scmutil,
smartset,
@@ -156,7 +157,7 @@
# Need to lock, otherwise there could be a race condition between
# when standins are created and added to the repo.
with repo.wlock():
- if not opts.get('dry_run'):
+ if not opts.get(r'dry_run'):
standins = []
lfdirstate = lfutil.openlfdirstate(ui, repo)
for f in lfnames:
@@ -177,7 +178,7 @@
return added, bad
def removelargefiles(ui, repo, isaddremove, matcher, **opts):
- after = opts.get('after')
+ after = opts.get(r'after')
m = composelargefilematcher(matcher, repo[None].manifest())
try:
repo.lfstatus = True
@@ -221,11 +222,11 @@
name = m.rel(f)
ui.status(_('removing %s\n') % name)
- if not opts.get('dry_run'):
+ if not opts.get(r'dry_run'):
if not after:
repo.wvfs.unlinkpath(f, ignoremissing=True)
- if opts.get('dry_run'):
+ if opts.get(r'dry_run'):
return result
remove = [lfutil.standin(f) for f in remove]
@@ -252,7 +253,7 @@
# -- Wrappers: modify existing commands --------------------------------
def overrideadd(orig, ui, repo, *pats, **opts):
- if opts.get('normal') and opts.get('large'):
+ if opts.get(r'normal') and opts.get(r'large'):
raise error.Abort(_('--normal cannot be used with --large'))
return orig(ui, repo, *pats, **opts)
@@ -403,9 +404,9 @@
setattr(cmdutil, '_makenofollowlogfilematcher', oldmakelogfilematcher)
def overrideverify(orig, ui, repo, *pats, **opts):
- large = opts.pop('large', False)
- all = opts.pop('lfa', False)
- contents = opts.pop('lfc', False)
+ large = opts.pop(r'large', False)
+ all = opts.pop(r'lfa', False)
+ contents = opts.pop(r'lfc', False)
result = orig(ui, repo, *pats, **opts)
if large or all or contents:
@@ -413,7 +414,7 @@
return result
def overridedebugstate(orig, ui, repo, *pats, **opts):
- large = opts.pop('large', False)
+ large = opts.pop(r'large', False)
if large:
class fakerepo(object):
dirstate = lfutil.openlfdirstate(ui, repo)
@@ -802,8 +803,8 @@
repo.lfpullsource = source
result = orig(ui, repo, source, **opts)
revspostpull = len(repo)
- lfrevs = opts.get('lfrev', [])
- if opts.get('all_largefiles'):
+ lfrevs = opts.get(r'lfrev', [])
+ if opts.get(r'all_largefiles'):
lfrevs.append('pulled()')
if lfrevs and revspostpull > revsprepull:
numcached = 0
@@ -820,7 +821,7 @@
def overridepush(orig, ui, repo, *args, **kwargs):
"""Override push command and store --lfrev parameters in opargs"""
- lfrevs = kwargs.pop('lfrev', None)
+ lfrevs = kwargs.pop(r'lfrev', None)
if lfrevs:
opargs = kwargs.setdefault('opargs', {})
opargs['lfrevs'] = scmutil.revrange(repo, lfrevs)
@@ -828,7 +829,7 @@
def exchangepushoperation(orig, *args, **kwargs):
"""Override pushoperation constructor and store lfrevs parameter"""
- lfrevs = kwargs.pop('lfrevs', None)
+ lfrevs = kwargs.pop(r'lfrevs', None)
pushop = orig(*args, **kwargs)
pushop.lfrevs = lfrevs
return pushop
@@ -865,7 +866,7 @@
d = dest
if d is None:
d = hg.defaultdest(source)
- if opts.get('all_largefiles') and not hg.islocal(d):
+ if opts.get(r'all_largefiles') and not hg.islocal(d):
raise error.Abort(_(
'--all-largefiles is incompatible with non-local destination %s') %
d)
@@ -887,13 +888,13 @@
# If largefiles is required for this repo, permanently enable it locally
if 'largefiles' in repo.requirements:
- with repo.vfs('hgrc', 'a', text=True) as fp:
- fp.write('\n[extensions]\nlargefiles=\n')
+ repo.vfs.append('hgrc',
+ util.tonativeeol('\n[extensions]\nlargefiles=\n'))
# Caching is implicitly limited to 'rev' option, since the dest repo was
# truncated at that point. The user may expect a download count with
# this option, so attempt whether or not this is a largefile repo.
- if opts.get('all_largefiles'):
+ if opts.get(r'all_largefiles'):
success, missing = lfcommands.downloadlfiles(ui, repo, None)
if missing != 0:
@@ -906,14 +907,14 @@
# If largefiles is required for this repo, permanently enable it locally
if 'largefiles' in destrepo.requirements:
- with destrepo.vfs('hgrc', 'a+', text=True) as fp:
- fp.write('\n[extensions]\nlargefiles=\n')
+ destrepo.vfs.append('hgrc',
+ util.tonativeeol('\n[extensions]\nlargefiles=\n'))
def overriderebase(orig, ui, repo, **opts):
if not util.safehasattr(repo, '_largefilesenabled'):
return orig(ui, repo, **opts)
- resuming = opts.get('continue')
+ resuming = opts.get(r'continue')
repo._lfcommithooks.append(lfutil.automatedcommithook(resuming))
repo._lfstatuswriters.append(lambda *msg, **opts: None)
try:
@@ -1272,6 +1273,7 @@
repo.status = overridestatus
orig(ui, repo, *dirs, **opts)
repo.status = oldstatus
+
def overriderollback(orig, ui, repo, **opts):
with repo.wlock():
before = repo.dirstate.parents()
@@ -1310,7 +1312,7 @@
return result
def overridetransplant(orig, ui, repo, *revs, **opts):
- resuming = opts.get('continue')
+ resuming = opts.get(r'continue')
repo._lfcommithooks.append(lfutil.automatedcommithook(resuming))
repo._lfstatuswriters.append(lambda *msg, **opts: None)
try:
@@ -1321,6 +1323,7 @@
return result
def overridecat(orig, ui, repo, file1, *pats, **opts):
+ opts = pycompat.byteskwargs(opts)
ctx = scmutil.revsingle(repo, opts.get('rev'))
err = 1
notbad = set()
@@ -1382,7 +1385,7 @@
def mergeupdate(orig, repo, node, branchmerge, force,
*args, **kwargs):
- matcher = kwargs.get('matcher', None)
+ matcher = kwargs.get(r'matcher', None)
# note if this is a partial update
partial = matcher and not matcher.always()
with repo.wlock():
@@ -1437,7 +1440,7 @@
# Make sure the merge runs on disk, not in-memory. largefiles is not a
# good candidate for in-memory merge (large files, custom dirstate,
# matcher usage).
- kwargs['wc'] = repo[None]
+ kwargs[r'wc'] = repo[None]
result = orig(repo, node, branchmerge, force, *args, **kwargs)
newstandins = lfutil.getstandinsstate(repo)
@@ -1470,3 +1473,20 @@
printmessage=False, normallookup=True)
return result
+
+def upgraderequirements(orig, repo):
+ reqs = orig(repo)
+ if 'largefiles' in repo.requirements:
+ reqs.add('largefiles')
+ return reqs
+
+_lfscheme = 'largefile://'
+def openlargefile(orig, ui, url_, data=None):
+ if url_.startswith(_lfscheme):
+ if data:
+ msg = "cannot use data on a 'largefile://' url"
+ raise error.ProgrammingError(msg)
+ lfid = url_[len(_lfscheme):]
+ return storefactory.getlfile(ui, lfid)
+ else:
+ return orig(ui, url_, data=data)
diff -r 87676e8ee056 -r 27b6df1b5adb hgext/largefiles/proto.py
--- a/hgext/largefiles/proto.py Mon Jan 08 16:07:51 2018 -0800
+++ b/hgext/largefiles/proto.py Mon Jan 22 17:53:02 2018 -0500
@@ -28,7 +28,6 @@
'file.\n')
# these will all be replaced by largefiles.uisetup
-capabilitiesorig = None
ssholdcallstream = None
httpoldcallstream = None
@@ -76,7 +75,7 @@
yield '%d\n' % length
for chunk in util.filechunkiter(f):
yield chunk
- return wireproto.streamres(gen=generator())
+ return wireproto.streamres_legacy(gen=generator())
def statlfile(repo, proto, sha):
'''Server command for checking if a largefile is present - returns '2\n' if
@@ -161,9 +160,11 @@
repo.__class__ = lfileswirerepository
# advertise the largefiles=serve capability
-def capabilities(repo, proto):
- '''Wrap server command to announce largefile server capability'''
- return capabilitiesorig(repo, proto) + ' largefiles=serve'
+def _capabilities(orig, repo, proto):
+ '''announce largefile server capability'''
+ caps = orig(repo, proto)
+ caps.append('largefiles=serve')
+ return caps
def heads(repo, proto):
'''Wrap server command - largefile capable clients will know to call
@@ -176,7 +177,7 @@
if cmd == 'heads' and self.capable('largefiles'):
cmd = 'lheads'
if cmd == 'batch' and self.capable('largefiles'):
- args['cmds'] = args['cmds'].replace('heads ', 'lheads ')
+ args[r'cmds'] = args[r'cmds'].replace('heads ', 'lheads ')
return ssholdcallstream(self, cmd, **args)
headsre = re.compile(r'(^|;)heads\b')
@@ -185,5 +186,5 @@
if cmd == 'heads' and self.capable('largefiles'):
cmd = 'lheads'
if cmd == 'batch' and self.capable('largefiles'):
- args['cmds'] = headsre.sub('lheads', args['cmds'])
+ args[r'cmds'] = headsre.sub('lheads', args[r'cmds'])
return httpoldcallstream(self, cmd, **args)
diff -r 87676e8ee056 -r 27b6df1b5adb hgext/largefiles/remotestore.py
--- a/hgext/largefiles/remotestore.py Mon Jan 08 16:07:51 2018 -0800
+++ b/hgext/largefiles/remotestore.py Mon Jan 22 17:53:02 2018 -0500
@@ -27,7 +27,9 @@
'''a largefile store accessed over a network'''
def __init__(self, ui, repo, url):
super(remotestore, self).__init__(ui, repo, url)
- self._lstore = localstore.localstore(self.ui, self.repo, self.repo)
+ self._lstore = None
+ if repo is not None:
+ self._lstore = localstore.localstore(self.ui, self.repo, self.repo)
def put(self, source, hash):
if self.sendfile(source, hash):
diff -r 87676e8ee056 -r 27b6df1b5adb hgext/largefiles/reposetup.py
--- a/hgext/largefiles/reposetup.py Mon Jan 08 16:07:51 2018 -0800
+++ b/hgext/largefiles/reposetup.py Mon Jan 22 17:53:02 2018 -0500
@@ -138,7 +138,7 @@
sf = lfutil.standin(f)
if sf in dirstate:
newfiles.append(sf)
- elif sf in dirstate.dirs():
+ elif dirstate.hasdir(sf):
# Directory entries could be regular or
# standin, check both
newfiles.extend((f, sf))
@@ -156,7 +156,7 @@
def sfindirstate(f):
sf = lfutil.standin(f)
dirstate = self.dirstate
- return sf in dirstate or sf in dirstate.dirs()
+ return sf in dirstate or dirstate.hasdir(sf)
match._files = [f for f in match._files
if sfindirstate(f)]
diff -r 87676e8ee056 -r 27b6df1b5adb hgext/largefiles/storefactory.py
--- a/hgext/largefiles/storefactory.py Mon Jan 08 16:07:51 2018 -0800
+++ b/hgext/largefiles/storefactory.py Mon Jan 22 17:53:02 2018 -0500
@@ -22,8 +22,9 @@
# During clone this function is passed the src's ui object
# but it needs the dest's ui object so it can read out of
# the config file. Use repo.ui instead.
-def openstore(repo, remote=None, put=False):
- ui = repo.ui
+def openstore(repo=None, remote=None, put=False, ui=None):
+ if ui is None:
+ ui = repo.ui
if not remote:
lfpullsource = getattr(repo, 'lfpullsource', None)
@@ -37,12 +38,16 @@
# ui.expandpath() leaves 'default-push' and 'default' alone if
# they cannot be expanded: fallback to the empty string,
# meaning the current directory.
- if path == 'default-push' or path == 'default':
+ if repo is None:
+ path = ui.expandpath('default')
+ path, _branches = hg.parseurl(path)
+ remote = hg.peer(repo or ui, {}, path)
+ elif path == 'default-push' or path == 'default':
path = ''
remote = repo
else:
path, _branches = hg.parseurl(path)
- remote = hg.peer(repo, {}, path)
+ remote = hg.peer(repo or ui, {}, path)
# The path could be a scheme so use Mercurial's normal functionality
# to resolve the scheme to a repository and use its path
@@ -76,3 +81,6 @@
}
_scheme_re = re.compile(r'^([a-zA-Z0-9+-.]+)://')
+
+def getlfile(ui, hash):
+ return util.chunkbuffer(openstore(ui=ui)._get(hash))
diff -r 87676e8ee056 -r 27b6df1b5adb hgext/largefiles/uisetup.py
--- a/hgext/largefiles/uisetup.py Mon Jan 08 16:07:51 2018 -0800
+++ b/hgext/largefiles/uisetup.py Mon Jan 22 17:53:02 2018 -0500
@@ -30,6 +30,8 @@
scmutil,
sshpeer,
subrepo,
+ upgrade,
+ url,
wireproto,
)
@@ -60,6 +62,12 @@
extensions.wrapfunction(copies, 'pathcopies', overrides.copiespathcopies)
+ extensions.wrapfunction(upgrade, 'preservedrequirements',
+ overrides.upgraderequirements)
+
+ extensions.wrapfunction(upgrade, 'supporteddestrequirements',
+ overrides.upgraderequirements)
+
# Subrepos call status function
entry = extensions.wrapcommand(commands.table, 'status',
overrides.overridestatus)
@@ -153,13 +161,15 @@
extensions.wrapfunction(scmutil, 'marktouched',
overrides.scmutilmarktouched)
+ extensions.wrapfunction(url, 'open',
+ overrides.openlargefile)
+
# create the new wireproto commands ...
wireproto.commands['putlfile'] = (proto.putlfile, 'sha')
wireproto.commands['getlfile'] = (proto.getlfile, 'sha')
wireproto.commands['statlfile'] = (proto.statlfile, 'sha')
# ... and wrap some existing ones
- wireproto.commands['capabilities'] = (proto.capabilities, '')
wireproto.commands['heads'] = (proto.heads, '')
wireproto.commands['lheads'] = (wireproto.heads, '')
@@ -171,10 +181,7 @@
extensions.wrapfunction(webcommands, 'decodepath', overrides.decodepath)
- # the hello wireproto command uses wireproto.capabilities, so it won't see
- # our largefiles capability unless we replace the actual function as well.
- proto.capabilitiesorig = wireproto.capabilities
- wireproto.capabilities = proto.capabilities
+ extensions.wrapfunction(wireproto, '_capabilities', proto._capabilities)
# can't do this in reposetup because it needs to have happened before
# wirerepo.__init__ is called
diff -r 87676e8ee056 -r 27b6df1b5adb hgext/lfs/__init__.py
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/hgext/lfs/__init__.py Mon Jan 22 17:53:02 2018 -0500
@@ -0,0 +1,387 @@
+# lfs - hash-preserving large file support using Git-LFS protocol
+#
+# Copyright 2017 Facebook, Inc.
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2 or any later version.
+
+"""lfs - large file support (EXPERIMENTAL)
+
+This extension allows large files to be tracked outside of the normal
+repository storage and stored on a centralized server, similar to the
+``largefiles`` extension. The ``git-lfs`` protocol is used when
+communicating with the server, so existing git infrastructure can be
+harnessed. Even though the files are stored outside of the repository,
+they are still integrity checked in the same manner as normal files.
+
+The files stored outside of the repository are downloaded on demand,
+which reduces the time to clone, and possibly the local disk usage.
+This changes fundamental workflows in a DVCS, so careful thought
+should be given before deploying it. :hg:`convert` can be used to
+convert LFS repositories to normal repositories that no longer
+require this extension, and do so without changing the commit hashes.
+This allows the extension to be disabled if the centralized workflow
+becomes burdensome. However, the pre and post convert clones will
+not be able to communicate with each other unless the extension is
+enabled on both.
+
+To start a new repository, or add new LFS files, just create and add
+an ``.hglfs`` file as described below. Because the file is tracked in
+the repository, all clones will use the same selection policy. During
+subsequent commits, Mercurial will consult this file to determine if
+an added or modified file should be stored externally. The type of
+storage depends on the characteristics of the file at each commit. A
+file that is near a size threshold may switch back and forth between
+LFS and normal storage, as needed.
+
+Alternately, both normal repositories and largefile controlled
+repositories can be converted to LFS by using :hg:`convert` and the
+``lfs.track`` config option described below. The ``.hglfs`` file
+should then be created and added, to control subsequent LFS selection.
+The hashes are also unchanged in this case. The LFS and non-LFS
+repositories can be distinguished because the LFS repository will
+abort any command if this extension is disabled.
+
+Committed LFS files are held locally, until the repository is pushed.
+Prior to pushing the normal repository data, the LFS files that are
+tracked by the outgoing commits are automatically uploaded to the
+configured central server. No LFS files are transferred on
+:hg:`pull` or :hg:`clone`. Instead, the files are downloaded on
+demand as they need to be read, if a cached copy cannot be found
+locally. Both committing and downloading an LFS file will link the
+file to a usercache, to speed up future access. See the `usercache`
+config setting described below.
+
+.hglfs::
+
+ The extension reads its configuration from a versioned ``.hglfs``
+ configuration file found in the root of the working directory. The
+ ``.hglfs`` file uses the same syntax as all other Mercurial
+ configuration files. It uses a single section, ``[track]``.
+
+ The ``[track]`` section specifies which files are stored as LFS (or
+ not). Each line is keyed by a file pattern, with a predicate value.
+ The first file pattern match is used, so put more specific patterns
+ first. The available predicates are ``all()``, ``none()``, and
+ ``size()``. See "hg help filesets.size" for the latter.
+
+ Example versioned ``.hglfs`` file::
+
+ [track]
+ # No Makefile or python file, anywhere, will be LFS
+ **Makefile = none()
+ **.py = none()
+
+ **.zip = all()
+ **.exe = size(">1MB")
+
+ # Catchall for everything not matched above
+ ** = size(">10MB")
+
+Configs::
+
+ [lfs]
+ # Remote endpoint. Multiple protocols are supported:
+ # - http(s)://user:pass@example.com/path
+ # git-lfs endpoint
+ # - file:///tmp/path
+ # local filesystem, usually for testing
+ # if unset, lfs will prompt setting this when it must use this value.
+ # (default: unset)
+ url = https://example.com/repo.git/info/lfs
+
+ # Which files to track in LFS. Path tests are "**.extname" for file
+ # extensions, and "path:under/some/directory" for path prefix. Both
+ # are relative to the repository root.
+ # File size can be tested with the "size()" fileset, and tests can be
+ # joined with fileset operators. (See "hg help filesets.operators".)
+ #
+ # Some examples:
+ # - all() # everything
+ # - none() # nothing
+ # - size(">20MB") # larger than 20MB
+ # - !**.txt # anything not a *.txt file
+ # - **.zip | **.tar.gz | **.7z # some types of compressed files
+ # - path:bin # files under "bin" in the project root
+ # - (**.php & size(">2MB")) | (**.js & size(">5MB")) | **.tar.gz
+ # | (path:bin & !path:/bin/README) | size(">1GB")
+ # (default: none())
+ #
+ # This is ignored if there is a tracked '.hglfs' file, and this setting
+ # will eventually be deprecated and removed.
+ track = size(">10M")
+
+ # how many times to retry before giving up on transferring an object
+ retry = 5
+
+ # the local directory to store lfs files for sharing across local clones.
+ # If not set, the cache is located in an OS specific cache location.
+ usercache = /path/to/global/cache
+"""
+
+from __future__ import absolute_import
+
+from mercurial.i18n import _
+
+from mercurial import (
+ bundle2,
+ changegroup,
+ cmdutil,
+ config,
+ context,
+ error,
+ exchange,
+ extensions,
+ filelog,
+ fileset,
+ hg,
+ localrepo,
+ minifileset,
+ node,
+ pycompat,
+ registrar,
+ revlog,
+ scmutil,
+ templatekw,
+ upgrade,
+ util,
+ vfs as vfsmod,
+ wireproto,
+)
+
+from . import (
+ blobstore,
+ wrapper,
+)
+
+# Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for
+# extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
+# be specifying the version(s) of Mercurial they are tested with, or
+# leave the attribute unspecified.
+testedwith = 'ships-with-hg-core'
+
+configtable = {}
+configitem = registrar.configitem(configtable)
+
+configitem('experimental', 'lfs.user-agent',
+ default=None,
+)
+configitem('experimental', 'lfs.worker-enable',
+ default=False,
+)
+
+configitem('lfs', 'url',
+ default=None,
+)
+configitem('lfs', 'usercache',
+ default=None,
+)
+# Deprecated
+configitem('lfs', 'threshold',
+ default=None,
+)
+configitem('lfs', 'track',
+ default='none()',
+)
+configitem('lfs', 'retry',
+ default=5,
+)
+
+cmdtable = {}
+command = registrar.command(cmdtable)
+
+templatekeyword = registrar.templatekeyword()
+
+def featuresetup(ui, supported):
+ # don't die on seeing a repo with the lfs requirement
+ supported |= {'lfs'}
+
+def uisetup(ui):
+ localrepo.localrepository.featuresetupfuncs.add(featuresetup)
+
+def reposetup(ui, repo):
+ # Nothing to do with a remote repo
+ if not repo.local():
+ return
+
+ repo.svfs.lfslocalblobstore = blobstore.local(repo)
+ repo.svfs.lfsremoteblobstore = blobstore.remote(repo)
+
+ class lfsrepo(repo.__class__):
+ @localrepo.unfilteredmethod
+ def commitctx(self, ctx, error=False):
+ repo.svfs.options['lfstrack'] = _trackedmatcher(self, ctx)
+ return super(lfsrepo, self).commitctx(ctx, error)
+
+ repo.__class__ = lfsrepo
+
+ if 'lfs' not in repo.requirements:
+ def checkrequireslfs(ui, repo, **kwargs):
+ if 'lfs' not in repo.requirements:
+ last = kwargs.get('node_last')
+ _bin = node.bin
+ if last:
+ s = repo.set('%n:%n', _bin(kwargs['node']), _bin(last))
+ else:
+ s = repo.set('%n', _bin(kwargs['node']))
+ for ctx in s:
+ # TODO: is there a way to just walk the files in the commit?
+ if any(ctx[f].islfs() for f in ctx.files() if f in ctx):
+ repo.requirements.add('lfs')
+ repo._writerequirements()
+ repo.prepushoutgoinghooks.add('lfs', wrapper.prepush)
+ break
+
+ ui.setconfig('hooks', 'commit.lfs', checkrequireslfs, 'lfs')
+ ui.setconfig('hooks', 'pretxnchangegroup.lfs', checkrequireslfs, 'lfs')
+ else:
+ repo.prepushoutgoinghooks.add('lfs', wrapper.prepush)
+
+def _trackedmatcher(repo, ctx):
+ """Return a function (path, size) -> bool indicating whether or not to
+ track a given file with lfs."""
+ data = ''
+
+ if '.hglfs' in ctx.added() or '.hglfs' in ctx.modified():
+ data = ctx['.hglfs'].data()
+ elif '.hglfs' not in ctx.removed():
+ p1 = repo['.']
+
+ if '.hglfs' not in p1:
+ # No '.hglfs' in wdir or in parent. Fallback to config
+ # for now.
+ trackspec = repo.ui.config('lfs', 'track')
+
+ # deprecated config: lfs.threshold
+ threshold = repo.ui.configbytes('lfs', 'threshold')
+ if threshold:
+ fileset.parse(trackspec) # make sure syntax errors are confined
+ trackspec = "(%s) | size('>%d')" % (trackspec, threshold)
+
+ return minifileset.compile(trackspec)
+
+ data = p1['.hglfs'].data()
+
+ # In removed, or not in parent
+ if not data:
+ return lambda p, s: False
+
+ # Parse errors here will abort with a message that points to the .hglfs file
+ # and line number.
+ cfg = config.config()
+ cfg.parse('.hglfs', data)
+
+ try:
+ rules = [(minifileset.compile(pattern), minifileset.compile(rule))
+ for pattern, rule in cfg.items('track')]
+ except error.ParseError as e:
+ # The original exception gives no indicator that the error is in the
+ # .hglfs file, so add that.
+
+ # TODO: See if the line number of the file can be made available.
+ raise error.Abort(_('parse error in .hglfs: %s') % e)
+
+ def _match(path, size):
+ for pat, rule in rules:
+ if pat(path, size):
+ return rule(path, size)
+
+ return False
+
+ return _match
+
+def wrapfilelog(filelog):
+ wrapfunction = extensions.wrapfunction
+
+ wrapfunction(filelog, 'addrevision', wrapper.filelogaddrevision)
+ wrapfunction(filelog, 'renamed', wrapper.filelogrenamed)
+ wrapfunction(filelog, 'size', wrapper.filelogsize)
+
+def extsetup(ui):
+ wrapfilelog(filelog.filelog)
+
+ wrapfunction = extensions.wrapfunction
+
+ wrapfunction(cmdutil, '_updatecatformatter', wrapper._updatecatformatter)
+ wrapfunction(scmutil, 'wrapconvertsink', wrapper.convertsink)
+
+ wrapfunction(upgrade, '_finishdatamigration',
+ wrapper.upgradefinishdatamigration)
+
+ wrapfunction(upgrade, 'preservedrequirements',
+ wrapper.upgraderequirements)
+
+ wrapfunction(upgrade, 'supporteddestrequirements',
+ wrapper.upgraderequirements)
+
+ wrapfunction(changegroup,
+ 'supportedoutgoingversions',
+ wrapper.supportedoutgoingversions)
+ wrapfunction(changegroup,
+ 'allsupportedversions',
+ wrapper.allsupportedversions)
+
+ wrapfunction(exchange, 'push', wrapper.push)
+ wrapfunction(wireproto, '_capabilities', wrapper._capabilities)
+
+ wrapfunction(context.basefilectx, 'cmp', wrapper.filectxcmp)
+ wrapfunction(context.basefilectx, 'isbinary', wrapper.filectxisbinary)
+ context.basefilectx.islfs = wrapper.filectxislfs
+
+ revlog.addflagprocessor(
+ revlog.REVIDX_EXTSTORED,
+ (
+ wrapper.readfromstore,
+ wrapper.writetostore,
+ wrapper.bypasscheckhash,
+ ),
+ )
+
+ wrapfunction(hg, 'clone', wrapper.hgclone)
+ wrapfunction(hg, 'postshare', wrapper.hgpostshare)
+
+ # Make bundle choose changegroup3 instead of changegroup2. This affects
+ # "hg bundle" command. Note: it does not cover all bundle formats like
+ # "packed1". Using "packed1" with lfs will likely cause trouble.
+ names = [k for k, v in exchange._bundlespeccgversions.items() if v == '02']
+ for k in names:
+ exchange._bundlespeccgversions[k] = '03'
+
+ # bundlerepo uses "vfsmod.readonlyvfs(othervfs)", we need to make sure lfs
+ # options and blob stores are passed from othervfs to the new readonlyvfs.
+ wrapfunction(vfsmod.readonlyvfs, '__init__', wrapper.vfsinit)
+
+ # when writing a bundle via "hg bundle" command, upload related LFS blobs
+ wrapfunction(bundle2, 'writenewbundle', wrapper.writenewbundle)
+
+@templatekeyword('lfs_files')
+def lfsfiles(repo, ctx, **args):
+ """List of strings. LFS files added or modified by the changeset."""
+ args = pycompat.byteskwargs(args)
+
+ pointers = wrapper.pointersfromctx(ctx) # {path: pointer}
+ files = sorted(pointers.keys())
+
+ def pointer(v):
+ # In the file spec, version is first and the other keys are sorted.
+ sortkeyfunc = lambda x: (x[0] != 'version', x)
+ items = sorted(pointers[v].iteritems(), key=sortkeyfunc)
+ return util.sortdict(items)
+
+ makemap = lambda v: {
+ 'file': v,
+ 'oid': pointers[v].oid(),
+ 'pointer': templatekw.hybriddict(pointer(v)),
+ }
+
+ # TODO: make the separator ', '?
+ f = templatekw._showlist('lfs_file', files, args)
+ return templatekw._hybrid(f, files, makemap, pycompat.identity)
+
+@command('debuglfsupload',
+ [('r', 'rev', [], _('upload large files introduced by REV'))])
+def debuglfsupload(ui, repo, **opts):
+ """upload lfs blobs added by the working copy parent or given revisions"""
+ revs = opts.get('rev', [])
+ pointers = wrapper.extractpointers(repo, scmutil.revrange(repo, revs))
+ wrapper.uploadblobs(repo, pointers)
diff -r 87676e8ee056 -r 27b6df1b5adb hgext/lfs/blobstore.py
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/hgext/lfs/blobstore.py Mon Jan 22 17:53:02 2018 -0500
@@ -0,0 +1,463 @@
+# blobstore.py - local and remote (speaking Git-LFS protocol) blob storages
+#
+# Copyright 2017 Facebook, Inc.
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2 or any later version.
+
+from __future__ import absolute_import
+
+import hashlib
+import json
+import os
+import re
+import socket
+
+from mercurial.i18n import _
+
+from mercurial import (
+ error,
+ pathutil,
+ url as urlmod,
+ util,
+ vfs as vfsmod,
+ worker,
+)
+
+from ..largefiles import lfutil
+
+# 64 bytes for SHA256
+_lfsre = re.compile(r'\A[a-f0-9]{64}\Z')
+
+class lfsvfs(vfsmod.vfs):
+ def join(self, path):
+ """split the path at first two characters, like: XX/XXXXX..."""
+ if not _lfsre.match(path):
+ raise error.ProgrammingError('unexpected lfs path: %s' % path)
+ return super(lfsvfs, self).join(path[0:2], path[2:])
+
+ def walk(self, path=None, onerror=None):
+ """Yield (dirpath, [], oids) tuple for blobs under path
+
+ Oids only exist in the root of this vfs, so dirpath is always ''.
+ """
+ root = os.path.normpath(self.base)
+ # when dirpath == root, dirpath[prefixlen:] becomes empty
+ # because len(dirpath) < prefixlen.
+ prefixlen = len(pathutil.normasprefix(root))
+ oids = []
+
+ for dirpath, dirs, files in os.walk(self.reljoin(self.base, path or ''),
+ onerror=onerror):
+ dirpath = dirpath[prefixlen:]
+
+ # Silently skip unexpected files and directories
+ if len(dirpath) == 2:
+ oids.extend([dirpath + f for f in files
+ if _lfsre.match(dirpath + f)])
+
+ yield ('', [], oids)
+
+class filewithprogress(object):
+ """a file-like object that supports __len__ and read.
+
+ Useful to provide progress information for how many bytes are read.
+ """
+
+ def __init__(self, fp, callback):
+ self._fp = fp
+ self._callback = callback # func(readsize)
+ fp.seek(0, os.SEEK_END)
+ self._len = fp.tell()
+ fp.seek(0)
+
+ def __len__(self):
+ return self._len
+
+ def read(self, size):
+ if self._fp is None:
+ return b''
+ data = self._fp.read(size)
+ if data:
+ if self._callback:
+ self._callback(len(data))
+ else:
+ self._fp.close()
+ self._fp = None
+ return data
+
+class local(object):
+ """Local blobstore for large file contents.
+
+ This blobstore is used both as a cache and as a staging area for large blobs
+ to be uploaded to the remote blobstore.
+ """
+
+ def __init__(self, repo):
+ fullpath = repo.svfs.join('lfs/objects')
+ self.vfs = lfsvfs(fullpath)
+ usercache = lfutil._usercachedir(repo.ui, 'lfs')
+ self.cachevfs = lfsvfs(usercache)
+ self.ui = repo.ui
+
+ def open(self, oid):
+ """Open a read-only file descriptor to the named blob, in either the
+ usercache or the local store."""
+ # The usercache is the most likely place to hold the file. Commit will
+ # write to both it and the local store, as will anything that downloads
+ # the blobs. However, things like clone without an update won't
+ # populate the local store. For an init + push of a local clone,
+ # the usercache is the only place it _could_ be. If not present, the
+ # missing file msg here will indicate the local repo, not the usercache.
+ if self.cachevfs.exists(oid):
+ return self.cachevfs(oid, 'rb')
+
+ return self.vfs(oid, 'rb')
+
+ def download(self, oid, src):
+ """Read the blob from the remote source in chunks, verify the content,
+ and write to this local blobstore."""
+ sha256 = hashlib.sha256()
+
+ with self.vfs(oid, 'wb', atomictemp=True) as fp:
+ for chunk in util.filechunkiter(src, size=1048576):
+ fp.write(chunk)
+ sha256.update(chunk)
+
+ realoid = sha256.hexdigest()
+ if realoid != oid:
+ raise error.Abort(_('corrupt remote lfs object: %s') % oid)
+
+ # XXX: should we verify the content of the cache, and hardlink back to
+ # the local store on success, but truncate, write and link on failure?
+ if not self.cachevfs.exists(oid):
+ self.ui.note(_('lfs: adding %s to the usercache\n') % oid)
+ lfutil.link(self.vfs.join(oid), self.cachevfs.join(oid))
+
+ def write(self, oid, data):
+ """Write blob to local blobstore.
+
+ This should only be called from the filelog during a commit or similar.
+ As such, there is no need to verify the data. Imports from a remote
+ store must use ``download()`` instead."""
+ with self.vfs(oid, 'wb', atomictemp=True) as fp:
+ fp.write(data)
+
+ # XXX: should we verify the content of the cache, and hardlink back to
+ # the local store on success, but truncate, write and link on failure?
+ if not self.cachevfs.exists(oid):
+ self.ui.note(_('lfs: adding %s to the usercache\n') % oid)
+ lfutil.link(self.vfs.join(oid), self.cachevfs.join(oid))
+
+ def read(self, oid, verify=True):
+ """Read blob from local blobstore."""
+ if not self.vfs.exists(oid):
+ blob = self._read(self.cachevfs, oid, verify)
+
+ # Even if revlog will verify the content, it needs to be verified
+ # now before making the hardlink to avoid propagating corrupt blobs.
+ # Don't abort if corruption is detected, because `hg verify` will
+ # give more useful info about the corruption- simply don't add the
+ # hardlink.
+ if verify or hashlib.sha256(blob).hexdigest() == oid:
+ self.ui.note(_('lfs: found %s in the usercache\n') % oid)
+ lfutil.link(self.cachevfs.join(oid), self.vfs.join(oid))
+ else:
+ self.ui.note(_('lfs: found %s in the local lfs store\n') % oid)
+ blob = self._read(self.vfs, oid, verify)
+ return blob
+
+ def _read(self, vfs, oid, verify):
+ """Read blob (after verifying) from the given store"""
+ blob = vfs.read(oid)
+ if verify:
+ _verify(oid, blob)
+ return blob
+
+ def has(self, oid):
+ """Returns True if the local blobstore contains the requested blob,
+ False otherwise."""
+ return self.cachevfs.exists(oid) or self.vfs.exists(oid)
+
+class _gitlfsremote(object):
+
+ def __init__(self, repo, url):
+ ui = repo.ui
+ self.ui = ui
+ baseurl, authinfo = url.authinfo()
+ self.baseurl = baseurl.rstrip('/')
+ useragent = repo.ui.config('experimental', 'lfs.user-agent')
+ if not useragent:
+ useragent = 'git-lfs/2.3.4 (Mercurial %s)' % util.version()
+ self.urlopener = urlmod.opener(ui, authinfo, useragent)
+ self.retry = ui.configint('lfs', 'retry')
+
+ def writebatch(self, pointers, fromstore):
+ """Batch upload from local to remote blobstore."""
+ self._batch(pointers, fromstore, 'upload')
+
+ def readbatch(self, pointers, tostore):
+ """Batch download from remote to local blobstore."""
+ self._batch(pointers, tostore, 'download')
+
+ def _batchrequest(self, pointers, action):
+ """Get metadata about objects pointed by pointers for given action
+
+ Return decoded JSON object like {'objects': [{'oid': '', 'size': 1}]}
+ See https://github.com/git-lfs/git-lfs/blob/master/docs/api/batch.md
+ """
+ objects = [{'oid': p.oid(), 'size': p.size()} for p in pointers]
+ requestdata = json.dumps({
+ 'objects': objects,
+ 'operation': action,
+ })
+ batchreq = util.urlreq.request('%s/objects/batch' % self.baseurl,
+ data=requestdata)
+ batchreq.add_header('Accept', 'application/vnd.git-lfs+json')
+ batchreq.add_header('Content-Type', 'application/vnd.git-lfs+json')
+ try:
+ rawjson = self.urlopener.open(batchreq).read()
+ except util.urlerr.httperror as ex:
+ raise LfsRemoteError(_('LFS HTTP error: %s (action=%s)')
+ % (ex, action))
+ try:
+ response = json.loads(rawjson)
+ except ValueError:
+ raise LfsRemoteError(_('LFS server returns invalid JSON: %s')
+ % rawjson)
+ return response
+
+ def _checkforservererror(self, pointers, responses, action):
+ """Scans errors from objects
+
+ Raises LfsRemoteError if any objects have an error"""
+ for response in responses:
+ # The server should return 404 when objects cannot be found. Some
+ # server implementations (e.g. lfs-test-server) do not set "error"
+ # but just removes "download" from "actions". Treat that case
+ # as the same as 404 error.
+ notfound = (response.get('error', {}).get('code') == 404
+ or (action == 'download'
+ and action not in response.get('actions', [])))
+ if notfound:
+ ptrmap = {p.oid(): p for p in pointers}
+ p = ptrmap.get(response['oid'], None)
+ if p:
+ filename = getattr(p, 'filename', 'unknown')
+ raise LfsRemoteError(
+ _(('LFS server error. Remote object '
+ 'for "%s" not found: %r')) % (filename, response))
+ else:
+ raise LfsRemoteError(
+ _('LFS server error. Unsolicited response for oid %s')
+ % response['oid'])
+ if 'error' in response:
+ raise LfsRemoteError(_('LFS server error: %r') % response)
+
+ def _extractobjects(self, response, pointers, action):
+ """extract objects from response of the batch API
+
+ response: parsed JSON object returned by batch API
+ return response['objects'] filtered by action
+ raise if any object has an error
+ """
+ # Scan errors from objects - fail early
+ objects = response.get('objects', [])
+ self._checkforservererror(pointers, objects, action)
+
+ # Filter objects with given action. Practically, this skips uploading
+ # objects which exist in the server.
+ filteredobjects = [o for o in objects if action in o.get('actions', [])]
+
+ return filteredobjects
+
+ def _basictransfer(self, obj, action, localstore):
+ """Download or upload a single object using basic transfer protocol
+
+ obj: dict, an object description returned by batch API
+ action: string, one of ['upload', 'download']
+ localstore: blobstore.local
+
+ See https://github.com/git-lfs/git-lfs/blob/master/docs/api/\
+ basic-transfers.md
+ """
+ oid = str(obj['oid'])
+
+ href = str(obj['actions'][action].get('href'))
+ headers = obj['actions'][action].get('header', {}).items()
+
+ request = util.urlreq.request(href)
+ if action == 'upload':
+ # If uploading blobs, read data from local blobstore.
+ with localstore.open(oid) as fp:
+ _verifyfile(oid, fp)
+ request.data = filewithprogress(localstore.open(oid), None)
+ request.get_method = lambda: 'PUT'
+
+ for k, v in headers:
+ request.add_header(k, v)
+
+ response = b''
+ try:
+ req = self.urlopener.open(request)
+ if action == 'download':
+ # If downloading blobs, store downloaded data to local blobstore
+ localstore.download(oid, req)
+ else:
+ while True:
+ data = req.read(1048576)
+ if not data:
+ break
+ response += data
+ if response:
+ self.ui.debug('lfs %s response: %s' % (action, response))
+ except util.urlerr.httperror as ex:
+ if self.ui.debugflag:
+ self.ui.debug('%s: %s\n' % (oid, ex.read()))
+ raise LfsRemoteError(_('HTTP error: %s (oid=%s, action=%s)')
+ % (ex, oid, action))
+
+ def _batch(self, pointers, localstore, action):
+ if action not in ['upload', 'download']:
+ raise error.ProgrammingError('invalid Git-LFS action: %s' % action)
+
+ response = self._batchrequest(pointers, action)
+ objects = self._extractobjects(response, pointers, action)
+ total = sum(x.get('size', 0) for x in objects)
+ sizes = {}
+ for obj in objects:
+ sizes[obj.get('oid')] = obj.get('size', 0)
+ topic = {'upload': _('lfs uploading'),
+ 'download': _('lfs downloading')}[action]
+ if len(objects) > 1:
+ self.ui.note(_('lfs: need to transfer %d objects (%s)\n')
+ % (len(objects), util.bytecount(total)))
+ self.ui.progress(topic, 0, total=total)
+ def transfer(chunk):
+ for obj in chunk:
+ objsize = obj.get('size', 0)
+ if self.ui.verbose:
+ if action == 'download':
+ msg = _('lfs: downloading %s (%s)\n')
+ elif action == 'upload':
+ msg = _('lfs: uploading %s (%s)\n')
+ self.ui.note(msg % (obj.get('oid'),
+ util.bytecount(objsize)))
+ retry = self.retry
+ while True:
+ try:
+ self._basictransfer(obj, action, localstore)
+ yield 1, obj.get('oid')
+ break
+ except socket.error as ex:
+ if retry > 0:
+ self.ui.note(
+ _('lfs: failed: %r (remaining retry %d)\n')
+ % (ex, retry))
+ retry -= 1
+ continue
+ raise
+
+ # Until https multiplexing gets sorted out
+ if self.ui.configbool('experimental', 'lfs.worker-enable'):
+ oids = worker.worker(self.ui, 0.1, transfer, (),
+ sorted(objects, key=lambda o: o.get('oid')))
+ else:
+ oids = transfer(sorted(objects, key=lambda o: o.get('oid')))
+
+ processed = 0
+ for _one, oid in oids:
+ processed += sizes[oid]
+ self.ui.progress(topic, processed, total=total)
+ self.ui.note(_('lfs: processed: %s\n') % oid)
+ self.ui.progress(topic, pos=None, total=total)
+
+ def __del__(self):
+ # copied from mercurial/httppeer.py
+ urlopener = getattr(self, 'urlopener', None)
+ if urlopener:
+ for h in urlopener.handlers:
+ h.close()
+ getattr(h, "close_all", lambda : None)()
+
+class _dummyremote(object):
+ """Dummy store storing blobs to temp directory."""
+
+ def __init__(self, repo, url):
+ fullpath = repo.vfs.join('lfs', url.path)
+ self.vfs = lfsvfs(fullpath)
+
+ def writebatch(self, pointers, fromstore):
+ for p in pointers:
+ content = fromstore.read(p.oid(), verify=True)
+ with self.vfs(p.oid(), 'wb', atomictemp=True) as fp:
+ fp.write(content)
+
+ def readbatch(self, pointers, tostore):
+ for p in pointers:
+ with self.vfs(p.oid(), 'rb') as fp:
+ tostore.download(p.oid(), fp)
+
+class _nullremote(object):
+ """Null store storing blobs to /dev/null."""
+
+ def __init__(self, repo, url):
+ pass
+
+ def writebatch(self, pointers, fromstore):
+ pass
+
+ def readbatch(self, pointers, tostore):
+ pass
+
+class _promptremote(object):
+ """Prompt user to set lfs.url when accessed."""
+
+ def __init__(self, repo, url):
+ pass
+
+ def writebatch(self, pointers, fromstore, ui=None):
+ self._prompt()
+
+ def readbatch(self, pointers, tostore, ui=None):
+ self._prompt()
+
+ def _prompt(self):
+ raise error.Abort(_('lfs.url needs to be configured'))
+
+_storemap = {
+ 'https': _gitlfsremote,
+ 'http': _gitlfsremote,
+ 'file': _dummyremote,
+ 'null': _nullremote,
+ None: _promptremote,
+}
+
+def _verify(oid, content):
+ realoid = hashlib.sha256(content).hexdigest()
+ if realoid != oid:
+ raise error.Abort(_('detected corrupt lfs object: %s') % oid,
+ hint=_('run hg verify'))
+
+def _verifyfile(oid, fp):
+ sha256 = hashlib.sha256()
+ while True:
+ data = fp.read(1024 * 1024)
+ if not data:
+ break
+ sha256.update(data)
+ realoid = sha256.hexdigest()
+ if realoid != oid:
+ raise error.Abort(_('detected corrupt lfs object: %s') % oid,
+ hint=_('run hg verify'))
+
+def remote(repo):
+ """remotestore factory. return a store in _storemap depending on config"""
+ url = util.url(repo.ui.config('lfs', 'url') or '')
+ scheme = url.scheme
+ if scheme not in _storemap:
+ raise error.Abort(_('lfs: unknown url scheme: %s') % scheme)
+ return _storemap[scheme](repo, url)
+
+class LfsRemoteError(error.RevlogError):
+ pass
diff -r 87676e8ee056 -r 27b6df1b5adb hgext/lfs/pointer.py
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/hgext/lfs/pointer.py Mon Jan 22 17:53:02 2018 -0500
@@ -0,0 +1,73 @@
+# pointer.py - Git-LFS pointer serialization
+#
+# Copyright 2017 Facebook, Inc.
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2 or any later version.
+
+from __future__ import absolute_import
+
+import re
+
+from mercurial.i18n import _
+
+from mercurial import (
+ error,
+)
+
+class InvalidPointer(error.RevlogError):
+ pass
+
+class gitlfspointer(dict):
+ VERSION = 'https://git-lfs.github.com/spec/v1'
+
+ def __init__(self, *args, **kwargs):
+ self['version'] = self.VERSION
+ super(gitlfspointer, self).__init__(*args, **kwargs)
+
+ @classmethod
+ def deserialize(cls, text):
+ try:
+ return cls(l.split(' ', 1) for l in text.splitlines()).validate()
+ except ValueError: # l.split returns 1 item instead of 2
+ raise InvalidPointer(_('cannot parse git-lfs text: %r') % text)
+
+ def serialize(self):
+ sortkeyfunc = lambda x: (x[0] != 'version', x)
+ items = sorted(self.validate().iteritems(), key=sortkeyfunc)
+ return ''.join('%s %s\n' % (k, v) for k, v in items)
+
+ def oid(self):
+ return self['oid'].split(':')[-1]
+
+ def size(self):
+ return int(self['size'])
+
+ # regular expressions used by _validate
+ # see https://github.com/git-lfs/git-lfs/blob/master/docs/spec.md
+ _keyre = re.compile(r'\A[a-z0-9.-]+\Z')
+ _valuere = re.compile(r'\A[^\n]*\Z')
+ _requiredre = {
+ 'size': re.compile(r'\A[0-9]+\Z'),
+ 'oid': re.compile(r'\Asha256:[0-9a-f]{64}\Z'),
+ 'version': re.compile(r'\A%s\Z' % re.escape(VERSION)),
+ }
+
+ def validate(self):
+ """raise InvalidPointer on error. return self if there is no error"""
+ requiredcount = 0
+ for k, v in self.iteritems():
+ if k in self._requiredre:
+ if not self._requiredre[k].match(v):
+ raise InvalidPointer(_('unexpected value: %s=%r') % (k, v))
+ requiredcount += 1
+ elif not self._keyre.match(k):
+ raise InvalidPointer(_('unexpected key: %s') % k)
+ if not self._valuere.match(v):
+ raise InvalidPointer(_('unexpected value: %s=%r') % (k, v))
+ if len(self._requiredre) != requiredcount:
+ miss = sorted(set(self._requiredre.keys()).difference(self.keys()))
+ raise InvalidPointer(_('missed keys: %s') % ', '.join(miss))
+ return self
+
+deserialize = gitlfspointer.deserialize
diff -r 87676e8ee056 -r 27b6df1b5adb hgext/lfs/wrapper.py
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/hgext/lfs/wrapper.py Mon Jan 22 17:53:02 2018 -0500
@@ -0,0 +1,350 @@
+# wrapper.py - methods wrapping core mercurial logic
+#
+# Copyright 2017 Facebook, Inc.
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2 or any later version.
+
+from __future__ import absolute_import
+
+import hashlib
+
+from mercurial.i18n import _
+from mercurial.node import bin, nullid, short
+
+from mercurial import (
+ error,
+ filelog,
+ revlog,
+ util,
+)
+
+from ..largefiles import lfutil
+
+from . import (
+ blobstore,
+ pointer,
+)
+
+def supportedoutgoingversions(orig, repo):
+ versions = orig(repo)
+ if 'lfs' in repo.requirements:
+ versions.discard('01')
+ versions.discard('02')
+ versions.add('03')
+ return versions
+
+def allsupportedversions(orig, ui):
+ versions = orig(ui)
+ versions.add('03')
+ return versions
+
+def _capabilities(orig, repo, proto):
+ '''Wrap server command to announce lfs server capability'''
+ caps = orig(repo, proto)
+ # XXX: change to 'lfs=serve' when separate git server isn't required?
+ caps.append('lfs')
+ return caps
+
+def bypasscheckhash(self, text):
+ return False
+
+def readfromstore(self, text):
+ """Read filelog content from local blobstore transform for flagprocessor.
+
+ Default transform for flagprocessor, returning contents from blobstore.
+ Returns a 2-tuple (text, validatehash) where validatehash is True as the
+ contents of the blobstore should be checked using checkhash.
+ """
+ p = pointer.deserialize(text)
+ oid = p.oid()
+ store = self.opener.lfslocalblobstore
+ if not store.has(oid):
+ p.filename = self.filename
+ self.opener.lfsremoteblobstore.readbatch([p], store)
+
+ # The caller will validate the content
+ text = store.read(oid, verify=False)
+
+ # pack hg filelog metadata
+ hgmeta = {}
+ for k in p.keys():
+ if k.startswith('x-hg-'):
+ name = k[len('x-hg-'):]
+ hgmeta[name] = p[k]
+ if hgmeta or text.startswith('\1\n'):
+ text = filelog.packmeta(hgmeta, text)
+
+ return (text, True)
+
+def writetostore(self, text):
+ # hg filelog metadata (includes rename, etc)
+ hgmeta, offset = filelog.parsemeta(text)
+ if offset and offset > 0:
+ # lfs blob does not contain hg filelog metadata
+ text = text[offset:]
+
+ # git-lfs only supports sha256
+ oid = hashlib.sha256(text).hexdigest()
+ self.opener.lfslocalblobstore.write(oid, text)
+
+ # replace contents with metadata
+ longoid = 'sha256:%s' % oid
+ metadata = pointer.gitlfspointer(oid=longoid, size=str(len(text)))
+
+ # by default, we expect the content to be binary. however, LFS could also
+ # be used for non-binary content. add a special entry for non-binary data.
+ # this will be used by filectx.isbinary().
+ if not util.binary(text):
+ # not hg filelog metadata (affecting commit hash), no "x-hg-" prefix
+ metadata['x-is-binary'] = '0'
+
+ # translate hg filelog metadata to lfs metadata with "x-hg-" prefix
+ if hgmeta is not None:
+ for k, v in hgmeta.iteritems():
+ metadata['x-hg-%s' % k] = v
+
+ rawtext = metadata.serialize()
+ return (rawtext, False)
+
+def _islfs(rlog, node=None, rev=None):
+ if rev is None:
+ if node is None:
+ # both None - likely working copy content where node is not ready
+ return False
+ rev = rlog.rev(node)
+ else:
+ node = rlog.node(rev)
+ if node == nullid:
+ return False
+ flags = rlog.flags(rev)
+ return bool(flags & revlog.REVIDX_EXTSTORED)
+
+def filelogaddrevision(orig, self, text, transaction, link, p1, p2,
+ cachedelta=None, node=None,
+ flags=revlog.REVIDX_DEFAULT_FLAGS, **kwds):
+ textlen = len(text)
+ # exclude hg rename meta from file size
+ meta, offset = filelog.parsemeta(text)
+ if offset:
+ textlen -= offset
+
+ lfstrack = self.opener.options['lfstrack']
+
+ # Always exclude hg owned files
+ if not self.filename.startswith('.hg') and lfstrack(self.filename, textlen):
+ flags |= revlog.REVIDX_EXTSTORED
+
+ return orig(self, text, transaction, link, p1, p2, cachedelta=cachedelta,
+ node=node, flags=flags, **kwds)
+
+def filelogrenamed(orig, self, node):
+ if _islfs(self, node):
+ rawtext = self.revision(node, raw=True)
+ if not rawtext:
+ return False
+ metadata = pointer.deserialize(rawtext)
+ if 'x-hg-copy' in metadata and 'x-hg-copyrev' in metadata:
+ return metadata['x-hg-copy'], bin(metadata['x-hg-copyrev'])
+ else:
+ return False
+ return orig(self, node)
+
+def filelogsize(orig, self, rev):
+ if _islfs(self, rev=rev):
+ # fast path: use lfs metadata to answer size
+ rawtext = self.revision(rev, raw=True)
+ metadata = pointer.deserialize(rawtext)
+ return int(metadata['size'])
+ return orig(self, rev)
+
+def filectxcmp(orig, self, fctx):
+ """returns True if text is different than fctx"""
+ # some fctx (e.g. hg-git) are not based on basefilectx and do not have islfs
+ if self.islfs() and getattr(fctx, 'islfs', lambda: False)():
+ # fast path: check LFS oid
+ p1 = pointer.deserialize(self.rawdata())
+ p2 = pointer.deserialize(fctx.rawdata())
+ return p1.oid() != p2.oid()
+ return orig(self, fctx)
+
+def filectxisbinary(orig, self):
+ if self.islfs():
+ # fast path: use lfs metadata to answer isbinary
+ metadata = pointer.deserialize(self.rawdata())
+ # if lfs metadata says nothing, assume it's binary by default
+ return bool(int(metadata.get('x-is-binary', 1)))
+ return orig(self)
+
+def filectxislfs(self):
+ return _islfs(self.filelog(), self.filenode())
+
+def _updatecatformatter(orig, fm, ctx, matcher, path, decode):
+ orig(fm, ctx, matcher, path, decode)
+ fm.data(rawdata=ctx[path].rawdata())
+
+def convertsink(orig, sink):
+ sink = orig(sink)
+ if sink.repotype == 'hg':
+ class lfssink(sink.__class__):
+ def putcommit(self, files, copies, parents, commit, source, revmap,
+ full, cleanp2):
+ pc = super(lfssink, self).putcommit
+ node = pc(files, copies, parents, commit, source, revmap, full,
+ cleanp2)
+
+ if 'lfs' not in self.repo.requirements:
+ ctx = self.repo[node]
+
+ # The file list may contain removed files, so check for
+ # membership before assuming it is in the context.
+ if any(f in ctx and ctx[f].islfs() for f, n in files):
+ self.repo.requirements.add('lfs')
+ self.repo._writerequirements()
+
+ # Permanently enable lfs locally
+ self.repo.vfs.append(
+ 'hgrc', util.tonativeeol('\n[extensions]\nlfs=\n'))
+
+ return node
+
+ sink.__class__ = lfssink
+
+ return sink
+
+def vfsinit(orig, self, othervfs):
+ orig(self, othervfs)
+ # copy lfs related options
+ for k, v in othervfs.options.items():
+ if k.startswith('lfs'):
+ self.options[k] = v
+ # also copy lfs blobstores. note: this can run before reposetup, so lfs
+ # blobstore attributes are not always ready at this time.
+ for name in ['lfslocalblobstore', 'lfsremoteblobstore']:
+ if util.safehasattr(othervfs, name):
+ setattr(self, name, getattr(othervfs, name))
+
+def hgclone(orig, ui, opts, *args, **kwargs):
+ result = orig(ui, opts, *args, **kwargs)
+
+ if result is not None:
+ sourcerepo, destrepo = result
+ repo = destrepo.local()
+
+ # When cloning to a remote repo (like through SSH), no repo is available
+ # from the peer. Therefore the hgrc can't be updated.
+ if not repo:
+ return result
+
+ # If lfs is required for this repo, permanently enable it locally
+ if 'lfs' in repo.requirements:
+ repo.vfs.append('hgrc',
+ util.tonativeeol('\n[extensions]\nlfs=\n'))
+
+ return result
+
+def hgpostshare(orig, sourcerepo, destrepo, bookmarks=True, defaultpath=None):
+ orig(sourcerepo, destrepo, bookmarks, defaultpath)
+
+ # If lfs is required for this repo, permanently enable it locally
+ if 'lfs' in destrepo.requirements:
+ destrepo.vfs.append('hgrc', util.tonativeeol('\n[extensions]\nlfs=\n'))
+
+def _canskipupload(repo):
+ # if remotestore is a null store, upload is a no-op and can be skipped
+ return isinstance(repo.svfs.lfsremoteblobstore, blobstore._nullremote)
+
+def candownload(repo):
+ # if remotestore is a null store, downloads will lead to nothing
+ return not isinstance(repo.svfs.lfsremoteblobstore, blobstore._nullremote)
+
+def uploadblobsfromrevs(repo, revs):
+ '''upload lfs blobs introduced by revs
+
+ Note: also used by other extensions e.g. infinitepush. avoid renaming.
+ '''
+ if _canskipupload(repo):
+ return
+ pointers = extractpointers(repo, revs)
+ uploadblobs(repo, pointers)
+
+def prepush(pushop):
+ """Prepush hook.
+
+ Read through the revisions to push, looking for filelog entries that can be
+ deserialized into metadata so that we can block the push on their upload to
+ the remote blobstore.
+ """
+ return uploadblobsfromrevs(pushop.repo, pushop.outgoing.missing)
+
+def push(orig, repo, remote, *args, **kwargs):
+ """bail on push if the extension isn't enabled on remote when needed"""
+ if 'lfs' in repo.requirements:
+ # If the remote peer is for a local repo, the requirement tests in the
+ # base class method enforce lfs support. Otherwise, some revisions in
+ # this repo use lfs, and the remote repo needs the extension loaded.
+ if not remote.local() and not remote.capable('lfs'):
+ # This is a copy of the message in exchange.push() when requirements
+ # are missing between local repos.
+ m = _("required features are not supported in the destination: %s")
+ raise error.Abort(m % 'lfs',
+ hint=_('enable the lfs extension on the server'))
+ return orig(repo, remote, *args, **kwargs)
+
+def writenewbundle(orig, ui, repo, source, filename, bundletype, outgoing,
+ *args, **kwargs):
+ """upload LFS blobs added by outgoing revisions on 'hg bundle'"""
+ uploadblobsfromrevs(repo, outgoing.missing)
+ return orig(ui, repo, source, filename, bundletype, outgoing, *args,
+ **kwargs)
+
+def extractpointers(repo, revs):
+ """return a list of lfs pointers added by given revs"""
+ repo.ui.debug('lfs: computing set of blobs to upload\n')
+ pointers = {}
+ for r in revs:
+ ctx = repo[r]
+ for p in pointersfromctx(ctx).values():
+ pointers[p.oid()] = p
+ return sorted(pointers.values())
+
+def pointersfromctx(ctx):
+ """return a dict {path: pointer} for given single changectx"""
+ result = {}
+ for f in ctx.files():
+ if f not in ctx:
+ continue
+ fctx = ctx[f]
+ if not _islfs(fctx.filelog(), fctx.filenode()):
+ continue
+ try:
+ result[f] = pointer.deserialize(fctx.rawdata())
+ except pointer.InvalidPointer as ex:
+ raise error.Abort(_('lfs: corrupted pointer (%s@%s): %s\n')
+ % (f, short(ctx.node()), ex))
+ return result
+
+def uploadblobs(repo, pointers):
+ """upload given pointers from local blobstore"""
+ if not pointers:
+ return
+
+ remoteblob = repo.svfs.lfsremoteblobstore
+ remoteblob.writebatch(pointers, repo.svfs.lfslocalblobstore)
+
+def upgradefinishdatamigration(orig, ui, srcrepo, dstrepo, requirements):
+ orig(ui, srcrepo, dstrepo, requirements)
+
+ srclfsvfs = srcrepo.svfs.lfslocalblobstore.vfs
+ dstlfsvfs = dstrepo.svfs.lfslocalblobstore.vfs
+
+ for dirpath, dirs, files in srclfsvfs.walk():
+ for oid in files:
+ ui.write(_('copying lfs blob %s\n') % oid)
+ lfutil.link(srclfsvfs.join(oid), dstlfsvfs.join(oid))
+
+def upgraderequirements(orig, repo):
+ reqs = orig(repo)
+ if 'lfs' in repo.requirements:
+ reqs.add('lfs')
+ return reqs
diff -r 87676e8ee056 -r 27b6df1b5adb hgext/logtoprocess.py
--- a/hgext/logtoprocess.py Mon Jan 08 16:07:51 2018 -0800
+++ b/hgext/logtoprocess.py Mon Jan 22 17:53:02 2018 -0500
@@ -124,8 +124,6 @@
env = dict(itertools.chain(encoding.environ.items(),
msgpairs, optpairs),
EVENT=event, HGPID=str(os.getpid()))
- # Connect stdin to /dev/null to prevent child processes messing
- # with mercurial's stdin.
runshellcommand(script, env)
return super(logtoprocessui, self).log(event, *msg, **opts)
diff -r 87676e8ee056 -r 27b6df1b5adb hgext/mq.py
--- a/hgext/mq.py Mon Jan 08 16:07:51 2018 -0800
+++ b/hgext/mq.py Mon Jan 22 17:53:02 2018 -0500
@@ -565,7 +565,7 @@
return index
return None
- guard_re = re.compile(r'\s?#([-+][^-+# \t\r\n\f][^# \t\r\n\f]*)')
+ guard_re = re.compile(br'\s?#([-+][^-+# \t\r\n\f][^# \t\r\n\f]*)')
def parseseries(self):
self.series = []
diff -r 87676e8ee056 -r 27b6df1b5adb hgext/notify.py
--- a/hgext/notify.py Mon Jan 08 16:07:51 2018 -0800
+++ b/hgext/notify.py Mon Jan 22 17:53:02 2018 -0500
@@ -135,6 +135,7 @@
from __future__ import absolute_import
import email
+import email.parser as emailparser
import fnmatch
import socket
import time
@@ -339,7 +340,7 @@
'and revset\n')
return
- p = email.Parser.Parser()
+ p = emailparser.Parser()
try:
msg = p.parsestr(data)
except email.Errors.MessageParseError as inst:
diff -r 87676e8ee056 -r 27b6df1b5adb hgext/patchbomb.py
--- a/hgext/patchbomb.py Mon Jan 08 16:07:51 2018 -0800
+++ b/hgext/patchbomb.py Mon Jan 22 17:53:02 2018 -0500
@@ -89,6 +89,7 @@
mail,
node as nodemod,
patch,
+ pycompat,
registrar,
repair,
scmutil,
@@ -318,7 +319,7 @@
tmpfn = os.path.join(tmpdir, 'bundle')
btype = ui.config('patchbomb', 'bundletype')
if btype:
- opts['type'] = btype
+ opts[r'type'] = btype
try:
commands.bundle(ui, repo, tmpfn, dest, **opts)
return util.readfile(tmpfn)
@@ -338,8 +339,8 @@
the user through the editor.
"""
ui = repo.ui
- if opts.get('desc'):
- body = open(opts.get('desc')).read()
+ if opts.get(r'desc'):
+ body = open(opts.get(r'desc')).read()
else:
ui.write(_('\nWrite the introductory message for the '
'patch series.\n\n'))
@@ -359,21 +360,21 @@
"""
ui = repo.ui
_charsets = mail._charsets(ui)
- subj = (opts.get('subject')
+ subj = (opts.get(r'subject')
or prompt(ui, 'Subject:', 'A bundle for your repository'))
body = _getdescription(repo, '', sender, **opts)
msg = emailmod.MIMEMultipart.MIMEMultipart()
if body:
- msg.attach(mail.mimeencode(ui, body, _charsets, opts.get('test')))
+ msg.attach(mail.mimeencode(ui, body, _charsets, opts.get(r'test')))
datapart = emailmod.MIMEBase.MIMEBase('application', 'x-mercurial-bundle')
datapart.set_payload(bundle)
- bundlename = '%s.hg' % opts.get('bundlename', 'bundle')
+ bundlename = '%s.hg' % opts.get(r'bundlename', 'bundle')
datapart.add_header('Content-Disposition', 'attachment',
filename=bundlename)
emailmod.Encoders.encode_base64(datapart)
msg.attach(datapart)
- msg['Subject'] = mail.headencode(ui, subj, _charsets, opts.get('test'))
+ msg['Subject'] = mail.headencode(ui, subj, _charsets, opts.get(r'test'))
return [(msg, subj, None)]
def _makeintro(repo, sender, revs, patches, **opts):
@@ -384,9 +385,9 @@
_charsets = mail._charsets(ui)
# use the last revision which is likely to be a bookmarked head
- prefix = _formatprefix(ui, repo, revs.last(), opts.get('flag'),
+ prefix = _formatprefix(ui, repo, revs.last(), opts.get(r'flag'),
0, len(patches), numbered=True)
- subj = (opts.get('subject') or
+ subj = (opts.get(r'subject') or
prompt(ui, '(optional) Subject: ', rest=prefix, default=''))
if not subj:
return None # skip intro if the user doesn't bother
@@ -394,7 +395,7 @@
subj = prefix + ' ' + subj
body = ''
- if opts.get('diffstat'):
+ if opts.get(r'diffstat'):
# generate a cumulative diffstat of the whole patch series
diffstat = patch.diffstat(sum(patches, []))
body = '\n' + diffstat
@@ -402,9 +403,9 @@
diffstat = None
body = _getdescription(repo, body, sender, **opts)
- msg = mail.mimeencode(ui, body, _charsets, opts.get('test'))
+ msg = mail.mimeencode(ui, body, _charsets, opts.get(r'test'))
msg['Subject'] = mail.headencode(ui, subj, _charsets,
- opts.get('test'))
+ opts.get(r'test'))
return (msg, subj, diffstat)
def _getpatchmsgs(repo, sender, revs, patchnames=None, **opts):
@@ -414,6 +415,7 @@
This function returns a list of "email" tuples (subject, content, None).
"""
+ bytesopts = pycompat.byteskwargs(opts)
ui = repo.ui
_charsets = mail._charsets(ui)
patches = list(_getpatches(repo, revs, **opts))
@@ -423,7 +425,7 @@
% len(patches))
# build the intro message, or skip it if the user declines
- if introwanted(ui, opts, len(patches)):
+ if introwanted(ui, bytesopts, len(patches)):
msg = _makeintro(repo, sender, revs, patches, **opts)
if msg:
msgs.append(msg)
@@ -437,8 +439,8 @@
for i, (r, p) in enumerate(zip(revs, patches)):
if patchnames:
name = patchnames[i]
- msg = makepatch(ui, repo, r, p, opts, _charsets, i + 1,
- len(patches), numbered, name)
+ msg = makepatch(ui, repo, r, p, bytesopts, _charsets,
+ i + 1, len(patches), numbered, name)
msgs.append(msg)
return msgs
@@ -452,7 +454,7 @@
revs = [r for r in revs if r >= 0]
if not revs:
- revs = [len(repo) - 1]
+ revs = [repo.changelog.tiprev()]
revs = repo.revs('outgoing(%s) and ::%ld', dest or '', revs)
if not revs:
ui.status(_("no changes found\n"))
@@ -579,6 +581,7 @@
Before using this command, you will need to enable email in your
hgrc. See the [email] section in hgrc(5) for details.
'''
+ opts = pycompat.byteskwargs(opts)
_charsets = mail._charsets(ui)
@@ -629,7 +632,7 @@
# check if revision exist on the public destination
publicurl = repo.ui.config('patchbomb', 'publicurl')
if publicurl:
- repo.ui.debug('checking that revision exist in the public repo')
+ repo.ui.debug('checking that revision exist in the public repo\n')
try:
publicpeer = hg.peer(repo, {}, publicurl)
except error.RepoError:
@@ -637,7 +640,7 @@
% publicurl)
raise
if not publicpeer.capable('known'):
- repo.ui.debug('skipping existence checks: public repo too old')
+ repo.ui.debug('skipping existence checks: public repo too old\n')
else:
out = [repo[r] for r in revs]
known = publicpeer.known(h.node() for h in out)
@@ -672,12 +675,13 @@
prompt(ui, 'From', ui.username()))
if bundle:
- bundledata = _getbundle(repo, dest, **opts)
- bundleopts = opts.copy()
- bundleopts.pop('bundle', None) # already processed
+ stropts = pycompat.strkwargs(opts)
+ bundledata = _getbundle(repo, dest, **stropts)
+ bundleopts = stropts.copy()
+ bundleopts.pop(r'bundle', None) # already processed
msgs = _getbundlemsgs(repo, sender, bundledata, **bundleopts)
else:
- msgs = _getpatchmsgs(repo, sender, revs, **opts)
+ msgs = _getpatchmsgs(repo, sender, revs, **pycompat.strkwargs(opts))
showaddrs = []
diff -r 87676e8ee056 -r 27b6df1b5adb hgext/rebase.py
--- a/hgext/rebase.py Mon Jan 08 16:07:51 2018 -0800
+++ b/hgext/rebase.py Mon Jan 22 17:53:02 2018 -0500
@@ -21,7 +21,6 @@
from mercurial.i18n import _
from mercurial.node import (
- hex,
nullid,
nullrev,
short,
@@ -43,6 +42,7 @@
obsutil,
patch,
phases,
+ pycompat,
registrar,
repair,
revset,
@@ -53,7 +53,6 @@
)
release = lock.release
-templateopts = cmdutil.templateopts
# The following constants are used throughout the rebase module. The ordering of
# their values must be maintained.
@@ -137,7 +136,7 @@
class rebaseruntime(object):
"""This class is a container for rebase runtime state"""
- def __init__(self, repo, ui, opts=None):
+ def __init__(self, repo, ui, inmemory=False, opts=None):
if opts is None:
opts = {}
@@ -179,6 +178,8 @@
# other extensions
self.keepopen = opts.get('keepopen', False)
self.obsoletenotrebased = {}
+ self.obsoletewithoutsuccessorindestination = set()
+ self.inmemory = inmemory
@property
def repo(self):
@@ -311,9 +312,10 @@
if not self.ui.configbool('experimental', 'rebaseskipobsolete'):
return
obsoleteset = set(obsoleterevs)
- self.obsoletenotrebased = _computeobsoletenotrebased(self.repo,
- obsoleteset, destmap)
+ self.obsoletenotrebased, self.obsoletewithoutsuccessorindestination = \
+ _computeobsoletenotrebased(self.repo, obsoleteset, destmap)
skippedset = set(self.obsoletenotrebased)
+ skippedset.update(self.obsoletewithoutsuccessorindestination)
_checkobsrebase(self.repo, self.ui, obsoleteset, skippedset)
def _prepareabortorcontinue(self, isabort):
@@ -380,7 +382,18 @@
self.prepared = True
+ def _assignworkingcopy(self):
+ if self.inmemory:
+ from mercurial.context import overlayworkingctx
+ self.wctx = overlayworkingctx(self.repo)
+ self.repo.ui.debug("rebasing in-memory\n")
+ else:
+ self.wctx = self.repo[None]
+ self.repo.ui.debug("rebasing on disk\n")
+ self.repo.ui.log("rebase", "", rebase_imm_used=self.wctx.isinmemory())
+
def _performrebase(self, tr):
+ self._assignworkingcopy()
repo, ui = self.repo, self.ui
if self.keepbranchesf:
# insert _savebranch at the start of extrafns so if
@@ -419,12 +432,26 @@
def _performrebasesubset(self, tr, subset, pos, total):
repo, ui, opts = self.repo, self.ui, self.opts
sortedrevs = repo.revs('sort(%ld, -topo)', subset)
+ allowdivergence = self.ui.configbool(
+ 'experimental', 'evolution.allowdivergence')
+ if not allowdivergence:
+ sortedrevs -= repo.revs(
+ 'descendants(%ld) and not %ld',
+ self.obsoletewithoutsuccessorindestination,
+ self.obsoletewithoutsuccessorindestination,
+ )
for rev in sortedrevs:
dest = self.destmap[rev]
ctx = repo[rev]
desc = _ctxdesc(ctx)
if self.state[rev] == rev:
ui.status(_('already rebased %s\n') % desc)
+ elif (not allowdivergence
+ and rev in self.obsoletewithoutsuccessorindestination):
+ msg = _('note: not rebasing %s and its descendants as '
+ 'this would cause divergence\n') % desc
+ repo.ui.status(msg)
+ self.skipped.add(rev)
elif rev in self.obsoletenotrebased:
succ = self.obsoletenotrebased[rev]
if succ is None:
@@ -459,22 +486,35 @@
ui.setconfig('ui', 'forcemerge', opts.get('tool', ''),
'rebase')
stats = rebasenode(repo, rev, p1, base, self.state,
- self.collapsef, dest)
+ self.collapsef, dest, wctx=self.wctx)
if stats and stats[3] > 0:
- raise error.InterventionRequired(
- _('unresolved conflicts (see hg '
- 'resolve, then hg rebase --continue)'))
+ if self.wctx.isinmemory():
+ raise error.InMemoryMergeConflictsError()
+ else:
+ raise error.InterventionRequired(
+ _('unresolved conflicts (see hg '
+ 'resolve, then hg rebase --continue)'))
finally:
ui.setconfig('ui', 'forcemerge', '', 'rebase')
if not self.collapsef:
merging = p2 != nullrev
editform = cmdutil.mergeeditform(merging, 'rebase')
editor = cmdutil.getcommiteditor(editform=editform, **opts)
- newnode = concludenode(repo, rev, p1, p2,
- extrafn=_makeextrafn(self.extrafns),
- editor=editor,
- keepbranches=self.keepbranchesf,
- date=self.date)
+ if self.wctx.isinmemory():
+ newnode = concludememorynode(repo, rev, p1, p2,
+ wctx=self.wctx,
+ extrafn=_makeextrafn(self.extrafns),
+ editor=editor,
+ keepbranches=self.keepbranchesf,
+ date=self.date)
+ mergemod.mergestate.clean(repo)
+ else:
+ newnode = concludenode(repo, rev, p1, p2,
+ extrafn=_makeextrafn(self.extrafns),
+ editor=editor,
+ keepbranches=self.keepbranchesf,
+ date=self.date)
+
if newnode is None:
# If it ended up being a no-op commit, then the normal
# merge state clean-up path doesn't happen, so do it
@@ -482,7 +522,10 @@
mergemod.mergestate.clean(repo)
else:
# Skip commit if we are collapsing
- repo.setparents(repo[p1].node())
+ if self.wctx.isinmemory():
+ self.wctx.setbase(repo[p1])
+ else:
+ repo.setparents(repo[p1].node())
newnode = None
# Update the state
if newnode is not None:
@@ -522,15 +565,24 @@
revtoreuse = max(self.state)
dsguard = None
- if ui.configbool('rebase', 'singletransaction'):
- dsguard = dirstateguard.dirstateguard(repo, 'rebase')
- with util.acceptintervention(dsguard):
- newnode = concludenode(repo, revtoreuse, p1, self.external,
- commitmsg=commitmsg,
- extrafn=_makeextrafn(self.extrafns),
- editor=editor,
- keepbranches=self.keepbranchesf,
- date=self.date)
+ if self.inmemory:
+ newnode = concludememorynode(repo, revtoreuse, p1,
+ self.external,
+ commitmsg=commitmsg,
+ extrafn=_makeextrafn(self.extrafns),
+ editor=editor,
+ keepbranches=self.keepbranchesf,
+ date=self.date, wctx=self.wctx)
+ else:
+ if ui.configbool('rebase', 'singletransaction'):
+ dsguard = dirstateguard.dirstateguard(repo, 'rebase')
+ with util.acceptintervention(dsguard):
+ newnode = concludenode(repo, revtoreuse, p1, self.external,
+ commitmsg=commitmsg,
+ extrafn=_makeextrafn(self.extrafns),
+ editor=editor,
+ keepbranches=self.keepbranchesf,
+ date=self.date)
if newnode is not None:
newrev = repo[newnode].rev()
for oldrev in self.state.iterkeys():
@@ -545,7 +597,8 @@
if newwd < 0:
# original directory is a parent of rebase set root or ignored
newwd = self.originalwd
- if newwd not in [c.rev() for c in repo[None].parents()]:
+ if (newwd not in [c.rev() for c in repo[None].parents()] and
+ not self.inmemory):
ui.note(_("update back to initial working directory parent\n"))
hg.updaterepo(repo, newwd, False)
@@ -594,7 +647,7 @@
('t', 'tool', '', _('specify merge tool')),
('c', 'continue', False, _('continue an interrupted rebase')),
('a', 'abort', False, _('abort an interrupted rebase'))] +
- templateopts,
+ cmdutil.formatteropts,
_('[-s REV | -b REV] [-d REV] [OPTION]'))
def rebase(ui, repo, **opts):
"""move changeset (and descendants) to a different branch
@@ -628,6 +681,11 @@
4. If you do not specify any of ``--rev``, ``source``, or ``--base``,
rebase will use ``--base .`` as above.
+ If ``--source`` or ``--rev`` is used, special names ``SRC`` and ``ALLSRC``
+ can be used in ``--dest``. Destination would be calculated per source
+ revision with ``SRC`` substituted by that single source revision and
+ ``ALLSRC`` substituted by all source revisions.
+
Rebase will destroy original changesets unless you use ``--keep``.
It will also move your bookmarks (even if you do).
@@ -676,6 +734,12 @@
hg rebase -r "branch(featureX)" -d 1.3 --keepbranches
+ - stabilize orphaned changesets so history looks linear::
+
+ hg rebase -r 'orphan()-obsolete()'\
+ -d 'first(max((successors(max(roots(ALLSRC) & ::SRC)^)-obsolete())::) +\
+ max(::((roots(ALLSRC) & ::SRC)^)-obsolete()))'
+
Configuration Options:
You can make rebase require a destination if you set the following config
@@ -693,13 +757,43 @@
[rebase]
singletransaction = True
+ By default, rebase writes to the working copy, but you can configure it to
+ run in-memory for better performance, and to allow it to run if the
+ working copy is dirty::
+
+ [rebase]
+ experimental.inmemory = True
+
Return Values:
Returns 0 on success, 1 if nothing to rebase or there are
unresolved conflicts.
"""
- rbsrt = rebaseruntime(repo, ui, opts)
+ inmemory = ui.configbool('rebase', 'experimental.inmemory')
+ if (opts.get('continue') or opts.get('abort') or
+ repo.currenttransaction() is not None):
+ # in-memory rebase is not compatible with resuming rebases.
+ # (Or if it is run within a transaction, since the restart logic can
+ # fail the entire transaction.)
+ inmemory = False
+
+ if inmemory:
+ try:
+ # in-memory merge doesn't support conflicts, so if we hit any, abort
+ # and re-run as an on-disk merge.
+ return _origrebase(ui, repo, inmemory=inmemory, **opts)
+ except error.InMemoryMergeConflictsError:
+ ui.warn(_('hit merge conflicts; re-running rebase without in-memory'
+ ' merge\n'))
+ _origrebase(ui, repo, **{'abort': True})
+ return _origrebase(ui, repo, inmemory=False, **opts)
+ else:
+ return _origrebase(ui, repo, **opts)
+
+def _origrebase(ui, repo, inmemory=False, **opts):
+ opts = pycompat.byteskwargs(opts)
+ rbsrt = rebaseruntime(repo, ui, inmemory, opts)
with repo.wlock(), repo.lock():
# Validate input and define rebasing points
@@ -746,7 +840,7 @@
if retcode is not None:
return retcode
else:
- destmap = _definedestmap(ui, repo, destf, srcf, basef, revf,
+ destmap = _definedestmap(ui, repo, rbsrt, destf, srcf, basef, revf,
destspace=destspace)
retcode = rbsrt._preparenewrebase(destmap)
if retcode is not None:
@@ -758,16 +852,22 @@
singletr = ui.configbool('rebase', 'singletransaction')
if singletr:
tr = repo.transaction('rebase')
+
+ # If `rebase.singletransaction` is enabled, wrap the entire operation in
+ # one transaction here. Otherwise, transactions are obtained when
+ # committing each node, which is slower but allows partial success.
with util.acceptintervention(tr):
- if singletr:
+ # Same logic for the dirstate guard, except we don't create one when
+ # rebasing in-memory (it's not needed).
+ if singletr and not inmemory:
dsguard = dirstateguard.dirstateguard(repo, 'rebase')
with util.acceptintervention(dsguard):
rbsrt._performrebase(tr)
rbsrt._finishrebase()
-def _definedestmap(ui, repo, destf=None, srcf=None, basef=None, revf=None,
- destspace=None):
+def _definedestmap(ui, repo, rbsrt, destf=None, srcf=None, basef=None,
+ revf=None, destspace=None):
"""use revisions argument to define destmap {srcrev: destrev}"""
if revf is None:
revf = []
@@ -781,8 +881,9 @@
if revf and srcf:
raise error.Abort(_('cannot specify both a revision and a source'))
- cmdutil.checkunfinished(repo)
- cmdutil.bailifchanged(repo)
+ if not rbsrt.inmemory:
+ cmdutil.checkunfinished(repo)
+ cmdutil.bailifchanged(repo)
if ui.configbool('commands', 'rebase.requiredest') and not destf:
raise error.Abort(_('you must specify a destination'),
@@ -855,6 +956,23 @@
ui.status(_('nothing to rebase from %s to %s\n') %
('+'.join(str(repo[r]) for r in base), dest))
return None
+ # If rebasing the working copy parent, force in-memory merge to be off.
+ #
+ # This is because the extra work of checking out the newly rebased commit
+ # outweighs the benefits of rebasing in-memory, and executing an extra
+ # update command adds a bit of overhead, so better to just do it on disk. In
+ # all other cases leave it on.
+ #
+ # Note that there are cases where this isn't true -- e.g., rebasing large
+ # stacks that include the WCP. However, I'm not yet sure where the cutoff
+ # is.
+ rebasingwcp = repo['.'].rev() in rebaseset
+ ui.log("rebase", "", rebase_rebasing_wcp=rebasingwcp)
+ if rbsrt.inmemory and rebasingwcp:
+ rbsrt.inmemory = False
+ # Check these since we did not before.
+ cmdutil.checkunfinished(repo)
+ cmdutil.bailifchanged(repo)
if not destf:
dest = repo[_destrebase(repo, rebaseset, destspace=destspace)]
@@ -868,8 +986,6 @@
# fast path: try to resolve dest without SRC alias
dest = scmutil.revsingle(repo, destf, localalias=alias)
except error.RepoLookupError:
- if not ui.configbool('experimental', 'rebase.multidest'):
- raise
# multi-dest path: resolve dest for each SRC separately
destmap = {}
for r in rebaseset:
@@ -920,6 +1036,44 @@
(max(destancestors),
', '.join(str(p) for p in sorted(parents))))
+def concludememorynode(repo, rev, p1, p2, wctx=None,
+ commitmsg=None, editor=None, extrafn=None,
+ keepbranches=False, date=None):
+ '''Commit the memory changes with parents p1 and p2. Reuse commit info from
+ rev but also store useful information in extra.
+ Return node of committed revision.'''
+ ctx = repo[rev]
+ if commitmsg is None:
+ commitmsg = ctx.description()
+ keepbranch = keepbranches and repo[p1].branch() != ctx.branch()
+ extra = {'rebase_source': ctx.hex()}
+ if extrafn:
+ extrafn(ctx, extra)
+
+ destphase = max(ctx.phase(), phases.draft)
+ overrides = {('phases', 'new-commit'): destphase}
+ with repo.ui.configoverride(overrides, 'rebase'):
+ if keepbranch:
+ repo.ui.setconfig('ui', 'allowemptycommit', True)
+ # Replicates the empty check in ``repo.commit``.
+ if wctx.isempty() and not repo.ui.configbool('ui', 'allowemptycommit'):
+ return None
+
+ if date is None:
+ date = ctx.date()
+
+ # By convention, ``extra['branch']`` (set by extrafn) clobbers
+ # ``branch`` (used when passing ``--keepbranches``).
+ branch = repo[p1].branch()
+ if 'branch' in extra:
+ branch = extra['branch']
+
+ memctx = wctx.tomemctx(commitmsg, parents=(p1, p2), date=date,
+ extra=extra, user=ctx.user(), branch=branch, editor=editor)
+ commitres = repo.commitctx(memctx)
+ wctx.clean() # Might be reused
+ return commitres
+
def concludenode(repo, rev, p1, p2, commitmsg=None, editor=None, extrafn=None,
keepbranches=False, date=None):
'''Commit the wd changes with parents p1 and p2. Reuse commit info from rev
@@ -952,24 +1106,29 @@
repo.dirstate.setbranch(repo[newnode].branch())
return newnode
-def rebasenode(repo, rev, p1, base, state, collapse, dest):
+def rebasenode(repo, rev, p1, base, state, collapse, dest, wctx):
'Rebase a single revision rev on top of p1 using base as merge ancestor'
# Merge phase
# Update to destination and merge it with local
- if repo['.'].rev() != p1:
- repo.ui.debug(" update to %d:%s\n" % (p1, repo[p1]))
- mergemod.update(repo, p1, False, True)
+ if wctx.isinmemory():
+ wctx.setbase(repo[p1])
else:
- repo.ui.debug(" already in destination\n")
- repo.dirstate.write(repo.currenttransaction())
+ if repo['.'].rev() != p1:
+ repo.ui.debug(" update to %d:%s\n" % (p1, repo[p1]))
+ mergemod.update(repo, p1, False, True)
+ else:
+ repo.ui.debug(" already in destination\n")
+ # This is, alas, necessary to invalidate workingctx's manifest cache,
+ # as well as other data we litter on it in other places.
+ wctx = repo[None]
+ repo.dirstate.write(repo.currenttransaction())
repo.ui.debug(" merge against %d:%s\n" % (rev, repo[rev]))
if base is not None:
repo.ui.debug(" detach base %d:%s\n" % (base, repo[base]))
# When collapsing in-place, the parent is the common ancestor, we
# have to allow merging with it.
- wctx = repo[None]
stats = mergemod.update(repo, rev, True, True, base, collapse,
- labels=['dest', 'source'])
+ labels=['dest', 'source'], wc=wctx)
if collapse:
copies.duplicatecopies(repo, wctx, rev, dest)
else:
@@ -1546,22 +1705,26 @@
replacements[oldnode] = succs
scmutil.cleanupnodes(repo, replacements, 'rebase', moves)
if fm:
- nodechanges = {hex(oldn): [hex(n) for n in newn]
- for oldn, newn in replacements.iteritems()}
+ hf = fm.hexfunc
+ fl = fm.formatlist
+ fd = fm.formatdict
+ nodechanges = fd({hf(oldn): fl([hf(n) for n in newn], name='node')
+ for oldn, newn in replacements.iteritems()},
+ key="oldnode", value="newnodes")
fm.data(nodechanges=nodechanges)
def pullrebase(orig, ui, repo, *args, **opts):
'Call rebase after pull if the latter has been invoked with --rebase'
ret = None
- if opts.get('rebase'):
+ if opts.get(r'rebase'):
if ui.configbool('commands', 'rebase.requiredest'):
msg = _('rebase destination required by configuration')
hint = _('use hg pull followed by hg rebase -d DEST')
raise error.Abort(msg, hint=hint)
with repo.wlock(), repo.lock():
- if opts.get('update'):
- del opts['update']
+ if opts.get(r'update'):
+ del opts[r'update']
ui.debug('--update and --rebase are not compatible, ignoring '
'the update flag\n')
@@ -1582,15 +1745,15 @@
if revspostpull > revsprepull:
# --rev option from pull conflict with rebase own --rev
# dropping it
- if 'rev' in opts:
- del opts['rev']
+ if r'rev' in opts:
+ del opts[r'rev']
# positional argument from pull conflicts with rebase's own
# --source.
- if 'source' in opts:
- del opts['source']
+ if r'source' in opts:
+ del opts[r'source']
# revsprepull is the len of the repo, not revnum of tip.
destspace = list(repo.changelog.revs(start=revsprepull))
- opts['_destspace'] = destspace
+ opts[r'_destspace'] = destspace
try:
rebase(ui, repo, **opts)
except error.NoMergeDestAbort:
@@ -1604,7 +1767,7 @@
# with warning and trumpets
commands.update(ui, repo)
else:
- if opts.get('tool'):
+ if opts.get(r'tool'):
raise error.Abort(_('--tool can only be used with --rebase'))
ret = orig(ui, repo, *args, **opts)
@@ -1615,11 +1778,16 @@
return set(r for r in revs if repo[r].obsolete())
def _computeobsoletenotrebased(repo, rebaseobsrevs, destmap):
- """return a mapping obsolete => successor for all obsolete nodes to be
- rebased that have a successors in the destination
+ """Return (obsoletenotrebased, obsoletewithoutsuccessorindestination).
+
+ `obsoletenotrebased` is a mapping of obsolete => successor for all
+ obsolete nodes to be rebased given in `rebaseobsrevs`.
- obsolete => None entries in the mapping indicate nodes with no successor"""
+ `obsoletewithoutsuccessorindestination` is a set with obsolete revisions
+ without a successor in destination.
+ """
obsoletenotrebased = {}
+ obsoletewithoutsuccessorindestination = set([])
assert repo.filtername is None
cl = repo.changelog
@@ -1640,8 +1808,15 @@
if cl.isancestor(succnode, destnode):
obsoletenotrebased[srcrev] = nodemap[succnode]
break
+ else:
+ # If 'srcrev' has a successor in rebase set but none in
+ # destination (which would be caught above), we shall skip it
+ # and its descendants to avoid divergence.
+ if any(nodemap[s] in destmap
+ for s in successors if s != srcnode):
+ obsoletewithoutsuccessorindestination.add(srcrev)
- return obsoletenotrebased
+ return obsoletenotrebased, obsoletewithoutsuccessorindestination
def summaryhook(ui, repo):
if not repo.vfs.exists('rebasestate'):
diff -r 87676e8ee056 -r 27b6df1b5adb hgext/record.py
--- a/hgext/record.py Mon Jan 08 16:07:51 2018 -0800
+++ b/hgext/record.py Mon Jan 22 17:53:02 2018 -0500
@@ -68,13 +68,13 @@
raise error.Abort(_('running non-interactively, use %s instead') %
'commit')
- opts["interactive"] = True
+ opts[r"interactive"] = True
overrides = {('experimental', 'crecord'): False}
with ui.configoverride(overrides, 'record'):
return commands.commit(ui, repo, *pats, **opts)
def qrefresh(origfn, ui, repo, *pats, **opts):
- if not opts['interactive']:
+ if not opts[r'interactive']:
return origfn(ui, repo, *pats, **opts)
mq = extensions.find('mq')
@@ -112,7 +112,7 @@
repo.mq.checkpatchname(patch)
def committomq(ui, repo, *pats, **opts):
- opts['checkname'] = False
+ opts[r'checkname'] = False
mq.new(ui, repo, patch, *pats, **opts)
overrides = {('experimental', 'crecord'): False}
@@ -121,7 +121,7 @@
cmdutil.recordfilter, *pats, **opts)
def qnew(origfn, ui, repo, patch, *args, **opts):
- if opts['interactive']:
+ if opts[r'interactive']:
return _qrecord(None, ui, repo, patch, *args, **opts)
return origfn(ui, repo, patch, *args, **opts)
diff -r 87676e8ee056 -r 27b6df1b5adb hgext/releasenotes.py
--- a/hgext/releasenotes.py Mon Jan 08 16:07:51 2018 -0800
+++ b/hgext/releasenotes.py Mon Jan 22 17:53:02 2018 -0500
@@ -25,6 +25,7 @@
error,
minirst,
node,
+ pycompat,
registrar,
scmutil,
util,
@@ -570,6 +571,8 @@
admonitions along with their title. This also includes the custom
admonitions (if any).
"""
+
+ opts = pycompat.byteskwargs(opts)
sections = releasenotessections(ui, repo)
listflag = opts.get('list')
diff -r 87676e8ee056 -r 27b6df1b5adb hgext/shelve.py
--- a/hgext/shelve.py Mon Jan 08 16:07:51 2018 -0800
+++ b/hgext/shelve.py Mon Jan 22 17:53:02 2018 -0500
@@ -43,6 +43,7 @@
node as nodemod,
patch,
phases,
+ pycompat,
registrar,
repair,
scmutil,
@@ -380,7 +381,7 @@
editor_ = False
if editor:
editor_ = cmdutil.getcommiteditor(editform='shelve.shelve',
- **opts)
+ **pycompat.strkwargs(opts))
with repo.ui.configoverride(overrides):
return repo.commit(message, shelveuser, opts.get('date'),
match, editor=editor_, extra=extra)
@@ -389,6 +390,7 @@
repo.mq.checkapplied = saved
def interactivecommitfunc(ui, repo, *pats, **opts):
+ opts = pycompat.byteskwargs(opts)
match = scmutil.match(repo['.'], pats, {})
message = opts['message']
return commitfunc(ui, repo, message, match, opts)
@@ -465,7 +467,7 @@
else:
node = cmdutil.dorecord(ui, repo, commitfunc, None,
False, cmdutil.recordfilter, *pats,
- **opts)
+ **pycompat.strkwargs(opts))
if not node:
_nothingtoshelvemessaging(ui, repo, pats, opts)
return 1
@@ -852,6 +854,7 @@
return _dounshelve(ui, repo, *shelved, **opts)
def _dounshelve(ui, repo, *shelved, **opts):
+ opts = pycompat.byteskwargs(opts)
abortf = opts.get('abort')
continuef = opts.get('continue')
if not abortf and not continuef:
@@ -1010,6 +1013,7 @@
To delete specific shelved changes, use ``--delete``. To delete
all shelved changes, use ``--cleanup``.
'''
+ opts = pycompat.byteskwargs(opts)
allowables = [
('addremove', {'create'}), # 'create' is pseudo action
('unknown', {'create'}),
diff -r 87676e8ee056 -r 27b6df1b5adb hgext/show.py
--- a/hgext/show.py Mon Jan 08 16:07:51 2018 -0800
+++ b/hgext/show.py Mon Jan 22 17:53:02 2018 -0500
@@ -28,7 +28,10 @@
from __future__ import absolute_import
from mercurial.i18n import _
-from mercurial.node import nullrev
+from mercurial.node import (
+ hex,
+ nullrev,
+)
from mercurial import (
cmdutil,
commands,
@@ -252,7 +255,9 @@
# our simplicity and the customizations required.
# TODO use proper graph symbols from graphmod
- shortesttmpl = formatter.maketemplater(ui, '{shortest(node, %d)}' % nodelen)
+ tres = formatter.templateresources(ui, repo)
+ shortesttmpl = formatter.maketemplater(ui, '{shortest(node, %d)}' % nodelen,
+ resources=tres)
def shortest(ctx):
return shortesttmpl.render({'ctx': ctx, 'node': ctx.hex()})
@@ -438,14 +443,11 @@
If we fail to do this, a value of e.g. ``10023`` could mean either
revision 10023 or node ``10023abc...``.
"""
- tmpl = formatter.maketemplater(repo.ui, '{shortest(node, %d)}' % minlen)
- lens = [minlen]
- for rev in revs:
- ctx = repo[rev]
- shortest = tmpl.render({'ctx': ctx, 'node': ctx.hex()})
- lens.append(len(shortest))
-
- return max(lens)
+ if not revs:
+ return minlen
+ # don't use filtered repo because it's slow. see templater.shortest().
+ cl = repo.unfiltered().changelog
+ return max(len(cl.shortest(hex(cl.node(r)), minlen)) for r in revs)
# Adjust the docstring of the show command so it shows all registered views.
# This is a bit hacky because it runs at the end of module load. When moved
diff -r 87676e8ee056 -r 27b6df1b5adb hgext/sparse.py
--- a/hgext/sparse.py Mon Jan 08 16:07:51 2018 -0800
+++ b/hgext/sparse.py Mon Jan 22 17:53:02 2018 -0500
@@ -82,6 +82,7 @@
extensions,
hg,
match as matchmod,
+ pycompat,
registrar,
sparse,
util,
@@ -286,6 +287,7 @@
Returns 0 if editing the sparse checkout succeeds.
"""
+ opts = pycompat.byteskwargs(opts)
include = opts.get('include')
exclude = opts.get('exclude')
force = opts.get('force')
diff -r 87676e8ee056 -r 27b6df1b5adb hgext/split.py
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/hgext/split.py Mon Jan 22 17:53:02 2018 -0500
@@ -0,0 +1,177 @@
+# split.py - split a changeset into smaller ones
+#
+# Copyright 2015 Laurent Charignon
+# Copyright 2017 Facebook, Inc.
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2 or any later version.
+"""command to split a changeset into smaller ones (EXPERIMENTAL)"""
+
+from __future__ import absolute_import
+
+from mercurial.i18n import _
+
+from mercurial.node import (
+ nullid,
+ short,
+)
+
+from mercurial import (
+ bookmarks,
+ cmdutil,
+ commands,
+ error,
+ hg,
+ obsolete,
+ phases,
+ registrar,
+ revsetlang,
+ scmutil,
+)
+
+# allow people to use split without explicitly enabling rebase extension
+from . import (
+ rebase,
+)
+
+cmdtable = {}
+command = registrar.command(cmdtable)
+
+# Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for
+# extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
+# be specifying the version(s) of Mercurial they are tested with, or
+# leave the attribute unspecified.
+testedwith = 'ships-with-hg-core'
+
+@command('^split',
+ [('r', 'rev', '', _("revision to split"), _('REV')),
+ ('', 'rebase', True, _('rebase descendants after split')),
+ ] + cmdutil.commitopts2,
+ _('hg split [--no-rebase] [[-r] REV]'))
+def split(ui, repo, *revs, **opts):
+ """split a changeset into smaller ones
+
+ Repeatedly prompt changes and commit message for new changesets until there
+ is nothing left in the original changeset.
+
+ If --rev was not given, split the working directory parent.
+
+ By default, rebase connected non-obsoleted descendants onto the new
+ changeset. Use --no-rebase to avoid the rebase.
+ """
+ revlist = []
+ if opts.get('rev'):
+ revlist.append(opts.get('rev'))
+ revlist.extend(revs)
+ with repo.wlock(), repo.lock(), repo.transaction('split') as tr:
+ revs = scmutil.revrange(repo, revlist or ['.'])
+ if len(revs) > 1:
+ raise error.Abort(_('cannot split multiple revisions'))
+
+ rev = revs.first()
+ ctx = repo[rev]
+ if rev is None or ctx.node() == nullid:
+ ui.status(_('nothing to split\n'))
+ return 1
+ if ctx.node() is None:
+ raise error.Abort(_('cannot split working directory'))
+
+ # rewriteutil.precheck is not very useful here because:
+ # 1. null check is done above and it's more friendly to return 1
+ # instead of abort
+ # 2. mergestate check is done below by cmdutil.bailifchanged
+ # 3. unstable check is more complex here because of --rebase
+ #
+ # So only "public" check is useful and it's checked directly here.
+ if ctx.phase() == phases.public:
+ raise error.Abort(_('cannot split public changeset'),
+ hint=_("see 'hg help phases' for details"))
+
+ descendants = list(repo.revs('(%d::) - (%d)', rev, rev))
+ alloworphaned = obsolete.isenabled(repo, obsolete.allowunstableopt)
+ if opts.get('rebase'):
+ # Skip obsoleted descendants and their descendants so the rebase
+ # won't cause conflicts for sure.
+ torebase = list(repo.revs('%ld - (%ld & obsolete())::',
+ descendants, descendants))
+ if not alloworphaned and len(torebase) != len(descendants):
+ raise error.Abort(_('split would leave orphaned changesets '
+ 'behind'))
+ else:
+ if not alloworphaned and descendants:
+ raise error.Abort(
+ _('cannot split changeset with children without rebase'))
+ torebase = ()
+
+ if len(ctx.parents()) > 1:
+ raise error.Abort(_('cannot split a merge changeset'))
+
+ cmdutil.bailifchanged(repo)
+
+ # Deactivate bookmark temporarily so it won't get moved unintentionally
+ bname = repo._activebookmark
+ if bname and repo._bookmarks[bname] != ctx.node():
+ bookmarks.deactivate(repo)
+
+ wnode = repo['.'].node()
+ top = None
+ try:
+ top = dosplit(ui, repo, tr, ctx, opts)
+ finally:
+ # top is None: split failed, need update --clean recovery.
+ # wnode == ctx.node(): wnode split, no need to update.
+ if top is None or wnode != ctx.node():
+ hg.clean(repo, wnode, show_stats=False)
+ if bname:
+ bookmarks.activate(repo, bname)
+ if torebase and top:
+ dorebase(ui, repo, torebase, top)
+
+def dosplit(ui, repo, tr, ctx, opts):
+ committed = [] # [ctx]
+
+ # Set working parent to ctx.p1(), and keep working copy as ctx's content
+ # NOTE: if we can have "update without touching working copy" API, the
+ # revert step could be cheaper.
+ hg.clean(repo, ctx.p1().node(), show_stats=False)
+ parents = repo.changelog.parents(ctx.node())
+ ui.pushbuffer()
+ cmdutil.revert(ui, repo, ctx, parents)
+ ui.popbuffer() # discard "reverting ..." messages
+
+ # Any modified, added, removed, deleted result means split is incomplete
+ incomplete = lambda repo: any(repo.status()[:4])
+
+ # Main split loop
+ while incomplete(repo):
+ if committed:
+ header = (_('HG: Splitting %s. So far it has been split into:\n')
+ % short(ctx.node()))
+ for c in committed:
+ firstline = c.description().split('\n', 1)[0]
+ header += _('HG: - %s: %s\n') % (short(c.node()), firstline)
+ header += _('HG: Write commit message for the next split '
+ 'changeset.\n')
+ else:
+ header = _('HG: Splitting %s. Write commit message for the '
+ 'first split changeset.\n') % short(ctx.node())
+ opts.update({
+ 'edit': True,
+ 'interactive': True,
+ 'message': header + ctx.description(),
+ })
+ commands.commit(ui, repo, **opts)
+ newctx = repo['.']
+ committed.append(newctx)
+
+ if not committed:
+ raise error.Abort(_('cannot split an empty revision'))
+
+ scmutil.cleanupnodes(repo, {ctx.node(): [c.node() for c in committed]},
+ operation='split')
+
+ return committed[-1]
+
+def dorebase(ui, repo, src, dest):
+ rebase.rebase(ui, repo, rev=[revsetlang.formatspec('%ld', src)],
+ dest=revsetlang.formatspec('%d', dest))
diff -r 87676e8ee056 -r 27b6df1b5adb hgext/uncommit.py
--- a/hgext/uncommit.py Mon Jan 08 16:07:51 2018 -0800
+++ b/hgext/uncommit.py Mon Jan 22 17:53:02 2018 -0500
@@ -28,8 +28,10 @@
copies,
error,
node,
- obsolete,
+ obsutil,
+ pycompat,
registrar,
+ rewriteutil,
scmutil,
)
@@ -75,7 +77,7 @@
if path not in contentctx:
return None
fctx = contentctx[path]
- mctx = context.memfilectx(repo, fctx.path(), fctx.data(),
+ mctx = context.memfilectx(repo, memctx, fctx.path(), fctx.data(),
fctx.islink(),
fctx.isexec(),
copied=copied.get(path))
@@ -96,15 +98,13 @@
newid = repo.commitctx(new)
return newid
-def _uncommitdirstate(repo, oldctx, match):
- """Fix the dirstate after switching the working directory from
- oldctx to a copy of oldctx not containing changed files matched by
- match.
+def _fixdirstate(repo, oldctx, newctx, status):
+ """Fix the dirstate after switching the working directory from oldctx to
+ newctx, which can be the result of either unamend or uncommit.
"""
- ctx = repo['.']
ds = repo.dirstate
copies = dict(ds.copies())
- s = repo.status(oldctx.p1(), oldctx, match=match)
+ s = status
for f in s.modified:
if ds[f] == 'r':
# modified + removed -> removed
@@ -136,7 +136,7 @@
for dst, src in oldcopies.iteritems())
# Adjust the dirstate copies
for dst, src in copies.iteritems():
- if (src not in ctx or dst in ctx or ds[dst] != 'a'):
+ if (src not in newctx or dst in newctx or ds[dst] != 'a'):
src = None
ds.copy(src, dst)
@@ -152,25 +152,17 @@
deleted in the changeset will be left unchanged, and so will remain
modified in the working directory.
"""
+ opts = pycompat.byteskwargs(opts)
with repo.wlock(), repo.lock():
- wctx = repo[None]
if not pats and not repo.ui.configbool('experimental',
'uncommitondirtywdir'):
cmdutil.bailifchanged(repo)
- if wctx.parents()[0].node() == node.nullid:
- raise error.Abort(_("cannot uncommit null changeset"))
- if len(wctx.parents()) > 1:
- raise error.Abort(_("cannot uncommit while merging"))
old = repo['.']
- if not old.mutable():
- raise error.Abort(_('cannot uncommit public changesets'))
+ rewriteutil.precheck(repo, [old.rev()], 'uncommit')
if len(old.parents()) > 1:
raise error.Abort(_("cannot uncommit merge changeset"))
- allowunstable = obsolete.isenabled(repo, obsolete.allowunstableopt)
- if not allowunstable and old.children():
- raise error.Abort(_('cannot uncommit changeset with children'))
with repo.transaction('uncommit'):
match = scmutil.match(old, pats, opts)
@@ -191,4 +183,75 @@
with repo.dirstate.parentchange():
repo.dirstate.setparents(newid, node.nullid)
- _uncommitdirstate(repo, old, match)
+ s = repo.status(old.p1(), old, match=match)
+ _fixdirstate(repo, old, repo[newid], s)
+
+def predecessormarkers(ctx):
+ """yields the obsolete markers marking the given changeset as a successor"""
+ for data in ctx.repo().obsstore.predecessors.get(ctx.node(), ()):
+ yield obsutil.marker(ctx.repo(), data)
+
+@command('^unamend', [])
+def unamend(ui, repo, **opts):
+ """
+ undo the most recent amend operation on a current changeset
+
+ This command will roll back to the previous version of a changeset,
+ leaving working directory in the state in which it was before running
+ `hg amend` (e.g. files modified as part of an amend will be
+ marked as modified in `hg status`)
+ """
+
+ unfi = repo.unfiltered()
+ with repo.wlock(), repo.lock(), repo.transaction('unamend'):
+
+ # identify the commit from which to unamend
+ curctx = repo['.']
+
+ rewriteutil.precheck(repo, [curctx.rev()], 'unamend')
+
+ # identify the commit to which to unamend
+ markers = list(predecessormarkers(curctx))
+ if len(markers) != 1:
+ e = _("changeset must have one predecessor, found %i predecessors")
+ raise error.Abort(e % len(markers))
+
+ prednode = markers[0].prednode()
+ predctx = unfi[prednode]
+
+ # add an extra so that we get a new hash
+ # note: allowing unamend to undo an unamend is an intentional feature
+ extras = predctx.extra()
+ extras['unamend_source'] = curctx.hex()
+
+ def filectxfn(repo, ctx_, path):
+ try:
+ return predctx.filectx(path)
+ except KeyError:
+ return None
+
+ # Make a new commit same as predctx
+ newctx = context.memctx(repo,
+ parents=(predctx.p1(), predctx.p2()),
+ text=predctx.description(),
+ files=predctx.files(),
+ filectxfn=filectxfn,
+ user=predctx.user(),
+ date=predctx.date(),
+ extra=extras)
+ # phase handling
+ commitphase = curctx.phase()
+ overrides = {('phases', 'new-commit'): commitphase}
+ with repo.ui.configoverride(overrides, 'uncommit'):
+ newprednode = repo.commitctx(newctx)
+
+ newpredctx = repo[newprednode]
+ dirstate = repo.dirstate
+
+ with dirstate.parentchange():
+ dirstate.setparents(newprednode, node.nullid)
+ s = repo.status(predctx, curctx)
+ _fixdirstate(repo, curctx, newpredctx, s)
+
+ mapping = {curctx.node(): (newprednode,)}
+ scmutil.cleanupnodes(repo, mapping, 'unamend')
diff -r 87676e8ee056 -r 27b6df1b5adb hgext/win32text.py
--- a/hgext/win32text.py Mon Jan 08 16:07:51 2018 -0800
+++ b/hgext/win32text.py Mon Jan 22 17:53:02 2018 -0500
@@ -139,7 +139,7 @@
# changegroup that contains an unacceptable commit followed later
# by a commit that fixes the problem.
tip = repo['tip']
- for rev in xrange(len(repo) - 1, repo[node].rev() - 1, -1):
+ for rev in xrange(repo.changelog.tiprev(), repo[node].rev() - 1, -1):
c = repo[rev]
for f in c.files():
if f in seen or f not in tip or f not in c:
diff -r 87676e8ee056 -r 27b6df1b5adb hgext/zeroconf/Zeroconf.py
--- a/hgext/zeroconf/Zeroconf.py Mon Jan 08 16:07:51 2018 -0800
+++ b/hgext/zeroconf/Zeroconf.py Mon Jan 22 17:53:02 2018 -0500
@@ -1613,7 +1613,8 @@
_DNS_TTL, service.address))
service = self.services.get(question.name.lower(), None)
- if not service: continue
+ if not service:
+ continue
if (question.type == _TYPE_SRV or
question.type == _TYPE_ANY):
diff -r 87676e8ee056 -r 27b6df1b5adb i18n/de.po
--- a/i18n/de.po Mon Jan 08 16:07:51 2018 -0800
+++ b/i18n/de.po Mon Jan 22 17:53:02 2018 -0500
@@ -9744,86 +9744,9 @@
msgid "child process failed to start"
msgstr ""
-#. i18n: column positioning for "hg log"
-#, python-format
-msgid "changeset: %s\n"
-msgstr "Änderung: %s\n"
-
-#. i18n: column positioning for "hg log"
-#, python-format
-msgid "branch: %s\n"
-msgstr "Zweig: %s\n"
-
-#. i18n: column positioning for "hg log"
-#, python-format
-msgid "bookmark: %s\n"
-msgstr "Lesezeichen: %s\n"
-
-#. i18n: column positioning for "hg log"
-#, python-format
-msgid "tag: %s\n"
-msgstr "Marke: %s\n"
-
-#. i18n: column positioning for "hg log"
-#, python-format
-msgid "phase: %s\n"
-msgstr "Phase: %s\n"
-
-#. i18n: column positioning for "hg log"
-#, python-format
-msgid "parent: %s\n"
-msgstr "Vorgänger: %s\n"
-
-#. i18n: column positioning for "hg log"
-#, python-format
-msgid "manifest: %d:%s\n"
-msgstr "Manifest: %d:%s\n"
-
-#. i18n: column positioning for "hg log"
-#, python-format
-msgid "user: %s\n"
-msgstr "Nutzer: %s\n"
-
-#. i18n: column positioning for "hg log"
-#, python-format
-msgid "date: %s\n"
-msgstr "Datum: %s\n"
-
-#. i18n: column positioning for "hg log"
-msgid "files:"
-msgstr "Dateien:"
-
-#. i18n: column positioning for "hg log"
-msgid "files+:"
-msgstr "Dateien+:"
-
-#. i18n: column positioning for "hg log"
-msgid "files-:"
-msgstr "Dateien-:"
-
-#. i18n: column positioning for "hg log"
-#, python-format
-msgid "files: %s\n"
-msgstr "Dateien: %s\n"
-
-#. i18n: column positioning for "hg log"
-#, python-format
-msgid "copies: %s\n"
-msgstr "Kopien: %s\n"
-
-#. i18n: column positioning for "hg log"
-#, python-format
-msgid "extra: %s=%s\n"
-msgstr "Extra: %s=%s\n"
-
msgid "description:\n"
msgstr "Beschreibung:\n"
-#. i18n: column positioning for "hg log"
-#, python-format
-msgid "summary: %s\n"
-msgstr "Zusammenfassung: %s\n"
-
#, python-format
msgid "%s: no key named '%s'"
msgstr "%s: kein Schlüsselwort '%s'"
@@ -23194,6 +23117,45 @@
":emailuser: Beliebiger Text. Gibt den Nutzerteil einer E-Mail-Adresse\n"
" (vor dem @-Zeichen) zurück."
+#. i18n: column positioning for "hg log"
+#, python-format
+msgid ""
+"bookmark: %s\n"
+"branch: %s\n"
+"changeset: %s\n"
+"copies: %s\n"
+"date: %s\n"
+"extra: %s=%s\n"
+"files+: %s\n"
+"files-: %s\n"
+"files: %s\n"
+"instability: %s\n"
+"manifest: %s\n"
+"obsolete: %s\n"
+"parent: %s\n"
+"phase: %s\n"
+"summary: %s\n"
+"tag: %s\n"
+"user: %s\n"
+msgstr ""
+"Lesezeichen: %s\n"
+"Zweig: %s\n"
+"Änderung: %s\n"
+"Kopien: %s\n"
+"Datum: %s\n"
+"Extra: %s=%s\n"
+"Dateien+: %s\n"
+"Dateien-: %s\n"
+"Dateien: %s\n"
+"instability: %s\n"
+"Manifest: %s\n"
+"obsolete: %s\n"
+"Vorgänger: %s\n"
+"Phase: %s\n"
+"Zusammenfassung: %s\n"
+"Marke: %s\n"
+"Nutzer: %s\n"
+
msgid ":author: String. The unmodified author of the changeset."
msgstr ":author: Zeichenkette. Der unveränderte Autor eines Änderungssatzes."
diff -r 87676e8ee056 -r 27b6df1b5adb mercurial/__init__.py
--- a/mercurial/__init__.py Mon Jan 08 16:07:51 2018 -0800
+++ b/mercurial/__init__.py Mon Jan 22 17:53:02 2018 -0500
@@ -31,9 +31,6 @@
# Only handle Mercurial-related modules.
if not fullname.startswith(('mercurial.', 'hgext.', 'hgext3rd.')):
return None
- # selectors2 is already dual-version clean, don't try and mangle it
- if fullname.startswith('mercurial.selectors2'):
- return None
# third-party packages are expected to be dual-version clean
if fullname.startswith('mercurial.thirdparty'):
return None
diff -r 87676e8ee056 -r 27b6df1b5adb mercurial/archival.py
--- a/mercurial/archival.py Mon Jan 08 16:07:51 2018 -0800
+++ b/mercurial/archival.py Mon Jan 22 17:53:02 2018 -0500
@@ -126,7 +126,7 @@
def __init__(self, *args, **kw):
timestamp = None
if 'timestamp' in kw:
- timestamp = kw.pop('timestamp')
+ timestamp = kw.pop(r'timestamp')
if timestamp is None:
self.timestamp = time.time()
else:
@@ -262,6 +262,7 @@
def __init__(self, name, mtime):
self.basedir = name
self.opener = vfsmod.vfs(self.basedir)
+ self.mtime = mtime
def addfile(self, name, mode, islink, data):
if islink:
@@ -272,6 +273,8 @@
f.close()
destfile = os.path.join(self.basedir, name)
os.chmod(destfile, mode)
+ if self.mtime is not None:
+ os.utime(destfile, (self.mtime, self.mtime))
def done(self):
pass
@@ -299,7 +302,12 @@
matchfn is function to filter names of files to write to archive.
- prefix is name of path to put before every archive member.'''
+ prefix is name of path to put before every archive member.
+
+ mtime is the modified time, in seconds, or None to use the changeset time.
+
+ subrepos tells whether to include subrepos.
+ '''
if kind == 'files':
if prefix:
diff -r 87676e8ee056 -r 27b6df1b5adb mercurial/bdiff.c
--- a/mercurial/bdiff.c Mon Jan 08 16:07:51 2018 -0800
+++ b/mercurial/bdiff.c Mon Jan 22 17:53:02 2018 -0500
@@ -41,7 +41,7 @@
if (p == plast)
i++;
- *lr = l = (struct bdiff_line *)malloc(sizeof(struct bdiff_line) * i);
+ *lr = l = (struct bdiff_line *)calloc(i, sizeof(struct bdiff_line));
if (!l)
return -1;
@@ -95,7 +95,7 @@
/* try to allocate a large hash table to avoid collisions */
for (scale = 4; scale; scale /= 2) {
- h = (struct pos *)malloc(scale * buckets * sizeof(struct pos));
+ h = (struct pos *)calloc(buckets, scale * sizeof(struct pos));
if (h)
break;
}
diff -r 87676e8ee056 -r 27b6df1b5adb mercurial/bookmarks.py
--- a/mercurial/bookmarks.py Mon Jan 08 16:07:51 2018 -0800
+++ b/mercurial/bookmarks.py Mon Jan 22 17:53:02 2018 -0500
@@ -8,17 +8,18 @@
from __future__ import absolute_import
import errno
+import struct
from .i18n import _
from .node import (
bin,
hex,
short,
+ wdirid,
)
from . import (
encoding,
error,
- lock as lockmod,
obsutil,
pycompat,
scmutil,
@@ -121,6 +122,12 @@
self._clean = False
return dict.__delitem__(self, key)
+ def update(self, *others):
+ msg = ("bookmarks.update(...)' is deprecated, "
+ "use 'bookmarks.applychanges'")
+ self._repo.ui.deprecwarn(msg, '4.5')
+ return dict.update(self, *others)
+
def applychanges(self, repo, tr, changes):
"""Apply a list of changes to bookmarks
"""
@@ -390,14 +397,8 @@
bmchanges.append((bm, None))
if bmchanges:
- lock = tr = None
- try:
- lock = repo.lock()
- tr = repo.transaction('bookmark')
+ with repo.lock(), repo.transaction('bookmark') as tr:
marks.applychanges(repo, tr, bmchanges)
- tr.close()
- finally:
- lockmod.release(tr, lock)
return bool(bmchanges)
def listbinbookmarks(repo):
@@ -418,11 +419,7 @@
return d
def pushbookmark(repo, key, old, new):
- w = l = tr = None
- try:
- w = repo.wlock()
- l = repo.lock()
- tr = repo.transaction('bookmarks')
+ with repo.wlock(), repo.lock(), repo.transaction('bookmarks') as tr:
marks = repo._bookmarks
existing = hex(marks.get(key, ''))
if existing != old and existing != new:
@@ -434,10 +431,7 @@
return False
changes = [(key, repo[new].node())]
marks.applychanges(repo, tr, changes)
- tr.close()
return True
- finally:
- lockmod.release(tr, l, w)
def comparebookmarks(repo, srcmarks, dstmarks, targets=None):
'''Compare bookmarks between srcmarks and dstmarks
@@ -550,6 +544,60 @@
binremotemarks[name] = bin(node)
return binremotemarks
+_binaryentry = struct.Struct('>20sH')
+
+def binaryencode(bookmarks):
+ """encode a '(bookmark, node)' iterable into a binary stream
+
+ the binary format is:
+
+ &lt;node&gt;&lt;bookmark-length&gt;&lt;bookmark-name&gt;
+
+ :node: is a 20 bytes binary node,
+ :bookmark-length: an unsigned short,
+ :bookmark-name: the name of the bookmark (of length &lt;bookmark-length&gt;)
+
+ wdirid (all bits set) will be used as a special value for "missing"
+ """
+ binarydata = []
+ for book, node in bookmarks:
+ if not node: # None or ''
+ node = wdirid
+ binarydata.append(_binaryentry.pack(node, len(book)))
+ binarydata.append(book)
+ return ''.join(binarydata)
+
+def binarydecode(stream):
+ """decode a binary stream into an '(bookmark, node)' iterable
+
+ the binary format is:
+
+ &lt;node&gt;&lt;bookmark-length&gt;&lt;bookmark-name&gt;
+
+ :node: is a 20 bytes binary node,
+ :bookmark-length: an unsigned short,
+ :bookmark-name: the name of the bookmark (of length &lt;bookmark-length&gt;))
+
+ wdirid (all bits set) will be used as a special value for "missing"
+ """
+ entrysize = _binaryentry.size
+ books = []
+ while True:
+ entry = stream.read(entrysize)
+ if len(entry) < entrysize:
+ if entry:
+ raise error.Abort(_('bad bookmark stream'))
+ break
+ node, length = _binaryentry.unpack(entry)
+ bookmark = stream.read(length)
+ if len(bookmark) < length:
+ if entry:
+ raise error.Abort(_('bad bookmark stream'))
+ if node == wdirid:
+ node = None
+ books.append((bookmark, node))
+ return books
+
def updatefromremote(ui, repo, remotemarks, path, trfunc, explicit=()):
ui.debug("checking for updated bookmarks\n")
localmarks = repo._bookmarks
@@ -788,6 +836,12 @@
cur = repo.changectx('.').node()
newact = None
changes = []
+ hiddenrev = None
+
+ # unhide revs if any
+ if rev:
+ repo = scmutil.unhidehashlikerevs(repo, [rev], 'nowarn')
+
for mark in names:
mark = checkformat(repo, mark)
if newact is None:
@@ -797,10 +851,21 @@
return
tgt = cur
if rev:
- tgt = scmutil.revsingle(repo, rev).node()
+ ctx = scmutil.revsingle(repo, rev)
+ if ctx.hidden():
+ hiddenrev = ctx.hex()[:12]
+ tgt = ctx.node()
for bm in marks.checkconflict(mark, force, tgt):
changes.append((bm, None))
changes.append((mark, tgt))
+
+ if hiddenrev:
+ repo.ui.warn(_("bookmarking hidden changeset %s\n") % hiddenrev)
+
+ if ctx.obsolete():
+ msg = obsutil._getfilteredreason(repo, "%s" % hiddenrev, ctx)
+ repo.ui.warn("(%s)\n" % msg)
+
marks.applychanges(repo, tr, changes)
if not inactive and cur == marks[newact] and not rev:
activate(repo, newact)
diff -r 87676e8ee056 -r 27b6df1b5adb mercurial/branchmap.py
--- a/mercurial/branchmap.py Mon Jan 08 16:07:51 2018 -0800
+++ b/mercurial/branchmap.py Mon Jan 22 17:53:02 2018 -0500
@@ -84,6 +84,7 @@
# This create and ordering used for branchmap purpose.
# the ordering may be partial
subsettable = {None: 'visible',
+ 'visible-hidden': 'visible',
'visible': 'served',
'served': 'immutable',
'immutable': 'base'}
diff -r 87676e8ee056 -r 27b6df1b5adb mercurial/bundle2.py
--- a/mercurial/bundle2.py Mon Jan 08 16:07:51 2018 -0800
+++ b/mercurial/bundle2.py Mon Jan 22 17:53:02 2018 -0500
@@ -148,6 +148,7 @@
from __future__ import absolute_import, division
import errno
+import os
import re
import string
import struct
@@ -155,6 +156,7 @@
from .i18n import _
from . import (
+ bookmarks,
changegroup,
error,
node as nodemod,
@@ -162,6 +164,7 @@
phases,
pushkey,
pycompat,
+ streamclone,
tags,
url,
util,
@@ -180,7 +183,7 @@
_fpayloadsize = '>i'
_fpartparamcount = '>BB'
-preferedchunksize = 4096
+preferedchunksize = 32768
_parttypeforbidden = re.compile('[^a-zA-Z0-9_:-]')
@@ -299,6 +302,8 @@
self.captureoutput = captureoutput
self.hookargs = {}
self._gettransaction = transactiongetter
+ # carries value that can modify part behavior
+ self.modes = {}
def gettransaction(self):
transaction = self._gettransaction()
@@ -362,7 +367,7 @@
self.count = count
self.current = p
yield p
- p.seek(0, 2)
+ p.consume()
self.current = None
self.iterator = func()
return self.iterator
@@ -384,11 +389,11 @@
try:
if self.current:
# consume the part content to not corrupt the stream.
- self.current.seek(0, 2)
+ self.current.consume()
for part in self.iterator:
# consume the bundle content
- part.seek(0, 2)
+ part.consume()
except Exception:
seekerror = True
@@ -594,6 +599,10 @@
self.capabilities = dict(capabilities)
self._compengine = util.compengines.forbundletype('UN')
self._compopts = None
+ # If compression is being handled by a consumer of the raw
+ # data (e.g. the wire protocol), unsetting this flag tells
+ # consumers that the bundle is best left uncompressed.
+ self.prefercompressed = True
def setcompression(self, alg, compopts=None):
"""setup core part compression to """
@@ -844,8 +853,9 @@
yield self._readexact(size)
- def iterparts(self):
+ def iterparts(self, seekable=False):
"""yield all parts contained in the stream"""
+ cls = seekableunbundlepart if seekable else unbundlepart
# make sure param have been loaded
self.params
# From there, payload need to be decompressed
@@ -853,13 +863,12 @@
indebug(self.ui, 'start extraction of bundle2 parts')
headerblock = self._readpartheader()
while headerblock is not None:
- part = unbundlepart(self.ui, headerblock, self._fp)
+ part = cls(self.ui, headerblock, self._fp)
yield part
- # Seek to the end of the part to force it's consumption so the next
- # part can be read. But then seek back to the beginning so the
- # code consuming this generator has a part that starts at 0.
- part.seek(0, 2)
- part.seek(0)
+ # Ensure part is fully consumed so we can start reading the next
+ # part.
+ part.consume()
+
headerblock = self._readpartheader()
indebug(self.ui, 'end of bundle2 stream')
@@ -1164,7 +1173,7 @@
raise
finally:
if not hardabort:
- part.seek(0, 2)
+ part.consume()
self.ui.debug('bundle2-input-stream-interrupt:'
' closing out of band context\n')
@@ -1186,6 +1195,55 @@
def gettransaction(self):
raise TransactionUnavailable('no repo access from stream interruption')
+def decodepayloadchunks(ui, fh):
+ """Reads bundle2 part payload data into chunks.
+
+ Part payload data consists of framed chunks. This function takes
+ a file handle and emits those chunks.
+ """
+ dolog = ui.configbool('devel', 'bundle2.debug')
+ debug = ui.debug
+
+ headerstruct = struct.Struct(_fpayloadsize)
+ headersize = headerstruct.size
+ unpack = headerstruct.unpack
+
+ readexactly = changegroup.readexactly
+ read = fh.read
+
+ chunksize = unpack(readexactly(fh, headersize))[0]
+ indebug(ui, 'payload chunk size: %i' % chunksize)
+
+ # changegroup.readexactly() is inlined below for performance.
+ while chunksize:
+ if chunksize >= 0:
+ s = read(chunksize)
+ if len(s) < chunksize:
+ raise error.Abort(_('stream ended unexpectedly '
+ ' (got %d bytes, expected %d)') %
+ (len(s), chunksize))
+
+ yield s
+ elif chunksize == flaginterrupt:
+ # Interrupt "signal" detected. The regular stream is interrupted
+ # and a bundle2 part follows. Consume it.
+ interrupthandler(ui, fh)()
+ else:
+ raise error.BundleValueError(
+ 'negative payload chunk size: %s' % chunksize)
+
+ s = read(headersize)
+ if len(s) < headersize:
+ raise error.Abort(_('stream ended unexpectedly '
+ ' (got %d bytes, expected %d)') %
+ (len(s), headersize))
+
+ chunksize = unpack(s)[0]
+
+ # indebug() inlined for performance.
+ if dolog:
+ debug('bundle2-input: payload chunk size: %i\n' % chunksize)
+
class unbundlepart(unpackermixin):
"""a bundle part read from a bundle"""
@@ -1206,10 +1264,8 @@
self.advisoryparams = None
self.params = None
self.mandatorykeys = ()
- self._payloadstream = None
self._readheader()
self._mandatory = None
- self._chunkindex = [] #(payload, file) position tuples for chunk starts
self._pos = 0
def _fromheader(self, size):
@@ -1236,46 +1292,6 @@
self.params.update(self.advisoryparams)
self.mandatorykeys = frozenset(p[0] for p in mandatoryparams)
- def _payloadchunks(self, chunknum=0):
- '''seek to specified chunk and start yielding data'''
- if len(self._chunkindex) == 0:
- assert chunknum == 0, 'Must start with chunk 0'
- self._chunkindex.append((0, self._tellfp()))
- else:
- assert chunknum < len(self._chunkindex), \
- 'Unknown chunk %d' % chunknum
- self._seekfp(self._chunkindex[chunknum][1])
-
- pos = self._chunkindex[chunknum][0]
- payloadsize = self._unpack(_fpayloadsize)[0]
- indebug(self.ui, 'payload chunk size: %i' % payloadsize)
- while payloadsize:
- if payloadsize == flaginterrupt:
- # interruption detection, the handler will now read a
- # single part and process it.
- interrupthandler(self.ui, self._fp)()
- elif payloadsize < 0:
- msg = 'negative payload chunk size: %i' % payloadsize
- raise error.BundleValueError(msg)
- else:
- result = self._readexact(payloadsize)
- chunknum += 1
- pos += payloadsize
- if chunknum == len(self._chunkindex):
- self._chunkindex.append((pos, self._tellfp()))
- yield result
- payloadsize = self._unpack(_fpayloadsize)[0]
- indebug(self.ui, 'payload chunk size: %i' % payloadsize)
-
- def _findchunk(self, pos):
- '''for a given payload position, return a chunk number and offset'''
- for chunk, (ppos, fpos) in enumerate(self._chunkindex):
- if ppos == pos:
- return chunk, 0
- elif ppos > pos:
- return chunk - 1, pos - self._chunkindex[chunk - 1][0]
- raise ValueError('Unknown chunk')
-
def _readheader(self):
"""read the header and setup the object"""
typesize = self._unpackheader(_fparttypesize)[0]
@@ -1311,6 +1327,24 @@
# we read the data, tell it
self._initialized = True
+ def _payloadchunks(self):
+ """Generator of decoded chunks in the payload."""
+ return decodepayloadchunks(self.ui, self._fp)
+
+ def consume(self):
+ """Read the part payload until completion.
+
+ By consuming the part data, the underlying stream read offset will
+ be advanced to the next part (or end of stream).
+ """
+ if self.consumed:
+ return
+
+ chunk = self.read(32768)
+ while chunk:
+ self._pos += len(chunk)
+ chunk = self.read(32768)
+
def read(self, size=None):
"""read payload data"""
if not self._initialized:
@@ -1327,23 +1361,82 @@
self.consumed = True
return data
+class seekableunbundlepart(unbundlepart):
+ """A bundle2 part in a bundle that is seekable.
+
+ Regular ``unbundlepart`` instances can only be read once. This class
+ extends ``unbundlepart`` to enable bi-directional seeking within the
+ part.
+
+ Bundle2 part data consists of framed chunks. Offsets when seeking
+ refer to the decoded data, not the offsets in the underlying bundle2
+ stream.
+
+ To facilitate quickly seeking within the decoded data, instances of this
+ class maintain a mapping between offsets in the underlying stream and
+ the decoded payload. This mapping will consume memory in proportion
+ to the number of chunks within the payload (which almost certainly
+ increases in proportion with the size of the part).
+ """
+ def __init__(self, ui, header, fp):
+ # (payload, file) offsets for chunk starts.
+ self._chunkindex = []
+
+ super(seekableunbundlepart, self).__init__(ui, header, fp)
+
+ def _payloadchunks(self, chunknum=0):
+ '''seek to specified chunk and start yielding data'''
+ if len(self._chunkindex) == 0:
+ assert chunknum == 0, 'Must start with chunk 0'
+ self._chunkindex.append((0, self._tellfp()))
+ else:
+ assert chunknum < len(self._chunkindex), \
+ 'Unknown chunk %d' % chunknum
+ self._seekfp(self._chunkindex[chunknum][1])
+
+ pos = self._chunkindex[chunknum][0]
+
+ for chunk in decodepayloadchunks(self.ui, self._fp):
+ chunknum += 1
+ pos += len(chunk)
+ if chunknum == len(self._chunkindex):
+ self._chunkindex.append((pos, self._tellfp()))
+
+ yield chunk
+
+ def _findchunk(self, pos):
+ '''for a given payload position, return a chunk number and offset'''
+ for chunk, (ppos, fpos) in enumerate(self._chunkindex):
+ if ppos == pos:
+ return chunk, 0
+ elif ppos > pos:
+ return chunk - 1, pos - self._chunkindex[chunk - 1][0]
+ raise ValueError('Unknown chunk')
+
def tell(self):
return self._pos
- def seek(self, offset, whence=0):
- if whence == 0:
+ def seek(self, offset, whence=os.SEEK_SET):
+ if whence == os.SEEK_SET:
newpos = offset
- elif whence == 1:
+ elif whence == os.SEEK_CUR:
newpos = self._pos + offset
- elif whence == 2:
+ elif whence == os.SEEK_END:
if not self.consumed:
- self.read()
+ # Can't use self.consume() here because it advances self._pos.
+ chunk = self.read(32768)
+ while chunk:
+ chunk = self.read(32768)
newpos = self._chunkindex[-1][0] - offset
else:
raise ValueError('Unknown whence value: %r' % (whence,))
if newpos > self._chunkindex[-1][0] and not self.consumed:
- self.read()
+ # Can't use self.consume() here because it advances self._pos.
+ chunk = self.read(32768)
+ while chunk:
+                chunk = self.read(32768)
+
if not 0 <= newpos <= self._chunkindex[-1][0]:
raise ValueError('Offset out of range')
@@ -1389,6 +1482,7 @@
# These are only the static capabilities.
# Check the 'getrepocaps' function for the rest.
capabilities = {'HG20': (),
+ 'bookmarks': (),
'error': ('abort', 'unsupportedcontent', 'pushraced',
'pushkey'),
'listkeys': (),
@@ -1397,13 +1491,21 @@
'remote-changegroup': ('http', 'https'),
'hgtagsfnodes': (),
'phases': ('heads',),
+ 'stream': ('v2',),
}
-def getrepocaps(repo, allowpushback=False):
+def getrepocaps(repo, allowpushback=False, role=None):
"""return the bundle2 capabilities for a given repo
Exists to allow extensions (like evolution) to mutate the capabilities.
+
+ The returned value is used for servers advertising their capabilities as
+ well as clients advertising their capabilities to servers as part of
+ bundle2 requests. The ``role`` argument specifies which is which.
"""
+ if role not in ('client', 'server'):
+ raise error.ProgrammingError('role argument must be client or server')
+
caps = capabilities.copy()
caps['changegroup'] = tuple(sorted(
changegroup.supportedincomingversions(repo)))
@@ -1417,6 +1519,18 @@
caps['checkheads'] = ('related',)
if 'phases' in repo.ui.configlist('devel', 'legacy.exchange'):
caps.pop('phases')
+
+ # Don't advertise stream clone support in server mode if not configured.
+ if role == 'server':
+ streamsupported = repo.ui.configbool('server', 'uncompressed',
+ untrusted=True)
+ featuresupported = repo.ui.configbool('experimental', 'bundle2.stream')
+
+ if not streamsupported or not featuresupported:
+ caps.pop('stream')
+ # Else always advertise support on client, because payload support
+ # should always be advertised.
+
return caps
def bundle2caps(remote):
@@ -1702,6 +1816,34 @@
replyto = int(inpart.params['in-reply-to'])
op.records.add('changegroup', {'return': ret}, replyto)
+@parthandler('check:bookmarks')
+def handlecheckbookmarks(op, inpart):
+ """check location of bookmarks
+
+ This part is to be used to detect push race regarding bookmark, it
+ contains binary encoded (bookmark, node) tuple. If the local state does
+ not match the one in the part, a PushRaced exception is raised
+ """
+ bookdata = bookmarks.binarydecode(inpart)
+
+ msgstandard = ('repository changed while pushing - please try again '
+ '(bookmark "%s" move from %s to %s)')
+ msgmissing = ('repository changed while pushing - please try again '
+ '(bookmark "%s" is missing, expected %s)')
+ msgexist = ('repository changed while pushing - please try again '
+ '(bookmark "%s" set on %s, expected missing)')
+ for book, node in bookdata:
+ currentnode = op.repo._bookmarks.get(book)
+ if currentnode != node:
+ if node is None:
+ finalmsg = msgexist % (book, nodemod.short(currentnode))
+ elif currentnode is None:
+ finalmsg = msgmissing % (book, nodemod.short(node))
+ else:
+ finalmsg = msgstandard % (book, nodemod.short(node),
+ nodemod.short(currentnode))
+ raise error.PushRaced(finalmsg)
+
@parthandler('check:heads')
def handlecheckheads(op, inpart):
"""check that head of the repo did not change
@@ -1861,6 +2003,60 @@
kwargs[key] = inpart.params[key]
raise error.PushkeyFailed(partid=str(inpart.id), **kwargs)
+@parthandler('bookmarks')
+def handlebookmark(op, inpart):
+ """transmit bookmark information
+
+ The part contains binary encoded bookmark information.
+
+ The exact behavior of this part can be controlled by the 'bookmarks' mode
+ on the bundle operation.
+
+ When mode is 'apply' (the default) the bookmark information is applied as
+ is to the unbundling repository. Make sure a 'check:bookmarks' part is
+ issued earlier to check for push races in such update. This behavior is
+ suitable for pushing.
+
+ When mode is 'records', the information is recorded into the 'bookmarks'
+ records of the bundle operation. This behavior is suitable for pulling.
+ """
+ changes = bookmarks.binarydecode(inpart)
+
+ pushkeycompat = op.repo.ui.configbool('server', 'bookmarks-pushkey-compat')
+ bookmarksmode = op.modes.get('bookmarks', 'apply')
+
+ if bookmarksmode == 'apply':
+ tr = op.gettransaction()
+ bookstore = op.repo._bookmarks
+ if pushkeycompat:
+ allhooks = []
+ for book, node in changes:
+ hookargs = tr.hookargs.copy()
+ hookargs['pushkeycompat'] = '1'
+ hookargs['namespace'] = 'bookmark'
+ hookargs['key'] = book
+ hookargs['old'] = nodemod.hex(bookstore.get(book, ''))
+ hookargs['new'] = nodemod.hex(node if node is not None else '')
+ allhooks.append(hookargs)
+
+ for hookargs in allhooks:
+ op.repo.hook('prepushkey', throw=True, **hookargs)
+
+ bookstore.applychanges(op.repo, op.gettransaction(), changes)
+
+ if pushkeycompat:
+ def runhook():
+ for hookargs in allhooks:
+ op.repo.hook('pushkey', **hookargs)
+ op.repo._afterlock(runhook)
+
+ elif bookmarksmode == 'records':
+ for book, node in changes:
+ record = {'bookmark': book, 'node': node}
+ op.records.add('bookmarks', record)
+ else:
+ raise error.ProgrammingError('unknown bookmark mode: %s' % bookmarksmode)
+
@parthandler('phase-heads')
def handlephases(op, inpart):
"""apply phases from bundle part to repo"""
@@ -1885,7 +2081,7 @@
# The mergemarkers call will crash if marker creation is not enabled.
# we want to avoid this if the part is advisory.
if not inpart.mandatory and op.repo.obsstore.readonly:
- op.repo.ui.debug('ignoring obsolescence markers, feature not enabled')
+ op.repo.ui.debug('ignoring obsolescence markers, feature not enabled\n')
return
new = op.repo.obsstore.mergemarkers(tr, markerdata)
op.repo.invalidatevolatilesets()
@@ -1943,3 +2139,27 @@
key = "USERVAR_" + key
hookargs[key] = value
op.addhookargs(hookargs)
+
+@parthandler('stream2', ('requirements', 'filecount', 'bytecount'))
+def handlestreamv2bundle(op, part):
+
+ requirements = part.params['requirements'].split()
+ filecount = int(part.params['filecount'])
+ bytecount = int(part.params['bytecount'])
+
+ repo = op.repo
+ if len(repo):
+ msg = _('cannot apply stream clone to non empty repository')
+ raise error.Abort(msg)
+
+ repo.ui.debug('applying stream bundle\n')
+ streamclone.applybundlev2(repo, part, filecount, bytecount,
+ requirements)
+
+ # new requirements = old non-format requirements +
+ # new format-related remote requirements
+ # requirements from the streamed-in repository
+ repo.requirements = set(requirements) | (
+ repo.requirements - repo.supportedformats)
+ repo._applyopenerreqs()
+ repo._writerequirements()
diff -r 87676e8ee056 -r 27b6df1b5adb mercurial/bundlerepo.py
--- a/mercurial/bundlerepo.py Mon Jan 08 16:07:51 2018 -0800
+++ b/mercurial/bundlerepo.py Mon Jan 22 17:53:02 2018 -0500
@@ -42,7 +42,7 @@
)
class bundlerevlog(revlog.revlog):
- def __init__(self, opener, indexfile, bundle, linkmapper):
+ def __init__(self, opener, indexfile, cgunpacker, linkmapper):
# How it works:
# To retrieve a revision, we need to know the offset of the revision in
# the bundle (an unbundle object). We store this offset in the index
@@ -52,15 +52,15 @@
# check revision against repotiprev.
opener = vfsmod.readonlyvfs(opener)
revlog.revlog.__init__(self, opener, indexfile)
- self.bundle = bundle
+ self.bundle = cgunpacker
n = len(self)
self.repotiprev = n - 1
self.bundlerevs = set() # used by 'bundle()' revset expression
- for deltadata in bundle.deltaiter():
+ for deltadata in cgunpacker.deltaiter():
node, p1, p2, cs, deltabase, delta, flags = deltadata
size = len(delta)
- start = bundle.tell() - size
+ start = cgunpacker.tell() - size
link = linkmapper(cs)
if node in self.nodemap:
@@ -86,7 +86,7 @@
self.bundlerevs.add(n)
n += 1
- def _chunk(self, rev):
+ def _chunk(self, rev, df=None):
# Warning: in case of bundle, the diff is against what we stored as
# delta base, not against rev - 1
# XXX: could use some caching
@@ -108,7 +108,7 @@
return mdiff.textdiff(self.revision(rev1, raw=True),
self.revision(rev2, raw=True))
- def revision(self, nodeorrev, raw=False):
+ def revision(self, nodeorrev, _df=None, raw=False):
"""return an uncompressed revision of a given node or revision
number.
"""
@@ -152,20 +152,23 @@
# needs to override 'baserevision' and make more specific call here.
return revlog.revlog.revision(self, nodeorrev, raw=True)
- def addrevision(self, text, transaction, link, p1=None, p2=None, d=None):
+ def addrevision(self, *args, **kwargs):
+ raise NotImplementedError
+
+ def addgroup(self, *args, **kwargs):
raise NotImplementedError
- def addgroup(self, deltas, transaction, addrevisioncb=None):
+
+ def strip(self, *args, **kwargs):
raise NotImplementedError
- def strip(self, rev, minlink):
- raise NotImplementedError
+
def checksize(self):
raise NotImplementedError
class bundlechangelog(bundlerevlog, changelog.changelog):
- def __init__(self, opener, bundle):
+ def __init__(self, opener, cgunpacker):
changelog.changelog.__init__(self, opener)
linkmapper = lambda x: x
- bundlerevlog.__init__(self, opener, self.indexfile, bundle,
+ bundlerevlog.__init__(self, opener, self.indexfile, cgunpacker,
linkmapper)
def baserevision(self, nodeorrev):
@@ -183,9 +186,10 @@
self.filteredrevs = oldfilter
class bundlemanifest(bundlerevlog, manifest.manifestrevlog):
- def __init__(self, opener, bundle, linkmapper, dirlogstarts=None, dir=''):
+ def __init__(self, opener, cgunpacker, linkmapper, dirlogstarts=None,
+ dir=''):
manifest.manifestrevlog.__init__(self, opener, dir=dir)
- bundlerevlog.__init__(self, opener, self.indexfile, bundle,
+ bundlerevlog.__init__(self, opener, self.indexfile, cgunpacker,
linkmapper)
if dirlogstarts is None:
dirlogstarts = {}
@@ -214,9 +218,9 @@
return super(bundlemanifest, self).dirlog(d)
class bundlefilelog(bundlerevlog, filelog.filelog):
- def __init__(self, opener, path, bundle, linkmapper):
+ def __init__(self, opener, path, cgunpacker, linkmapper):
filelog.filelog.__init__(self, opener, path)
- bundlerevlog.__init__(self, opener, self.indexfile, bundle,
+ bundlerevlog.__init__(self, opener, self.indexfile, cgunpacker,
linkmapper)
def baserevision(self, nodeorrev):
@@ -243,82 +247,106 @@
self.invalidate()
self.dirty = True
-def _getfilestarts(bundle):
- bundlefilespos = {}
- for chunkdata in iter(bundle.filelogheader, {}):
+def _getfilestarts(cgunpacker):
+ filespos = {}
+ for chunkdata in iter(cgunpacker.filelogheader, {}):
fname = chunkdata['filename']
- bundlefilespos[fname] = bundle.tell()
- for chunk in iter(lambda: bundle.deltachunk(None), {}):
+ filespos[fname] = cgunpacker.tell()
+ for chunk in iter(lambda: cgunpacker.deltachunk(None), {}):
pass
- return bundlefilespos
+ return filespos
class bundlerepository(localrepo.localrepository):
- def __init__(self, ui, path, bundlename):
+ """A repository instance that is a union of a local repo and a bundle.
+
+ Instances represent a read-only repository composed of a local repository
+ with the contents of a bundle file applied. The repository instance is
+ conceptually similar to the state of a repository after an
+ ``hg unbundle`` operation. However, the contents of the bundle are never
+ applied to the actual base repository.
+ """
+ def __init__(self, ui, repopath, bundlepath):
self._tempparent = None
try:
- localrepo.localrepository.__init__(self, ui, path)
+ localrepo.localrepository.__init__(self, ui, repopath)
except error.RepoError:
self._tempparent = tempfile.mkdtemp()
localrepo.instance(ui, self._tempparent, 1)
localrepo.localrepository.__init__(self, ui, self._tempparent)
self.ui.setconfig('phases', 'publish', False, 'bundlerepo')
- if path:
- self._url = 'bundle:' + util.expandpath(path) + '+' + bundlename
+ if repopath:
+ self._url = 'bundle:' + util.expandpath(repopath) + '+' + bundlepath
else:
- self._url = 'bundle:' + bundlename
+ self._url = 'bundle:' + bundlepath
self.tempfile = None
- f = util.posixfile(bundlename, "rb")
- self.bundlefile = self.bundle = exchange.readbundle(ui, f, bundlename)
+ f = util.posixfile(bundlepath, "rb")
+ bundle = exchange.readbundle(ui, f, bundlepath)
- if isinstance(self.bundle, bundle2.unbundle20):
- hadchangegroup = False
- for part in self.bundle.iterparts():
+ if isinstance(bundle, bundle2.unbundle20):
+ self._bundlefile = bundle
+ self._cgunpacker = None
+
+ cgpart = None
+ for part in bundle.iterparts(seekable=True):
if part.type == 'changegroup':
- if hadchangegroup:
+ if cgpart:
raise NotImplementedError("can't process "
"multiple changegroups")
- hadchangegroup = True
+ cgpart = part
- self._handlebundle2part(part)
+ self._handlebundle2part(bundle, part)
- if not hadchangegroup:
+ if not cgpart:
raise error.Abort(_("No changegroups found"))
- elif self.bundle.compressed():
- f = self._writetempbundle(self.bundle.read, '.hg10un',
- header='HG10UN')
- self.bundlefile = self.bundle = exchange.readbundle(ui, f,
- bundlename,
- self.vfs)
+ # This is required to placate a later consumer, which expects
+ # the payload offset to be at the beginning of the changegroup.
+ # We need to do this after the iterparts() generator advances
+ # because iterparts() will seek to end of payload after the
+ # generator returns control to iterparts().
+ cgpart.seek(0, os.SEEK_SET)
- # dict with the mapping 'filename' -> position in the bundle
- self.bundlefilespos = {}
+ elif isinstance(bundle, changegroup.cg1unpacker):
+ if bundle.compressed():
+ f = self._writetempbundle(bundle.read, '.hg10un',
+ header='HG10UN')
+ bundle = exchange.readbundle(ui, f, bundlepath, self.vfs)
+
+ self._bundlefile = bundle
+ self._cgunpacker = bundle
+ else:
+ raise error.Abort(_('bundle type %s cannot be read') %
+ type(bundle))
+
+ # dict with the mapping 'filename' -> position in the changegroup.
+ self._cgfilespos = {}
self.firstnewrev = self.changelog.repotiprev + 1
phases.retractboundary(self, None, phases.draft,
[ctx.node() for ctx in self[self.firstnewrev:]])
- def _handlebundle2part(self, part):
- if part.type == 'changegroup':
- cgstream = part
- version = part.params.get('version', '01')
- legalcgvers = changegroup.supportedincomingversions(self)
- if version not in legalcgvers:
- msg = _('Unsupported changegroup version: %s')
- raise error.Abort(msg % version)
- if self.bundle.compressed():
- cgstream = self._writetempbundle(part.read,
- ".cg%sun" % version)
+ def _handlebundle2part(self, bundle, part):
+ if part.type != 'changegroup':
+ return
- self.bundle = changegroup.getunbundler(version, cgstream, 'UN')
+ cgstream = part
+ version = part.params.get('version', '01')
+ legalcgvers = changegroup.supportedincomingversions(self)
+ if version not in legalcgvers:
+ msg = _('Unsupported changegroup version: %s')
+ raise error.Abort(msg % version)
+ if bundle.compressed():
+ cgstream = self._writetempbundle(part.read, '.cg%sun' % version)
+
+ self._cgunpacker = changegroup.getunbundler(version, cgstream, 'UN')
def _writetempbundle(self, readfn, suffix, header=''):
"""Write a temporary file to disk
"""
fdtemp, temp = self.vfs.mkstemp(prefix="hg-bundle-",
- suffix=".hg10un")
+ suffix=suffix)
self.tempfile = temp
with os.fdopen(fdtemp, pycompat.sysstr('wb')) as fptemp:
@@ -338,20 +366,29 @@
@localrepo.unfilteredpropertycache
def changelog(self):
# consume the header if it exists
- self.bundle.changelogheader()
- c = bundlechangelog(self.svfs, self.bundle)
- self.manstart = self.bundle.tell()
+ self._cgunpacker.changelogheader()
+ c = bundlechangelog(self.svfs, self._cgunpacker)
+ self.manstart = self._cgunpacker.tell()
return c
def _constructmanifest(self):
- self.bundle.seek(self.manstart)
+ self._cgunpacker.seek(self.manstart)
# consume the header if it exists
- self.bundle.manifestheader()
+ self._cgunpacker.manifestheader()
linkmapper = self.unfiltered().changelog.rev
- m = bundlemanifest(self.svfs, self.bundle, linkmapper)
- self.filestart = self.bundle.tell()
+ m = bundlemanifest(self.svfs, self._cgunpacker, linkmapper)
+ self.filestart = self._cgunpacker.tell()
return m
+ def _consumemanifest(self):
+ """Consumes the manifest portion of the bundle, setting filestart so the
+ file portion can be read."""
+ self._cgunpacker.seek(self.manstart)
+ self._cgunpacker.manifestheader()
+ for delta in self._cgunpacker.deltaiter():
+ pass
+ self.filestart = self._cgunpacker.tell()
+
@localrepo.unfilteredpropertycache
def manstart(self):
self.changelog
@@ -360,26 +397,34 @@
@localrepo.unfilteredpropertycache
def filestart(self):
self.manifestlog
+
+ # If filestart was not set by self.manifestlog, that means the
+ # manifestlog implementation did not consume the manifests from the
+ # changegroup (ex: it might be consuming trees from a separate bundle2
+ # part instead). So we need to manually consume it.
+ if 'filestart' not in self.__dict__:
+ self._consumemanifest()
+
return self.filestart
def url(self):
return self._url
def file(self, f):
- if not self.bundlefilespos:
- self.bundle.seek(self.filestart)
- self.bundlefilespos = _getfilestarts(self.bundle)
+ if not self._cgfilespos:
+ self._cgunpacker.seek(self.filestart)
+ self._cgfilespos = _getfilestarts(self._cgunpacker)
- if f in self.bundlefilespos:
- self.bundle.seek(self.bundlefilespos[f])
+ if f in self._cgfilespos:
+ self._cgunpacker.seek(self._cgfilespos[f])
linkmapper = self.unfiltered().changelog.rev
- return bundlefilelog(self.svfs, f, self.bundle, linkmapper)
+ return bundlefilelog(self.svfs, f, self._cgunpacker, linkmapper)
else:
return filelog.filelog(self.svfs, f)
def close(self):
"""Close assigned bundle file immediately."""
- self.bundlefile.close()
+ self._bundlefile.close()
if self.tempfile is not None:
self.vfs.unlink(self.tempfile)
if self._tempparent:
@@ -496,10 +541,10 @@
and other.capable('bundle2'))
if canbundle2:
kwargs = {}
- kwargs['common'] = common
- kwargs['heads'] = rheads
- kwargs['bundlecaps'] = exchange.caps20to10(repo)
- kwargs['cg'] = True
+ kwargs[r'common'] = common
+ kwargs[r'heads'] = rheads
+ kwargs[r'bundlecaps'] = exchange.caps20to10(repo, role='client')
+ kwargs[r'cg'] = True
b2 = other.getbundle('incoming', **kwargs)
fname = bundle = changegroup.writechunks(ui, b2._forwardchunks(),
bundlename)
diff -r 87676e8ee056 -r 27b6df1b5adb mercurial/byterange.py
--- a/mercurial/byterange.py Mon Jan 08 16:07:51 2018 -0800
+++ b/mercurial/byterange.py Mon Jan 22 17:53:02 2018 -0500
@@ -416,7 +416,7 @@
if range_header is None:
return None
if _rangere is None:
- _rangere = re.compile(r'^bytes=(\d{1,})-(\d*)')
+ _rangere = re.compile(br'^bytes=(\d{1,})-(\d*)')
match = _rangere.match(range_header)
if match:
tup = range_tuple_normalize(match.group(1, 2))
diff -r 87676e8ee056 -r 27b6df1b5adb mercurial/cacheutil.py
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/mercurial/cacheutil.py Mon Jan 22 17:53:02 2018 -0500
@@ -0,0 +1,21 @@
+# cacheutil.py - utilities for copying repository cache files
+#
+# Copyright Matt Mackall and others
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2 or any later version.
+from __future__ import absolute_import
+
+from . import repoview
+
+def cachetocopy(srcrepo):
+ """return the list of cache file valuable to copy during a clone"""
+ # In local clones we're copying all nodes, not just served
+ # ones. Therefore copy all branch caches over.
+ cachefiles = ['branch2']
+ cachefiles += ['branch2-%s' % f for f in repoview.filtertable]
+ cachefiles += ['rbc-names-v1', 'rbc-revs-v1']
+ cachefiles += ['tags2']
+ cachefiles += ['tags2-%s' % f for f in repoview.filtertable]
+ cachefiles += ['hgtagsfnodes1']
+ return cachefiles
diff -r 87676e8ee056 -r 27b6df1b5adb mercurial/cext/osutil.c
--- a/mercurial/cext/osutil.c Mon Jan 08 16:07:51 2018 -0800
+++ b/mercurial/cext/osutil.c Mon Jan 22 17:53:02 2018 -0500
@@ -20,6 +20,7 @@
#include
#else
#include
+#include <signal.h>
#include
#include
#include
@@ -1111,6 +1112,43 @@
}
#endif /* defined(HAVE_LINUX_STATFS) || defined(HAVE_BSD_STATFS) */
+#if defined(HAVE_BSD_STATFS)
+/* given a directory path, return filesystem mount point (best-effort) */
+static PyObject *getfsmountpoint(PyObject *self, PyObject *args)
+{
+ const char *path = NULL;
+ struct statfs buf;
+ int r;
+ if (!PyArg_ParseTuple(args, "s", &path))
+ return NULL;
+
+ memset(&buf, 0, sizeof(buf));
+ r = statfs(path, &buf);
+ if (r != 0)
+ return PyErr_SetFromErrno(PyExc_OSError);
+ return Py_BuildValue("s", buf.f_mntonname);
+}
+#endif /* defined(HAVE_BSD_STATFS) */
+
+static PyObject *unblocksignal(PyObject *self, PyObject *args)
+{
+ int sig = 0;
+ int r;
+ if (!PyArg_ParseTuple(args, "i", &sig))
+ return NULL;
+ sigset_t set;
+ r = sigemptyset(&set);
+ if (r != 0)
+ return PyErr_SetFromErrno(PyExc_OSError);
+ r = sigaddset(&set, sig);
+ if (r != 0)
+ return PyErr_SetFromErrno(PyExc_OSError);
+ r = sigprocmask(SIG_UNBLOCK, &set, NULL);
+ if (r != 0)
+ return PyErr_SetFromErrno(PyExc_OSError);
+ Py_RETURN_NONE;
+}
+
#endif /* ndef _WIN32 */
static PyObject *listdir(PyObject *self, PyObject *args, PyObject *kwargs)
@@ -1291,6 +1329,12 @@
{"getfstype", (PyCFunction)getfstype, METH_VARARGS,
"get filesystem type (best-effort)\n"},
#endif
+#if defined(HAVE_BSD_STATFS)
+ {"getfsmountpoint", (PyCFunction)getfsmountpoint, METH_VARARGS,
+ "get filesystem mount point (best-effort)\n"},
+#endif
+ {"unblocksignal", (PyCFunction)unblocksignal, METH_VARARGS,
+ "change signal mask to unblock a given signal\n"},
#endif /* ndef _WIN32 */
#ifdef __APPLE__
{
@@ -1301,7 +1345,7 @@
{NULL, NULL}
};
-static const int version = 1;
+static const int version = 3;
#ifdef IS_PY3K
static struct PyModuleDef osutil_module = {
diff -r 87676e8ee056 -r 27b6df1b5adb mercurial/cext/parsers.c
--- a/mercurial/cext/parsers.c Mon Jan 08 16:07:51 2018 -0800
+++ b/mercurial/cext/parsers.c Mon Jan 22 17:53:02 2018 -0500
@@ -710,7 +710,7 @@
void manifest_module_init(PyObject *mod);
void revlog_module_init(PyObject *mod);
-static const int version = 3;
+static const int version = 4;
static void module_init(PyObject *mod)
{
diff -r 87676e8ee056 -r 27b6df1b5adb mercurial/cext/revlog.c
--- a/mercurial/cext/revlog.c Mon Jan 08 16:07:51 2018 -0800
+++ b/mercurial/cext/revlog.c Mon Jan 22 17:53:02 2018 -0500
@@ -628,7 +628,7 @@
{
PyObject *roots = Py_None;
PyObject *ret = NULL;
- PyObject *phaseslist = NULL;
+ PyObject *phasessize = NULL;
PyObject *phaseroots = NULL;
PyObject *phaseset = NULL;
PyObject *phasessetlist = NULL;
@@ -685,12 +685,10 @@
}
}
/* Transform phase list to a python list */
- phaseslist = PyList_New(len);
- if (phaseslist == NULL)
+ phasessize = PyInt_FromLong(len);
+ if (phasessize == NULL)
goto release;
for (i = 0; i < len; i++) {
- PyObject *phaseval;
-
phase = phases[i];
/* We only store the sets of phase for non public phase, the public phase
* is computed as a difference */
@@ -702,15 +700,11 @@
PySet_Add(phaseset, rev);
Py_XDECREF(rev);
}
- phaseval = PyInt_FromLong(phase);
- if (phaseval == NULL)
- goto release;
- PyList_SET_ITEM(phaseslist, i, phaseval);
}
- ret = PyTuple_Pack(2, phaseslist, phasessetlist);
+ ret = PyTuple_Pack(2, phasessize, phasessetlist);
release:
- Py_XDECREF(phaseslist);
+ Py_XDECREF(phasessize);
Py_XDECREF(phasessetlist);
done:
free(phases);
diff -r 87676e8ee056 -r 27b6df1b5adb mercurial/cext/util.h
--- a/mercurial/cext/util.h Mon Jan 08 16:07:51 2018 -0800
+++ b/mercurial/cext/util.h Mon Jan 22 17:53:02 2018 -0500
@@ -27,7 +27,9 @@
extern PyTypeObject dirstateTupleType;
#define dirstate_tuple_check(op) (Py_TYPE(op) == &dirstateTupleType)
+#ifndef MIN
#define MIN(a, b) (((a) < (b)) ? (a) : (b))
+#endif
/* VC9 doesn't include bool and lacks stdbool.h based on my searching */
#if defined(_MSC_VER) || __STDC_VERSION__ < 199901L
#define true 1
diff -r 87676e8ee056 -r 27b6df1b5adb mercurial/changegroup.py
--- a/mercurial/changegroup.py Mon Jan 08 16:07:51 2018 -0800
+++ b/mercurial/changegroup.py Mon Jan 22 17:53:02 2018 -0500
@@ -32,14 +32,7 @@
_CHANGEGROUPV2_DELTA_HEADER = "20s20s20s20s20s"
_CHANGEGROUPV3_DELTA_HEADER = ">20s20s20s20s20sH"
-def readexactly(stream, n):
- '''read n bytes from stream.read and abort if less was available'''
- s = stream.read(n)
- if len(s) < n:
- raise error.Abort(_("stream ended unexpectedly"
- " (got %d bytes, expected %d)")
- % (len(s), n))
- return s
+readexactly = util.readexactly
def getchunk(stream):
"""return the next chunk from stream as a string"""
@@ -692,7 +685,7 @@
# Callback for the manifest, used to collect linkrevs for filelog
# revisions.
# Returns the linkrev node (collected in lookupcl).
- def makelookupmflinknode(dir):
+ def makelookupmflinknode(dir, nodes):
if fastpathlinkrev:
assert not dir
return mfs.__getitem__
@@ -713,7 +706,7 @@
the client before you can trust the list of files and
treemanifests to send.
"""
- clnode = tmfnodes[dir][x]
+ clnode = nodes[x]
mdata = mfl.get(dir, x).readfast(shallow=True)
for p, n, fl in mdata.iterentries():
if fl == 't': # subdirectory manifest
@@ -733,15 +726,13 @@
size = 0
while tmfnodes:
- dir = min(tmfnodes)
- nodes = tmfnodes[dir]
+ dir, nodes = tmfnodes.popitem()
prunednodes = self.prune(dirlog(dir), nodes, commonrevs)
if not dir or prunednodes:
for x in self._packmanifests(dir, prunednodes,
- makelookupmflinknode(dir)):
+ makelookupmflinknode(dir, nodes)):
size += len(x)
yield x
- del tmfnodes[dir]
self._verbosenote(_('%8.i (manifests)\n') % size)
yield self._manifestsdone()
diff -r 87676e8ee056 -r 27b6df1b5adb mercurial/changelog.py
--- a/mercurial/changelog.py Mon Jan 08 16:07:51 2018 -0800
+++ b/mercurial/changelog.py Mon Jan 22 17:53:02 2018 -0500
@@ -295,11 +295,14 @@
self._divert = False
self.filteredrevs = frozenset()
+ def tiprev(self):
+ for i in xrange(len(self) -1, -2, -1):
+ if i not in self.filteredrevs:
+ return i
+
def tip(self):
"""filtered version of revlog.tip"""
- for i in xrange(len(self) -1, -2, -1):
- if i not in self.filteredrevs:
- return self.node(i)
+ return self.node(self.tiprev())
def __contains__(self, rev):
"""filtered version of revlog.__contains__"""
@@ -541,5 +544,10 @@
*args, **kwargs)
revs = transaction.changes.get('revs')
if revs is not None:
- revs.add(rev)
+ if revs:
+ assert revs[-1] + 1 == rev
+ revs = xrange(revs[0], rev + 1)
+ else:
+ revs = xrange(rev, rev + 1)
+ transaction.changes['revs'] = revs
return node
diff -r 87676e8ee056 -r 27b6df1b5adb mercurial/chgserver.py
--- a/mercurial/chgserver.py Mon Jan 08 16:07:51 2018 -0800
+++ b/mercurial/chgserver.py Mon Jan 22 17:53:02 2018 -0500
@@ -55,6 +55,7 @@
encoding,
error,
extensions,
+ node,
pycompat,
util,
)
@@ -63,7 +64,7 @@
def _hashlist(items):
"""return sha1 hexdigest for a list"""
- return hashlib.sha1(str(items)).hexdigest()
+ return node.hex(hashlib.sha1(str(items)).digest())
# sensitive config sections affecting confighash
_configsections = [
@@ -220,16 +221,7 @@
newui._csystem = srcui._csystem
# command line args
- options = {}
- if srcui.plain('strictflags'):
- options.update(dispatch._earlyparseopts(args))
- else:
- args = args[:]
- options['config'] = dispatch._earlygetopt(['--config'], args)
- cwds = dispatch._earlygetopt(['--cwd'], args)
- options['cwd'] = cwds and cwds[-1] or ''
- rpath = dispatch._earlygetopt(["-R", "--repository", "--repo"], args)
- options['repository'] = rpath and rpath[-1] or ''
+ options = dispatch._earlyparseopts(newui, args)
dispatch._parseconfig(newui, options['config'])
# stolen from tortoisehg.util.copydynamicconfig()
diff -r 87676e8ee056 -r 27b6df1b5adb mercurial/cmdutil.py
--- a/mercurial/cmdutil.py Mon Jan 08 16:07:51 2018 -0800
+++ b/mercurial/cmdutil.py Mon Jan 22 17:53:02 2018 -0500
@@ -41,6 +41,8 @@
registrar,
revlog,
revset,
+ revsetlang,
+ rewriteutil,
scmutil,
smartset,
templatekw,
@@ -181,7 +183,7 @@
def setupwrapcolorwrite(ui):
# wrap ui.write so diff output can be labeled/colorized
def wrapwrite(orig, *args, **kw):
- label = kw.pop('label', '')
+ label = kw.pop(r'label', '')
for chunk, l in patch.difflabel(lambda: args):
orig(chunk, label=label + l)
@@ -372,7 +374,7 @@
# Make all of the pathnames absolute.
newfiles = [repo.wjoin(nf) for nf in newfiles]
- return commitfunc(ui, repo, *newfiles, **opts)
+ return commitfunc(ui, repo, *newfiles, **pycompat.strkwargs(opts))
finally:
# 5. finally restore backed-up files
try:
@@ -712,6 +714,97 @@
raise error.UnknownCommand(cmd, allcmds)
+def changebranch(ui, repo, revs, label):
+ """ Change the branch name of given revs to label """
+
+ with repo.wlock(), repo.lock(), repo.transaction('branches'):
+ # abort in case of uncommitted merge or dirty wdir
+ bailifchanged(repo)
+ revs = scmutil.revrange(repo, revs)
+ if not revs:
+ raise error.Abort("empty revision set")
+ roots = repo.revs('roots(%ld)', revs)
+ if len(roots) > 1:
+ raise error.Abort(_("cannot change branch of non-linear revisions"))
+ rewriteutil.precheck(repo, revs, 'change branch of')
+
+ root = repo[roots.first()]
+ if not root.p1().branch() == label and label in repo.branchmap():
+ raise error.Abort(_("a branch of the same name already exists"))
+
+ if repo.revs('merge() and %ld', revs):
+ raise error.Abort(_("cannot change branch of a merge commit"))
+ if repo.revs('obsolete() and %ld', revs):
+ raise error.Abort(_("cannot change branch of a obsolete changeset"))
+
+ # make sure only topological heads
+ if repo.revs('heads(%ld) - head()', revs):
+ raise error.Abort(_("cannot change branch in middle of a stack"))
+
+ replacements = {}
+ # avoid import cycle mercurial.cmdutil -> mercurial.context ->
+ # mercurial.subrepo -> mercurial.cmdutil
+ from . import context
+ for rev in revs:
+ ctx = repo[rev]
+ oldbranch = ctx.branch()
+ # check if ctx has same branch
+ if oldbranch == label:
+ continue
+
+ def filectxfn(repo, newctx, path):
+ try:
+ return ctx[path]
+ except error.ManifestLookupError:
+ return None
+
+ ui.debug("changing branch of '%s' from '%s' to '%s'\n"
+ % (hex(ctx.node()), oldbranch, label))
+ extra = ctx.extra()
+ extra['branch_change'] = hex(ctx.node())
+ # While changing branch of set of linear commits, make sure that
+ # we base our commits on new parent rather than old parent which
+ # was obsoleted while changing the branch
+ p1 = ctx.p1().node()
+ p2 = ctx.p2().node()
+ if p1 in replacements:
+ p1 = replacements[p1][0]
+ if p2 in replacements:
+ p2 = replacements[p2][0]
+
+ mc = context.memctx(repo, (p1, p2),
+ ctx.description(),
+ ctx.files(),
+ filectxfn,
+ user=ctx.user(),
+ date=ctx.date(),
+ extra=extra,
+ branch=label)
+
+ commitphase = ctx.phase()
+ overrides = {('phases', 'new-commit'): commitphase}
+ with repo.ui.configoverride(overrides, 'branch-change'):
+ newnode = repo.commitctx(mc)
+
+ replacements[ctx.node()] = (newnode,)
+ ui.debug('new node id is %s\n' % hex(newnode))
+
+ # create obsmarkers and move bookmarks
+ scmutil.cleanupnodes(repo, replacements, 'branch-change')
+
+ # move the working copy too
+ wctx = repo[None]
+ # in-progress merge is a bit too complex for now.
+ if len(wctx.parents()) == 1:
+ newid = replacements.get(wctx.p1().node())
+ if newid is not None:
+ # avoid import cycle mercurial.cmdutil -> mercurial.hg ->
+ # mercurial.cmdutil
+ from . import hg
+ hg.update(repo, newid[0], quietempty=True)
+
+ ui.status(_("changed branch on %d changesets\n") % len(replacements))
+
def findrepo(p):
while not os.path.isdir(os.path.join(p, ".hg")):
oldp, p = p, os.path.dirname(p)
@@ -823,9 +916,9 @@
total=None, seqno=None, revwidth=None, pathname=None):
node_expander = {
'H': lambda: hex(node),
- 'R': lambda: str(repo.changelog.rev(node)),
+ 'R': lambda: '%d' % repo.changelog.rev(node),
'h': lambda: short(node),
- 'm': lambda: re.sub('[^\w]', '_', str(desc))
+ 'm': lambda: re.sub('[^\w]', '_', desc or '')
}
expander = {
'%': lambda: '%',
@@ -837,13 +930,13 @@
expander.update(node_expander)
if node:
expander['r'] = (lambda:
- str(repo.changelog.rev(node)).zfill(revwidth or 0))
+ ('%d' % repo.changelog.rev(node)).zfill(revwidth or 0))
if total is not None:
- expander['N'] = lambda: str(total)
+ expander['N'] = lambda: '%d' % total
if seqno is not None:
- expander['n'] = lambda: str(seqno)
+ expander['n'] = lambda: '%d' % seqno
if total is not None and seqno is not None:
- expander['n'] = lambda: str(seqno).zfill(len(str(total)))
+ expander['n'] = (lambda: ('%d' % seqno).zfill(len('%d' % total)))
if pathname is not None:
expander['s'] = lambda: os.path.basename(pathname)
expander['d'] = lambda: os.path.dirname(pathname) or '.'
@@ -1334,7 +1427,8 @@
if opts.get('exact'):
editor = None
else:
- editor = getcommiteditor(editform=editform, **opts)
+ editor = getcommiteditor(editform=editform,
+ **pycompat.strkwargs(opts))
extra = {}
for idfunc in extrapreimport:
extrapreimportmap[idfunc](repo, extractdata, extra, opts)
@@ -1518,7 +1612,7 @@
width = 80
if not ui.plain():
width = ui.termwidth()
- chunks = patch.diff(repo, node1, node2, match, changes, diffopts,
+ chunks = patch.diff(repo, node1, node2, match, changes, opts=diffopts,
prefix=prefix, relroot=relroot,
hunksfilterfn=hunksfilterfn)
for chunk, label in patch.diffstatui(util.iterlines(chunks),
@@ -1526,7 +1620,7 @@
write(chunk, label=label)
else:
for chunk, label in patch.diffui(repo, node1, node2, match,
- changes, diffopts, prefix=prefix,
+ changes, opts=diffopts, prefix=prefix,
relroot=relroot,
hunksfilterfn=hunksfilterfn):
write(chunk, label=label)
@@ -1571,6 +1665,7 @@
self.hunk = {}
self.lastheader = None
self.footer = None
+ self._columns = templatekw.getlogcolumns()
def flush(self, ctx):
rev = ctx.rev()
@@ -1583,8 +1678,6 @@
if rev in self.hunk:
self.ui.write(self.hunk[rev])
del self.hunk[rev]
- return 1
- return 0
def close(self):
if self.footer:
@@ -1610,10 +1703,8 @@
label='log.node')
return
- date = util.datestr(ctx.date())
-
- # i18n: column positioning for "hg log"
- self.ui.write(_("changeset: %s\n") % scmutil.formatchangeid(ctx),
+ columns = self._columns
+ self.ui.write(columns['changeset'] % scmutil.formatchangeid(ctx),
label=_changesetlabels(ctx))
# branches are shown first before any other names due to backwards
@@ -1621,9 +1712,7 @@
branch = ctx.branch()
# don't show the default branch name
if branch != 'default':
- # i18n: column positioning for "hg log"
- self.ui.write(_("branch: %s\n") % branch,
- label='log.branch')
+ self.ui.write(columns['branch'] % branch, label='log.branch')
for nsname, ns in self.repo.names.iteritems():
# branches has special logic already handled above, so here we just
@@ -1636,33 +1725,25 @@
self.ui.write(ns.logfmt % name,
label='log.%s' % ns.colorname)
if self.ui.debugflag:
- # i18n: column positioning for "hg log"
- self.ui.write(_("phase: %s\n") % ctx.phasestr(),
- label='log.phase')
+ self.ui.write(columns['phase'] % ctx.phasestr(), label='log.phase')
for pctx in scmutil.meaningfulparents(self.repo, ctx):
label = 'log.parent changeset.%s' % pctx.phasestr()
- # i18n: column positioning for "hg log"
- self.ui.write(_("parent: %s\n") % scmutil.formatchangeid(pctx),
+ self.ui.write(columns['parent'] % scmutil.formatchangeid(pctx),
label=label)
if self.ui.debugflag and rev is not None:
mnode = ctx.manifestnode()
mrev = self.repo.manifestlog._revlog.rev(mnode)
- # i18n: column positioning for "hg log"
- self.ui.write(_("manifest: %s\n")
+ self.ui.write(columns['manifest']
% scmutil.formatrevnode(self.ui, mrev, mnode),
label='ui.debug log.manifest')
- # i18n: column positioning for "hg log"
- self.ui.write(_("user: %s\n") % ctx.user(),
- label='log.user')
- # i18n: column positioning for "hg log"
- self.ui.write(_("date: %s\n") % date,
+ self.ui.write(columns['user'] % ctx.user(), label='log.user')
+ self.ui.write(columns['date'] % util.datestr(ctx.date()),
label='log.date')
if ctx.isunstable():
- # i18n: column positioning for "hg log"
instabilities = ctx.instabilities()
- self.ui.write(_("instability: %s\n") % ', '.join(instabilities),
+ self.ui.write(columns['instability'] % ', '.join(instabilities),
label='log.instability')
elif ctx.obsolete():
@@ -1672,31 +1753,22 @@
if self.ui.debugflag:
files = ctx.p1().status(ctx)[:3]
- for key, value in zip([# i18n: column positioning for "hg log"
- _("files:"),
- # i18n: column positioning for "hg log"
- _("files+:"),
- # i18n: column positioning for "hg log"
- _("files-:")], files):
+ for key, value in zip(['files', 'files+', 'files-'], files):
if value:
- self.ui.write("%-12s %s\n" % (key, " ".join(value)),
+ self.ui.write(columns[key] % " ".join(value),
label='ui.debug log.files')
elif ctx.files() and self.ui.verbose:
- # i18n: column positioning for "hg log"
- self.ui.write(_("files: %s\n") % " ".join(ctx.files()),
+ self.ui.write(columns['files'] % " ".join(ctx.files()),
label='ui.note log.files')
if copies and self.ui.verbose:
copies = ['%s (%s)' % c for c in copies]
- # i18n: column positioning for "hg log"
- self.ui.write(_("copies: %s\n") % ' '.join(copies),
+ self.ui.write(columns['copies'] % ' '.join(copies),
label='ui.note log.copies')
extra = ctx.extra()
if extra and self.ui.debugflag:
for key, value in sorted(extra.items()):
- # i18n: column positioning for "hg log"
- self.ui.write(_("extra: %s=%s\n")
- % (key, util.escapestr(value)),
+ self.ui.write(columns['extra'] % (key, util.escapestr(value)),
label='ui.debug log.extra')
description = ctx.description().strip()
@@ -1708,9 +1780,7 @@
label='ui.note log.description')
self.ui.write("\n\n")
else:
- # i18n: column positioning for "hg log"
- self.ui.write(_("summary: %s\n") %
- description.splitlines()[0],
+ self.ui.write(columns['summary'] % description.splitlines()[0],
label='log.summary')
self.ui.write("\n")
@@ -1721,8 +1791,7 @@
if obsfate:
for obsfateline in obsfate:
- # i18n: column positioning for "hg log"
- self.ui.write(_("obsolete: %s\n") % obsfateline,
+ self.ui.write(self._columns['obsolete'] % obsfateline,
label='log.obsfate')
def _exthook(self, ctx):
@@ -1748,7 +1817,8 @@
diffordiffstat(self.ui, self.repo, diffopts, prev, node,
match=matchfn, stat=False,
hunksfilterfn=hunksfilterfn)
- self.ui.write("\n")
+ if stat or diff:
+ self.ui.write("\n")
class jsonchangeset(changeset_printer):
'''format changeset information.'''
@@ -1850,7 +1920,13 @@
self.ui.write("\n }")
class changeset_templater(changeset_printer):
- '''format changeset information.'''
+ '''format changeset information.
+
+ Note: there are a variety of convenience functions to build a
+ changeset_templater for common cases. See functions such as:
+ makelogtemplater, show_changeset, buildcommittemplate, or other
+ functions that use changesest_templater.
+ '''
# Arguments before "buffered" used to be positional. Consider not
# adding/removing arguments before "buffered" to not break callers.
@@ -1859,10 +1935,13 @@
diffopts = diffopts or {}
changeset_printer.__init__(self, ui, repo, matchfn, diffopts, buffered)
+ tres = formatter.templateresources(ui, repo)
self.t = formatter.loadtemplater(ui, tmplspec,
+ defaults=templatekw.keywords,
+ resources=tres,
cache=templatekw.defaulttempl)
self._counter = itertools.count()
- self.cache = {}
+ self.cache = tres['cache'] # shared with _graphnodeformatter()
self._tref = tmplspec.ref
self._parts = {'header': '', 'footer': '',
@@ -1901,14 +1980,9 @@
def _show(self, ctx, copies, matchfn, hunksfilterfn, props):
'''show a single changeset or file revision'''
props = props.copy()
- props.update(templatekw.keywords)
- props['templ'] = self.t
props['ctx'] = ctx
- props['repo'] = self.repo
- props['ui'] = self.repo.ui
props['index'] = index = next(self._counter)
props['revcache'] = {'copies': copies}
- props['cache'] = self.cache
props = pycompat.strkwargs(props)
# write separator, which wouldn't work well with the header part below
@@ -1972,7 +2046,8 @@
return formatter.lookuptemplate(ui, 'changeset', tmpl)
def makelogtemplater(ui, repo, tmpl, buffered=False):
- """Create a changeset_templater from a literal template 'tmpl'"""
+ """Create a changeset_templater from a literal template 'tmpl'
+ byte-string."""
spec = logtemplatespec(tmpl, None)
return changeset_templater(ui, repo, spec, buffered=buffered)
@@ -2050,6 +2125,21 @@
if windowsize < sizelimit:
windowsize *= 2
+def _walkrevs(repo, opts):
+ # Default --rev value depends on --follow but --follow behavior
+ # depends on revisions resolved from --rev...
+ follow = opts.get('follow') or opts.get('follow_first')
+ if opts.get('rev'):
+ revs = scmutil.revrange(repo, opts['rev'])
+ elif follow and repo.dirstate.p1() == nullid:
+ revs = smartset.baseset()
+ elif follow:
+ revs = repo.revs('reverse(:.)')
+ else:
+ revs = smartset.spanset(repo)
+ revs.reverse()
+ return revs
+
class FileWalkError(Exception):
pass
@@ -2204,12 +2294,11 @@
function on each context in the window in forward order.'''
follow = opts.get('follow') or opts.get('follow_first')
- revs = _logrevs(repo, opts)
+ revs = _walkrevs(repo, opts)
if not revs:
return []
wanted = set()
- slowpath = match.anypats() or ((match.isexact() or match.prefix()) and
- opts.get('removed'))
+ slowpath = match.anypats() or (not match.always() and opts.get('removed'))
fncache = {}
change = repo.changectx
@@ -2326,90 +2415,36 @@
return iterate()
-def _makefollowlogfilematcher(repo, files, followfirst):
- # When displaying a revision with --patch --follow FILE, we have
- # to know which file of the revision must be diffed. With
- # --follow, we want the names of the ancestors of FILE in the
- # revision, stored in "fcache". "fcache" is populated by
- # reproducing the graph traversal already done by --follow revset
- # and relating revs to file names (which is not "correct" but
- # good enough).
- fcache = {}
- fcacheready = [False]
- pctx = repo['.']
-
- def populate():
- for fn in files:
- fctx = pctx[fn]
- fcache.setdefault(fctx.introrev(), set()).add(fctx.path())
- for c in fctx.ancestors(followfirst=followfirst):
- fcache.setdefault(c.rev(), set()).add(c.path())
-
- def filematcher(rev):
- if not fcacheready[0]:
- # Lazy initialization
- fcacheready[0] = True
- populate()
- return scmutil.matchfiles(repo, fcache.get(rev, []))
-
- return filematcher
-
-def _makenofollowlogfilematcher(repo, pats, opts):
- '''hook for extensions to override the filematcher for non-follow cases'''
- return None
-
-def _makelogrevset(repo, pats, opts, revs):
- """Return (expr, filematcher) where expr is a revset string built
- from log options and file patterns or None. If --stat or --patch
- are not passed filematcher is None. Otherwise it is a callable
- taking a revision number and returning a match objects filtering
- the files to be detailed when displaying the revision.
+def _makelogmatcher(repo, revs, pats, opts):
+ """Build matcher and expanded patterns from log options
+
+ If --follow, revs are the revisions to follow from.
+
+ Returns (match, pats, slowpath) where
+ - match: a matcher built from the given pats and -I/-X opts
+ - pats: patterns used (globs are expanded on Windows)
+ - slowpath: True if patterns aren't as simple as scanning filelogs
"""
- opt2revset = {
- 'no_merges': ('not merge()', None),
- 'only_merges': ('merge()', None),
- '_ancestors': ('ancestors(%(val)s)', None),
- '_fancestors': ('_firstancestors(%(val)s)', None),
- '_descendants': ('descendants(%(val)s)', None),
- '_fdescendants': ('_firstdescendants(%(val)s)', None),
- '_matchfiles': ('_matchfiles(%(val)s)', None),
- 'date': ('date(%(val)r)', None),
- 'branch': ('branch(%(val)r)', ' or '),
- '_patslog': ('filelog(%(val)r)', ' or '),
- '_patsfollow': ('follow(%(val)r)', ' or '),
- '_patsfollowfirst': ('_followfirst(%(val)r)', ' or '),
- 'keyword': ('keyword(%(val)r)', ' or '),
- 'prune': ('not (%(val)r or ancestors(%(val)r))', ' and '),
- 'user': ('user(%(val)r)', ' or '),
- }
-
- opts = dict(opts)
- # follow or not follow?
- follow = opts.get('follow') or opts.get('follow_first')
- if opts.get('follow_first'):
- followfirst = 1
- else:
- followfirst = 0
- # --follow with FILE behavior depends on revs...
- it = iter(revs)
- startrev = next(it)
- followdescendants = startrev < next(it, startrev)
-
- # branch and only_branch are really aliases and must be handled at
- # the same time
- opts['branch'] = opts.get('branch', []) + opts.get('only_branch', [])
- opts['branch'] = [repo.lookupbranch(b) for b in opts['branch']]
# pats/include/exclude are passed to match.match() directly in
# _matchfiles() revset but walkchangerevs() builds its matcher with
# scmutil.match(). The difference is input pats are globbed on
# platforms without shell expansion (windows).
wctx = repo[None]
match, pats = scmutil.matchandpats(wctx, pats, opts)
- slowpath = match.anypats() or ((match.isexact() or match.prefix()) and
- opts.get('removed'))
+ slowpath = match.anypats() or (not match.always() and opts.get('removed'))
if not slowpath:
+ follow = opts.get('follow') or opts.get('follow_first')
+ startctxs = []
+ if follow and opts.get('rev'):
+ startctxs = [repo[r] for r in revs]
for f in match.files():
- if follow and f not in wctx:
+ if follow and startctxs:
+ # No idea if the path was a directory at that revision, so
+ # take the slow path.
+ if any(f not in c for c in startctxs):
+ slowpath = True
+ continue
+ elif follow and f not in wctx:
# If the file exists, it may be a directory, so let it
# take the slow path.
if os.path.exists(repo.wjoin(f)):
@@ -2417,7 +2452,7 @@
continue
else:
raise error.Abort(_('cannot follow file not in parent '
- 'revision: "%s"') % f)
+ 'revision: "%s"') % f)
filelog = repo.file(f)
if not filelog:
# A zero count may be a directory or deleted file, so
@@ -2438,15 +2473,62 @@
else:
slowpath = False
- fpats = ('_patsfollow', '_patsfollowfirst')
- fnopats = (('_ancestors', '_fancestors'),
- ('_descendants', '_fdescendants'))
+ return match, pats, slowpath
+
+def _fileancestors(repo, revs, match, followfirst):
+ fctxs = []
+ for r in revs:
+ ctx = repo[r]
+ fctxs.extend(ctx[f].introfilectx() for f in ctx.walk(match))
+
+ # When displaying a revision with --patch --follow FILE, we have
+ # to know which file of the revision must be diffed. With
+ # --follow, we want the names of the ancestors of FILE in the
+ # revision, stored in "fcache". "fcache" is populated as a side effect
+ # of the graph traversal.
+ fcache = {}
+ def filematcher(rev):
+ return scmutil.matchfiles(repo, fcache.get(rev, []))
+
+ def revgen():
+ for rev, cs in dagop.filectxancestors(fctxs, followfirst=followfirst):
+ fcache[rev] = [c.path() for c in cs]
+ yield rev
+ return smartset.generatorset(revgen(), iterasc=False), filematcher
+
+def _makenofollowlogfilematcher(repo, pats, opts):
+ '''hook for extensions to override the filematcher for non-follow cases'''
+ return None
+
+_opt2logrevset = {
+ 'no_merges': ('not merge()', None),
+ 'only_merges': ('merge()', None),
+ '_matchfiles': (None, '_matchfiles(%ps)'),
+ 'date': ('date(%s)', None),
+ 'branch': ('branch(%s)', '%lr'),
+ '_patslog': ('filelog(%s)', '%lr'),
+ 'keyword': ('keyword(%s)', '%lr'),
+ 'prune': ('ancestors(%s)', 'not %lr'),
+ 'user': ('user(%s)', '%lr'),
+}
+
+def _makelogrevset(repo, match, pats, slowpath, opts):
+ """Return a revset string built from log options and file patterns"""
+ opts = dict(opts)
+ # follow or not follow?
+ follow = opts.get('follow') or opts.get('follow_first')
+
+ # branch and only_branch are really aliases and must be handled at
+ # the same time
+ opts['branch'] = opts.get('branch', []) + opts.get('only_branch', [])
+ opts['branch'] = [repo.lookupbranch(b) for b in opts['branch']]
+
if slowpath:
# See walkchangerevs() slow path.
#
# pats/include/exclude cannot be represented as separate
# revset expressions as their filtering logic applies at file
- # level. For instance "-I a -X a" matches a revision touching
+ # level. For instance "-I a -X b" matches a revision touching
# "a" and "b" while "file(a) and not file(b)" does
# not. Besides, filesets are evaluated against the working
# directory.
@@ -2457,130 +2539,84 @@
matchargs.append('i:' + p)
for p in opts.get('exclude', []):
matchargs.append('x:' + p)
- matchargs = ','.join(('%r' % p) for p in matchargs)
opts['_matchfiles'] = matchargs
- if follow:
- opts[fnopats[0][followfirst]] = '.'
- else:
- if follow:
- if pats:
- # follow() revset interprets its file argument as a
- # manifest entry, so use match.files(), not pats.
- opts[fpats[followfirst]] = list(match.files())
- else:
- op = fnopats[followdescendants][followfirst]
- opts[op] = 'rev(%d)' % startrev
- else:
- opts['_patslog'] = list(pats)
-
- filematcher = None
- if opts.get('patch') or opts.get('stat'):
- # When following files, track renames via a special matcher.
- # If we're forced to take the slowpath it means we're following
- # at least one pattern/directory, so don't bother with rename tracking.
- if follow and not match.always() and not slowpath:
- # _makefollowlogfilematcher expects its files argument to be
- # relative to the repo root, so use match.files(), not pats.
- filematcher = _makefollowlogfilematcher(repo, match.files(),
- followfirst)
- else:
- filematcher = _makenofollowlogfilematcher(repo, pats, opts)
- if filematcher is None:
- filematcher = lambda rev: match
+ elif not follow:
+ opts['_patslog'] = list(pats)
expr = []
for op, val in sorted(opts.iteritems()):
if not val:
continue
- if op not in opt2revset:
+ if op not in _opt2logrevset:
continue
- revop, andor = opt2revset[op]
- if '%(val)' not in revop:
+ revop, listop = _opt2logrevset[op]
+ if revop and '%' not in revop:
expr.append(revop)
+ elif not listop:
+ expr.append(revsetlang.formatspec(revop, val))
else:
- if not isinstance(val, list):
- e = revop % {'val': val}
- else:
- e = '(' + andor.join((revop % {'val': v}) for v in val) + ')'
- expr.append(e)
+ if revop:
+ val = [revsetlang.formatspec(revop, v) for v in val]
+ expr.append(revsetlang.formatspec(listop, val))
if expr:
expr = '(' + ' and '.join(expr) + ')'
else:
expr = None
- return expr, filematcher
+ return expr
def _logrevs(repo, opts):
- # Default --rev value depends on --follow but --follow behavior
- # depends on revisions resolved from --rev...
+ """Return the initial set of revisions to be filtered or followed"""
follow = opts.get('follow') or opts.get('follow_first')
if opts.get('rev'):
revs = scmutil.revrange(repo, opts['rev'])
elif follow and repo.dirstate.p1() == nullid:
revs = smartset.baseset()
elif follow:
- revs = repo.revs('reverse(:.)')
+ revs = repo.revs('.')
else:
revs = smartset.spanset(repo)
revs.reverse()
return revs
-def getgraphlogrevs(repo, pats, opts):
- """Return (revs, expr, filematcher) where revs is an iterable of
- revision numbers, expr is a revset string built from log options
- and file patterns or None, and used to filter 'revs'. If --stat or
- --patch are not passed filematcher is None. Otherwise it is a
- callable taking a revision number and returning a match objects
- filtering the files to be detailed when displaying the revision.
+def getlogrevs(repo, pats, opts):
+ """Return (revs, filematcher) where revs is a smartset
+
+ filematcher is a callable taking a revision number and returning a match
+ objects filtering the files to be detailed when displaying the revision.
"""
+ follow = opts.get('follow') or opts.get('follow_first')
+ followfirst = opts.get('follow_first')
limit = loglimit(opts)
revs = _logrevs(repo, opts)
if not revs:
- return smartset.baseset(), None, None
- expr, filematcher = _makelogrevset(repo, pats, opts, revs)
- if opts.get('rev'):
+ return smartset.baseset(), None
+ match, pats, slowpath = _makelogmatcher(repo, revs, pats, opts)
+ filematcher = None
+ if follow:
+ if slowpath or match.always():
+ revs = dagop.revancestors(repo, revs, followfirst=followfirst)
+ else:
+ revs, filematcher = _fileancestors(repo, revs, match, followfirst)
+ revs.reverse()
+ if filematcher is None:
+ filematcher = _makenofollowlogfilematcher(repo, pats, opts)
+ if filematcher is None:
+ def filematcher(rev):
+ return match
+
+ expr = _makelogrevset(repo, match, pats, slowpath, opts)
+ if opts.get('graph') and opts.get('rev'):
# User-specified revs might be unsorted, but don't sort before
# _makelogrevset because it might depend on the order of revs
if not (revs.isdescending() or revs.istopo()):
revs.sort(reverse=True)
if expr:
- matcher = revset.match(repo.ui, expr)
+ matcher = revset.match(None, expr)
revs = matcher(repo, revs)
if limit is not None:
- limitedrevs = []
- for idx, rev in enumerate(revs):
- if idx >= limit:
- break
- limitedrevs.append(rev)
- revs = smartset.baseset(limitedrevs)
-
- return revs, expr, filematcher
-
-def getlogrevs(repo, pats, opts):
- """Return (revs, expr, filematcher) where revs is an iterable of
- revision numbers, expr is a revset string built from log options
- and file patterns or None, and used to filter 'revs'. If --stat or
- --patch are not passed filematcher is None. Otherwise it is a
- callable taking a revision number and returning a match objects
- filtering the files to be detailed when displaying the revision.
- """
- limit = loglimit(opts)
- revs = _logrevs(repo, opts)
- if not revs:
- return smartset.baseset([]), None, None
- expr, filematcher = _makelogrevset(repo, pats, opts, revs)
- if expr:
- matcher = revset.match(repo.ui, expr)
- revs = matcher(repo, revs)
- if limit is not None:
- limitedrevs = []
- for idx, r in enumerate(revs):
- if limit <= idx:
- break
- limitedrevs.append(r)
- revs = smartset.baseset(limitedrevs)
-
- return revs, expr, filematcher
+ revs = revs.slice(0, limit)
+ return revs, filematcher
def _parselinerangelogopt(repo, opts):
"""Parse --line-range log option and return a list of tuples (filename,
@@ -2675,18 +2711,13 @@
return templatekw.showgraphnode # fast path for "{graphnode}"
spec = templater.unquotestring(spec)
- templ = formatter.maketemplater(ui, spec)
- cache = {}
+ tres = formatter.templateresources(ui)
if isinstance(displayer, changeset_templater):
- cache = displayer.cache # reuse cache of slow templates
- props = templatekw.keywords.copy()
- props['templ'] = templ
- props['cache'] = cache
+ tres['cache'] = displayer.cache # reuse cache of slow templates
+ templ = formatter.maketemplater(ui, spec, defaults=templatekw.keywords,
+ resources=tres)
def formatnode(repo, ctx):
- props['ctx'] = ctx
- props['repo'] = repo
- props['ui'] = repo.ui
- props['revcache'] = {}
+ props = {'ctx': ctx, 'repo': repo, 'revcache': {}}
return templ.render(props)
return formatnode
@@ -2733,7 +2764,7 @@
firstedge = next(edges)
width = firstedge[2]
displayer.show(ctx, copies=copies, matchfn=revmatchfn,
- _graphwidth=width, **props)
+ _graphwidth=width, **pycompat.strkwargs(props))
lines = displayer.hunk.pop(rev).split('\n')
if not lines[-1]:
del lines[-1]
@@ -2743,9 +2774,8 @@
lines = []
displayer.close()
-def graphlog(ui, repo, pats, opts):
+def graphlog(ui, repo, revs, filematcher, opts):
# Parameters are identical to log command ones
- revs, expr, filematcher = getgraphlogrevs(repo, pats, opts)
revdag = graphmod.dagwalker(repo, revs)
getrenamed = None
@@ -2975,8 +3005,9 @@
for f in remaining:
count += 1
ui.progress(_('skipping'), count, total=total, unit=_('files'))
- warnings.append(_('not removing %s: file still exists\n')
- % m.rel(f))
+ if ui.verbose or (f in files):
+ warnings.append(_('not removing %s: file still exists\n')
+ % m.rel(f))
ret = 1
ui.progress(_('skipping'), None)
else:
@@ -3021,21 +3052,34 @@
return ret
+def _updatecatformatter(fm, ctx, matcher, path, decode):
+ """Hook for adding data to the formatter used by ``hg cat``.
+
+ Extensions (e.g., lfs) can wrap this to inject keywords/data, but must call
+ this method first."""
+ data = ctx[path].data()
+ if decode:
+ data = ctx.repo().wwritedata(path, data)
+ fm.startitem()
+ fm.write('data', '%s', data)
+ fm.data(abspath=path, path=matcher.rel(path))
+
def cat(ui, repo, ctx, matcher, basefm, fntemplate, prefix, **opts):
err = 1
+ opts = pycompat.byteskwargs(opts)
def write(path):
filename = None
if fntemplate:
filename = makefilename(repo, fntemplate, ctx.node(),
pathname=os.path.join(prefix, path))
+ # attempt to create the directory if it does not already exist
+ try:
+ os.makedirs(os.path.dirname(filename))
+ except OSError:
+ pass
with formatter.maybereopen(basefm, filename, opts) as fm:
- data = ctx[path].data()
- if opts.get('decode'):
- data = repo.wwritedata(path, data)
- fm.startitem()
- fm.write('data', '%s', data)
- fm.data(abspath=path, path=matcher.rel(path))
+ _updatecatformatter(fm, ctx, matcher, path, opts.get('decode'))
# Automation often uses hg cat on single files, so special case it
# for performance to avoid the cost of parsing the manifest.
@@ -3060,7 +3104,8 @@
submatch = matchmod.subdirmatcher(subpath, matcher)
if not sub.cat(submatch, basefm, fntemplate,
- os.path.join(prefix, sub._path), **opts):
+ os.path.join(prefix, sub._path),
+ **pycompat.strkwargs(opts)):
err = 0
except error.RepoLookupError:
ui.status(_("skipping missing subrepository: %s\n")
@@ -3124,6 +3169,8 @@
# base o - first parent of the changeset to amend
wctx = repo[None]
+ # Copy to avoid mutating input
+ extra = extra.copy()
# Update extra dict from amended commit (e.g. to preserve graft
# source)
extra.update(old.extra())
@@ -3200,7 +3247,7 @@
fctx = wctx[path]
flags = fctx.flags()
- mctx = context.memfilectx(repo,
+ mctx = context.memfilectx(repo, ctx_,
fctx.path(), fctx.data(),
islink='l' in flags,
isexec='x' in flags,
@@ -3445,6 +3492,7 @@
return repo.status(match=scmutil.match(repo[None], pats, opts))
def revert(ui, repo, ctx, parents, *pats, **opts):
+ opts = pycompat.byteskwargs(opts)
parent, p2 = parents
node = ctx.node()
@@ -3706,7 +3754,7 @@
else:
util.rename(target, bakname)
if ui.verbose or not exact:
- if not isinstance(msg, basestring):
+ if not isinstance(msg, bytes):
msg = msg(abs)
ui.status(msg % rel)
elif exact:
@@ -3722,7 +3770,8 @@
# Revert the subrepos on the revert list
for sub in targetsubs:
try:
- wctx.sub(sub).revert(ctx.substate[sub], *pats, **opts)
+ wctx.sub(sub).revert(ctx.substate[sub], *pats,
+ **pycompat.strkwargs(opts))
except KeyError:
raise error.Abort("subrepository '%s' does not exist in %s!"
% (sub, short(ctx.node())))
@@ -3802,9 +3851,8 @@
operation = 'discard'
reversehunks = True
if node != parent:
- operation = 'revert'
- reversehunks = repo.ui.configbool('experimental',
- 'revertalternateinteractivemode')
+ operation = 'apply'
+ reversehunks = False
if reversehunks:
diff = patch.diff(repo, ctx.node(), None, m, opts=diffopts)
else:
@@ -3869,6 +3917,7 @@
repo.dirstate.copy(copied[f], f)
class command(registrar.command):
+ """deprecated: use registrar.command instead"""
def _doregister(self, func, name, *args, **kwargs):
func._deprecatedregistrar = True # flag for deprecwarn in extensions.py
return super(command, self)._doregister(func, name, *args, **kwargs)
diff -r 87676e8ee056 -r 27b6df1b5adb mercurial/color.py
--- a/mercurial/color.py Mon Jan 08 16:07:51 2018 -0800
+++ b/mercurial/color.py Mon Jan 22 17:53:02 2018 -0500
@@ -87,12 +87,14 @@
'branches.inactive': 'none',
'diff.changed': 'white',
'diff.deleted': 'red',
+ 'diff.deleted.highlight': 'red bold underline',
'diff.diffline': 'bold',
'diff.extended': 'cyan bold',
'diff.file_a': 'red bold',
'diff.file_b': 'green bold',
'diff.hunk': 'magenta',
'diff.inserted': 'green',
+ 'diff.inserted.highlight': 'green bold underline',
'diff.tab': '',
'diff.trailingwhitespace': 'bold red_background',
'changeset.public': '',
@@ -100,6 +102,15 @@
'changeset.secret': '',
'diffstat.deleted': 'red',
'diffstat.inserted': 'green',
+ 'formatvariant.name.mismatchconfig': 'red',
+ 'formatvariant.name.mismatchdefault': 'yellow',
+ 'formatvariant.name.uptodate': 'green',
+ 'formatvariant.repo.mismatchconfig': 'red',
+ 'formatvariant.repo.mismatchdefault': 'yellow',
+ 'formatvariant.repo.uptodate': 'green',
+ 'formatvariant.config.special': 'yellow',
+ 'formatvariant.config.default': 'green',
+ 'formatvariant.default': '',
'histedit.remaining': 'red bold',
'ui.prompt': 'yellow',
'log.changeset': 'yellow',
@@ -181,7 +192,7 @@
configstyles(ui)
def _modesetup(ui):
- if ui.plain():
+ if ui.plain('color'):
return None
config = ui.config('ui', 'color')
if config == 'debug':
@@ -473,7 +484,7 @@
_win32print(ui, text, writefunc, **opts)
def _win32print(ui, text, writefunc, **opts):
- label = opts.get('label', '')
+ label = opts.get(r'label', '')
attr = origattr
def mapcolor(val, attr):
diff -r 87676e8ee056 -r 27b6df1b5adb mercurial/commands.py
--- a/mercurial/commands.py Mon Jan 08 16:07:51 2018 -0800
+++ b/mercurial/commands.py Mon Jan 22 17:53:02 2018 -0500
@@ -43,12 +43,14 @@
lock as lockmod,
merge as mergemod,
obsolete,
+ obsutil,
patch,
phases,
pycompat,
rcutil,
registrar,
revsetlang,
+ rewriteutil,
scmutil,
server,
sshserver,
@@ -65,6 +67,7 @@
table.update(debugcommandsmod.command._table)
command = registrar.command(table)
+readonly = registrar.command.readonly
# common command options
@@ -102,10 +105,6 @@
_("when to paginate (boolean, always, auto, or never)"), _('TYPE')),
]
-# options which must be pre-parsed before loading configs and extensions
-# TODO: perhaps --debugger should be included
-earlyoptflags = ("--cwd", "-R", "--repository", "--repo", "--config")
-
dryrunopts = cmdutil.dryrunopts
remoteopts = cmdutil.remoteopts
walkopts = cmdutil.walkopts
@@ -295,7 +294,10 @@
# to mimic the behavior of Mercurial before version 1.5
opts['file'] = True
- ctx = scmutil.revsingle(repo, opts.get('rev'))
+ rev = opts.get('rev')
+ if rev:
+ repo = scmutil.unhidehashlikerevs(repo, [rev], 'nowarn')
+ ctx = scmutil.revsingle(repo, rev)
rootfm = ui.formatter('annotate', opts)
if ui.quiet:
@@ -466,7 +468,10 @@
'''
opts = pycompat.byteskwargs(opts)
- ctx = scmutil.revsingle(repo, opts.get('rev'))
+ rev = opts.get('rev')
+ if rev:
+ repo = scmutil.unhidehashlikerevs(repo, [rev], 'nowarn')
+ ctx = scmutil.revsingle(repo, rev)
if not ctx:
raise error.Abort(_('no working directory: please specify a revision'))
node = ctx.node()
@@ -857,7 +862,7 @@
ui.status(_('changeset %d:%s: %s\n') % (ctx, ctx, transition))
hbisect.checkstate(state)
# bisect
- nodes, changesets, bgood = hbisect.bisect(repo.changelog, state)
+ nodes, changesets, bgood = hbisect.bisect(repo, state)
# update to next check
node = nodes[0]
mayupdate(repo, node, show_stats=False)
@@ -870,7 +875,7 @@
hbisect.checkstate(state)
# actually bisect
- nodes, changesets, good = hbisect.bisect(repo.changelog, state)
+ nodes, changesets, good = hbisect.bisect(repo, state)
if extend:
if not changesets:
extendnode = hbisect.extendrange(repo, state, nodes, good)
@@ -997,7 +1002,9 @@
@command('branch',
[('f', 'force', None,
_('set branch name even if it shadows an existing branch')),
- ('C', 'clean', None, _('reset branch name to parent branch name'))],
+ ('C', 'clean', None, _('reset branch name to parent branch name')),
+ ('r', 'rev', [], _('change branches of the given revs (EXPERIMENTAL)')),
+ ],
_('[-fC] [NAME]'))
def branch(ui, repo, label=None, **opts):
"""set or show the current branch name
@@ -1029,10 +1036,13 @@
Returns 0 on success.
"""
opts = pycompat.byteskwargs(opts)
+ revs = opts.get('rev')
if label:
label = label.strip()
if not opts.get('clean') and not label:
+ if revs:
+ raise error.Abort(_("no branch name specified for the revisions"))
ui.write("%s\n" % repo.dirstate.branch())
return
@@ -1042,13 +1052,18 @@
repo.dirstate.setbranch(label)
ui.status(_('reset working directory to branch %s\n') % label)
elif label:
+
+ scmutil.checknewlabel(repo, label, 'branch')
+ if revs:
+ return cmdutil.changebranch(ui, repo, revs, label)
+
if not opts.get('force') and label in repo.branchmap():
if label not in [p.branch() for p in repo[None].parents()]:
raise error.Abort(_('a branch of the same name already'
' exists'),
# i18n: "it" refers to an existing branch
hint=_("use 'hg update' to switch to it"))
- scmutil.checknewlabel(repo, label, 'branch')
+
repo.dirstate.setbranch(label)
ui.status(_('marked working directory as branch %s\n') % label)
@@ -1064,7 +1079,7 @@
_('show only branches that have unmerged heads (DEPRECATED)')),
('c', 'closed', False, _('show normal and closed branches')),
] + formatteropts,
- _('[-c]'))
+ _('[-c]'), cmdtype=readonly)
def branches(ui, repo, active=False, closed=False, **opts):
"""list repository named branches
@@ -1258,7 +1273,7 @@
('', 'decode', None, _('apply any matching decode filter')),
] + walkopts + formatteropts,
_('[OPTION]... FILE...'),
- inferrepo=True)
+ inferrepo=True, cmdtype=readonly)
def cat(ui, repo, file1, *pats, **opts):
"""output the current or given revision of files
@@ -1280,7 +1295,11 @@
Returns 0 on success.
"""
- ctx = scmutil.revsingle(repo, opts.get('rev'))
+ opts = pycompat.byteskwargs(opts)
+ rev = opts.get('rev')
+ if rev:
+ repo = scmutil.unhidehashlikerevs(repo, [rev], 'nowarn')
+ ctx = scmutil.revsingle(repo, rev)
m = scmutil.match(ctx, (file1,) + pats, opts)
fntemplate = opts.pop('output', '')
if cmdutil.isstdiofilename(fntemplate):
@@ -1292,7 +1311,8 @@
ui.pager('cat')
fm = ui.formatter('cat', opts)
with fm:
- return cmdutil.cat(ui, repo, ctx, m, fm, fntemplate, '', **opts)
+ return cmdutil.cat(ui, repo, ctx, m, fm, fntemplate, '',
+ **pycompat.strkwargs(opts))
@command('^clone',
[('U', 'noupdate', None, _('the clone will include an empty working '
@@ -1544,13 +1564,7 @@
raise error.Abort(_('cannot amend with ui.commitsubrepos enabled'))
old = repo['.']
- if not old.mutable():
- raise error.Abort(_('cannot amend public changesets'))
- if len(repo[None].parents()) > 1:
- raise error.Abort(_('cannot amend while merging'))
- allowunstable = obsolete.isenabled(repo, obsolete.allowunstableopt)
- if not allowunstable and old.children():
- raise error.Abort(_('cannot amend changeset with children'))
+ rewriteutil.precheck(repo, [old.rev()], 'amend')
# Currently histedit gets confused if an amend happens while histedit
# is in progress. Since we have a checkunfinished command, we are
@@ -1604,7 +1618,7 @@
('l', 'local', None, _('edit repository config')),
('g', 'global', None, _('edit global config'))] + formatteropts,
_('[-u] [NAME]...'),
- optionalrepo=True)
+ optionalrepo=True, cmdtype=readonly)
def config(ui, repo, *values, **opts):
"""show combined config settings from all hgrc files
@@ -1751,7 +1765,7 @@
def debugcomplete(ui, cmd='', **opts):
"""returns the completion list associated with the given command"""
- if opts.get('options'):
+ if opts.get(r'options'):
options = []
otables = [globalopts]
if cmd:
@@ -1777,7 +1791,7 @@
('c', 'change', '', _('change made by revision'), _('REV'))
] + diffopts + diffopts2 + walkopts + subrepoopts,
_('[OPTION]... ([-c REV] | [-r REV1 [-r REV2]]) [FILE]...'),
- inferrepo=True)
+ inferrepo=True, cmdtype=readonly)
def diff(ui, repo, *pats, **opts):
"""diff repository (or selected files)
@@ -1846,9 +1860,11 @@
msg = _('cannot specify --rev and --change at the same time')
raise error.Abort(msg)
elif change:
+ repo = scmutil.unhidehashlikerevs(repo, [change], 'nowarn')
node2 = scmutil.revsingle(repo, change, None).node()
node1 = repo[node2].p1().node()
else:
+ repo = scmutil.unhidehashlikerevs(repo, revs, 'nowarn')
node1, node2 = scmutil.revpair(repo, revs)
if reverse:
@@ -1867,7 +1883,7 @@
('', 'switch-parent', None, _('diff against the second parent')),
('r', 'rev', [], _('revisions to export'), _('REV')),
] + diffopts,
- _('[OPTION]... [-o OUTFILESPEC] [-r] [REV]...'))
+ _('[OPTION]... [-o OUTFILESPEC] [-r] [REV]...'), cmdtype=readonly)
def export(ui, repo, *changesets, **opts):
"""dump the header and diffs for one or more changesets
@@ -1932,6 +1948,7 @@
changesets += tuple(opts.get('rev', []))
if not changesets:
changesets = ['.']
+ repo = scmutil.unhidehashlikerevs(repo, changesets, 'nowarn')
revs = scmutil.revrange(repo, changesets)
if not revs:
raise error.Abort(_("export requires at least one changeset"))
@@ -1948,7 +1965,7 @@
[('r', 'rev', '', _('search the repository as it is in REV'), _('REV')),
('0', 'print0', None, _('end filenames with NUL, for use with xargs')),
] + walkopts + formatteropts + subrepoopts,
- _('[OPTION]... [FILE]...'))
+ _('[OPTION]... [FILE]...'), cmdtype=readonly)
def files(ui, repo, *pats, **opts):
"""list tracked files
@@ -1995,7 +2012,10 @@
"""
opts = pycompat.byteskwargs(opts)
- ctx = scmutil.revsingle(repo, opts.get('rev'), None)
+ rev = opts.get('rev')
+ if rev:
+ repo = scmutil.unhidehashlikerevs(repo, [rev], 'nowarn')
+ ctx = scmutil.revsingle(repo, rev, None)
end = '\n'
if opts.get('print0'):
@@ -2321,7 +2341,7 @@
('d', 'date', None, _('list the date (short with -q)')),
] + formatteropts + walkopts,
_('[OPTION]... PATTERN [FILE]...'),
- inferrepo=True)
+ inferrepo=True, cmdtype=readonly)
def grep(ui, repo, pattern, *pats, **opts):
"""search revision history for a pattern in specified files
@@ -2564,7 +2584,7 @@
('a', 'active', False, _('show active branchheads only (DEPRECATED)')),
('c', 'closed', False, _('show normal and closed branch heads')),
] + templateopts,
- _('[-ct] [-r STARTREV] [REV]...'))
+ _('[-ct] [-r STARTREV] [REV]...'), cmdtype=readonly)
def heads(ui, repo, *branchrevs, **opts):
"""show branch heads
@@ -2592,8 +2612,10 @@
opts = pycompat.byteskwargs(opts)
start = None
- if 'rev' in opts:
- start = scmutil.revsingle(repo, opts['rev'], None).node()
+ rev = opts.get('rev')
+ if rev:
+ repo = scmutil.unhidehashlikerevs(repo, [rev], 'nowarn')
+ start = scmutil.revsingle(repo, rev, None).node()
if opts.get('topo'):
heads = [repo[h] for h in repo.heads(start)]
@@ -2637,7 +2659,7 @@
('s', 'system', [], _('show help for specific platform(s)')),
],
_('[-ecks] [TOPIC]'),
- norepo=True)
+ norepo=True, cmdtype=readonly)
def help_(ui, name=None, **opts):
"""show help for a given topic or a help overview
@@ -2679,7 +2701,7 @@
('B', 'bookmarks', None, _('show bookmarks')),
] + remoteopts + formatteropts,
_('[-nibtB] [-r REV] [SOURCE]'),
- optionalrepo=True)
+ optionalrepo=True, cmdtype=readonly)
def identify(ui, repo, source=None, rev=None,
num=None, id=None, branch=None, tags=None, bookmarks=None, **opts):
"""identify the working directory or specified revision
@@ -2777,6 +2799,8 @@
fm.data(node=hex(remoterev))
fm.data(bookmarks=fm.formatlist(bms, name='bookmark'))
else:
+ if rev:
+ repo = scmutil.unhidehashlikerevs(repo, [rev], 'nowarn')
ctx = scmutil.revsingle(repo, rev, None)
if ctx.rev() is None:
@@ -3254,7 +3278,7 @@
_('do not display revision or any of its ancestors'), _('REV')),
] + logopts + walkopts,
_('[OPTION]... [FILE]'),
- inferrepo=True)
+ inferrepo=True, cmdtype=readonly)
def log(ui, repo, *pats, **opts):
"""show revision history of entire repository or files
@@ -3268,7 +3292,7 @@
File history is shown without following rename or copy history of
files. Use -f/--follow with a filename to follow history across
renames and copies. --follow without a filename will only show
- ancestors or descendants of the starting revision.
+ ancestors of the starting revision.
By default this command prints revision number and changeset id,
tags, non-trivial parents, user, date and time, and a summary for
@@ -3393,17 +3417,14 @@
_('FILE arguments are not compatible with --line-range option')
)
- if opts.get('follow') and opts.get('rev'):
- opts['rev'] = [revsetlang.formatspec('reverse(::%lr)', opts.get('rev'))]
- del opts['follow']
+ repo = scmutil.unhidehashlikerevs(repo, opts.get('rev'), 'nowarn')
+ revs, filematcher = cmdutil.getlogrevs(repo, pats, opts)
+ hunksfilter = None
if opts.get('graph'):
if linerange:
raise error.Abort(_('graph not supported with line range patterns'))
- return cmdutil.graphlog(ui, repo, pats, opts)
-
- revs, expr, filematcher = cmdutil.getlogrevs(repo, pats, opts)
- hunksfilter = None
+ return cmdutil.graphlog(ui, repo, revs, filematcher, opts)
if linerange:
revs, lrfilematcher, hunksfilter = cmdutil.getloglinerangerevs(
@@ -3420,9 +3441,6 @@
elif filematcher is None:
filematcher = lrfilematcher
- limit = cmdutil.loglimit(opts)
- count = 0
-
getrenamed = None
if opts.get('copies'):
endrev = None
@@ -3433,8 +3451,6 @@
ui.pager('log')
displayer = cmdutil.show_changeset(ui, repo, opts, buffered=True)
for rev in revs:
- if count == limit:
- break
ctx = repo[rev]
copies = None
if getrenamed is not None and rev:
@@ -3453,8 +3469,7 @@
revhunksfilter = None
displayer.show(ctx, copies=copies, matchfn=revmatchfn,
hunksfilterfn=revhunksfilter)
- if displayer.flush(ctx):
- count += 1
+ displayer.flush(ctx)
displayer.close()
@@ -3462,7 +3477,7 @@
[('r', 'rev', '', _('revision to display'), _('REV')),
('', 'all', False, _("list files from all revisions"))]
+ formatteropts,
- _('[-r REV]'))
+ _('[-r REV]'), cmdtype=readonly)
def manifest(ui, repo, node=None, rev=None, **opts):
"""output the current or given revision of the project manifest
@@ -3509,6 +3524,8 @@
char = {'l': '@', 'x': '*', '': ''}
mode = {'l': '644', 'x': '755', '': '644'}
+ if node:
+ repo = scmutil.unhidehashlikerevs(repo, [node], 'nowarn')
ctx = scmutil.revsingle(repo, node)
mf = ctx.manifest()
ui.pager('manifest')
@@ -3525,7 +3542,8 @@
_('force a merge including outstanding changes (DEPRECATED)')),
('r', 'rev', '', _('revision to merge'), _('REV')),
('P', 'preview', None,
- _('review revisions to merge (no merge is performed)'))
+ _('review revisions to merge (no merge is performed)')),
+ ('', 'abort', None, _('abort the ongoing merge')),
] + mergetoolopts,
_('[-P] [[-r] REV]'))
def merge(ui, repo, node=None, **opts):
@@ -3550,7 +3568,7 @@
See :hg:`help resolve` for information on handling file conflicts.
- To undo an uncommitted merge, use :hg:`update --clean .` which
+ To undo an uncommitted merge, use :hg:`merge --abort` which
will check out a clean copy of the original merge parent, losing
all changes.
@@ -3558,6 +3576,16 @@
"""
opts = pycompat.byteskwargs(opts)
+ abort = opts.get('abort')
+ if abort and repo.dirstate.p2() == nullid:
+ cmdutil.wrongtooltocontinue(repo, _('merge'))
+ if abort:
+ if node:
+ raise error.Abort(_("cannot specify a node with --abort"))
+ if opts.get('rev'):
+ raise error.Abort(_("cannot specify both --rev and --abort"))
+ if opts.get('preview'):
+ raise error.Abort(_("cannot specify --preview with --abort"))
if opts.get('rev') and node:
raise error.Abort(_("please specify just one revision"))
if not node:
@@ -3566,7 +3594,7 @@
if node:
node = scmutil.revsingle(repo, node).node()
- if not node:
+ if not node and not abort:
node = repo[destutil.destmerge(repo)].node()
if opts.get('preview'):
@@ -3587,7 +3615,7 @@
force = opts.get('force')
labels = ['working copy', 'merge rev']
return hg.merge(repo, node, force=force, mergeforce=force,
- labels=labels)
+ labels=labels, abort=abort)
finally:
ui.setconfig('ui', 'forcemerge', '', 'merge')
@@ -3696,7 +3724,10 @@
"""
opts = pycompat.byteskwargs(opts)
- ctx = scmutil.revsingle(repo, opts.get('rev'), None)
+ rev = opts.get('rev')
+ if rev:
+ repo = scmutil.unhidehashlikerevs(repo, [rev], 'nowarn')
+ ctx = scmutil.revsingle(repo, rev, None)
if file_:
m = scmutil.match(ctx, (file_,), opts)
@@ -3726,7 +3757,8 @@
displayer.show(repo[n])
displayer.close()
-@command('paths', formatteropts, _('[NAME]'), optionalrepo=True)
+@command('paths', formatteropts, _('[NAME]'), optionalrepo=True,
+ cmdtype=readonly)
def paths(ui, repo, search=None, **opts):
"""show aliases for remote repositories
@@ -3841,7 +3873,6 @@
revs = scmutil.revrange(repo, revs)
- lock = None
ret = 0
if targetphase is None:
# display
@@ -3849,10 +3880,7 @@
ctx = repo[r]
ui.write('%i: %s\n' % (ctx.rev(), ctx.phasestr()))
else:
- tr = None
- lock = repo.lock()
- try:
- tr = repo.transaction("phase")
+ with repo.lock(), repo.transaction("phase") as tr:
# set phase
if not revs:
raise error.Abort(_('empty revision set'))
@@ -3865,11 +3893,6 @@
phases.advanceboundary(repo, tr, targetphase, nodes)
if opts['force']:
phases.retractboundary(repo, tr, targetphase, nodes)
- tr.close()
- finally:
- if tr is not None:
- tr.release()
- lock.release()
getphase = unfi._phasecache.phase
newdata = [getphase(unfi, r) for r in unfi]
changes = sum(newdata[r] != olddata[r] for r in unfi)
@@ -3923,7 +3946,7 @@
@command('^pull',
[('u', 'update', None,
- _('update to new branch head if changesets were pulled')),
+ _('update to new branch head if new descendants were pulled')),
('f', 'force', None, _('run even when remote repository is unrelated')),
('r', 'rev', [], _('a remote changeset intended to be added'), _('REV')),
('B', 'bookmark', [], _("bookmark to pull"), _('BOOKMARK')),
@@ -3978,12 +4001,13 @@
# not ending up with the name of the bookmark because of a race
# condition on the server. (See issue 4689 for details)
remotebookmarks = other.listkeys('bookmarks')
+ remotebookmarks = bookmarks.unhexlifybookmarks(remotebookmarks)
pullopargs['remotebookmarks'] = remotebookmarks
for b in opts['bookmark']:
b = repo._bookmarks.expandname(b)
if b not in remotebookmarks:
raise error.Abort(_('remote bookmark %s not found!') % b)
- revs.append(remotebookmarks[b])
+ revs.append(hex(remotebookmarks[b]))
if revs:
try:
@@ -4002,36 +4026,40 @@
"so a rev cannot be specified.")
raise error.Abort(err)
- pullopargs.update(opts.get('opargs', {}))
- modheads = exchange.pull(repo, other, heads=revs,
- force=opts.get('force'),
- bookmarks=opts.get('bookmark', ()),
- opargs=pullopargs).cgresult
-
- # brev is a name, which might be a bookmark to be activated at
- # the end of the update. In other words, it is an explicit
- # destination of the update
- brev = None
-
- if checkout:
- checkout = str(repo.changelog.rev(checkout))
-
- # order below depends on implementation of
- # hg.addbranchrevs(). opts['bookmark'] is ignored,
- # because 'checkout' is determined without it.
- if opts.get('rev'):
- brev = opts['rev'][0]
- elif opts.get('branch'):
- brev = opts['branch'][0]
- else:
- brev = branches[0]
- repo._subtoppath = source
- try:
- ret = postincoming(ui, repo, modheads, opts.get('update'),
- checkout, brev)
-
- finally:
- del repo._subtoppath
+ wlock = util.nullcontextmanager()
+ if opts.get('update'):
+ wlock = repo.wlock()
+ with wlock:
+ pullopargs.update(opts.get('opargs', {}))
+ modheads = exchange.pull(repo, other, heads=revs,
+ force=opts.get('force'),
+ bookmarks=opts.get('bookmark', ()),
+ opargs=pullopargs).cgresult
+
+ # brev is a name, which might be a bookmark to be activated at
+ # the end of the update. In other words, it is an explicit
+ # destination of the update
+ brev = None
+
+ if checkout:
+ checkout = str(repo.changelog.rev(checkout))
+
+ # order below depends on implementation of
+ # hg.addbranchrevs(). opts['bookmark'] is ignored,
+ # because 'checkout' is determined without it.
+ if opts.get('rev'):
+ brev = opts['rev'][0]
+ elif opts.get('branch'):
+ brev = opts['branch'][0]
+ else:
+ brev = branches[0]
+ repo._subtoppath = source
+ try:
+ ret = postincoming(ui, repo, modheads, opts.get('update'),
+ checkout, brev)
+
+ finally:
+ del repo._subtoppath
finally:
other.close()
@@ -4522,8 +4550,7 @@
('d', 'date', '', _('tipmost revision matching date'), _('DATE')),
('r', 'rev', '', _('revert to the specified revision'), _('REV')),
('C', 'no-backup', None, _('do not save backup copies of files')),
- ('i', 'interactive', None,
- _('interactively select the changes (EXPERIMENTAL)')),
+ ('i', 'interactive', None, _('interactively select the changes')),
] + walkopts + dryrunopts,
_('[OPTION]... [-r REV] [NAME]...'))
def revert(ui, repo, *pats, **opts):
@@ -4563,6 +4590,7 @@
Returns 0 on success.
"""
+ opts = pycompat.byteskwargs(opts)
if opts.get("date"):
if opts.get("rev"):
raise error.Abort(_("you can't specify a revision and a date"))
@@ -4574,7 +4602,10 @@
raise error.Abort(_('uncommitted merge with no revision specified'),
hint=_("use 'hg update' or see 'hg help revert'"))
- ctx = scmutil.revsingle(repo, opts.get('rev'))
+ rev = opts.get('rev')
+ if rev:
+ repo = scmutil.unhidehashlikerevs(repo, [rev], 'nowarn')
+ ctx = scmutil.revsingle(repo, rev)
if (not (pats or opts.get('include') or opts.get('exclude') or
opts.get('all') or opts.get('interactive'))):
@@ -4598,7 +4629,8 @@
hint = _("use --all to revert all files")
raise error.Abort(msg, hint=hint)
- return cmdutil.revert(ui, repo, ctx, (parent, p2), *pats, **opts)
+ return cmdutil.revert(ui, repo, ctx, (parent, p2), *pats,
+ **pycompat.strkwargs(opts))
@command('rollback', dryrunopts +
[('f', 'force', False, _('ignore safety measures'))])
@@ -4653,7 +4685,7 @@
return repo.rollback(dryrun=opts.get(r'dry_run'),
force=opts.get(r'force'))
-@command('root', [])
+@command('root', [], cmdtype=readonly)
def root(ui, repo):
"""print the root (top) of the current working directory
@@ -4701,7 +4733,7 @@
Please note that the server does not implement access control.
This means that, by default, anybody can read from the server and
- nobody can write to it by default. Set the ``web.allow_push``
+ nobody can write to it by default. Set the ``web.allow-push``
option to ``*`` to allow everybody to push to the server. You
should use a real web server if you need to authenticate users.
@@ -4747,7 +4779,7 @@
('', 'change', '', _('list the changed files of a revision'), _('REV')),
] + walkopts + subrepoopts + formatteropts,
_('[OPTION]... [FILE]...'),
- inferrepo=True)
+ inferrepo=True, cmdtype=readonly)
def status(ui, repo, *pats, **opts):
"""show changed files in the working directory
@@ -4845,9 +4877,11 @@
msg = _('cannot use --terse with --rev')
raise error.Abort(msg)
elif change:
+ repo = scmutil.unhidehashlikerevs(repo, [change], 'nowarn')
node2 = scmutil.revsingle(repo, change, None).node()
node1 = repo[node2].p1().node()
else:
+ repo = scmutil.unhidehashlikerevs(repo, revs, 'nowarn')
node1, node2 = scmutil.revpair(repo, revs)
if pats or ui.configbool('commands', 'status.relative'):
@@ -4912,7 +4946,8 @@
fm.end()
@command('^summary|sum',
- [('', 'remote', None, _('check for push and pull'))], '[--remote]')
+ [('', 'remote', None, _('check for push and pull'))],
+ '[--remote]', cmdtype=readonly)
def summary(ui, repo, **opts):
"""summarize working directory state
@@ -5313,7 +5348,7 @@
finally:
release(lock, wlock)
-@command('tags', formatteropts, '')
+@command('tags', formatteropts, '', cmdtype=readonly)
def tags(ui, repo, **opts):
"""list repository tags
@@ -5510,7 +5545,17 @@
# if we defined a bookmark, we have to remember the original name
brev = rev
- rev = scmutil.revsingle(repo, rev, rev).rev()
+ if rev:
+ repo = scmutil.unhidehashlikerevs(repo, [rev], 'nowarn')
+ ctx = scmutil.revsingle(repo, rev, rev)
+ rev = ctx.rev()
+ if ctx.hidden():
+ ctxstr = ctx.hex()[:12]
+ ui.warn(_("updating to a hidden changeset %s\n") % ctxstr)
+
+ if ctx.obsolete():
+ obsfatemsg = obsutil._getfilteredreason(repo, ctxstr, ctx)
+ ui.warn("(%s)\n" % obsfatemsg)
repo.ui.setconfig('ui', 'forcemerge', tool, 'update')
@@ -5536,7 +5581,7 @@
"""
return hg.verify(repo)
-@command('version', [] + formatteropts, norepo=True)
+@command('version', [] + formatteropts, norepo=True, cmdtype=readonly)
def version_(ui, **opts):
"""output version and copyright information"""
opts = pycompat.byteskwargs(opts)
@@ -5548,7 +5593,7 @@
util.version())
license = _(
"(see https://mercurial-scm.org for more information)\n"
- "\nCopyright (C) 2005-2017 Matt Mackall and others\n"
+ "\nCopyright (C) 2005-2018 Matt Mackall and others\n"
"This is free software; see the source for copying conditions. "
"There is NO\nwarranty; "
"not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.\n"
diff -r 87676e8ee056 -r 27b6df1b5adb mercurial/commandserver.py
--- a/mercurial/commandserver.py Mon Jan 08 16:07:51 2018 -0800
+++ b/mercurial/commandserver.py Mon Jan 22 17:53:02 2018 -0500
@@ -17,11 +17,11 @@
import traceback
from .i18n import _
+from .thirdparty import selectors2
from . import (
encoding,
error,
pycompat,
- selectors2,
util,
)
@@ -247,13 +247,13 @@
req = dispatch.request(args[:], copiedui, self.repo, self.cin,
self.cout, self.cerr)
- ret = (dispatch.dispatch(req) or 0) & 255 # might return None
-
- # restore old cwd
- if '--cwd' in args:
- os.chdir(self.cwd)
-
- self.cresult.write(struct.pack('>i', int(ret)))
+ try:
+ ret = (dispatch.dispatch(req) or 0) & 255 # might return None
+ self.cresult.write(struct.pack('>i', int(ret)))
+ finally:
+ # restore old cwd
+ if '--cwd' in args:
+ os.chdir(self.cwd)
def getencoding(self):
""" writes the current encoding to the result channel """
@@ -449,6 +449,8 @@
def init(self):
self._sock = socket.socket(socket.AF_UNIX)
self._servicehandler.bindsocket(self._sock, self.address)
+ if util.safehasattr(util, 'unblocksignal'):
+ util.unblocksignal(signal.SIGCHLD)
o = signal.signal(signal.SIGCHLD, self._sigchldhandler)
self._oldsigchldhandler = o
self._socketunlinked = False
diff -r 87676e8ee056 -r 27b6df1b5adb mercurial/configitems.py
--- a/mercurial/configitems.py Mon Jan 08 16:07:51 2018 -0800
+++ b/mercurial/configitems.py Mon Jan 22 17:53:02 2018 -0500
@@ -362,6 +362,9 @@
coreconfigitem('devel', 'warn-config-unknown',
default=None,
)
+coreconfigitem('devel', 'debug.peer-request',
+ default=False,
+)
coreconfigitem('diff', 'nodates',
default=False,
)
@@ -428,6 +431,9 @@
coreconfigitem('experimental', 'bundle2.pushback',
default=False,
)
+coreconfigitem('experimental', 'bundle2.stream',
+ default=False,
+)
coreconfigitem('experimental', 'bundle2lazylocking',
default=False,
)
@@ -452,6 +458,12 @@
coreconfigitem('experimental', 'crecordtest',
default=None,
)
+coreconfigitem('experimental', 'directaccess',
+ default=False,
+)
+coreconfigitem('experimental', 'directaccess.revnums',
+ default=False,
+)
coreconfigitem('experimental', 'editortmpinhg',
default=False,
)
@@ -469,7 +481,7 @@
default=None,
)
coreconfigitem('experimental', 'evolution.effect-flags',
- default=False,
+ default=True,
alias=[('experimental', 'effect-flags')]
)
coreconfigitem('experimental', 'evolution.exchange',
@@ -478,9 +490,15 @@
coreconfigitem('experimental', 'evolution.bundle-obsmarker',
default=False,
)
+coreconfigitem('experimental', 'evolution.report-instabilities',
+ default=True,
+)
coreconfigitem('experimental', 'evolution.track-operation',
default=True,
)
+coreconfigitem('experimental', 'worddiff',
+ default=False,
+)
coreconfigitem('experimental', 'maxdeltachainspan',
default=-1,
)
@@ -529,15 +547,15 @@
coreconfigitem('experimental', 'obsmarkers-exchange-debug',
default=False,
)
-coreconfigitem('experimental', 'rebase.multidest',
+coreconfigitem('experimental', 'remotenames',
default=False,
)
-coreconfigitem('experimental', 'revertalternateinteractivemode',
- default=True,
-)
coreconfigitem('experimental', 'revlogv2',
default=None,
)
+coreconfigitem('experimental', 'single-head-per-branch',
+ default=False,
+)
coreconfigitem('experimental', 'spacemovesdown',
default=False,
)
@@ -553,6 +571,9 @@
coreconfigitem('experimental', 'treemanifest',
default=False,
)
+coreconfigitem('experimental', 'update.atomic-file',
+ default=False,
+)
coreconfigitem('extensions', '.*',
default=None,
generic=True,
@@ -838,6 +859,9 @@
coreconfigitem('push', 'pushvars.server',
default=False,
)
+coreconfigitem('server', 'bookmarks-pushkey-compat',
+ default=True,
+)
coreconfigitem('server', 'bundle1',
default=True,
)
@@ -1060,6 +1084,9 @@
coreconfigitem('ui', 'ssh',
default='ssh',
)
+coreconfigitem('ui', 'ssherrorhint',
+ default=None,
+)
coreconfigitem('ui', 'statuscopies',
default=False,
)
@@ -1078,6 +1105,9 @@
coreconfigitem('ui', 'timeout',
default='600',
)
+coreconfigitem('ui', 'timeout.warn',
+ default=0,
+)
coreconfigitem('ui', 'traceback',
default=False,
)
@@ -1102,10 +1132,12 @@
coreconfigitem('web', 'allowgz',
default=False,
)
-coreconfigitem('web', 'allowpull',
+coreconfigitem('web', 'allow-pull',
+ alias=[('web', 'allowpull')],
default=True,
)
-coreconfigitem('web', 'allow_push',
+coreconfigitem('web', 'allow-push',
+ alias=[('web', 'allow_push')],
default=list,
)
coreconfigitem('web', 'allowzip',
@@ -1239,6 +1271,9 @@
coreconfigitem('worker', 'backgroundclosethreadcount',
default=4,
)
+coreconfigitem('worker', 'enabled',
+ default=True,
+)
coreconfigitem('worker', 'numcpus',
default=None,
)
@@ -1255,3 +1290,6 @@
coreconfigitem('rebase', 'singletransaction',
default=False,
)
+coreconfigitem('rebase', 'experimental.inmemory',
+ default=False,
+)
diff -r 87676e8ee056 -r 27b6df1b5adb mercurial/context.py
--- a/mercurial/context.py Mon Jan 08 16:07:51 2018 -0800
+++ b/mercurial/context.py Mon Jan 22 17:53:02 2018 -0500
@@ -36,6 +36,7 @@
match as matchmod,
mdiff,
obsolete as obsmod,
+ obsutil,
patch,
pathutil,
phases,
@@ -354,7 +355,7 @@
ctx2 = self.p1()
if ctx2 is not None:
ctx2 = self._repo[ctx2]
- diffopts = patch.diffopts(self._repo.ui, opts)
+ diffopts = patch.diffopts(self._repo.ui, pycompat.byteskwargs(opts))
return patch.diff(self._repo, ctx2, self, match=match, opts=diffopts)
def dirs(self):
@@ -433,8 +434,20 @@
This is extracted in a function to help extensions (eg: evolve) to
experiment with various message variants."""
if repo.filtername.startswith('visible'):
- msg = _("hidden revision '%s'") % changeid
+
+ # Check if the changeset is obsolete
+ unfilteredrepo = repo.unfiltered()
+ ctx = unfilteredrepo[changeid]
+
+ # If the changeset is obsolete, enrich the message with the reason
+ # that made this changeset not visible
+ if ctx.obsolete():
+ msg = obsutil._getfilteredreason(repo, changeid, ctx)
+ else:
+ msg = _("hidden revision '%s'") % changeid
+
hint = _('use --hidden to access hidden revisions')
+
return error.FilteredRepoLookupError(msg, hint=hint)
msg = _("filtered revision '%s' (not in '%s' subset)")
msg %= (changeid, repo.filtername)
@@ -615,10 +628,13 @@
def closesbranch(self):
return 'close' in self._changeset.extra
def extra(self):
+ """Return a dict of extra information."""
return self._changeset.extra
def tags(self):
+ """Return a list of byte tag names"""
return self._repo.nodetags(self._node)
def bookmarks(self):
+ """Return a list of byte bookmark names."""
return self._repo.nodebookmarks(self._node)
def phase(self):
return self._repo._phasecache.phase(self._repo, self._rev)
@@ -629,7 +645,11 @@
return False
def children(self):
- """return contexts for each child changeset"""
+ """return list of changectx contexts for each child changeset.
+
+ This returns only the immediate child changesets. Use descendants() to
+ recursively walk children.
+ """
c = self._repo.changelog.children(self._node)
return [changectx(self._repo, x) for x in c]
@@ -638,6 +658,10 @@
yield changectx(self._repo, a)
def descendants(self):
+ """Recursively yield all children of the changeset.
+
+ For just the immediate children, use children()
+ """
for d in self._repo.changelog.descendants([self._rev]):
yield changectx(self._repo, d)
@@ -819,6 +843,10 @@
return self._changectx.phase()
def phasestr(self):
return self._changectx.phasestr()
+ def obsolete(self):
+ return self._changectx.obsolete()
+ def instabilities(self):
+ return self._changectx.instabilities()
def manifest(self):
return self._changectx.manifest()
def changectx(self):
@@ -931,6 +959,14 @@
return self.linkrev()
return self._adjustlinkrev(self.rev(), inclusive=True)
+ def introfilectx(self):
+ """Return filectx having identical contents, but pointing to the
+ changeset revision where this filectx was introduced"""
+ introrev = self.introrev()
+ if self.rev() == introrev:
+ return self
+ return self.filectx(self.filenode(), changeid=introrev)
+
def _parentfilectx(self, path, fileid, filelog):
"""create parent filectx keeping ancestry info for _adjustlinkrev()"""
fctx = filectx(self._repo, path, fileid=fileid, filelog=filelog)
@@ -1021,19 +1057,16 @@
return pl
# use linkrev to find the first changeset where self appeared
- base = self
- introrev = self.introrev()
- if self.rev() != introrev:
- base = self.filectx(self.filenode(), changeid=introrev)
+ base = self.introfilectx()
if getattr(base, '_ancestrycontext', None) is None:
cl = self._repo.changelog
- if introrev is None:
+ if base.rev() is None:
# wctx is not inclusive, but works because _ancestrycontext
# is used to test filelog revisions
ac = cl.ancestors([p.rev() for p in base.parents()],
inclusive=True)
else:
- ac = cl.ancestors([introrev], inclusive=True)
+ ac = cl.ancestors([base.rev()], inclusive=True)
base._ancestrycontext = ac
# This algorithm would prefer to be recursive, but Python is a
@@ -1088,7 +1121,7 @@
hist[f] = curr
del pcache[f]
- return zip(hist[base][0], hist[base][1].splitlines(True))
+ return pycompat.ziplist(hist[base][0], hist[base][1].splitlines(True))
def ancestors(self, followfirst=False):
visit = {}
@@ -1633,9 +1666,6 @@
listsubrepos=listsubrepos, badfn=badfn,
icasefs=icasefs)
- def flushall(self):
- pass # For overlayworkingfilectx compatibility.
-
def _filtersuspectsymlink(self, files):
if not files or self._repo.dirstate._checklink:
return files
@@ -1932,10 +1962,11 @@
"""wraps unlink for a repo's working directory"""
self._repo.wvfs.unlinkpath(self._path, ignoremissing=ignoremissing)
- def write(self, data, flags, backgroundclose=False):
+ def write(self, data, flags, backgroundclose=False, **kwargs):
"""wraps repo.wwrite"""
self._repo.wwrite(self._path, data, flags,
- backgroundclose=backgroundclose)
+ backgroundclose=backgroundclose,
+ **kwargs)
def markcopied(self, src):
"""marks this file a copy of `src`"""
@@ -1959,25 +1990,33 @@
def setflags(self, l, x):
self._repo.wvfs.setflags(self._path, l, x)
-class overlayworkingctx(workingctx):
- """Wraps another mutable context with a write-back cache that can be flushed
- at a later time.
+class overlayworkingctx(committablectx):
+ """Wraps another mutable context with a write-back cache that can be
+ converted into a commit context.
self._cache[path] maps to a dict with keys: {
'exists': bool?
'date': date?
'data': str?
'flags': str?
+ 'copied': str? (path or None)
}
If `exists` is True, `flags` must be non-None and 'date' is non-None. If it
is `False`, the file was deleted.
"""
- def __init__(self, repo, wrappedctx):
+ def __init__(self, repo):
super(overlayworkingctx, self).__init__(repo)
self._repo = repo
+ self.clean()
+
+ def setbase(self, wrappedctx):
self._wrappedctx = wrappedctx
- self._clean()
+ self._parents = [wrappedctx]
+ # Drop old manifest cache as it is now out of date.
+ # This is necessary when, e.g., rebasing several nodes with one
+ # ``overlayworkingctx`` (e.g. with --collapse).
+ util.clearcachedproperty(self, '_manifest')
def data(self, path):
if self.isdirty(path):
@@ -1989,10 +2028,47 @@
return self._wrappedctx[path].data()
else:
raise error.ProgrammingError("No such file or directory: %s" %
- self._path)
+ path)
else:
return self._wrappedctx[path].data()
+ @propertycache
+ def _manifest(self):
+ parents = self.parents()
+ man = parents[0].manifest().copy()
+
+ flag = self._flagfunc
+ for path in self.added():
+ man[path] = addednodeid
+ man.setflag(path, flag(path))
+ for path in self.modified():
+ man[path] = modifiednodeid
+ man.setflag(path, flag(path))
+ for path in self.removed():
+ del man[path]
+ return man
+
+ @propertycache
+ def _flagfunc(self):
+ def f(path):
+ return self._cache[path]['flags']
+ return f
+
+ def files(self):
+ return sorted(self.added() + self.modified() + self.removed())
+
+ def modified(self):
+ return [f for f in self._cache.keys() if self._cache[f]['exists'] and
+ self._existsinparent(f)]
+
+ def added(self):
+ return [f for f in self._cache.keys() if self._cache[f]['exists'] and
+ not self._existsinparent(f)]
+
+ def removed(self):
+ return [f for f in self._cache.keys() if
+ not self._cache[f]['exists'] and self._existsinparent(f)]
+
def isinmemory(self):
return True
@@ -2002,6 +2078,18 @@
else:
return self._wrappedctx[path].date()
+ def markcopied(self, path, origin):
+ if self.isdirty(path):
+ self._cache[path]['copied'] = origin
+ else:
+ raise error.ProgrammingError('markcopied() called on clean context')
+
+ def copydata(self, path):
+ if self.isdirty(path):
+ return self._cache[path]['copied']
+ else:
+ raise error.ProgrammingError('copydata() called on clean context')
+
def flags(self, path):
if self.isdirty(path):
if self._cache[path]['exists']:
@@ -2012,9 +2100,60 @@
else:
return self._wrappedctx[path].flags()
- def write(self, path, data, flags=''):
+ def _existsinparent(self, path):
+ try:
+ # ``commitctx`` raises a ``ManifestLookupError`` if a path does not
+ # exist, unlike ``workingctx``, which returns a ``workingfilectx``
+ # with an ``exists()`` function.
+ self._wrappedctx[path]
+ return True
+ except error.ManifestLookupError:
+ return False
+
+ def _auditconflicts(self, path):
+ """Replicates conflict checks done by wvfs.write().
+
+ Since we never write to the filesystem and never call `applyupdates` in
+ IMM, we'll never check that a path is actually writable -- e.g., because
+ it adds `a/foo`, but `a` is actually a file in the other commit.
+ """
+ def fail(path, component):
+ # p1() is the base and we're receiving "writes" for p2()'s
+ # files.
+ if 'l' in self.p1()[component].flags():
+ raise error.Abort("error: %s conflicts with symlink %s "
+ "in %s." % (path, component,
+ self.p1().rev()))
+ else:
+ raise error.Abort("error: '%s' conflicts with file '%s' in "
+ "%s." % (path, component,
+ self.p1().rev()))
+
+ # Test that each new directory to be created to write this path from p2
+ # is not a file in p1.
+ components = path.split('/')
+ for i in xrange(len(components)):
+ component = "/".join(components[0:i])
+ if component in self.p1():
+ fail(path, component)
+
+ # Test the other direction -- that this path from p2 isn't a directory
+ # in p1 (test that p1 doesn't have any paths matching `path/*`).
+ match = matchmod.match('/', '', [path + '/'], default=b'relpath')
+ matches = self.p1().manifest().matches(match)
+ if len(matches) > 0:
+ if len(matches) == 1 and matches.keys()[0] == path:
+ return
+ raise error.Abort("error: file '%s' cannot be written because "
+ " '%s/' is a folder in %s (containing %d "
+ "entries: %s)"
+ % (path, path, self.p1(), len(matches),
+ ', '.join(matches.keys())))
+
+ def write(self, path, data, flags='', **kwargs):
if data is None:
raise error.ProgrammingError("data must be non-None")
+ self._auditconflicts(path)
self._markdirty(path, exists=True, data=data, date=util.makedate(),
flags=flags)
@@ -2037,13 +2176,15 @@
return self.exists(self._cache[path]['data'].strip())
else:
return self._cache[path]['exists']
- return self._wrappedctx[path].exists()
+
+ return self._existsinparent(path)
def lexists(self, path):
"""lexists returns True if the path exists"""
if self.isdirty(path):
return self._cache[path]['exists']
- return self._wrappedctx[path].lexists()
+
+ return self._existsinparent(path)
def size(self, path):
if self.isdirty(path):
@@ -2054,48 +2195,90 @@
self._path)
return self._wrappedctx[path].size()
- def flushall(self):
- for path in self._writeorder:
- entry = self._cache[path]
- if entry['exists']:
- self._wrappedctx[path].clearunknown()
- if entry['data'] is not None:
- if entry['flags'] is None:
- raise error.ProgrammingError('data set but not flags')
- self._wrappedctx[path].write(
- entry['data'],
- entry['flags'])
- else:
- self._wrappedctx[path].setflags(
- 'l' in entry['flags'],
- 'x' in entry['flags'])
+ def tomemctx(self, text, branch=None, extra=None, date=None, parents=None,
+ user=None, editor=None):
+ """Converts this ``overlayworkingctx`` into a ``memctx`` ready to be
+ committed.
+
+ ``text`` is the commit message.
+ ``parents`` (optional) are rev numbers.
+ """
+ # Default parents to the wrapped contexts' if not passed.
+ if parents is None:
+ parents = self._wrappedctx.parents()
+ if len(parents) == 1:
+ parents = (parents[0], None)
+
+ # ``parents`` is passed as rev numbers; convert to ``commitctxs``.
+ if parents[1] is None:
+ parents = (self._repo[parents[0]], None)
+ else:
+ parents = (self._repo[parents[0]], self._repo[parents[1]])
+
+ files = self._cache.keys()
+ def getfile(repo, memctx, path):
+ if self._cache[path]['exists']:
+ return memfilectx(repo, memctx, path,
+ self._cache[path]['data'],
+ 'l' in self._cache[path]['flags'],
+ 'x' in self._cache[path]['flags'],
+ self._cache[path]['copied'])
else:
- self._wrappedctx[path].remove(path)
- self._clean()
+ # Returning None, but including the path in `files`, is
+ # necessary for memctx to register a deletion.
+ return None
+ return memctx(self._repo, parents, text, files, getfile, date=date,
+ extra=extra, user=user, branch=branch, editor=editor)
def isdirty(self, path):
return path in self._cache
- def _clean(self):
+ def isempty(self):
+ # We need to discard any keys that are actually clean before the empty
+ # commit check.
+ self._compact()
+ return len(self._cache) == 0
+
+ def clean(self):
self._cache = {}
- self._writeorder = []
+
+ def _compact(self):
+ """Removes keys from the cache that are actually clean, by comparing
+ them with the underlying context.
+
+ This can occur during the merge process, e.g. by passing --tool :local
+ to resolve a conflict.
+ """
+ keys = []
+ for path in self._cache.keys():
+ cache = self._cache[path]
+ try:
+ underlying = self._wrappedctx[path]
+ if (underlying.data() == cache['data'] and
+ underlying.flags() == cache['flags']):
+ keys.append(path)
+ except error.ManifestLookupError:
+ # Path not in the underlying manifest (created).
+ continue
+
+ for path in keys:
+ del self._cache[path]
+ return keys
def _markdirty(self, path, exists, data=None, date=None, flags=''):
- if path not in self._cache:
- self._writeorder.append(path)
-
self._cache[path] = {
'exists': exists,
'data': data,
'date': date,
'flags': flags,
+ 'copied': None,
}
def filectx(self, path, filelog=None):
return overlayworkingfilectx(self._repo, path, parent=self,
filelog=filelog)
-class overlayworkingfilectx(workingfilectx):
+class overlayworkingfilectx(committablefilectx):
"""Wrap a ``workingfilectx`` but intercepts all writes into an in-memory
cache, which can be flushed through later by calling ``flush()``."""
@@ -2109,7 +2292,7 @@
def cmp(self, fctx):
return self.data() != fctx.data()
- def ctx(self):
+ def changectx(self):
return self._parent
def data(self):
@@ -2125,16 +2308,17 @@
return self._parent.exists(self._path)
def renamed(self):
- # Copies are currently tracked in the dirstate as before. Straight copy
- # from workingfilectx.
- rp = self._repo.dirstate.copied(self._path)
- if not rp:
+ path = self._parent.copydata(self._path)
+ if not path:
return None
- return rp, self._changectx._parents[0]._manifest.get(rp, nullid)
+ return path, self._changectx._parents[0]._manifest.get(path, nullid)
def size(self):
return self._parent.size(self._path)
+ def markcopied(self, origin):
+ self._parent.markcopied(self._path, origin)
+
def audit(self):
pass
@@ -2144,12 +2328,15 @@
def setflags(self, islink, isexec):
return self._parent.setflags(self._path, islink, isexec)
- def write(self, data, flags, backgroundclose=False):
- return self._parent.write(self._path, data, flags)
+ def write(self, data, flags, backgroundclose=False, **kwargs):
+ return self._parent.write(self._path, data, flags, **kwargs)
def remove(self, ignoremissing=False):
return self._parent.remove(self._path)
+ def clearunknown(self):
+ pass
+
class workingcommitctx(workingctx):
"""A workingcommitctx object makes access to data related to
the revision being committed convenient.
@@ -2215,9 +2402,9 @@
copied = fctx.renamed()
if copied:
copied = copied[0]
- return memfilectx(repo, path, fctx.data(),
+ return memfilectx(repo, memctx, path, fctx.data(),
islink=fctx.islink(), isexec=fctx.isexec(),
- copied=copied, memctx=memctx)
+ copied=copied)
return getfilectx
@@ -2231,9 +2418,8 @@
if data is None:
return None
islink, isexec = mode
- return memfilectx(repo, path, data, islink=islink,
- isexec=isexec, copied=copied,
- memctx=memctx)
+ return memfilectx(repo, memctx, path, data, islink=islink,
+ isexec=isexec, copied=copied)
return getfilectx
@@ -2365,8 +2551,8 @@
See memctx and committablefilectx for more details.
"""
- def __init__(self, repo, path, data, islink=False,
- isexec=False, copied=None, memctx=None):
+ def __init__(self, repo, changectx, path, data, islink=False,
+ isexec=False, copied=None):
"""
path is the normalized file path relative to repository root.
data is the file content as a string.
@@ -2374,7 +2560,7 @@
isexec is True if the file is executable.
copied is the source file path if current file was copied in the
revision being committed, or None."""
- super(memfilectx, self).__init__(repo, path, None, memctx)
+ super(memfilectx, self).__init__(repo, path, None, changectx)
self._data = data
self._flags = (islink and 'l' or '') + (isexec and 'x' or '')
self._copied = None
@@ -2389,7 +2575,7 @@
# need to figure out what to do here
del self._changectx[self._path]
- def write(self, data, flags):
+ def write(self, data, flags, **kwargs):
"""wraps repo.wwrite"""
self._data = data
@@ -2598,7 +2784,7 @@
def remove(self):
util.unlink(self._path)
- def write(self, data, flags):
+ def write(self, data, flags, **kwargs):
assert not flags
with open(self._path, "w") as f:
f.write(data)
diff -r 87676e8ee056 -r 27b6df1b5adb mercurial/copies.py
--- a/mercurial/copies.py Mon Jan 08 16:07:51 2018 -0800
+++ b/mercurial/copies.py Mon Jan 22 17:53:02 2018 -0500
@@ -107,7 +107,7 @@
return min(limit, a, b)
def _chain(src, dst, a, b):
- '''chain two sets of copies a->b'''
+ """chain two sets of copies a->b"""
t = a.copy()
for k, v in b.iteritems():
if v in t:
@@ -130,8 +130,8 @@
return t
def _tracefile(fctx, am, limit=-1):
- '''return file context that is the ancestor of fctx present in ancestor
- manifest am, stopping after the first ancestor lower than limit'''
+ """return file context that is the ancestor of fctx present in ancestor
+ manifest am, stopping after the first ancestor lower than limit"""
for f in fctx.ancestors():
if am.get(f.path(), None) == f.filenode():
@@ -139,11 +139,11 @@
if limit >= 0 and f.linkrev() < limit and f.rev() < limit:
return None
-def _dirstatecopies(d):
+def _dirstatecopies(d, match=None):
ds = d._repo.dirstate
c = ds.copies().copy()
for k in list(c):
- if ds[k] not in 'anm':
+ if ds[k] not in 'anm' or (match and not match(k)):
del c[k]
return c
@@ -156,18 +156,8 @@
mb = b.manifest()
return mb.filesnotin(ma, match=match)
-def _forwardcopies(a, b, match=None):
- '''find {dst@b: src@a} copy mapping where a is an ancestor of b'''
-
- # check for working copy
- w = None
- if b.rev() is None:
- w = b
- b = w.p1()
- if a == b:
- # short-circuit to avoid issues with merge states
- return _dirstatecopies(w)
-
+def _committedforwardcopies(a, b, match):
+ """Like _forwardcopies(), but b.rev() cannot be None (working copy)"""
# files might have to be traced back to the fctx parent of the last
# one-side-only changeset, but not further back than that
limit = _findlimit(a._repo, a.rev(), b.rev())
@@ -199,12 +189,21 @@
ofctx = _tracefile(fctx, am, limit)
if ofctx:
cm[f] = ofctx.path()
+ return cm
- # combine copies from dirstate if necessary
- if w is not None:
- cm = _chain(a, w, cm, _dirstatecopies(w))
+def _forwardcopies(a, b, match=None):
+ """find {dst@b: src@a} copy mapping where a is an ancestor of b"""
- return cm
+ # check for working copy
+ if b.rev() is None:
+ if a == b.p1():
+ # short-circuit to avoid issues with merge states
+ return _dirstatecopies(b, match)
+
+ cm = _committedforwardcopies(a, b.p1(), match)
+ # combine copies from dirstate if necessary
+ return _chain(a, b, cm, _dirstatecopies(b, match))
+ return _committedforwardcopies(a, b, match)
def _backwardrenames(a, b):
if a._repo.ui.config('experimental', 'copytrace') == 'off':
@@ -223,7 +222,7 @@
return r
def pathcopies(x, y, match=None):
- '''find {dst@y: src@x} copy mapping for directed compare'''
+ """find {dst@y: src@x} copy mapping for directed compare"""
if x == y or not x or not y:
return {}
a = y.ancestor(x)
@@ -861,13 +860,13 @@
return
def duplicatecopies(repo, wctx, rev, fromrev, skiprev=None):
- '''reproduce copies from fromrev to rev in the dirstate
+ """reproduce copies from fromrev to rev in the dirstate
If skiprev is specified, it's a revision that should be used to
filter copy records. Any copies that occur between fromrev and
skiprev will not be duplicated, even if they appear in the set of
copies between fromrev and rev.
- '''
+ """
exclude = {}
if (skiprev is not None and
repo.ui.config('experimental', 'copytrace') != 'off'):
diff -r 87676e8ee056 -r 27b6df1b5adb mercurial/crecord.py
--- a/mercurial/crecord.py Mon Jan 08 16:07:51 2018 -0800
+++ b/mercurial/crecord.py Mon Jan 22 17:53:02 2018 -0500
@@ -555,7 +555,7 @@
return chunkselector.opts
_headermessages = { # {operation: text}
- 'revert': _('Select hunks to revert'),
+ 'apply': _('Select hunks to apply'),
'discard': _('Select hunks to discard'),
None: _('Select hunks to record'),
}
@@ -581,6 +581,13 @@
# maps custom nicknames of color-pairs to curses color-pair values
self.colorpairnames = {}
+ # Honor color setting of ui section. Keep colored setup as
+ # long as not explicitly set to a falsy value - especially,
+ # when not set at all. This is to stay most compatible with
+ # previous (color only) behaviour.
+ uicolor = util.parsebool(self.ui.config('ui', 'color'))
+ self.usecolor = uicolor is not False
+
# the currently selected header, hunk, or hunk-line
self.currentselecteditem = self.headerlist[0]
@@ -1371,11 +1378,19 @@
colorpair = self.colorpairs[(fgcolor, bgcolor)]
else:
pairindex = len(self.colorpairs) + 1
- curses.init_pair(pairindex, fgcolor, bgcolor)
- colorpair = self.colorpairs[(fgcolor, bgcolor)] = (
- curses.color_pair(pairindex))
- if name is not None:
- self.colorpairnames[name] = curses.color_pair(pairindex)
+ if self.usecolor:
+ curses.init_pair(pairindex, fgcolor, bgcolor)
+ colorpair = self.colorpairs[(fgcolor, bgcolor)] = (
+ curses.color_pair(pairindex))
+ if name is not None:
+ self.colorpairnames[name] = curses.color_pair(pairindex)
+ else:
+ cval = 0
+ if name is not None:
+ if name == 'selected':
+ cval = curses.A_REVERSE
+ self.colorpairnames[name] = cval
+ colorpair = self.colorpairs[(fgcolor, bgcolor)] = cval
# add attributes if possible
if attrlist is None:
@@ -1704,7 +1719,10 @@
self.yscreensize, self.xscreensize = self.stdscr.getmaxyx()
curses.start_color()
- curses.use_default_colors()
+ try:
+ curses.use_default_colors()
+ except curses.error:
+ self.usecolor = False
# available colors: black, blue, cyan, green, magenta, white, yellow
# init_pair(color_id, foreground_color, background_color)
diff -r 87676e8ee056 -r 27b6df1b5adb mercurial/dagop.py
--- a/mercurial/dagop.py Mon Jan 08 16:07:51 2018 -0800
+++ b/mercurial/dagop.py Mon Jan 22 17:53:02 2018 -0500
@@ -75,6 +75,46 @@
if prev != node.nullrev:
heapq.heappush(pendingheap, (heapsign * prev, pdepth))
+def filectxancestors(fctxs, followfirst=False):
+ """Like filectx.ancestors(), but can walk from multiple files/revisions,
+ and includes the given fctxs themselves
+
+ Yields (rev, {fctx, ...}) pairs in descending order.
+ """
+ visit = {}
+ visitheap = []
+ def addvisit(fctx):
+ rev = fctx.rev()
+ if rev not in visit:
+ visit[rev] = set()
+ heapq.heappush(visitheap, -rev) # max heap
+ visit[rev].add(fctx)
+
+ if followfirst:
+ cut = 1
+ else:
+ cut = None
+
+ for c in fctxs:
+ addvisit(c)
+ while visit:
+ currev = -heapq.heappop(visitheap)
+ curfctxs = visit.pop(currev)
+ yield currev, curfctxs
+ for c in curfctxs:
+ for parent in c.parents()[:cut]:
+ addvisit(parent)
+ assert not visitheap
+
+def filerevancestors(fctxs, followfirst=False):
+ """Like filectx.ancestors(), but can walk from multiple files/revisions,
+ and includes the given fctxs themselves
+
+ Returns a smartset.
+ """
+ gen = (rev for rev, _cs in filectxancestors(fctxs, followfirst))
+ return generatorset(gen, iterasc=False)
+
def _genrevancestors(repo, revs, followfirst, startdepth, stopdepth, cutfunc):
if followfirst:
cut = 1
@@ -251,9 +291,7 @@
`fromline`-`toline` range.
"""
diffopts = patch.diffopts(fctx._repo.ui)
- introrev = fctx.introrev()
- if fctx.rev() != introrev:
- fctx = fctx.filectx(fctx.filenode(), changeid=introrev)
+ fctx = fctx.introfilectx()
visit = {(fctx.linkrev(), fctx.filenode()): (fctx, (fromline, toline))}
while visit:
c, linerange2 = visit.pop(max(visit))
diff -r 87676e8ee056 -r 27b6df1b5adb mercurial/dagutil.py
--- a/mercurial/dagutil.py Mon Jan 08 16:07:51 2018 -0800
+++ b/mercurial/dagutil.py Mon Jan 22 17:53:02 2018 -0500
@@ -154,8 +154,9 @@
class revlogdag(revlogbaseddag):
'''dag interface to a revlog'''
- def __init__(self, revlog):
+ def __init__(self, revlog, localsubset=None):
revlogbaseddag.__init__(self, revlog, set(revlog))
+ self._heads = localsubset
def _getheads(self):
return [r for r in self._revlog.headrevs() if r != nullrev]
diff -r 87676e8ee056 -r 27b6df1b5adb mercurial/debugcommands.py
--- a/mercurial/debugcommands.py Mon Jan 08 16:07:51 2018 -0800
+++ b/mercurial/debugcommands.py Mon Jan 22 17:53:02 2018 -0500
@@ -69,6 +69,7 @@
templater,
treediscovery,
upgrade,
+ url as urlmod,
util,
vfs as vfsmod,
)
@@ -179,11 +180,11 @@
ui.progress(_('building'), id, unit=_('revisions'), total=total)
for type, data in dagparser.parsedag(text):
if type == 'n':
- ui.note(('node %s\n' % str(data)))
+ ui.note(('node %s\n' % pycompat.bytestr(data)))
id, ps = data
files = []
- fctxs = {}
+ filecontent = {}
p2 = None
if mergeable_file:
@@ -204,27 +205,30 @@
ml[id * linesperrev] += " r%i" % id
mergedtext = "\n".join(ml)
files.append(fn)
- fctxs[fn] = context.memfilectx(repo, fn, mergedtext)
+ filecontent[fn] = mergedtext
if overwritten_file:
fn = "of"
files.append(fn)
- fctxs[fn] = context.memfilectx(repo, fn, "r%i\n" % id)
+ filecontent[fn] = "r%i\n" % id
if new_file:
fn = "nf%i" % id
files.append(fn)
- fctxs[fn] = context.memfilectx(repo, fn, "r%i\n" % id)
+ filecontent[fn] = "r%i\n" % id
if len(ps) > 1:
if not p2:
p2 = repo[ps[1]]
for fn in p2:
if fn.startswith("nf"):
files.append(fn)
- fctxs[fn] = p2[fn]
+ filecontent[fn] = p2[fn].data()
def fctxfn(repo, cx, path):
- return fctxs.get(path)
+ if path in filecontent:
+ return context.memfilectx(repo, cx, path,
+ filecontent[path])
+ return None
if len(ps) == 0 or ps[0] < 0:
pars = [None, None]
@@ -296,7 +300,7 @@
msg %= indent_string, exc.version, len(data)
ui.write(msg)
else:
- msg = "%sversion: %s (%d bytes)\n"
+ msg = "%sversion: %d (%d bytes)\n"
msg %= indent_string, version, len(data)
ui.write(msg)
fm = ui.formatter('debugobsolete', opts)
@@ -360,6 +364,25 @@
return _debugbundle2(ui, gen, all=all, **opts)
_debugchangegroup(ui, gen, all=all, **opts)
+@command('debugcapabilities',
+ [], _('PATH'),
+ norepo=True)
+def debugcapabilities(ui, path, **opts):
+ """lists the capabilities of a remote peer"""
+ opts = pycompat.byteskwargs(opts)
+ peer = hg.peer(ui, opts, path)
+ caps = peer.capabilities()
+ ui.write(('Main capabilities:\n'))
+ for c in sorted(caps):
+ ui.write((' %s\n') % c)
+ b2caps = bundle2.bundle2caps(peer)
+ if b2caps:
+ ui.write(('Bundle2 capabilities:\n'))
+ for key, values in sorted(b2caps.iteritems()):
+ ui.write((' %s\n') % key)
+ for v in values:
+ ui.write((' %s\n') % v)
+
@command('debugcheckstate', [], '')
def debugcheckstate(ui, repo):
"""validate the correctness of the current dirstate"""
@@ -569,11 +592,23 @@
the delta chain for this revision
:``extraratio``: extradist divided by chainsize; another representation of
how much unrelated data is needed to load this delta chain
+
+ If the repository is configured to use the sparse read, additional keywords
+ are available:
+
+ :``readsize``: total size of data read from the disk for a revision
+ (sum of the sizes of all the blocks)
+ :``largestblock``: size of the largest block of data read from the disk
+ :``readdensity``: density of useful bytes in the data read from the disk
+ :``srchunks``: in how many data hunks the whole revision would be read
+
+ The sparse read can be enabled with experimental.sparse-read = True
"""
opts = pycompat.byteskwargs(opts)
r = cmdutil.openrevlog(repo, 'debugdeltachain', file_, opts)
index = r.index
generaldelta = r.version & revlog.FLAG_GENERALDELTA
+ withsparseread = getattr(r, '_withsparseread', False)
def revinfo(rev):
e = index[rev]
@@ -609,15 +644,20 @@
fm.plain(' rev chain# chainlen prev delta '
'size rawsize chainsize ratio lindist extradist '
- 'extraratio\n')
+ 'extraratio')
+ if withsparseread:
+ fm.plain(' readsize largestblk rddensity srchunks')
+ fm.plain('\n')
chainbases = {}
for rev in r:
comp, uncomp, deltatype, chain, chainsize = revinfo(rev)
chainbase = chain[0]
chainid = chainbases.setdefault(chainbase, len(chainbases) + 1)
- basestart = r.start(chainbase)
- revstart = r.start(rev)
+ start = r.start
+ length = r.length
+ basestart = start(chainbase)
+ revstart = start(rev)
lineardist = revstart + comp - basestart
extradist = lineardist - chainsize
try:
@@ -632,7 +672,7 @@
fm.write('rev chainid chainlen prevrev deltatype compsize '
'uncompsize chainsize chainratio lindist extradist '
'extraratio',
- '%7d %7d %8d %8d %7s %10d %10d %10d %9.5f %9d %9d %10.5f\n',
+ '%7d %7d %8d %8d %7s %10d %10d %10d %9.5f %9d %9d %10.5f',
rev, chainid, len(chain), prevrev, deltatype, comp,
uncomp, chainsize, chainratio, lineardist, extradist,
extraratio,
@@ -641,6 +681,29 @@
uncompsize=uncomp, chainsize=chainsize,
chainratio=chainratio, lindist=lineardist,
extradist=extradist, extraratio=extraratio)
+ if withsparseread:
+ readsize = 0
+ largestblock = 0
+ srchunks = 0
+
+ for revschunk in revlog._slicechunk(r, chain):
+ srchunks += 1
+ blkend = start(revschunk[-1]) + length(revschunk[-1])
+ blksize = blkend - start(revschunk[0])
+
+ readsize += blksize
+ if largestblock < blksize:
+ largestblock = blksize
+
+ readdensity = float(chainsize) / float(readsize)
+
+ fm.write('readsize largestblock readdensity srchunks',
+ ' %10d %10d %9.5f %8d',
+ readsize, largestblock, readdensity, srchunks,
+ readsize=readsize, largestblock=largestblock,
+ readdensity=readdensity, srchunks=srchunks)
+
+ fm.plain('\n')
fm.end()
@@ -665,8 +728,9 @@
elif nodates:
timestr = 'set '
else:
- timestr = time.strftime("%Y-%m-%d %H:%M:%S ",
+ timestr = time.strftime(r"%Y-%m-%d %H:%M:%S ",
time.localtime(ent[3]))
+ timestr = encoding.strtolocal(timestr)
if ent[1] & 0o20000:
mode = 'lnk'
else:
@@ -679,24 +743,21 @@
[('', 'old', None, _('use old-style discovery')),
('', 'nonheads', None,
_('use old-style discovery with non-heads included')),
+ ('', 'rev', [], 'restrict discovery to this set of revs'),
] + cmdutil.remoteopts,
- _('[-l REV] [-r REV] [-b BRANCH]... [OTHER]'))
+ _('[--rev REV] [OTHER]'))
def debugdiscovery(ui, repo, remoteurl="default", **opts):
"""runs the changeset discovery protocol in isolation"""
opts = pycompat.byteskwargs(opts)
- remoteurl, branches = hg.parseurl(ui.expandpath(remoteurl),
- opts.get('branch'))
+ remoteurl, branches = hg.parseurl(ui.expandpath(remoteurl))
remote = hg.peer(repo, opts, remoteurl)
ui.status(_('comparing with %s\n') % util.hidepassword(remoteurl))
# make sure tests are repeatable
random.seed(12323)
- def doit(localheads, remoteheads, remote=remote):
+ def doit(pushedrevs, remoteheads, remote=remote):
if opts.get('old'):
- if localheads:
- raise error.Abort('cannot use localheads with old style '
- 'discovery')
if not util.safehasattr(remote, 'branches'):
# enable in-client legacy support
remote = localrepo.locallegacypeer(remote.local())
@@ -710,7 +771,12 @@
all = dag.ancestorset(dag.internalizeall(common))
common = dag.externalizeall(dag.headsetofconnecteds(all))
else:
- common, any, hds = setdiscovery.findcommonheads(ui, repo, remote)
+ nodes = None
+ if pushedrevs:
+ revs = scmutil.revrange(repo, pushedrevs)
+ nodes = [repo[r].node() for r in revs]
+ common, any, hds = setdiscovery.findcommonheads(ui, repo, remote,
+ ancestorsof=nodes)
common = set(common)
rheads = set(hds)
lheads = set(repo.heads())
@@ -721,26 +787,33 @@
elif rheads <= common:
ui.write(("remote is subset\n"))
- serverlogs = opts.get('serverlog')
- if serverlogs:
- for filename in serverlogs:
- with open(filename, 'r') as logfile:
- line = logfile.readline()
- while line:
- parts = line.strip().split(';')
- op = parts[1]
- if op == 'cg':
- pass
- elif op == 'cgss':
- doit(parts[2].split(' '), parts[3].split(' '))
- elif op == 'unb':
- doit(parts[3].split(' '), parts[2].split(' '))
- line = logfile.readline()
- else:
- remoterevs, _checkout = hg.addbranchrevs(repo, remote, branches,
- opts.get('remote_head'))
- localrevs = opts.get('local_head')
- doit(localrevs, remoterevs)
+ remoterevs, _checkout = hg.addbranchrevs(repo, remote, branches, revs=None)
+ localrevs = opts['rev']
+ doit(localrevs, remoterevs)
+
+_chunksize = 4 << 10
+
+@command('debugdownload',
+ [
+ ('o', 'output', '', _('path')),
+ ],
+ optionalrepo=True)
+def debugdownload(ui, repo, url, output=None, **opts):
+ """download a resource using Mercurial logic and config
+ """
+ fh = urlmod.open(ui, url, output)
+
+ dest = ui
+ if output:
+ dest = open(output, "wb", _chunksize)
+ try:
+ data = fh.read(_chunksize)
+ while data:
+ dest.write(data)
+ data = fh.read(_chunksize)
+ finally:
+ if output:
+ dest.close()
@command('debugextensions', cmdutil.formatteropts, [], norepo=True)
def debugextensions(ui, **opts):
@@ -801,9 +874,74 @@
for f in ctx.getfileset(expr):
ui.write("%s\n" % f)
+@command('debugformat',
+ [] + cmdutil.formatteropts,
+ _(''))
+def debugformat(ui, repo, **opts):
+ """display format information about the current repository
+
+ Use --verbose to get extra information about current config value and
+ Mercurial default."""
+ opts = pycompat.byteskwargs(opts)
+ maxvariantlength = max(len(fv.name) for fv in upgrade.allformatvariant)
+ maxvariantlength = max(len('format-variant'), maxvariantlength)
+
+ def makeformatname(name):
+ return '%s:' + (' ' * (maxvariantlength - len(name)))
+
+ fm = ui.formatter('debugformat', opts)
+ if fm.isplain():
+ def formatvalue(value):
+ if util.safehasattr(value, 'startswith'):
+ return value
+ if value:
+ return 'yes'
+ else:
+ return 'no'
+ else:
+ formatvalue = pycompat.identity
+
+ fm.plain('format-variant')
+ fm.plain(' ' * (maxvariantlength - len('format-variant')))
+ fm.plain(' repo')
+ if ui.verbose:
+ fm.plain(' config default')
+ fm.plain('\n')
+ for fv in upgrade.allformatvariant:
+ fm.startitem()
+ repovalue = fv.fromrepo(repo)
+ configvalue = fv.fromconfig(repo)
+
+ if repovalue != configvalue:
+ namelabel = 'formatvariant.name.mismatchconfig'
+ repolabel = 'formatvariant.repo.mismatchconfig'
+ elif repovalue != fv.default:
+ namelabel = 'formatvariant.name.mismatchdefault'
+ repolabel = 'formatvariant.repo.mismatchdefault'
+ else:
+ namelabel = 'formatvariant.name.uptodate'
+ repolabel = 'formatvariant.repo.uptodate'
+
+ fm.write('name', makeformatname(fv.name), fv.name,
+ label=namelabel)
+ fm.write('repo', ' %3s', formatvalue(repovalue),
+ label=repolabel)
+ if fv.default != configvalue:
+ configlabel = 'formatvariant.config.special'
+ else:
+ configlabel = 'formatvariant.config.default'
+ fm.condwrite(ui.verbose, 'config', ' %6s', formatvalue(configvalue),
+ label=configlabel)
+ fm.condwrite(ui.verbose, 'default', ' %7s', formatvalue(fv.default),
+ label='formatvariant.default')
+ fm.plain('\n')
+ fm.end()
+
@command('debugfsinfo', [], _('[PATH]'), norepo=True)
def debugfsinfo(ui, path="."):
"""show information detected about current filesystem"""
+ ui.write(('path: %s\n') % path)
+ ui.write(('mounted on: %s\n') % (util.getfsmountpoint(path) or '(unknown)'))
ui.write(('exec: %s\n') % (util.checkexec(path) and 'yes' or 'no'))
ui.write(('fstype: %s\n') % (util.getfstype(path) or '(unknown)'))
ui.write(('symlink: %s\n') % (util.checklink(path) and 'yes' or 'no'))
@@ -1066,6 +1204,11 @@
fm.formatlist([e.name() for e in wirecompengines
if e.wireprotosupport()],
name='compengine', fmt='%s', sep=', '))
+ re2 = 'missing'
+ if util._re2:
+ re2 = 'available'
+ fm.plain(_('checking "re2" regexp engine (%s)\n') % re2)
+ fm.data(re2=bool(util._re2))
# templates
p = templater.templatepaths()
@@ -1155,7 +1298,10 @@
@command('debuglocks',
[('L', 'force-lock', None, _('free the store lock (DANGEROUS)')),
('W', 'force-wlock', None,
- _('free the working state lock (DANGEROUS)'))],
+ _('free the working state lock (DANGEROUS)')),
+ ('s', 'set-lock', None, _('set the store lock until stopped')),
+ ('S', 'set-wlock', None,
+ _('set the working state lock until stopped'))],
_('[OPTION]...'))
def debuglocks(ui, repo, **opts):
"""show or modify state of locks
@@ -1174,6 +1320,10 @@
instance, on a shared filesystem). Removing locks may also be
blocked by filesystem permissions.
+ Setting a lock will prevent other commands from changing the data.
+ The command will wait until an interruption (SIGINT, SIGTERM, ...) occurs.
+ The set locks are removed when the command exits.
+
Returns 0 if no locks are held.
"""
@@ -1182,9 +1332,27 @@
repo.svfs.unlink('lock')
if opts.get(r'force_wlock'):
repo.vfs.unlink('wlock')
- if opts.get(r'force_lock') or opts.get(r'force_lock'):
+ if opts.get(r'force_lock') or opts.get(r'force_wlock'):
return 0
+ locks = []
+ try:
+ if opts.get(r'set_wlock'):
+ try:
+ locks.append(repo.wlock(False))
+ except error.LockHeld:
+ raise error.Abort(_('wlock is already held'))
+ if opts.get(r'set_lock'):
+ try:
+ locks.append(repo.lock(False))
+ except error.LockHeld:
+ raise error.Abort(_('lock is already held'))
+ if len(locks):
+ ui.promptchoice(_("ready to release the lock (y)? $$ &Yes"))
+ return 0
+ finally:
+ release(*locks)
+
now = time.time()
held = 0
@@ -2170,15 +2338,11 @@
cache = {}
ctx2str = str
node2str = short
- if ui.debug():
- def ctx2str(ctx):
- return ctx.hex()
- node2str = hex
for rev in scmutil.revrange(repo, revs):
ctx = repo[rev]
ui.write('%s\n'% ctx2str(ctx))
for succsset in obsutil.successorssets(repo, ctx.node(),
- closest=opts['closest'],
+ closest=opts[r'closest'],
cache=cache):
if succsset:
ui.write(' ')
@@ -2228,8 +2392,8 @@
ui.note(("* expanded:\n"), templater.prettyformat(newtree), '\n')
if revs is None:
- t = formatter.maketemplater(ui, tmpl)
- props['ui'] = ui
+ tres = formatter.templateresources(ui, repo)
+ t = formatter.maketemplater(ui, tmpl, resources=tres)
ui.write(t.render(props))
else:
displayer = cmdutil.makelogtemplater(ui, repo, tmpl)
@@ -2304,6 +2468,7 @@
for k, v in opts.iteritems():
if v:
args[k] = v
+ args = pycompat.strkwargs(args)
# run twice to check that we don't mess up the stream for the next command
res1 = repo.debugwireargs(*vals, **args)
res2 = repo.debugwireargs(*vals, **args)
diff -r 87676e8ee056 -r 27b6df1b5adb mercurial/dirstate.py
--- a/mercurial/dirstate.py Mon Jan 08 16:07:51 2018 -0800
+++ b/mercurial/dirstate.py Mon Jan 22 17:53:02 2018 -0500
@@ -80,6 +80,7 @@
self._plchangecallbacks = {}
self._origpl = None
self._updatedfiles = set()
+ self._mapcls = dirstatemap
@contextlib.contextmanager
def parentchange(self):
@@ -127,9 +128,8 @@
@propertycache
def _map(self):
- '''Return the dirstate contents as a map from filename to
- (state, mode, size, time).'''
- self._map = dirstatemap(self._ui, self._opener, self._root)
+ """Return the dirstate contents (see documentation for dirstatemap)."""
+ self._map = self._mapcls(self._ui, self._opener, self._root)
return self._map
@property
@@ -158,8 +158,8 @@
def _pl(self):
return self._map.parents()
- def dirs(self):
- return self._map.dirs
+ def hasdir(self, d):
+ return self._map.hastrackeddir(d)
@rootcache('.hgignore')
def _ignore(self):
@@ -387,40 +387,23 @@
def copies(self):
return self._map.copymap
- def _droppath(self, f):
- if self[f] not in "?r" and "dirs" in self._map.__dict__:
- self._map.dirs.delpath(f)
-
- if "filefoldmap" in self._map.__dict__:
- normed = util.normcase(f)
- if normed in self._map.filefoldmap:
- del self._map.filefoldmap[normed]
-
- self._updatedfiles.add(f)
-
def _addpath(self, f, state, mode, size, mtime):
oldstate = self[f]
if state == 'a' or oldstate == 'r':
scmutil.checkfilename(f)
- if f in self._map.dirs:
+ if self._map.hastrackeddir(f):
raise error.Abort(_('directory %r already in dirstate') % f)
# shadows
for d in util.finddirs(f):
- if d in self._map.dirs:
+ if self._map.hastrackeddir(d):
break
entry = self._map.get(d)
if entry is not None and entry[0] != 'r':
raise error.Abort(
_('file %r in dirstate clashes with %r') % (d, f))
- if oldstate in "?r" and "dirs" in self._map.__dict__:
- self._map.dirs.addpath(f)
self._dirty = True
self._updatedfiles.add(f)
- self._map[f] = dirstatetuple(state, mode, size, mtime)
- if state != 'n' or mtime == -1:
- self._map.nonnormalset.add(f)
- if size == -2:
- self._map.otherparentset.add(f)
+ self._map.addfile(f, oldstate, state, mode, size, mtime)
def normal(self, f):
'''Mark a file normal and clean.'''
@@ -458,8 +441,6 @@
return
self._addpath(f, 'n', 0, -1, -1)
self._map.copymap.pop(f, None)
- if f in self._map.nonnormalset:
- self._map.nonnormalset.remove(f)
def otherparent(self, f):
'''Mark as coming from the other parent, always dirty.'''
@@ -482,7 +463,7 @@
def remove(self, f):
'''Mark a file removed.'''
self._dirty = True
- self._droppath(f)
+ oldstate = self[f]
size = 0
if self._pl[1] != nullid:
entry = self._map.get(f)
@@ -493,8 +474,8 @@
elif entry[0] == 'n' and entry[2] == -2: # other parent
size = -2
self._map.otherparentset.add(f)
- self._map[f] = dirstatetuple('r', 0, size, 0)
- self._map.nonnormalset.add(f)
+ self._updatedfiles.add(f)
+ self._map.removefile(f, oldstate, size)
if size == 0:
self._map.copymap.pop(f, None)
@@ -506,12 +487,10 @@
def drop(self, f):
'''Drop a file from the dirstate'''
- if f in self._map:
+ oldstate = self[f]
+ if self._map.dropfile(f, oldstate):
self._dirty = True
- self._droppath(f)
- del self._map[f]
- if f in self._map.nonnormalset:
- self._map.nonnormalset.remove(f)
+ self._updatedfiles.add(f)
self._map.copymap.pop(f, None)
def _discoverpath(self, path, normed, ignoremissing, exists, storemap):
@@ -635,12 +614,7 @@
# emulate dropping timestamp in 'parsers.pack_dirstate'
now = _getfsnow(self._opener)
- dmap = self._map
- for f in self._updatedfiles:
- e = dmap.get(f)
- if e is not None and e[0] == 'n' and e[3] == now:
- dmap[f] = dirstatetuple(e[0], e[1], e[2], -1)
- self._map.nonnormalset.add(f)
+ self._map.clearambiguoustimes(self._updatedfiles, now)
# emulate that all 'dirstate.normal' results are written out
self._lastnormaltime = 0
@@ -797,7 +771,6 @@
results = dict.fromkeys(subrepos)
results['.hg'] = None
- alldirs = None
for ff in files:
# constructing the foldmap is expensive, so don't do it for the
# common case where files is ['.']
@@ -828,9 +801,7 @@
if nf in dmap: # does it exactly match a missing file?
results[nf] = None
else: # does it match a missing directory?
- if alldirs is None:
- alldirs = util.dirs(dmap._map)
- if nf in alldirs:
+ if self._map.hasdir(nf):
if matchedir:
matchedir(nf)
notfoundadd(nf)
@@ -1198,6 +1169,39 @@
self._opener.unlink(backupname)
class dirstatemap(object):
+ """Map encapsulating the dirstate's contents.
+
+ The dirstate contains the following state:
+
+ - `identity` is the identity of the dirstate file, which can be used to
+ detect when changes have occurred to the dirstate file.
+
+ - `parents` is a pair containing the parents of the working copy. The
+ parents are updated by calling `setparents`.
+
+ - the state map maps filenames to tuples of (state, mode, size, mtime),
+ where state is a single character representing 'normal', 'added',
+ 'removed', or 'merged'. It is read by treating the dirstate as a
+ dict. File state is updated by calling the `addfile`, `removefile` and
+ `dropfile` methods.
+
+ - `copymap` maps destination filenames to their source filename.
+
+ The dirstate also provides the following views onto the state:
+
+ - `nonnormalset` is a set of the filenames that have state other
+ than 'normal', or are normal but have an mtime of -1 ('normallookup').
+
+ - `otherparentset` is a set of the filenames that are marked as coming
+ from the second parent when the dirstate is currently being merged.
+
+ - `filefoldmap` is a dict mapping normalized filenames to the denormalized
+ form that they appear as in the dirstate.
+
+ - `dirfoldmap` is a dict mapping normalized directory names to the
+ denormalized form that they appear as in the dirstate.
+ """
+
def __init__(self, ui, opener, root):
self._ui = ui
self._opener = opener
@@ -1226,6 +1230,12 @@
self._map.clear()
self.copymap.clear()
self.setparents(nullid, nullid)
+ util.clearcachedproperty(self, "_dirs")
+ util.clearcachedproperty(self, "_alldirs")
+ util.clearcachedproperty(self, "filefoldmap")
+ util.clearcachedproperty(self, "dirfoldmap")
+ util.clearcachedproperty(self, "nonnormalset")
+ util.clearcachedproperty(self, "otherparentset")
def iteritems(self):
return self._map.iteritems()
@@ -1242,15 +1252,9 @@
def __contains__(self, key):
return key in self._map
- def __setitem__(self, key, value):
- self._map[key] = value
-
def __getitem__(self, key):
return self._map[key]
- def __delitem__(self, key):
- del self._map[key]
-
def keys(self):
return self._map.keys()
@@ -1258,6 +1262,60 @@
"""Loads the underlying data, if it's not already loaded"""
self._map
+ def addfile(self, f, oldstate, state, mode, size, mtime):
+ """Add a tracked file to the dirstate."""
+ if oldstate in "?r" and "_dirs" in self.__dict__:
+ self._dirs.addpath(f)
+ if oldstate == "?" and "_alldirs" in self.__dict__:
+ self._alldirs.addpath(f)
+ self._map[f] = dirstatetuple(state, mode, size, mtime)
+ if state != 'n' or mtime == -1:
+ self.nonnormalset.add(f)
+ if size == -2:
+ self.otherparentset.add(f)
+
+ def removefile(self, f, oldstate, size):
+ """
+ Mark a file as removed in the dirstate.
+
+ The `size` parameter is used to store sentinel values that indicate
+ the file's previous state. In the future, we should refactor this
+ to be more explicit about what that state is.
+ """
+ if oldstate not in "?r" and "_dirs" in self.__dict__:
+ self._dirs.delpath(f)
+ if oldstate == "?" and "_alldirs" in self.__dict__:
+ self._alldirs.addpath(f)
+ if "filefoldmap" in self.__dict__:
+ normed = util.normcase(f)
+ self.filefoldmap.pop(normed, None)
+ self._map[f] = dirstatetuple('r', 0, size, 0)
+ self.nonnormalset.add(f)
+
+ def dropfile(self, f, oldstate):
+ """
+ Remove a file from the dirstate. Returns True if the file was
+ previously recorded.
+ """
+ exists = self._map.pop(f, None) is not None
+ if exists:
+ if oldstate != "r" and "_dirs" in self.__dict__:
+ self._dirs.delpath(f)
+ if "_alldirs" in self.__dict__:
+ self._alldirs.delpath(f)
+ if "filefoldmap" in self.__dict__:
+ normed = util.normcase(f)
+ self.filefoldmap.pop(normed, None)
+ self.nonnormalset.discard(f)
+ return exists
+
+ def clearambiguoustimes(self, files, now):
+ for f in files:
+ e = self.get(f)
+ if e is not None and e[0] == 'n' and e[3] == now:
+ self._map[f] = dirstatetuple(e[0], e[1], e[2], -1)
+ self.nonnormalset.add(f)
+
def nonnormalentries(self):
'''Compute the nonnormal dirstate entries from the dmap'''
try:
@@ -1293,13 +1351,28 @@
f['.'] = '.' # prevents useless util.fspath() invocation
return f
+ def hastrackeddir(self, d):
+ """
+ Returns True if the dirstate contains a tracked (not removed) file
+ in this directory.
+ """
+ return d in self._dirs
+
+ def hasdir(self, d):
+ """
+ Returns True if the dirstate contains a file (tracked or removed)
+ in this directory.
+ """
+ return d in self._alldirs
+
@propertycache
- def dirs(self):
- """Returns a set-like object containing all the directories in the
- current dirstate.
- """
+ def _dirs(self):
return util.dirs(self._map, 'r')
+ @propertycache
+ def _alldirs(self):
+ return util.dirs(self._map)
+
def _opendirstatefile(self):
fp, mode = txnutil.trypending(self._root, self._opener, self._filename)
if self._pendingmode is not None and self._pendingmode != mode:
@@ -1387,8 +1460,6 @@
# Avoid excess attribute lookups by fast pathing certain checks
self.__contains__ = self._map.__contains__
self.__getitem__ = self._map.__getitem__
- self.__setitem__ = self._map.__setitem__
- self.__delitem__ = self._map.__delitem__
self.get = self._map.get
def write(self, st, now):
@@ -1419,6 +1490,6 @@
def dirfoldmap(self):
f = {}
normcase = util.normcase
- for name in self.dirs:
+ for name in self._dirs:
f[normcase(name)] = name
return f
diff -r 87676e8ee056 -r 27b6df1b5adb mercurial/discovery.py
--- a/mercurial/discovery.py Mon Jan 08 16:07:51 2018 -0800
+++ b/mercurial/discovery.py Mon Jan 22 17:53:02 2018 -0500
@@ -21,12 +21,13 @@
branchmap,
error,
phases,
+ scmutil,
setdiscovery,
treediscovery,
util,
)
-def findcommonincoming(repo, remote, heads=None, force=False):
+def findcommonincoming(repo, remote, heads=None, force=False, ancestorsof=None):
"""Return a tuple (common, anyincoming, heads) used to identify the common
subset of nodes between repo and remote.
@@ -37,6 +38,9 @@
changegroupsubset. No code except for pull should be relying on this fact
any longer.
"heads" is either the supplied heads, or else the remote's heads.
+ "ancestorsof" if not None, restrict the discovery to a subset defined by
+ these nodes. Changeset outside of this set won't be considered (and
+ won't appears in "common")
If you pass heads and they are all known locally, the response lists just
these heads in "common" and in "heads".
@@ -59,7 +63,8 @@
return (heads, False, heads)
res = setdiscovery.findcommonheads(repo.ui, repo, remote,
- abortwhenunrelated=not force)
+ abortwhenunrelated=not force,
+ ancestorsof=ancestorsof)
common, anyinc, srvheads = res
return (list(common), anyinc, heads or list(srvheads))
@@ -141,7 +146,8 @@
# get common set if not provided
if commoninc is None:
- commoninc = findcommonincoming(repo, other, force=force)
+ commoninc = findcommonincoming(repo, other, force=force,
+ ancestorsof=onlyheads)
og.commonheads, _any, _hds = commoninc
# compute outgoing
@@ -365,11 +371,8 @@
if None in unsyncedheads:
# old remote, no heads data
heads = None
- elif len(unsyncedheads) <= 4 or repo.ui.verbose:
- heads = ' '.join(short(h) for h in unsyncedheads)
else:
- heads = (' '.join(short(h) for h in unsyncedheads[:4]) +
- ' ' + _("and %s others") % (len(unsyncedheads) - 4))
+ heads = scmutil.nodesummaries(repo, unsyncedheads)
if heads is None:
repo.ui.status(_("remote has heads that are "
"not known locally\n"))
diff -r 87676e8ee056 -r 27b6df1b5adb mercurial/dispatch.py
--- a/mercurial/dispatch.py Mon Jan 08 16:07:51 2018 -0800
+++ b/mercurial/dispatch.py Mon Jan 22 17:53:02 2018 -0500
@@ -55,7 +55,7 @@
self.fout = fout
self.ferr = ferr
- # remember options pre-parsed by _earlyreqopt*()
+ # remember options pre-parsed by _earlyparseopts()
self.earlyoptions = {}
# reposetups which run before extensions, useful for chg to pre-fill
@@ -96,10 +96,16 @@
err = e
status = -1
if util.safehasattr(req.ui, 'ferr'):
- if err is not None and err.errno != errno.EPIPE:
- req.ui.ferr.write('abort: %s\n' %
- encoding.strtolocal(err.strerror))
- req.ui.ferr.flush()
+ try:
+ if err is not None and err.errno != errno.EPIPE:
+ req.ui.ferr.write('abort: %s\n' %
+ encoding.strtolocal(err.strerror))
+ req.ui.ferr.flush()
+ # There's not much we can do about an I/O error here. So (possibly)
+ # change the status code and move on.
+ except IOError:
+ status = -1
+
sys.exit(status & 255)
def _initstdio():
@@ -150,9 +156,8 @@
try:
if not req.ui:
req.ui = uimod.ui.load()
- if req.ui.plain('strictflags'):
- req.earlyoptions.update(_earlyparseopts(req.args))
- if _earlyreqoptbool(req, 'traceback', ['--traceback']):
+ req.earlyoptions.update(_earlyparseopts(req.ui, req.args))
+ if req.earlyoptions['traceback']:
req.ui.setconfig('ui', 'traceback', 'on', '--traceback')
# set ui streams from the request
@@ -201,7 +206,8 @@
req.ui.flush()
if req.ui.logblockedtimes:
req.ui._blockedtimes['command_duration'] = duration * 1000
- req.ui.log('uiblocked', 'ui blocked ms', **req.ui._blockedtimes)
+ req.ui.log('uiblocked', 'ui blocked ms',
+ **pycompat.strkwargs(req.ui._blockedtimes))
req.ui.log("commandfinish", "%s exited %d after %0.2f seconds\n",
msg, ret or 0, duration)
try:
@@ -266,8 +272,7 @@
# read --config before doing anything else
# (e.g. to change trust settings for reading .hg/hgrc)
- cfgs = _parseconfig(req.ui,
- _earlyreqopt(req, 'config', ['--config']))
+ cfgs = _parseconfig(req.ui, req.earlyoptions['config'])
if req.repo:
# copy configs that were passed on the cmdline (--config) to
@@ -281,7 +286,7 @@
if not debugger or ui.plain():
# if we are in HGPLAIN mode, then disable custom debugging
debugger = 'pdb'
- elif _earlyreqoptbool(req, 'debugger', ['--debugger']):
+ elif req.earlyoptions['debugger']:
# This import can be slow for fancy debuggers, so only
# do it when absolutely necessary, i.e. when actual
# debugging has been requested
@@ -295,7 +300,7 @@
debugmortem[debugger] = debugmod.post_mortem
# enter the debugger before command execution
- if _earlyreqoptbool(req, 'debugger', ['--debugger']):
+ if req.earlyoptions['debugger']:
ui.warn(_("entering debugger - "
"type c to continue starting hg or h for help\n"))
@@ -311,7 +316,7 @@
ui.flush()
except: # re-raises
# enter the debugger when we hit an exception
- if _earlyreqoptbool(req, 'debugger', ['--debugger']):
+ if req.earlyoptions['debugger']:
traceback.print_exc()
debugmortem[debugger](sys.exc_info()[2])
raise
@@ -410,7 +415,7 @@
# tokenize each argument into exactly one word.
replacemap['"$@"'] = ' '.join(util.shellquote(arg) for arg in args)
# escape '\$' for regex
- regex = '|'.join(replacemap.keys()).replace('$', r'\$')
+ regex = '|'.join(replacemap.keys()).replace('$', br'\$')
r = re.compile(regex)
return r.sub(lambda x: replacemap[x.group()], cmd)
@@ -452,10 +457,10 @@
return m.group()
else:
ui.debug("No argument found for substitution "
- "of %i variable in alias '%s' definition."
+ "of %i variable in alias '%s' definition.\n"
% (int(m.groups()[0]), self.name))
return ''
- cmd = re.sub(r'\$(\d+|\$)', _checkvar, self.definition[1:])
+ cmd = re.sub(br'\$(\d+|\$)', _checkvar, self.definition[1:])
cmd = aliasinterpolate(self.name, args, cmd)
return ui.system(cmd, environ=env,
blockedtag='alias_%s' % self.name)
@@ -468,16 +473,15 @@
self.badalias = (_("error in definition for alias '%s': %s")
% (self.name, inst))
return
+ earlyopts, args = _earlysplitopts(args)
+ if earlyopts:
+ self.badalias = (_("error in definition for alias '%s': %s may "
+ "only be given on the command line")
+ % (self.name, '/'.join(zip(*earlyopts)[0])))
+ return
self.cmdname = cmd = args.pop(0)
self.givenargs = args
- for invalidarg in commands.earlyoptflags:
- if _earlygetopt([invalidarg], args):
- self.badalias = (_("error in definition for alias '%s': %s may "
- "only be given on the command line")
- % (self.name, invalidarg))
- return
-
try:
tableentry = cmdutil.findcmd(cmd, cmdtable, False)[1]
if len(tableentry) > 2:
@@ -646,139 +650,20 @@
return configs
-def _earlyparseopts(args):
+def _earlyparseopts(ui, args):
options = {}
fancyopts.fancyopts(args, commands.globalopts, options,
- gnu=False, early=True)
+ gnu=not ui.plain('strictflags'), early=True,
+ optaliases={'repository': ['repo']})
return options
-def _earlygetopt(aliases, args, strip=True):
- """Return list of values for an option (or aliases).
-
- The values are listed in the order they appear in args.
- The options and values are removed from args if strip=True.
-
- >>> args = [b'x', b'--cwd', b'foo', b'y']
- >>> _earlygetopt([b'--cwd'], args), args
- (['foo'], ['x', 'y'])
-
- >>> args = [b'x', b'--cwd=bar', b'y']
- >>> _earlygetopt([b'--cwd'], args), args
- (['bar'], ['x', 'y'])
-
- >>> args = [b'x', b'--cwd=bar', b'y']
- >>> _earlygetopt([b'--cwd'], args, strip=False), args
- (['bar'], ['x', '--cwd=bar', 'y'])
-
- >>> args = [b'x', b'-R', b'foo', b'y']
- >>> _earlygetopt([b'-R'], args), args
- (['foo'], ['x', 'y'])
-
- >>> args = [b'x', b'-R', b'foo', b'y']
- >>> _earlygetopt([b'-R'], args, strip=False), args
- (['foo'], ['x', '-R', 'foo', 'y'])
-
- >>> args = [b'x', b'-Rbar', b'y']
- >>> _earlygetopt([b'-R'], args), args
- (['bar'], ['x', 'y'])
-
- >>> args = [b'x', b'-Rbar', b'y']
- >>> _earlygetopt([b'-R'], args, strip=False), args
- (['bar'], ['x', '-Rbar', 'y'])
-
- >>> args = [b'x', b'-R=bar', b'y']
- >>> _earlygetopt([b'-R'], args), args
- (['=bar'], ['x', 'y'])
-
- >>> args = [b'x', b'-R', b'--', b'y']
- >>> _earlygetopt([b'-R'], args), args
- ([], ['x', '-R', '--', 'y'])
- """
- try:
- argcount = args.index("--")
- except ValueError:
- argcount = len(args)
- shortopts = [opt for opt in aliases if len(opt) == 2]
- values = []
- pos = 0
- while pos < argcount:
- fullarg = arg = args[pos]
- equals = -1
- if arg.startswith('--'):
- equals = arg.find('=')
- if equals > -1:
- arg = arg[:equals]
- if arg in aliases:
- if equals > -1:
- values.append(fullarg[equals + 1:])
- if strip:
- del args[pos]
- argcount -= 1
- else:
- pos += 1
- else:
- if pos + 1 >= argcount:
- # ignore and let getopt report an error if there is no value
- break
- values.append(args[pos + 1])
- if strip:
- del args[pos:pos + 2]
- argcount -= 2
- else:
- pos += 2
- elif arg[:2] in shortopts:
- # short option can have no following space, e.g. hg log -Rfoo
- values.append(args[pos][2:])
- if strip:
- del args[pos]
- argcount -= 1
- else:
- pos += 1
- else:
- pos += 1
- return values
-
-def _earlyreqopt(req, name, aliases):
- """Peek a list option without using a full options table"""
- if req.ui.plain('strictflags'):
- return req.earlyoptions[name]
- values = _earlygetopt(aliases, req.args, strip=False)
- req.earlyoptions[name] = values
- return values
-
-def _earlyreqoptstr(req, name, aliases):
- """Peek a string option without using a full options table"""
- if req.ui.plain('strictflags'):
- return req.earlyoptions[name]
- value = (_earlygetopt(aliases, req.args, strip=False) or [''])[-1]
- req.earlyoptions[name] = value
- return value
-
-def _earlyreqoptbool(req, name, aliases):
- """Peek a boolean option without using a full options table
-
- >>> req = request([b'x', b'--debugger'], uimod.ui())
- >>> _earlyreqoptbool(req, b'debugger', [b'--debugger'])
- True
-
- >>> req = request([b'x', b'--', b'--debugger'], uimod.ui())
- >>> _earlyreqoptbool(req, b'debugger', [b'--debugger'])
- """
- if req.ui.plain('strictflags'):
- return req.earlyoptions[name]
- try:
- argcount = req.args.index("--")
- except ValueError:
- argcount = len(req.args)
- value = None
- pos = 0
- while pos < argcount:
- arg = req.args[pos]
- if arg in aliases:
- value = True
- pos += 1
- req.earlyoptions[name] = value
- return value
+def _earlysplitopts(args):
+ """Split args into a list of possible early options and remainder args"""
+ shortoptions = 'R:'
+ # TODO: perhaps 'debugger' should be included
+ longoptions = ['cwd=', 'repository=', 'repo=', 'config=']
+ return fancyopts.earlygetopt(args, shortoptions, longoptions,
+ gnu=True, keepsep=True)
def runcommand(lui, repo, cmd, fullargs, ui, options, d, cmdpats, cmdoptions):
# run pre-hook, and abort if it fails
@@ -847,8 +732,7 @@
if cmd and util.safehasattr(fn, 'shell'):
# shell alias shouldn't receive early options which are consumed by hg
- args = args[:]
- _earlygetopt(commands.earlyoptflags, args, strip=True)
+ _earlyopts, args = _earlysplitopts(args)
d = lambda: fn(ui, *args[1:])
return lambda: runcommand(lui, None, cmd, args[:1], ui, options, d,
[], {})
@@ -858,11 +742,11 @@
ui = req.ui
# check for cwd
- cwd = _earlyreqoptstr(req, 'cwd', ['--cwd'])
+ cwd = req.earlyoptions['cwd']
if cwd:
os.chdir(cwd)
- rpath = _earlyreqoptstr(req, 'repository', ["-R", "--repository", "--repo"])
+ rpath = req.earlyoptions['repository']
path, lui = _getlocal(ui, rpath)
uis = {ui, lui}
@@ -870,7 +754,7 @@
if req.repo:
uis.add(req.repo.ui)
- if _earlyreqoptbool(req, 'profile', ['--profile']):
+ if req.earlyoptions['profile']:
for ui_ in uis:
ui_.setconfig('profiling', 'enabled', 'true', '--profile')
@@ -1006,10 +890,11 @@
if not func.optionalrepo:
if func.inferrepo and args and not path:
# try to infer -R from command args
- repos = map(cmdutil.findrepo, args)
+ repos = pycompat.maplist(cmdutil.findrepo, args)
guess = repos[0]
if guess and repos.count(guess) == len(repos):
req.args = ['--repository', guess] + fullargs
+ req.earlyoptions['repository'] = guess
return _dispatch(req)
if not path:
raise error.RepoError(_("no repository found in"
diff -r 87676e8ee056 -r 27b6df1b5adb mercurial/error.py
--- a/mercurial/error.py Mon Jan 08 16:07:51 2018 -0800
+++ b/mercurial/error.py Mon Jan 22 17:53:02 2018 -0500
@@ -301,3 +301,7 @@
class PeerTransportError(Abort):
"""Transport-level I/O error when communicating with a peer repo."""
+
+class InMemoryMergeConflictsError(Exception):
+ """Exception raised when merge conflicts arose during an in-memory merge."""
+ __bytes__ = _tobytes
diff -r 87676e8ee056 -r 27b6df1b5adb mercurial/exchange.py
--- a/mercurial/exchange.py Mon Jan 08 16:07:51 2018 -0800
+++ b/mercurial/exchange.py Mon Jan 22 17:53:02 2018 -0500
@@ -13,6 +13,7 @@
from .i18n import _
from .node import (
+ bin,
hex,
nullid,
)
@@ -23,6 +24,7 @@
discovery,
error,
lock as lockmod,
+ logexchange,
obsolete,
phases,
pushkey,
@@ -512,7 +514,11 @@
def _pushdiscoverychangeset(pushop):
"""discover the changeset that need to be pushed"""
fci = discovery.findcommonincoming
- commoninc = fci(pushop.repo, pushop.remote, force=pushop.force)
+ if pushop.revs:
+ commoninc = fci(pushop.repo, pushop.remote, force=pushop.force,
+ ancestorsof=pushop.revs)
+ else:
+ commoninc = fci(pushop.repo, pushop.remote, force=pushop.force)
common, inc, remoteheads = commoninc
fco = discovery.findcommonoutgoing
outgoing = fco(pushop.repo, pushop.remote, onlyheads=pushop.revs,
@@ -742,6 +748,22 @@
or pushop.outobsmarkers
or pushop.outbookmarks)
+@b2partsgenerator('check-bookmarks')
+def _pushb2checkbookmarks(pushop, bundler):
+ """insert bookmark move checking"""
+ if not _pushing(pushop) or pushop.force:
+ return
+ b2caps = bundle2.bundle2caps(pushop.remote)
+ hasbookmarkcheck = 'bookmarks' in b2caps
+ if not (pushop.outbookmarks and hasbookmarkcheck):
+ return
+ data = []
+ for book, old, new in pushop.outbookmarks:
+ old = bin(old)
+ data.append((book, old))
+ checkdata = bookmod.binaryencode(data)
+ bundler.newpart('check:bookmarks', data=checkdata)
+
@b2partsgenerator('check-phases')
def _pushb2checkphases(pushop, bundler):
"""insert phase move checking"""
@@ -879,8 +901,46 @@
if 'bookmarks' in pushop.stepsdone:
return
b2caps = bundle2.bundle2caps(pushop.remote)
- if 'pushkey' not in b2caps:
+
+ legacy = pushop.repo.ui.configlist('devel', 'legacy.exchange')
+ legacybooks = 'bookmarks' in legacy
+
+ if not legacybooks and 'bookmarks' in b2caps:
+ return _pushb2bookmarkspart(pushop, bundler)
+ elif 'pushkey' in b2caps:
+ return _pushb2bookmarkspushkey(pushop, bundler)
+
+def _bmaction(old, new):
+ """small utility for bookmark pushing"""
+ if not old:
+ return 'export'
+ elif not new:
+ return 'delete'
+ return 'update'
+
+def _pushb2bookmarkspart(pushop, bundler):
+ pushop.stepsdone.add('bookmarks')
+ if not pushop.outbookmarks:
return
+
+ allactions = []
+ data = []
+ for book, old, new in pushop.outbookmarks:
+ new = bin(new)
+ data.append((book, new))
+ allactions.append((book, _bmaction(old, new)))
+ checkdata = bookmod.binaryencode(data)
+ bundler.newpart('bookmarks', data=checkdata)
+
+ def handlereply(op):
+ ui = pushop.ui
+ # if success
+ for book, action in allactions:
+ ui.status(bookmsgmap[action][0] % book)
+
+ return handlereply
+
+def _pushb2bookmarkspushkey(pushop, bundler):
pushop.stepsdone.add('bookmarks')
part2book = []
enc = pushkey.encode
@@ -955,7 +1015,8 @@
# create reply capability
capsblob = bundle2.encodecaps(bundle2.getrepocaps(pushop.repo,
- allowpushback=pushback))
+ allowpushback=pushback,
+ role='client'))
bundler.newpart('replycaps', data=capsblob)
replyhandlers = []
for partgenname in b2partsgenorder:
@@ -1273,7 +1334,8 @@
if opargs is None:
opargs = {}
pullop = pulloperation(repo, remote, heads, force, bookmarks=bookmarks,
- streamclonerequested=streamclonerequested, **opargs)
+ streamclonerequested=streamclonerequested,
+ **pycompat.strkwargs(opargs))
peerlocal = pullop.remote.local()
if peerlocal:
@@ -1284,11 +1346,8 @@
" %s") % (', '.join(sorted(missing)))
raise error.Abort(msg)
- wlock = lock = None
- try:
- wlock = pullop.repo.wlock()
- lock = pullop.repo.lock()
- pullop.trmanager = transactionmanager(repo, 'pull', remote.url())
+ pullop.trmanager = transactionmanager(repo, 'pull', remote.url())
+ with repo.wlock(), repo.lock(), pullop.trmanager:
# This should ideally be in _pullbundle2(). However, it needs to run
# before discovery to avoid extra work.
_maybeapplyclonebundle(pullop)
@@ -1300,9 +1359,10 @@
_pullphase(pullop)
_pullbookmarks(pullop)
_pullobsolete(pullop)
- pullop.trmanager.close()
- finally:
- lockmod.release(pullop.trmanager, lock, wlock)
+
+ # storing remotenames
+ if repo.ui.configbool('experimental', 'remotenames'):
+ logexchange.pullremotenames(repo, remote)
return pullop
@@ -1348,7 +1408,8 @@
# all known bundle2 servers now support listkeys, but lets be nice with
# new implementation.
return
- pullop.remotebookmarks = pullop.remote.listkeys('bookmarks')
+ books = pullop.remote.listkeys('bookmarks')
+ pullop.remotebookmarks = bookmod.unhexlifybookmarks(books)
@pulldiscovery('changegroup')
@@ -1388,32 +1449,59 @@
"""pull data using bundle2
For now, the only supported data are changegroup."""
- kwargs = {'bundlecaps': caps20to10(pullop.repo)}
+ kwargs = {'bundlecaps': caps20to10(pullop.repo, role='client')}
+
+ # make ui easier to access
+ ui = pullop.repo.ui
# At the moment we don't do stream clones over bundle2. If that is
# implemented then here's where the check for that will go.
- streaming = False
+ streaming = streamclone.canperformstreamclone(pullop, bundle2=True)[0]
- # pulling changegroup
- pullop.stepsdone.add('changegroup')
-
+ # declare pull perimeters
kwargs['common'] = pullop.common
kwargs['heads'] = pullop.heads or pullop.rheads
- kwargs['cg'] = pullop.fetch
- ui = pullop.repo.ui
- legacyphase = 'phases' in ui.configlist('devel', 'legacy.exchange')
- hasbinaryphase = 'heads' in pullop.remotebundle2caps.get('phases', ())
- if (not legacyphase and hasbinaryphase):
- kwargs['phases'] = True
+ if streaming:
+ kwargs['cg'] = False
+ kwargs['stream'] = True
+ pullop.stepsdone.add('changegroup')
pullop.stepsdone.add('phases')
+ else:
+ # pulling changegroup
+ pullop.stepsdone.add('changegroup')
+
+ kwargs['cg'] = pullop.fetch
+
+ legacyphase = 'phases' in ui.configlist('devel', 'legacy.exchange')
+ hasbinaryphase = 'heads' in pullop.remotebundle2caps.get('phases', ())
+ if (not legacyphase and hasbinaryphase):
+ kwargs['phases'] = True
+ pullop.stepsdone.add('phases')
+
+ if 'listkeys' in pullop.remotebundle2caps:
+ if 'phases' not in pullop.stepsdone:
+ kwargs['listkeys'] = ['phases']
+
+ bookmarksrequested = False
+ legacybookmark = 'bookmarks' in ui.configlist('devel', 'legacy.exchange')
+ hasbinarybook = 'bookmarks' in pullop.remotebundle2caps
+
+ if pullop.remotebookmarks is not None:
+ pullop.stepsdone.add('request-bookmarks')
+
+ if ('request-bookmarks' not in pullop.stepsdone
+ and pullop.remotebookmarks is None
+ and not legacybookmark and hasbinarybook):
+ kwargs['bookmarks'] = True
+ bookmarksrequested = True
+
if 'listkeys' in pullop.remotebundle2caps:
- if 'phases' not in pullop.stepsdone:
- kwargs['listkeys'] = ['phases']
- if pullop.remotebookmarks is None:
+ if 'request-bookmarks' not in pullop.stepsdone:
# make sure to always includes bookmark data when migrating
# `hg incoming --bundle` to using this function.
+ pullop.stepsdone.add('request-bookmarks')
kwargs.setdefault('listkeys', []).append('bookmarks')
# If this is a full pull / clone and the server supports the clone bundles
@@ -1441,7 +1529,9 @@
_pullbundle2extraprepare(pullop, kwargs)
bundle = pullop.remote.getbundle('pull', **pycompat.strkwargs(kwargs))
try:
- op = bundle2.processbundle(pullop.repo, bundle, pullop.gettransaction)
+ op = bundle2.bundleoperation(pullop.repo, pullop.gettransaction)
+ op.modes['bookmarks'] = 'records'
+ bundle2.processbundle(pullop.repo, bundle, op=op)
except bundle2.AbortFromPart as exc:
pullop.repo.ui.status(_('remote: abort: %s\n') % exc)
raise error.Abort(_('pull failed on remote'), hint=exc.hint)
@@ -1457,9 +1547,15 @@
_pullapplyphases(pullop, value)
# processing bookmark update
- for namespace, value in op.records['listkeys']:
- if namespace == 'bookmarks':
- pullop.remotebookmarks = value
+ if bookmarksrequested:
+ books = {}
+ for record in op.records['bookmarks']:
+ books[record['bookmark']] = record["node"]
+ pullop.remotebookmarks = books
+ else:
+ for namespace, value in op.records['listkeys']:
+ if namespace == 'bookmarks':
+ pullop.remotebookmarks = bookmod.unhexlifybookmarks(value)
# bookmark data were either already there or pulled in the bundle
if pullop.remotebookmarks is not None:
@@ -1552,7 +1648,6 @@
pullop.stepsdone.add('bookmarks')
repo = pullop.repo
remotebookmarks = pullop.remotebookmarks
- remotebookmarks = bookmod.unhexlifybookmarks(remotebookmarks)
bookmod.updatefromremote(repo.ui, repo, remotebookmarks,
pullop.remote.url(),
pullop.gettransaction,
@@ -1586,10 +1681,10 @@
pullop.repo.invalidatevolatilesets()
return tr
-def caps20to10(repo):
+def caps20to10(repo, role):
"""return a set with appropriate options to use bundle20 during getbundle"""
caps = {'HG20'}
- capsblob = bundle2.encodecaps(bundle2.getrepocaps(repo))
+ capsblob = bundle2.encodecaps(bundle2.getrepocaps(repo, role=role))
caps.add('bundle2=' + urlreq.quote(capsblob))
return caps
@@ -1632,9 +1727,11 @@
Could be a bundle HG10 or a bundle HG20 depending on bundlecaps
passed.
- Returns an iterator over raw chunks (of varying sizes).
+ Returns a 2-tuple of a dict with metadata about the generated bundle
+ and an iterator over raw chunks (of varying sizes).
"""
kwargs = pycompat.byteskwargs(kwargs)
+ info = {}
usebundle2 = bundle2requested(bundlecaps)
# bundle10 case
if not usebundle2:
@@ -1645,10 +1742,12 @@
raise ValueError(_('unsupported getbundle arguments: %s')
% ', '.join(sorted(kwargs.keys())))
outgoing = _computeoutgoing(repo, heads, common)
- return changegroup.makestream(repo, outgoing, '01', source,
- bundlecaps=bundlecaps)
+ info['bundleversion'] = 1
+ return info, changegroup.makestream(repo, outgoing, '01', source,
+ bundlecaps=bundlecaps)
# bundle20 case
+ info['bundleversion'] = 2
b2caps = {}
for bcaps in bundlecaps:
if bcaps.startswith('bundle2='):
@@ -1664,14 +1763,41 @@
func(bundler, repo, source, bundlecaps=bundlecaps, b2caps=b2caps,
**pycompat.strkwargs(kwargs))
- return bundler.getchunks()
+ info['prefercompressed'] = bundler.prefercompressed
+
+ return info, bundler.getchunks()
+
+@getbundle2partsgenerator('stream2')
+def _getbundlestream2(bundler, repo, source, bundlecaps=None,
+ b2caps=None, heads=None, common=None, **kwargs):
+ if not kwargs.get('stream', False):
+ return
+
+ if not streamclone.allowservergeneration(repo):
+ raise error.Abort(_('stream data requested but server does not allow '
+ 'this feature'),
+ hint=_('well-behaved clients should not be '
+ 'requesting stream data from servers not '
+ 'advertising it; the client may be buggy'))
+
+ # Stream clones don't compress well. And compression undermines a
+ # goal of stream clones, which is to be fast. Communicate the desire
+ # to avoid compression to consumers of the bundle.
+ bundler.prefercompressed = False
+
+ filecount, bytecount, it = streamclone.generatev2(repo)
+ requirements = ' '.join(sorted(repo.requirements))
+ part = bundler.newpart('stream2', data=it)
+ part.addparam('bytecount', '%d' % bytecount, mandatory=True)
+ part.addparam('filecount', '%d' % filecount, mandatory=True)
+ part.addparam('requirements', requirements, mandatory=True)
@getbundle2partsgenerator('changegroup')
def _getbundlechangegrouppart(bundler, repo, source, bundlecaps=None,
b2caps=None, heads=None, common=None, **kwargs):
"""add a changegroup part to the requested bundle"""
cgstream = None
- if kwargs.get('cg', True):
+ if kwargs.get(r'cg', True):
# build changegroup bundle here.
version = '01'
cgversions = b2caps.get('changegroup')
@@ -1695,11 +1821,24 @@
if 'treemanifest' in repo.requirements:
part.addparam('treemanifest', '1')
+@getbundle2partsgenerator('bookmarks')
+def _getbundlebookmarkpart(bundler, repo, source, bundlecaps=None,
+ b2caps=None, **kwargs):
+ """add a bookmark part to the requested bundle"""
+ if not kwargs.get(r'bookmarks', False):
+ return
+ if 'bookmarks' not in b2caps:
+ raise ValueError(_('no common bookmarks exchange method'))
+ books = bookmod.listbinbookmarks(repo)
+ data = bookmod.binaryencode(books)
+ if data:
+ bundler.newpart('bookmarks', data=data)
+
@getbundle2partsgenerator('listkeys')
def _getbundlelistkeysparts(bundler, repo, source, bundlecaps=None,
b2caps=None, **kwargs):
"""add parts containing listkeys namespaces to the requested bundle"""
- listkeys = kwargs.get('listkeys', ())
+ listkeys = kwargs.get(r'listkeys', ())
for namespace in listkeys:
part = bundler.newpart('listkeys')
part.addparam('namespace', namespace)
@@ -1710,7 +1849,7 @@
def _getbundleobsmarkerpart(bundler, repo, source, bundlecaps=None,
b2caps=None, heads=None, **kwargs):
"""add an obsolescence markers part to the requested bundle"""
- if kwargs.get('obsmarkers', False):
+ if kwargs.get(r'obsmarkers', False):
if heads is None:
heads = repo.heads()
subset = [c.node() for c in repo.set('::%ln', heads)]
@@ -1722,7 +1861,7 @@
def _getbundlephasespart(bundler, repo, source, bundlecaps=None,
b2caps=None, heads=None, **kwargs):
"""add phase heads part to the requested bundle"""
- if kwargs.get('phases', False):
+ if kwargs.get(r'phases', False):
if not 'heads' in b2caps.get('phases'):
raise ValueError(_('no common phases exchange method'))
if heads is None:
@@ -1779,23 +1918,12 @@
# Don't send unless:
# - changeset are being exchanged,
# - the client supports it.
- if not (kwargs.get('cg', True) and 'hgtagsfnodes' in b2caps):
+ if not (kwargs.get(r'cg', True) and 'hgtagsfnodes' in b2caps):
return
outgoing = _computeoutgoing(repo, heads, common)
bundle2.addparttagsfnodescache(repo, bundler, outgoing)
-def _getbookmarks(repo, **kwargs):
- """Returns bookmark to node mapping.
-
- This function is primarily used to generate `bookmarks` bundle2 part.
- It is a separate function in order to make it easy to wrap it
- in extensions. Passing `kwargs` to the function makes it easy to
- add new parameters in extensions.
- """
-
- return dict(bookmod.listbinbookmarks(repo))
-
def check_heads(repo, their_heads, context):
"""check if the heads of a repo have been modified
diff -r 87676e8ee056 -r 27b6df1b5adb mercurial/fancyopts.py
--- a/mercurial/fancyopts.py Mon Jan 08 16:07:51 2018 -0800
+++ b/mercurial/fancyopts.py Mon Jan 22 17:53:02 2018 -0500
@@ -119,7 +119,7 @@
>>> get([b'--cwd=foo', b'x', b'y', b'-R', b'bar', b'--debugger'], gnu=False)
([('--cwd', 'foo')], ['x', 'y', '-R', 'bar', '--debugger'])
>>> get([b'--unknown', b'--cwd=foo', b'--', '--debugger'], gnu=False)
- ([], ['--unknown', '--cwd=foo', '--debugger'])
+ ([], ['--unknown', '--cwd=foo', '--', '--debugger'])
stripping early options (without loosing '--'):
@@ -141,6 +141,13 @@
>>> get([b'-q', b'--'])
([('-q', '')], [])
+ '--' may be a value:
+
+ >>> get([b'-R', b'--', b'x'])
+ ([('-R', '--')], ['x'])
+ >>> get([b'--cwd', b'--', b'x'])
+ ([('--cwd', '--')], ['x'])
+
value passed to bool options:
>>> get([b'--debugger=foo', b'x'])
@@ -163,20 +170,16 @@
>>> get([b'-', b'y'])
([], ['-', 'y'])
"""
- # ignoring everything just after '--' isn't correct as '--' may be an
- # option value (e.g. ['-R', '--']), but we do that consistently.
- try:
- argcount = args.index('--')
- except ValueError:
- argcount = len(args)
-
parsedopts = []
parsedargs = []
pos = 0
- while pos < argcount:
+ while pos < len(args):
arg = args[pos]
+ if arg == '--':
+ pos += not keepsep
+ break
flag, hasval, val, takeval = _earlyoptarg(arg, shortlist, namelist)
- if not hasval and takeval and pos + 1 >= argcount:
+ if not hasval and takeval and pos + 1 >= len(args):
# missing last argument
break
if not flag or hasval and not takeval:
@@ -195,38 +198,10 @@
parsedopts.append((flag, args[pos + 1]))
pos += 2
- parsedargs.extend(args[pos:argcount])
- parsedargs.extend(args[argcount + (not keepsep):])
+ parsedargs.extend(args[pos:])
return parsedopts, parsedargs
-def gnugetopt(args, options, longoptions):
- """Parse options mostly like getopt.gnu_getopt.
-
- This is different from getopt.gnu_getopt in that an argument of - will
- become an argument of - instead of vanishing completely.
- """
- extraargs = []
- if '--' in args:
- stopindex = args.index('--')
- extraargs = args[stopindex + 1:]
- args = args[:stopindex]
- opts, parseargs = pycompat.getoptb(args, options, longoptions)
- args = []
- while parseargs:
- arg = parseargs.pop(0)
- if arg and arg[0:1] == '-' and len(arg) > 1:
- parseargs.insert(0, arg)
- topts, newparseargs = pycompat.getoptb(parseargs,\
- options, longoptions)
- opts = opts + topts
- parseargs = newparseargs
- else:
- args.append(arg)
- args.extend(extraargs)
- return opts, args
-
-
-def fancyopts(args, options, state, gnu=False, early=False):
+def fancyopts(args, options, state, gnu=False, early=False, optaliases=None):
"""
read args, parse options, and store options in state
@@ -246,8 +221,15 @@
integer - parameter strings is stored as int
function - call function with parameter
+ optaliases is a mapping from a canonical option name to a list of
+ additional long options. This exists for preserving backward compatibility
+ of early options. If we want to use it extensively, please consider moving
+ the functionality to the options table (e.g. separate long options by '|'.)
+
non-option args are returned
"""
+ if optaliases is None:
+ optaliases = {}
namelist = []
shortlist = ''
argmap = {}
@@ -261,10 +243,13 @@
else:
short, name, default, comment = option
# convert opts to getopt format
- oname = name
+ onames = [name]
+ onames.extend(optaliases.get(name, []))
name = name.replace('-', '_')
- argmap['-' + short] = argmap['--' + oname] = name
+ argmap['-' + short] = name
+ for n in onames:
+ argmap['--' + n] = name
defmap[name] = default
# copy defaults to state
@@ -279,30 +264,30 @@
if not (default is None or default is True or default is False):
if short:
short += ':'
- if oname:
- oname += '='
- elif oname not in nevernegate:
- if oname.startswith('no-'):
- insert = oname[3:]
- else:
- insert = 'no-' + oname
- # backout (as a practical example) has both --commit and
- # --no-commit options, so we don't want to allow the
- # negations of those flags.
- if insert not in alllong:
- assert ('--' + oname) not in negations
- negations['--' + insert] = '--' + oname
- namelist.append(insert)
+ onames = [n + '=' for n in onames]
+ elif name not in nevernegate:
+ for n in onames:
+ if n.startswith('no-'):
+ insert = n[3:]
+ else:
+ insert = 'no-' + n
+ # backout (as a practical example) has both --commit and
+ # --no-commit options, so we don't want to allow the
+ # negations of those flags.
+ if insert not in alllong:
+ assert ('--' + n) not in negations
+ negations['--' + insert] = '--' + n
+ namelist.append(insert)
if short:
shortlist += short
if name:
- namelist.append(oname)
+ namelist.extend(onames)
# parse arguments
if early:
parse = functools.partial(earlygetopt, gnu=gnu)
elif gnu:
- parse = gnugetopt
+ parse = pycompat.gnugetoptb
else:
parse = pycompat.getoptb
opts, args = parse(args, shortlist, namelist)
diff -r 87676e8ee056 -r 27b6df1b5adb mercurial/filelog.py
--- a/mercurial/filelog.py Mon Jan 08 16:07:51 2018 -0800
+++ b/mercurial/filelog.py Mon Jan 22 17:53:02 2018 -0500
@@ -43,6 +43,8 @@
def __init__(self, opener, path):
super(filelog, self).__init__(opener,
"/".join(("data", path + ".i")))
+ # full name of the user visible file, relative to the repository root
+ self.filename = path
def read(self, node):
t = self.revision(node)
diff -r 87676e8ee056 -r 27b6df1b5adb mercurial/filemerge.py
--- a/mercurial/filemerge.py Mon Jan 08 16:07:51 2018 -0800
+++ b/mercurial/filemerge.py Mon Jan 22 17:53:02 2018 -0500
@@ -241,6 +241,12 @@
ui = repo.ui
fd = fcd.path()
+ # Avoid prompting during an in-memory merge since it doesn't support merge
+ # conflicts.
+ if fcd.changectx().isinmemory():
+ raise error.InMemoryMergeConflictsError('in-memory merge does not '
+ 'support file conflicts')
+
prompts = partextras(labels)
prompts['fd'] = fd
try:
@@ -465,11 +471,10 @@
a = _workingpath(repo, fcd)
fd = fcd.path()
- # Run ``flushall()`` to make any missing folders the following wwrite
- # calls might be depending on.
from . import context
if isinstance(fcd, context.overlayworkingfilectx):
- fcd.ctx().flushall()
+ raise error.InMemoryMergeConflictsError('in-memory merge does not '
+ 'support the :dump tool.')
util.writefile(a + ".local", fcd.decodeddata())
repo.wwrite(fd + ".other", fco.data(), fco.flags())
@@ -485,6 +490,18 @@
return _idump(repo, mynode, orig, fcd, fco, fca, toolconf, files,
labels=labels)
+def _xmergeimm(repo, mynode, orig, fcd, fco, fca, toolconf, files, labels=None):
+ # In-memory merge simply raises an exception on all external merge tools,
+ # for now.
+ #
+ # It would be possible to run most tools with temporary files, but this
+ # raises the question of what to do if the user only partially resolves the
+ # file -- we can't leave a merge state. (Copy to somewhere in the .hg/
+ # directory and tell the user how to get it is my best idea, but it's
+ # clunky.)
+ raise error.InMemoryMergeConflictsError('in-memory merge does not support '
+ 'external merge tools')
+
def _xmerge(repo, mynode, orig, fcd, fco, fca, toolconf, files, labels=None):
tool, toolpath, binary, symlink = toolconf
if fcd.isabsent() or fco.isabsent():
@@ -526,7 +543,7 @@
util.unlink(b)
util.unlink(c)
-def _formatconflictmarker(repo, ctx, template, label, pad):
+def _formatconflictmarker(ctx, template, label, pad):
"""Applies the given template to the ctx, prefixed by the label.
Pad is the minimum width of the label prefix, so that multiple markers
@@ -535,10 +552,7 @@
if ctx.node() is None:
ctx = ctx.p1()
- props = templatekw.keywords.copy()
- props['templ'] = template
- props['ctx'] = ctx
- props['repo'] = repo
+ props = {'ctx': ctx}
templateresult = template.render(props)
label = ('%s:' % label).ljust(pad + 1)
@@ -564,14 +578,16 @@
ui = repo.ui
template = ui.config('ui', 'mergemarkertemplate')
template = templater.unquotestring(template)
- tmpl = formatter.maketemplater(ui, template)
+ tres = formatter.templateresources(ui, repo)
+ tmpl = formatter.maketemplater(ui, template, defaults=templatekw.keywords,
+ resources=tres)
pad = max(len(l) for l in labels)
- newlabels = [_formatconflictmarker(repo, cd, tmpl, labels[0], pad),
- _formatconflictmarker(repo, co, tmpl, labels[1], pad)]
+ newlabels = [_formatconflictmarker(cd, tmpl, labels[0], pad),
+ _formatconflictmarker(co, tmpl, labels[1], pad)]
if len(labels) > 2:
- newlabels.append(_formatconflictmarker(repo, ca, tmpl, labels[2], pad))
+ newlabels.append(_formatconflictmarker(ca, tmpl, labels[2], pad))
return newlabels
def partextras(labels):
@@ -602,6 +618,9 @@
(if any), the backup is used to undo certain premerges, confirm whether a
merge changed anything, and determine what line endings the new file should
have.
+
+ Backups only need to be written once (right before the premerge) since their
+ content doesn't change afterwards.
"""
if fcd.isabsent():
return None
@@ -612,21 +631,26 @@
back = scmutil.origpath(ui, repo, a)
inworkingdir = (back.startswith(repo.wvfs.base) and not
back.startswith(repo.vfs.base))
-
if isinstance(fcd, context.overlayworkingfilectx) and inworkingdir:
# If the backup file is to be in the working directory, and we're
# merging in-memory, we must redirect the backup to the memory context
# so we don't disturb the working directory.
relpath = back[len(repo.wvfs.base) + 1:]
- wctx[relpath].write(fcd.data(), fcd.flags())
+ if premerge:
+ wctx[relpath].write(fcd.data(), fcd.flags())
return wctx[relpath]
else:
- # Otherwise, write to wherever the user specified the backups should go.
- #
+ if premerge:
+ # Otherwise, write to whatever path the user specified the backups
+ # should go. We still need to switch based on whether the source is
+ # in-memory so we can use the fast path of ``util.copy`` if both are
+ # on disk.
+ if isinstance(fcd, context.overlayworkingfilectx):
+ util.writefile(back, fcd.data())
+ else:
+ util.copyfile(a, back)
# A arbitraryfilectx is returned, so we can run the same functions on
# the backup context regardless of where it lives.
- if premerge:
- util.copyfile(a, back)
return context.arbitraryfilectx(back, repo=repo)
def _maketempfiles(repo, fco, fca):
@@ -683,16 +707,14 @@
onfailure = func.onfailure
precheck = func.precheck
else:
- func = _xmerge
+ if wctx.isinmemory():
+ func = _xmergeimm
+ else:
+ func = _xmerge
mergetype = fullmerge
onfailure = _("merging %s failed!\n")
precheck = None
- # If using deferred writes, must flush any deferred contents if running
- # an external merge tool since it has arbitrary access to the working
- # copy.
- wctx.flushall()
-
toolconf = tool, toolpath, binary, symlink
if mergetype == nomerge:
@@ -710,6 +732,10 @@
if precheck and not precheck(repo, mynode, orig, fcd, fco, fca,
toolconf):
if onfailure:
+ if wctx.isinmemory():
+ raise error.InMemoryMergeConflictsError('in-memory merge does '
+ 'not support merge '
+ 'conflicts')
ui.warn(onfailure % fd)
return True, 1, False
@@ -736,6 +762,10 @@
if r:
if onfailure:
+ if wctx.isinmemory():
+ raise error.InMemoryMergeConflictsError('in-memory merge '
+ 'does not support '
+ 'merge conflicts')
ui.warn(onfailure % fd)
_onfilemergefailure(ui)
diff -r 87676e8ee056 -r 27b6df1b5adb mercurial/fileset.py
--- a/mercurial/fileset.py Mon Jan 08 16:07:51 2018 -0800
+++ b/mercurial/fileset.py Mon Jan 22 17:53:02 2018 -0500
@@ -12,6 +12,7 @@
from .i18n import _
from . import (
error,
+ match as matchmod,
merge,
parser,
pycompat,
@@ -23,6 +24,7 @@
elements = {
# token-type: binding-strength, primary, prefix, infix, suffix
"(": (20, None, ("group", 1, ")"), ("func", 1, ")"), None),
+ ":": (15, None, None, ("kindpat", 15), None),
"-": (5, None, ("negate", 19), ("minus", 5), None),
"not": (10, None, ("not", 10), None, None),
"!": (10, None, ("not", 10), None, None),
@@ -49,7 +51,7 @@
c = program[pos]
if c.isspace(): # skip inter-token whitespace
pass
- elif c in "(),-|&+!": # handle simple operators
+ elif c in "(),-:|&+!": # handle simple operators
yield (c, None, pos)
elif (c in '"\'' or c == 'r' and
program[pos:pos + 2] in ("r'", 'r"')): # handle quoted strings
@@ -99,11 +101,28 @@
raise error.ParseError(_("invalid token"), pos)
return tree
+def getsymbol(x):
+ if x and x[0] == 'symbol':
+ return x[1]
+ raise error.ParseError(_('not a symbol'))
+
def getstring(x, err):
if x and (x[0] == 'string' or x[0] == 'symbol'):
return x[1]
raise error.ParseError(err)
+def _getkindpat(x, y, allkinds, err):
+ kind = getsymbol(x)
+ pat = getstring(y, err)
+ if kind not in allkinds:
+ raise error.ParseError(_("invalid pattern kind: %s") % kind)
+ return '%s:%s' % (kind, pat)
+
+def getpattern(x, allkinds, err):
+ if x and x[0] == 'kindpat':
+ return _getkindpat(x[1], x[2], allkinds, err)
+ return getstring(x, err)
+
def getset(mctx, x):
if not x:
raise error.ParseError(_("missing argument"))
@@ -113,6 +132,10 @@
m = mctx.matcher([x])
return [f for f in mctx.subset if m(f)]
+def kindpatset(mctx, x, y):
+ return stringset(mctx, _getkindpat(x, y, matchmod.allpatternkinds,
+ _("pattern must be a string")))
+
def andset(mctx, x, y):
return getset(mctx.narrow(getset(mctx, x)), y)
@@ -131,6 +154,9 @@
yl = set(getset(mctx, y))
return [f for f in xl if f not in yl]
+def negateset(mctx, x):
+ raise error.ParseError(_("can't use negate operator in this context"))
+
def listset(mctx, a, b):
raise error.ParseError(_("can't use a list in this context"),
hint=_('see hg help "filesets.x or y"'))
@@ -225,8 +251,8 @@
return [f for f in mctx.subset if f in s]
def func(mctx, a, b):
- if a[0] == 'symbol' and a[1] in symbols:
- funcname = a[1]
+ funcname = getsymbol(a)
+ if funcname in symbols:
enabled = mctx._existingenabled
mctx._existingenabled = funcname in _existingcallers
try:
@@ -237,7 +263,7 @@
keep = lambda fn: getattr(fn, '__doc__', None) is not None
syms = [s for (s, fn) in symbols.items() if keep(fn)]
- raise error.UnknownIdentifier(a[1], syms)
+ raise error.UnknownIdentifier(funcname, syms)
def getlist(x):
if not x:
@@ -344,6 +370,34 @@
except ValueError:
raise error.ParseError(_("couldn't parse size: %s") % s)
+def sizematcher(x):
+ """Return a function(size) -> bool from the ``size()`` expression"""
+
+ # i18n: "size" is a keyword
+ expr = getstring(x, _("size requires an expression")).strip()
+ if '-' in expr: # do we have a range?
+ a, b = expr.split('-', 1)
+ a = util.sizetoint(a)
+ b = util.sizetoint(b)
+ return lambda x: x >= a and x <= b
+ elif expr.startswith("<="):
+ a = util.sizetoint(expr[2:])
+ return lambda x: x <= a
+ elif expr.startswith("<"):
+ a = util.sizetoint(expr[1:])
+ return lambda x: x < a
+ elif expr.startswith(">="):
+ a = util.sizetoint(expr[2:])
+ return lambda x: x >= a
+ elif expr.startswith(">"):
+ a = util.sizetoint(expr[1:])
+ return lambda x: x > a
+ elif expr[0:1].isdigit() or expr[0] == '.':
+ a = util.sizetoint(expr)
+ b = _sizetomax(expr)
+ return lambda x: x >= a and x <= b
+ raise error.ParseError(_("couldn't parse size: %s") % expr)
+
@predicate('size(expression)', callexisting=True)
def size(mctx, x):
"""File size matches the given expression. Examples:
@@ -353,33 +407,7 @@
- size('>= .5MB') - files at least 524288 bytes
- size('4k - 1MB') - files from 4096 bytes to 1048576 bytes
"""
-
- # i18n: "size" is a keyword
- expr = getstring(x, _("size requires an expression")).strip()
- if '-' in expr: # do we have a range?
- a, b = expr.split('-', 1)
- a = util.sizetoint(a)
- b = util.sizetoint(b)
- m = lambda x: x >= a and x <= b
- elif expr.startswith("<="):
- a = util.sizetoint(expr[2:])
- m = lambda x: x <= a
- elif expr.startswith("<"):
- a = util.sizetoint(expr[1:])
- m = lambda x: x < a
- elif expr.startswith(">="):
- a = util.sizetoint(expr[2:])
- m = lambda x: x >= a
- elif expr.startswith(">"):
- a = util.sizetoint(expr[1:])
- m = lambda x: x > a
- elif expr[0].isdigit or expr[0] == '.':
- a = util.sizetoint(expr)
- b = _sizetomax(expr)
- m = lambda x: x >= a and x <= b
- else:
- raise error.ParseError(_("couldn't parse size: %s") % expr)
-
+ m = sizematcher(x)
return [f for f in mctx.existing() if m(mctx.ctx[f].size())]
@predicate('encoding(name)', callexisting=True)
@@ -496,10 +524,9 @@
ctx = mctx.ctx
sstate = sorted(ctx.substate)
if x:
- # i18n: "subrepo" is a keyword
- pat = getstring(x, _("subrepo requires a pattern or no arguments"))
-
- from . import match as matchmod # avoid circular import issues
+ pat = getpattern(x, matchmod.allpatternkinds,
+ # i18n: "subrepo" is a keyword
+ _("subrepo requires a pattern or no arguments"))
fast = not matchmod.patkind(pat)
if fast:
def m(s):
@@ -513,9 +540,11 @@
methods = {
'string': stringset,
'symbol': stringset,
+ 'kindpat': kindpatset,
'and': andset,
'or': orset,
'minus': minusset,
+ 'negate': negateset,
'list': listset,
'group': getset,
'not': notset,
diff -r 87676e8ee056 -r 27b6df1b5adb mercurial/formatter.py
--- a/mercurial/formatter.py Mon Jan 08 16:07:51 2018 -0800
+++ b/mercurial/formatter.py Mon Jan 22 17:53:02 2018 -0500
@@ -94,14 +94,14 @@
>>> def subrepos(ui, fm):
... fm.startitem()
-... fm.write(b'repo', b'[%s]\\n', b'baz')
+... fm.write(b'reponame', b'[%s]\\n', b'baz')
... files(ui, fm.nested(b'files'))
... fm.end()
>>> show(subrepos)
[baz]
foo
bar
->>> show(subrepos, template=b'{repo}: {join(files % "{path}", ", ")}\\n')
+>>> show(subrepos, template=b'{reponame}: {join(files % "{path}", ", ")}\\n')
baz: foo, bar
"""
@@ -363,11 +363,12 @@
self._out = out
spec = lookuptemplate(ui, topic, opts.get('template', ''))
self._tref = spec.ref
- self._t = loadtemplater(ui, spec, cache=templatekw.defaulttempl)
+ self._t = loadtemplater(ui, spec, defaults=templatekw.keywords,
+ resources=templateresources(ui),
+ cache=templatekw.defaulttempl)
self._parts = templatepartsmap(spec, self._t,
['docheader', 'docfooter', 'separator'])
self._counter = itertools.count()
- self._cache = {} # for templatekw/funcs to store reusable data
self._renderitem('docheader', {})
def _showitem(self):
@@ -386,17 +387,14 @@
# function will have to declare dependent resources. e.g.
# @templatekeyword(..., requires=('ctx',))
props = {}
- if 'ctx' in item:
- props.update(templatekw.keywords)
# explicitly-defined fields precede templatekw
props.update(item)
if 'ctx' in item:
# but template resources must be always available
- props['templ'] = self._t
props['repo'] = props['ctx'].repo()
props['revcache'] = {}
props = pycompat.strkwargs(props)
- g = self._t(ref, ui=self._ui, cache=self._cache, **props)
+ g = self._t(ref, **props)
self._out.write(templater.stringify(g))
def end(self):
@@ -468,24 +466,39 @@
partsmap[part] = ref
return partsmap
-def loadtemplater(ui, spec, cache=None):
+def loadtemplater(ui, spec, defaults=None, resources=None, cache=None):
"""Create a templater from either a literal template or loading from
a map file"""
assert not (spec.tmpl and spec.mapfile)
if spec.mapfile:
- return templater.templater.frommapfile(spec.mapfile, cache=cache)
- return maketemplater(ui, spec.tmpl, cache=cache)
+ frommapfile = templater.templater.frommapfile
+ return frommapfile(spec.mapfile, defaults=defaults, resources=resources,
+ cache=cache)
+ return maketemplater(ui, spec.tmpl, defaults=defaults, resources=resources,
+ cache=cache)
-def maketemplater(ui, tmpl, cache=None):
+def maketemplater(ui, tmpl, defaults=None, resources=None, cache=None):
"""Create a templater from a string template 'tmpl'"""
aliases = ui.configitems('templatealias')
- t = templater.templater(cache=cache, aliases=aliases)
+ t = templater.templater(defaults=defaults, resources=resources,
+ cache=cache, aliases=aliases)
t.cache.update((k, templater.unquotestring(v))
for k, v in ui.configitems('templates'))
if tmpl:
t.cache[''] = tmpl
return t
+def templateresources(ui, repo=None):
+ """Create a dict of template resources designed for the default templatekw
+ and function"""
+ return {
+ 'cache': {}, # for templatekw/funcs to store reusable data
+ 'ctx': None,
+ 'repo': repo,
+ 'revcache': None, # per-ctx cache; set later
+ 'ui': ui,
+ }
+
def formatter(ui, out, topic, opts):
template = opts.get("template", "")
if template == "json":
diff -r 87676e8ee056 -r 27b6df1b5adb mercurial/graphmod.py
--- a/mercurial/graphmod.py Mon Jan 08 16:07:51 2018 -0800
+++ b/mercurial/graphmod.py Mon Jan 22 17:53:02 2018 -0500
@@ -48,9 +48,6 @@
returned.
"""
- if not revs:
- return
-
gpcache = {}
for rev in revs:
diff -r 87676e8ee056 -r 27b6df1b5adb mercurial/hbisect.py
--- a/mercurial/hbisect.py Mon Jan 08 16:07:51 2018 -0800
+++ b/mercurial/hbisect.py Mon Jan 22 17:53:02 2018 -0500
@@ -21,7 +21,7 @@
error,
)
-def bisect(changelog, state):
+def bisect(repo, state):
"""find the next node (if any) for testing during a bisect search.
returns a (nodes, number, good) tuple.
@@ -32,33 +32,15 @@
if searching for a first bad one.
"""
+ changelog = repo.changelog
clparents = changelog.parentrevs
skip = set([changelog.rev(n) for n in state['skip']])
def buildancestors(bad, good):
- # only the earliest bad revision matters
badrev = min([changelog.rev(n) for n in bad])
- goodrevs = [changelog.rev(n) for n in good]
- goodrev = min(goodrevs)
- # build visit array
- ancestors = [None] * (len(changelog) + 1) # an extra for [-1]
-
- # set nodes descended from goodrevs
- for rev in goodrevs:
+ ancestors = collections.defaultdict(lambda: None)
+ for rev in repo.revs("descendants(%ln) - ancestors(%ln)", good, good):
ancestors[rev] = []
- for rev in changelog.revs(goodrev + 1):
- for prev in clparents(rev):
- if ancestors[prev] == []:
- ancestors[rev] = []
-
- # clear good revs from array
- for rev in goodrevs:
- ancestors[rev] = None
- for rev in changelog.revs(len(changelog), goodrev):
- if ancestors[rev] is None:
- for prev in clparents(rev):
- ancestors[prev] = None
-
if ancestors[badrev] is None:
return badrev, None
return badrev, ancestors
diff -r 87676e8ee056 -r 27b6df1b5adb mercurial/help.py
--- a/mercurial/help.py Mon Jan 08 16:07:51 2018 -0800
+++ b/mercurial/help.py Mon Jan 22 17:53:02 2018 -0500
@@ -226,6 +226,7 @@
(['color'], _("Colorizing Outputs"), loaddoc('color')),
(["config", "hgrc"], _("Configuration Files"), loaddoc('config')),
(["dates"], _("Date Formats"), loaddoc('dates')),
+ (["flags"], _("Command-line flags"), loaddoc('flags')),
(["patterns"], _("File Name Patterns"), loaddoc('patterns')),
(['environment', 'env'], _('Environment Variables'),
loaddoc('environment')),
@@ -452,7 +453,7 @@
rst.append(' :%s: %s\n' % (f, h[f]))
ex = opts.get
- anyopts = (ex('keyword') or not (ex('command') or ex('extension')))
+ anyopts = (ex(r'keyword') or not (ex(r'command') or ex(r'extension')))
if not name and anyopts:
exts = listexts(_('enabled extensions:'), extensions.enabled())
if exts:
diff -r 87676e8ee056 -r 27b6df1b5adb mercurial/help/config.txt
--- a/mercurial/help/config.txt Mon Jan 08 16:07:51 2018 -0800
+++ b/mercurial/help/config.txt Mon Jan 22 17:53:02 2018 -0500
@@ -1723,6 +1723,14 @@
Controls generic server settings.
+``bookmarks-pushkey-compat``
+ Trigger the pushkey hook when bookmark updates are pushed. This config
+ exists for compatibility purposes (default: True)
+
+ If you use ``pushkey`` and ``pre-pushkey`` hooks to control bookmark
+ movement we recommend you migrate them to ``txnclose-bookmark`` and
+ ``pretxnclose-bookmark``.
+
``compressionengines``
List of compression engines and their relative priority to advertise
to clients.
@@ -2176,6 +2184,8 @@
(default: True)
``slash``
+ (Deprecated. Use ``slashpath`` template filter instead.)
+
Display paths using a slash (``/``) as the path separator. This
only makes a difference on systems where the default path
separator is not the slash character (e.g. Windows uses the
@@ -2188,6 +2198,10 @@
``ssh``
Command to use for SSH connections. (default: ``ssh``)
+``ssherrorhint``
+ A hint shown to the user in the case of an SSH error (e.g.
+ ``Please see http://company/internalwiki/ssh.html``)
+
``strict``
Require exact command names, instead of allowing unambiguous
abbreviations. (default: False)
@@ -2211,6 +2225,10 @@
The timeout used when a lock is held (in seconds), a negative value
means no timeout. (default: 600)
+``timeout.warn``
+ Time (in seconds) before a warning is printed about a held lock. A
+ negative value means no warning. (default: 0)
+
``traceback``
Mercurial always prints a traceback when an unknown exception
occurs. Setting this to True will make Mercurial print a traceback
@@ -2260,7 +2278,7 @@
you want it to accept pushes from anybody, you can use the following
command line::
- $ hg --config web.allow_push=* --config web.push_ssl=False serve
+ $ hg --config web.allow-push=* --config web.push_ssl=False serve
Note that this will allow anybody to push anything to the server and
that this should not be used for public servers.
@@ -2287,16 +2305,16 @@
revisions.
(default: False)
-``allowpull``
+``allow-pull``
Whether to allow pulling from the repository. (default: True)
-``allow_push``
+``allow-push``
Whether to allow pushing to the repository. If empty or not set,
pushing is not allowed. If the special value ``*``, any remote
user can push, including unauthenticated users. Otherwise, the
remote user must have been authenticated, and the authenticated
user name must be present in this list. The contents of the
- allow_push list are examined after the deny_push list.
+ allow-push list are examined after the deny_push list.
``allow_read``
If the user has not already been denied repository access due to
@@ -2390,7 +2408,7 @@
push is not denied. If the special value ``*``, all remote users are
denied push. Otherwise, unauthenticated users are all denied, and
any authenticated user name present in this list is also denied. The
- contents of the deny_push list are examined before the allow_push list.
+ contents of the deny_push list are examined before the allow-push list.
``deny_read``
Whether to deny reading/viewing of the repository. If this list is
@@ -2547,6 +2565,10 @@
directory updates in parallel on Unix-like systems, which greatly
helps performance.
+``enabled``
+ Whether to enable the worker code to be used.
+ (default: true)
+
``numcpus``
Number of CPUs to use for parallel operations. A zero or
negative value is treated as ``use the default``.
diff -r 87676e8ee056 -r 27b6df1b5adb mercurial/help/environment.txt
--- a/mercurial/help/environment.txt Mon Jan 08 16:07:51 2018 -0800
+++ b/mercurial/help/environment.txt Mon Jan 22 17:53:02 2018 -0500
@@ -73,6 +73,8 @@
``alias``
Don't remove aliases.
+ ``color``
+ Don't disable colored output.
``i18n``
Preserve internationalization.
``revsetalias``
diff -r 87676e8ee056 -r 27b6df1b5adb mercurial/help/filesets.txt
--- a/mercurial/help/filesets.txt Mon Jan 08 16:07:51 2018 -0800
+++ b/mercurial/help/filesets.txt Mon Jan 22 17:53:02 2018 -0500
@@ -9,7 +9,8 @@
or double quotes if they contain characters outside of
``[.*{}[]?/\_a-zA-Z0-9\x80-\xff]`` or if they match one of the
predefined predicates. This generally applies to file patterns other
-than globs and arguments for predicates.
+than globs and arguments for predicates. Pattern prefixes such as
+``path:`` may be specified without quoting.
Special characters can be used in quoted identifiers by escaping them,
e.g., ``\n`` is interpreted as a newline. To prevent them from being
@@ -75,4 +76,4 @@
- Remove files listed in foo.lst that contain the letter a or b::
- hg remove "set: 'listfile:foo.lst' and (**a* or **b*)"
+ hg remove "set: listfile:foo.lst and (**a* or **b*)"
diff -r 87676e8ee056 -r 27b6df1b5adb mercurial/help/flags.txt
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/mercurial/help/flags.txt Mon Jan 22 17:53:02 2018 -0500
@@ -0,0 +1,104 @@
+Most Mercurial commands accept various flags.
+
+Flag names
+==========
+
+Flags for each command are listed in :hg:`help` for that command.
+Additionally, some flags, such as --repository, are global and can be used with
+any command - those are seen in :hg:`help -v`, and can be specified before or
+after the command.
+
+Every flag has at least a long name, such as --repository. Some flags may also
+have a short one-letter name, such as the equivalent -R. Using the short or long
+name is equivalent and has the same effect.
+
+Flags that have a short name can also be bundled together - for instance, to
+specify both --edit (short -e) and --interactive (short -i), one could use::
+
+ hg commit -ei
+
+If any of the bundled flags takes a value (i.e. is not a boolean), it must be
+last, followed by the value::
+
+ hg commit -im 'Message'
+
+Flag types
+==========
+
+Mercurial command-line flags can be strings, numbers, booleans, or lists of
+strings.
+
+Specifying flag values
+======================
+
+The following syntaxes are allowed, assuming a flag 'flagname' with short name
+'f'::
+
+ --flagname=foo
+ --flagname foo
+ -f foo
+ -ffoo
+
+This syntax applies to all non-boolean flags (strings, numbers or lists).
+
+Specifying boolean flags
+========================
+
+Boolean flags do not take a value parameter. To specify a boolean, use the flag
+name to set it to true, or the same name prefixed with 'no-' to set it to
+false::
+
+ hg commit --interactive
+ hg commit --no-interactive
+
+Specifying list flags
+=====================
+
+List flags take multiple values. To specify them, pass the flag multiple times::
+
+ hg files --include mercurial --include tests
+
+Setting flag defaults
+=====================
+
+In order to set a default value for a flag in an hgrc file, it is recommended to
+use aliases::
+
+ [alias]
+ commit = commit --interactive
+
+For more information on hgrc files, see :hg:`help config`.
+
+Overriding flags on the command line
+====================================
+
+If the same non-list flag is specified multiple times on the command line, the
+latest specification is used::
+
+ hg commit -m "Ignored value" -m "Used value"
+
+This includes the use of aliases - e.g., if one has::
+
+ [alias]
+ committemp = commit -m "Ignored value"
+
+then the following command will override that -m::
+
+ hg committemp -m "Used value"
+
+Overriding flag defaults
+========================
+
+Every flag has a default value, and you may also set your own defaults in hgrc
+as described above.
+Except for list flags, defaults can be overridden on the command line simply by
+specifying the flag in that location.
+
+Hidden flags
+============
+
+Some flags are not shown in a command's help by default - specifically, those
+that are deemed to be experimental, deprecated or advanced. To show all flags,
+add the --verbose flag for the help command::
+
+ hg help --verbose commit
diff -r 87676e8ee056 -r 27b6df1b5adb mercurial/help/hg.1.txt
--- a/mercurial/help/hg.1.txt Mon Jan 08 16:07:51 2018 -0800
+++ b/mercurial/help/hg.1.txt Mon Jan 22 17:53:02 2018 -0500
@@ -112,7 +112,7 @@
Copying
"""""""
-Copyright (C) 2005-2017 Matt Mackall.
+Copyright (C) 2005-2018 Matt Mackall.
Free use of this software is granted under the terms of the GNU General
Public License version 2 or any later version.
diff -r 87676e8ee056 -r 27b6df1b5adb mercurial/help/hgignore.5.txt
--- a/mercurial/help/hgignore.5.txt Mon Jan 08 16:07:51 2018 -0800
+++ b/mercurial/help/hgignore.5.txt Mon Jan 22 17:53:02 2018 -0500
@@ -26,7 +26,7 @@
Copying
=======
This manual page is copyright 2006 Vadim Gelfer.
-Mercurial is copyright 2005-2017 Matt Mackall.
+Mercurial is copyright 2005-2018 Matt Mackall.
Free use of this software is granted under the terms of the GNU General
Public License version 2 or any later version.
diff -r 87676e8ee056 -r 27b6df1b5adb mercurial/help/hgrc.5.txt
--- a/mercurial/help/hgrc.5.txt Mon Jan 08 16:07:51 2018 -0800
+++ b/mercurial/help/hgrc.5.txt Mon Jan 22 17:53:02 2018 -0500
@@ -34,7 +34,7 @@
Copying
=======
This manual page is copyright 2005 Bryan O'Sullivan.
-Mercurial is copyright 2005-2017 Matt Mackall.
+Mercurial is copyright 2005-2018 Matt Mackall.
Free use of this software is granted under the terms of the GNU General
Public License version 2 or any later version.
diff -r 87676e8ee056 -r 27b6df1b5adb mercurial/help/internals/wireprotocol.txt
--- a/mercurial/help/internals/wireprotocol.txt Mon Jan 08 16:07:51 2018 -0800
+++ b/mercurial/help/internals/wireprotocol.txt Mon Jan 22 17:53:02 2018 -0500
@@ -731,6 +731,8 @@
cbattempted
Boolean indicating whether the client attempted to use the *clone bundles*
feature before performing this request.
+bookmarks
+ Boolean indicating whether bookmark data is requested.
phases
Boolean indicating whether phases data is requested.
diff -r 87676e8ee056 -r 27b6df1b5adb mercurial/hg.py
--- a/mercurial/hg.py Mon Jan 08 16:07:51 2018 -0800
+++ b/mercurial/hg.py Mon Jan 22 17:53:02 2018 -0500
@@ -14,11 +14,14 @@
import shutil
from .i18n import _
-from .node import nullid
+from .node import (
+ nullid,
+)
from . import (
bookmarks,
bundlerepo,
+ cacheutil,
cmdutil,
destutil,
discovery,
@@ -28,10 +31,10 @@
httppeer,
localrepo,
lock,
+ logexchange,
merge as mergemod,
node,
phases,
- repoview,
scmutil,
sshpeer,
statichttprepo,
@@ -306,16 +309,13 @@
"""
default = defaultpath or sourcerepo.ui.config('paths', 'default')
if default:
- fp = destrepo.vfs("hgrc", "w", text=True)
- fp.write("[paths]\n")
- fp.write("default = %s\n" % default)
- fp.close()
+ template = ('[paths]\n'
+ 'default = %s\n')
+ destrepo.vfs.write('hgrc', util.tonativeeol(template % default))
with destrepo.wlock():
if bookmarks:
- fp = destrepo.vfs('shared', 'w')
- fp.write(sharedbookmarks + '\n')
- fp.close()
+ destrepo.vfs.write('shared', sharedbookmarks + '\n')
def _postshareupdate(repo, update, checkout=None):
"""Maybe perform a working directory update after a shared repo is created.
@@ -459,18 +459,6 @@
os.mkdir(dstcachedir)
util.copyfile(srcbranchcache, dstbranchcache)
-def _cachetocopy(srcrepo):
- """return the list of cache file valuable to copy during a clone"""
- # In local clones we're copying all nodes, not just served
- # ones. Therefore copy all branch caches over.
- cachefiles = ['branch2']
- cachefiles += ['branch2-%s' % f for f in repoview.filtertable]
- cachefiles += ['rbc-names-v1', 'rbc-revs-v1']
- cachefiles += ['tags2']
- cachefiles += ['tags2-%s' % f for f in repoview.filtertable]
- cachefiles += ['hgtagsfnodes1']
- return cachefiles
-
def clone(ui, peeropts, source, dest=None, pull=False, rev=None,
update=True, stream=False, branch=None, shareopts=None):
"""Make a copy of an existing repository.
@@ -568,7 +556,7 @@
'unable to resolve identity of remote)\n'))
elif sharenamemode == 'remote':
sharepath = os.path.join(
- sharepool, hashlib.sha1(source).hexdigest())
+ sharepool, node.hex(hashlib.sha1(source).digest()))
else:
raise error.Abort(_('unknown share naming mode: %s') %
sharenamemode)
@@ -629,7 +617,7 @@
util.copyfile(srcbookmarks, dstbookmarks)
dstcachedir = os.path.join(destpath, 'cache')
- for cache in _cachetocopy(srcrepo):
+ for cache in cacheutil.cachetocopy(srcrepo):
_copycache(srcrepo, dstcachedir, cache)
# we need to re-init the repo after manually copying the data
@@ -658,6 +646,9 @@
checkout = revs[0]
local = destpeer.local()
if local:
+ u = util.url(abspath)
+ defaulturl = bytes(u)
+ local.ui.setconfig('paths', 'default', defaulturl, 'clone')
if not stream:
if pull:
stream = False
@@ -680,14 +671,14 @@
destrepo = destpeer.local()
if destrepo:
template = uimod.samplehgrcs['cloned']
- fp = destrepo.vfs("hgrc", "wb")
u = util.url(abspath)
u.passwd = None
defaulturl = bytes(u)
- fp.write(util.tonativeeol(template % defaulturl))
- fp.close()
+ destrepo.vfs.write('hgrc', util.tonativeeol(template % defaulturl))
+ destrepo.ui.setconfig('paths', 'default', defaulturl, 'clone')
- destrepo.ui.setconfig('paths', 'default', defaulturl, 'clone')
+ if ui.configbool('experimental', 'remotenames'):
+ logexchange.pullremotenames(destrepo, srcpeer)
if update:
if update is not True:
@@ -843,16 +834,32 @@
return ret
-def merge(repo, node, force=None, remind=True, mergeforce=False, labels=None):
+def merge(repo, node, force=None, remind=True, mergeforce=False, labels=None,
+ abort=False):
"""Branch merge with node, resolving changes. Return true if any
unresolved conflicts."""
- stats = mergemod.update(repo, node, True, force, mergeforce=mergeforce,
- labels=labels)
+ if not abort:
+ stats = mergemod.update(repo, node, True, force, mergeforce=mergeforce,
+ labels=labels)
+ else:
+ ms = mergemod.mergestate.read(repo)
+ if ms.active():
+ # there were conflicts
+ node = ms.localctx.hex()
+ else:
+ # there were no conflicts, mergestate was not stored
+ node = repo['.'].hex()
+
+ repo.ui.status(_("aborting the merge, updating back to"
+ " %s\n") % node[:12])
+ stats = mergemod.update(repo, node, branchmerge=False, force=True,
+ labels=labels)
+
_showstats(repo, stats)
if stats[3]:
repo.ui.status(_("use 'hg resolve' to retry unresolved file merges "
- "or 'hg update -C .' to abandon\n"))
- elif remind:
+ "or 'hg merge --abort' to abandon\n"))
+ elif remind and not abort:
repo.ui.status(_("(branch merge, don't forget to commit)\n"))
return stats[3] > 0
@@ -912,8 +919,13 @@
return _incoming(display, subreporecurse, ui, repo, source, opts)
def _outgoing(ui, repo, dest, opts):
- dest = ui.expandpath(dest or 'default-push', dest or 'default')
- dest, branches = parseurl(dest, opts.get('branch'))
+ path = ui.paths.getpath(dest, default=('default-push', 'default'))
+ if not path:
+ raise error.Abort(_('default repository not configured!'),
+ hint=_("see 'hg help config.paths'"))
+ dest = path.pushloc or path.loc
+ branches = path.branch, opts.get('branch') or []
+
ui.status(_('comparing with %s\n') % util.hidepassword(dest))
revs, checkout = addbranchrevs(repo, repo, branches, opts.get('rev'))
if revs:
diff -r 87676e8ee056 -r 27b6df1b5adb mercurial/hgweb/common.py
--- a/mercurial/hgweb/common.py Mon Jan 08 16:07:51 2018 -0800
+++ b/mercurial/hgweb/common.py Mon Jan 22 17:53:02 2018 -0500
@@ -75,7 +75,7 @@
if deny and (not user or ismember(hgweb.repo.ui, user, deny)):
raise ErrorResponse(HTTP_UNAUTHORIZED, 'push not authorized')
- allow = hgweb.configlist('web', 'allow_push')
+ allow = hgweb.configlist('web', 'allow-push')
if not (allow and ismember(hgweb.repo.ui, user, allow)):
raise ErrorResponse(HTTP_UNAUTHORIZED, 'push not authorized')
diff -r 87676e8ee056 -r 27b6df1b5adb mercurial/hgweb/hgweb_mod.py
--- a/mercurial/hgweb/hgweb_mod.py Mon Jan 08 16:07:51 2018 -0800
+++ b/mercurial/hgweb/hgweb_mod.py Mon Jan 22 17:53:02 2018 -0500
@@ -114,7 +114,7 @@
self.stripecount = self.configint('web', 'stripes')
self.maxshortchanges = self.configint('web', 'maxshortchanges')
self.maxfiles = self.configint('web', 'maxfiles')
- self.allowpull = self.configbool('web', 'allowpull')
+ self.allowpull = self.configbool('web', 'allow-pull')
# we use untrusted=False to prevent a repo owner from using
# web.templates in .hg/hgrc to get access to any file readable
diff -r 87676e8ee056 -r 27b6df1b5adb mercurial/hgweb/protocol.py
--- a/mercurial/hgweb/protocol.py Mon Jan 08 16:07:51 2018 -0800
+++ b/mercurial/hgweb/protocol.py Mon Jan 22 17:53:02 2018 -0500
@@ -102,25 +102,20 @@
urlreq.quote(self.req.env.get('REMOTE_HOST', '')),
urlreq.quote(self.req.env.get('REMOTE_USER', '')))
- def responsetype(self, v1compressible=False):
+ def responsetype(self, prefer_uncompressed):
"""Determine the appropriate response type and compression settings.
- The ``v1compressible`` argument states whether the response with
- application/mercurial-0.1 media types should be zlib compressed.
-
Returns a tuple of (mediatype, compengine, engineopts).
"""
- # For now, if it isn't compressible in the old world, it's never
- # compressible. We can change this to send uncompressed 0.2 payloads
- # later.
- if not v1compressible:
- return HGTYPE, None, None
-
# Determine the response media type and compression engine based
# on the request parameters.
protocaps = decodevaluefromheaders(self.req, r'X-HgProto').split(' ')
if '0.2' in protocaps:
+ # All clients are expected to support uncompressed data.
+ if prefer_uncompressed:
+ return HGTYPE2, util._noopengine(), {}
+
# Default as defined by wire protocol spec.
compformats = ['zlib', 'none']
for cap in protocaps:
@@ -155,7 +150,7 @@
def call(repo, req, cmd):
p = webproto(req, repo.ui)
- def genversion2(gen, compress, engine, engineopts):
+ def genversion2(gen, engine, engineopts):
# application/mercurial-0.2 always sends a payload header
# identifying the compression engine.
name = engine.wireprotosupport().name
@@ -163,31 +158,27 @@
yield struct.pack('B', len(name))
yield name
- if compress:
- for chunk in engine.compressstream(gen, opts=engineopts):
- yield chunk
- else:
- for chunk in gen:
- yield chunk
+ for chunk in gen:
+ yield chunk
rsp = wireproto.dispatch(repo, p, cmd)
if isinstance(rsp, bytes):
req.respond(HTTP_OK, HGTYPE, body=rsp)
return []
+ elif isinstance(rsp, wireproto.streamres_legacy):
+ gen = rsp.gen
+ req.respond(HTTP_OK, HGTYPE)
+ return gen
elif isinstance(rsp, wireproto.streamres):
- if rsp.reader:
- gen = iter(lambda: rsp.reader.read(32768), '')
- else:
- gen = rsp.gen
+ gen = rsp.gen
# This code for compression should not be streamres specific. It
# is here because we only compress streamres at the moment.
- mediatype, engine, engineopts = p.responsetype(rsp.v1compressible)
+ mediatype, engine, engineopts = p.responsetype(rsp.prefer_uncompressed)
+ gen = engine.compressstream(gen, engineopts)
- if mediatype == HGTYPE and rsp.v1compressible:
- gen = engine.compressstream(gen, engineopts)
- elif mediatype == HGTYPE2:
- gen = genversion2(gen, rsp.v1compressible, engine, engineopts)
+ if mediatype == HGTYPE2:
+ gen = genversion2(gen, engine, engineopts)
req.respond(HTTP_OK, mediatype)
return gen
diff -r 87676e8ee056 -r 27b6df1b5adb mercurial/hgweb/webcommands.py
--- a/mercurial/hgweb/webcommands.py Mon Jan 08 16:07:51 2018 -0800
+++ b/mercurial/hgweb/webcommands.py Mon Jan 22 17:53:02 2018 -0500
@@ -13,7 +13,7 @@
import re
from ..i18n import _
-from ..node import hex, short
+from ..node import hex, nullid, short
from .common import (
ErrorResponse,
@@ -36,9 +36,7 @@
revsetlang,
scmutil,
smartset,
- templatefilters,
templater,
- url,
util,
)
@@ -415,7 +413,7 @@
else:
nextentry = []
- return tmpl(shortlog and 'shortlog' or 'changelog', changenav=changenav,
+ return tmpl('shortlog' if shortlog else 'changelog', changenav=changenav,
node=ctx.hex(), rev=pos, symrev=symrev, changesets=count,
entries=entries,
latestentry=latestentry, nextentry=nextentry,
@@ -1178,11 +1176,16 @@
Information rendered by this handler can be used to create visual
representations of repository topology.
- The ``revision`` URL parameter controls the starting changeset.
+ The ``revision`` URL parameter controls the starting changeset. If it's
+ absent, the default is ``tip``.
The ``revcount`` query string argument can define the number of changesets
to show information for.
+ The ``graphtop`` query string argument can specify the starting changeset
+ for producing ``jsdata`` variable that is used for rendering graph in
+ JavaScript. By default it has the same value as ``revision``.
+
This handler will render the ``graph`` template.
"""
@@ -1209,6 +1212,10 @@
morevars = copy.copy(tmpl.defaults['sessionvars'])
morevars['revcount'] = revcount * 2
+ graphtop = req.form.get('graphtop', [ctx.hex()])[0]
+ graphvars = copy.copy(tmpl.defaults['sessionvars'])
+ graphvars['graphtop'] = graphtop
+
count = len(web.repo)
pos = rev
@@ -1217,94 +1224,97 @@
changenav = webutil.revnav(web.repo).gen(pos, revcount, count)
tree = []
+ nextentry = []
+ lastrev = 0
if pos != -1:
allrevs = web.repo.changelog.revs(pos, 0)
revs = []
for i in allrevs:
revs.append(i)
- if len(revs) >= revcount:
+ if len(revs) >= revcount + 1:
break
+ if len(revs) > revcount:
+ nextentry = [webutil.commonentry(web.repo, web.repo[revs[-1]])]
+ revs = revs[:-1]
+
+ lastrev = revs[-1]
+
# We have to feed a baseset to dagwalker as it is expecting smartset
# object. This does not have a big impact on hgweb performance itself
# since hgweb graphing code is not itself lazy yet.
dag = graphmod.dagwalker(web.repo, smartset.baseset(revs))
# As we said one line above... not lazy.
- tree = list(graphmod.colored(dag, web.repo))
-
- def getcolumns(tree):
- cols = 0
- for (id, type, ctx, vtx, edges) in tree:
- if type != graphmod.CHANGESET:
- continue
- cols = max(cols, max([edge[0] for edge in edges] or [0]),
- max([edge[1] for edge in edges] or [0]))
- return cols
-
- def graphdata(usetuples, encodestr):
- data = []
+ tree = list(item for item in graphmod.colored(dag, web.repo)
+ if item[1] == graphmod.CHANGESET)
- row = 0
- for (id, type, ctx, vtx, edges) in tree:
- if type != graphmod.CHANGESET:
- continue
- node = pycompat.bytestr(ctx)
- age = encodestr(templatefilters.age(ctx.date()))
- desc = templatefilters.firstline(encodestr(ctx.description()))
- desc = url.escape(templatefilters.nonempty(desc))
- user = url.escape(templatefilters.person(encodestr(ctx.user())))
- branch = url.escape(encodestr(ctx.branch()))
- try:
- branchnode = web.repo.branchtip(branch)
- except error.RepoLookupError:
- branchnode = None
- branch = branch, branchnode == ctx.node()
+ def nodecurrent(ctx):
+ wpnodes = web.repo.dirstate.parents()
+ if wpnodes[1] == nullid:
+ wpnodes = wpnodes[:1]
+ if ctx.node() in wpnodes:
+ return '@'
+ return ''
+
+ def nodesymbol(ctx):
+ if ctx.obsolete():
+ return 'x'
+ elif ctx.isunstable():
+ return '*'
+ elif ctx.closesbranch():
+ return '_'
+ else:
+ return 'o'
- if usetuples:
- data.append((node, vtx, edges, desc, user, age, branch,
- [url.escape(encodestr(x)) for x in ctx.tags()],
- [url.escape(encodestr(x))
- for x in ctx.bookmarks()]))
- else:
- edgedata = [{'col': edge[0], 'nextcol': edge[1],
- 'color': (edge[2] - 1) % 6 + 1,
- 'width': edge[3], 'bcolor': edge[4]}
- for edge in edges]
+ def fulltree():
+ pos = web.repo[graphtop].rev()
+ tree = []
+ if pos != -1:
+ revs = web.repo.changelog.revs(pos, lastrev)
+ dag = graphmod.dagwalker(web.repo, smartset.baseset(revs))
+ tree = list(item for item in graphmod.colored(dag, web.repo)
+ if item[1] == graphmod.CHANGESET)
+ return tree
+
+ def jsdata():
+ return [{'node': pycompat.bytestr(ctx),
+ 'graphnode': nodecurrent(ctx) + nodesymbol(ctx),
+ 'vertex': vtx,
+ 'edges': edges}
+ for (id, type, ctx, vtx, edges) in fulltree()]
- data.append(
- {'node': node,
- 'col': vtx[0],
- 'color': (vtx[1] - 1) % 6 + 1,
- 'edges': edgedata,
- 'row': row,
- 'nextrow': row + 1,
- 'desc': desc,
- 'user': user,
- 'age': age,
- 'bookmarks': webutil.nodebookmarksdict(
- web.repo, ctx.node()),
- 'branches': webutil.nodebranchdict(web.repo, ctx),
- 'inbranch': webutil.nodeinbranch(web.repo, ctx),
- 'tags': webutil.nodetagsdict(web.repo, ctx.node())})
+ def nodes():
+ parity = paritygen(web.stripecount)
+ for row, (id, type, ctx, vtx, edges) in enumerate(tree):
+ entry = webutil.commonentry(web.repo, ctx)
+ edgedata = [{'col': edge[0],
+ 'nextcol': edge[1],
+ 'color': (edge[2] - 1) % 6 + 1,
+ 'width': edge[3],
+ 'bcolor': edge[4]}
+ for edge in edges]
- row += 1
-
- return data
+ entry.update({'col': vtx[0],
+ 'color': (vtx[1] - 1) % 6 + 1,
+ 'parity': next(parity),
+ 'edges': edgedata,
+ 'row': row,
+ 'nextrow': row + 1})
- cols = getcolumns(tree)
+ yield entry
+
rows = len(tree)
- canvasheight = (rows + 1) * bg_height - 27
return tmpl('graph', rev=rev, symrev=symrev, revcount=revcount,
uprev=uprev,
lessvars=lessvars, morevars=morevars, downrev=downrev,
- cols=cols, rows=rows,
- canvaswidth=(cols + 1) * bg_height,
- truecanvasheight=rows * bg_height,
- canvasheight=canvasheight, bg_height=bg_height,
- # {jsdata} will be passed to |json, so it must be in utf-8
- jsdata=lambda **x: graphdata(True, encoding.fromlocal),
- nodes=lambda **x: graphdata(False, pycompat.bytestr),
+ graphvars=graphvars,
+ rows=rows,
+ bg_height=bg_height,
+ changesets=count,
+ nextentry=nextentry,
+ jsdata=lambda **x: jsdata(),
+ nodes=lambda **x: nodes(),
node=ctx.hex(), changenav=changenav)
def _getdoc(e):
diff -r 87676e8ee056 -r 27b6df1b5adb mercurial/hgweb/webutil.py
--- a/mercurial/hgweb/webutil.py Mon Jan 08 16:07:51 2018 -0800
+++ b/mercurial/hgweb/webutil.py Mon Jan 22 17:53:02 2018 -0500
@@ -32,6 +32,7 @@
pathutil,
pycompat,
templatefilters,
+ templatekw,
ui as uimod,
util,
)
@@ -351,6 +352,12 @@
def formatlinerange(fromline, toline):
return '%d:%d' % (fromline + 1, toline)
+def succsandmarkers(repo, ctx):
+ for item in templatekw.showsuccsandmarkers(repo, ctx):
+ item['successors'] = _siblings(repo[successor]
+ for successor in item['successors'])
+ yield item
+
def commonentry(repo, ctx):
node = ctx.node()
return {
@@ -361,6 +368,9 @@
'date': ctx.date(),
'extra': ctx.extra(),
'phase': ctx.phasestr(),
+ 'obsolete': ctx.obsolete(),
+ 'succsandmarkers': lambda **x: succsandmarkers(repo, ctx),
+ 'instabilities': [{"instability": i} for i in ctx.instabilities()],
'branch': nodebranchnodefault(ctx),
'inbranch': nodeinbranch(repo, ctx),
'branches': nodebranchdict(repo, ctx),
@@ -409,7 +419,7 @@
files = []
parity = paritygen(web.stripecount)
for blockno, f in enumerate(ctx.files()):
- template = f in ctx and 'filenodelink' or 'filenolink'
+ template = 'filenodelink' if f in ctx else 'filenolink'
files.append(tmpl(template,
node=ctx.hex(), file=f, blockno=blockno + 1,
parity=next(parity)))
@@ -571,7 +581,7 @@
fileno = 0
for filename, adds, removes, isbinary in stats:
- template = filename in files and 'diffstatlink' or 'diffstatnolink'
+ template = 'diffstatlink' if filename in files else 'diffstatnolink'
total = adds + removes
fileno += 1
yield tmpl(template, node=ctx.hex(), file=filename, fileno=fileno,
diff -r 87676e8ee056 -r 27b6df1b5adb mercurial/hook.py
--- a/mercurial/hook.py Mon Jan 08 16:07:51 2018 -0800
+++ b/mercurial/hook.py Mon Jan 22 17:53:02 2018 -0500
@@ -91,7 +91,7 @@
starttime = util.timer()
try:
- r = obj(ui=ui, repo=repo, hooktype=htype, **args)
+ r = obj(ui=ui, repo=repo, hooktype=htype, **pycompat.strkwargs(args))
except Exception as exc:
if isinstance(exc, error.Abort):
ui.warn(_('error: %s hook failed: %s\n') %
diff -r 87676e8ee056 -r 27b6df1b5adb mercurial/httpconnection.py
--- a/mercurial/httpconnection.py Mon Jan 08 16:07:51 2018 -0800
+++ b/mercurial/httpconnection.py Mon Jan 22 17:53:02 2018 -0500
@@ -248,7 +248,7 @@
return self.https_open(req)
def makehttpcon(*args, **kwargs):
k2 = dict(kwargs)
- k2['use_ssl'] = False
+ k2[r'use_ssl'] = False
return HTTPConnection(*args, **k2)
return self.do_open(makehttpcon, req, False)
@@ -288,8 +288,8 @@
if '[' in host:
host = host[1:-1]
- kwargs['keyfile'] = keyfile
- kwargs['certfile'] = certfile
+ kwargs[r'keyfile'] = keyfile
+ kwargs[r'certfile'] = certfile
con = HTTPConnection(host, port, use_ssl=True,
ssl_wrap_socket=sslutil.wrapsocket,
diff -r 87676e8ee056 -r 27b6df1b5adb mercurial/httppeer.py
--- a/mercurial/httppeer.py Mon Jan 08 16:07:51 2018 -0800
+++ b/mercurial/httppeer.py Mon Jan 22 17:53:02 2018 -0500
@@ -161,6 +161,41 @@
h.close()
getattr(h, "close_all", lambda: None)()
+ def _openurl(self, req):
+ if (self._ui.debugflag
+ and self._ui.configbool('devel', 'debug.peer-request')):
+ dbg = self._ui.debug
+ line = 'devel-peer-request: %s\n'
+ dbg(line % '%s %s' % (req.get_method(), req.get_full_url()))
+ hgargssize = None
+
+ for header, value in sorted(req.header_items()):
+ if header.startswith('X-hgarg-'):
+ if hgargssize is None:
+ hgargssize = 0
+ hgargssize += len(value)
+ else:
+ dbg(line % ' %s %s' % (header, value))
+
+ if hgargssize is not None:
+ dbg(line % ' %d bytes of commands arguments in headers'
+ % hgargssize)
+
+ if req.has_data():
+ data = req.get_data()
+ length = getattr(data, 'length', None)
+ if length is None:
+ length = len(data)
+ dbg(line % ' %d bytes of data' % length)
+
+ start = util.timer()
+
+ ret = self._urlopener.open(req)
+ if self._ui.configbool('devel', 'debug.peer-request'):
+ dbg(line % ' finished in %.4f seconds (%s)'
+ % (util.timer() - start, ret.code))
+ return ret
+
# Begin of _basepeer interface.
@util.propertycache
@@ -204,6 +239,7 @@
self._caps = set(self._call('capabilities').split())
def _callstream(self, cmd, _compressible=False, **args):
+ args = pycompat.byteskwargs(args)
if cmd == 'pushkey':
args['data'] = ''
data = args.pop('data', None)
@@ -222,7 +258,7 @@
if not data:
data = strargs
else:
- if isinstance(data, basestring):
+ if isinstance(data, bytes):
i = io.BytesIO(data)
i.length = len(data)
data = i
@@ -297,7 +333,7 @@
self.ui.debug("sending %s bytes\n" % size)
req.add_unredirected_header('Content-Length', '%d' % size)
try:
- resp = self._urlopener.open(req)
+ resp = self._openurl(req)
except urlerr.httperror as inst:
if inst.code == 401:
raise error.Abort(_('authorization failed'))
diff -r 87676e8ee056 -r 27b6df1b5adb mercurial/keepalive.py
--- a/mercurial/keepalive.py Mon Jan 08 16:07:51 2018 -0800
+++ b/mercurial/keepalive.py Mon Jan 22 17:53:02 2018 -0500
@@ -92,6 +92,7 @@
from .i18n import _
from . import (
+ node,
pycompat,
urllibcompat,
util,
@@ -322,7 +323,7 @@
data = urllibcompat.getdata(req)
h.putrequest(
req.get_method(), urllibcompat.getselector(req),
- **skipheaders)
+ **pycompat.strkwargs(skipheaders))
if 'content-type' not in headers:
h.putheader('Content-type',
'application/x-www-form-urlencoded')
@@ -331,7 +332,7 @@
else:
h.putrequest(
req.get_method(), urllibcompat.getselector(req),
- **skipheaders)
+ **pycompat.strkwargs(skipheaders))
except socket.error as err:
raise urlerr.urlerror(err)
for k, v in headers.items():
@@ -366,8 +367,8 @@
def __init__(self, sock, debuglevel=0, strict=0, method=None):
extrakw = {}
if not pycompat.ispy3:
- extrakw['strict'] = True
- extrakw['buffering'] = True
+ extrakw[r'strict'] = True
+ extrakw[r'buffering'] = True
httplib.HTTPResponse.__init__(self, sock, debuglevel=debuglevel,
method=method, **extrakw)
self.fileno = sock.fileno
@@ -607,7 +608,7 @@
foo = fo.read()
fo.close()
m = md5(foo)
- print(format % ('normal urllib', m.hexdigest()))
+ print(format % ('normal urllib', node.hex(m.digest())))
# now install the keepalive handler and try again
opener = urlreq.buildopener(HTTPHandler())
@@ -617,7 +618,7 @@
foo = fo.read()
fo.close()
m = md5(foo)
- print(format % ('keepalive read', m.hexdigest()))
+ print(format % ('keepalive read', node.hex(m.digest())))
fo = urlreq.urlopen(url)
foo = ''
@@ -629,7 +630,7 @@
break
fo.close()
m = md5(foo)
- print(format % ('keepalive readline', m.hexdigest()))
+ print(format % ('keepalive readline', node.hex(m.digest())))
def comp(N, url):
print(' making %i connections to:\n %s' % (N, url))
diff -r 87676e8ee056 -r 27b6df1b5adb mercurial/localrepo.py
--- a/mercurial/localrepo.py Mon Jan 08 16:07:51 2018 -0800
+++ b/mercurial/localrepo.py Mon Jan 22 17:53:02 2018 -0500
@@ -197,7 +197,7 @@
**kwargs):
chunks = exchange.getbundlechunks(self._repo, source, heads=heads,
common=common, bundlecaps=bundlecaps,
- **kwargs)
+ **kwargs)[1]
cb = util.chunkbuffer(chunks)
if exchange.bundle2requested(bundlecaps):
@@ -364,11 +364,14 @@
self.root = self.wvfs.base
self.path = self.wvfs.join(".hg")
self.origroot = path
- # These auditor are not used by the vfs,
- # only used when writing this comment: basectx.match
- self.auditor = pathutil.pathauditor(self.root, self._checknested)
- self.nofsauditor = pathutil.pathauditor(self.root, self._checknested,
- realfs=False, cached=True)
+ # This is only used by context.workingctx.match in order to
+ # detect files in subrepos.
+ self.auditor = pathutil.pathauditor(
+ self.root, callback=self._checknested)
+ # This is only used by context.basectx.match in order to detect
+ # files in subrepos.
+ self.nofsauditor = pathutil.pathauditor(
+ self.root, callback=self._checknested, realfs=False, cached=True)
self.baseui = baseui
self.ui = baseui.copy()
self.ui.copy = baseui.copy # prevent copying repo configuration
@@ -499,9 +502,6 @@
# post-dirstate-status hooks
self._postdsstatus = []
- # Cache of types representing filtered repos.
- self._filteredrepotypes = weakref.WeakKeyDictionary()
-
# generic mapping between names and nodes
self.names = namespaces.namespaces()
@@ -577,7 +577,8 @@
def _restrictcapabilities(self, caps):
if self.ui.configbool('experimental', 'bundle2-advertise'):
caps = set(caps)
- capsblob = bundle2.encodecaps(bundle2.getrepocaps(self))
+ capsblob = bundle2.encodecaps(bundle2.getrepocaps(self,
+ role='client'))
caps.add('bundle2=' + urlreq.quote(capsblob))
return caps
@@ -675,23 +676,10 @@
Intended to be overwritten by filtered repo."""
return self
- def filtered(self, name):
+ def filtered(self, name, visibilityexceptions=None):
"""Return a filtered version of a repository"""
- # Python <3.4 easily leaks types via __mro__. See
- # https://bugs.python.org/issue17950. We cache dynamically
- # created types so this method doesn't leak on every
- # invocation.
-
- key = self.unfiltered().__class__
- if key not in self._filteredrepotypes:
- # Build a new type with the repoview mixin and the base
- # class of this repo. Give it a name containing the
- # filter name to aid debugging.
- bases = (repoview.repoview, key)
- cls = type(r'%sfilteredrepo' % name, bases, {})
- self._filteredrepotypes[key] = cls
-
- return self._filteredrepotypes[key](self, name)
+ cls = repoview.newtype(self.unfiltered().__class__)
+ return cls(self, name, visibilityexceptions)
@repofilecache('bookmarks', 'bookmarks.current')
def _bookmarks(self):
@@ -701,8 +689,8 @@
def _activebookmark(self):
return self._bookmarks.active
- # _phaserevs and _phasesets depend on changelog. what we need is to
- # call _phasecache.invalidate() if '00changelog.i' was changed, but it
+ # _phasesets depend on changelog. what we need is to call
+ # _phasecache.invalidate() if '00changelog.i' was changed, but it
# can't be easily expressed in filecache mechanism.
@storecache('phaseroots', '00changelog.i')
def _phasecache(self):
@@ -775,7 +763,9 @@
__bool__ = __nonzero__
def __len__(self):
- return len(self.changelog)
+ # no need to pay the cost of repoview.changelog
+ unfi = self.unfiltered()
+ return len(unfi.changelog)
def __iter__(self):
return iter(self.changelog)
@@ -1112,7 +1102,7 @@
data = self.wvfs.read(filename)
return self._filter(self._encodefilterpats, filename, data)
- def wwrite(self, filename, data, flags, backgroundclose=False):
+ def wwrite(self, filename, data, flags, backgroundclose=False, **kwargs):
"""write ``data`` into ``filename`` in the working directory
This returns length of written (maybe decoded) data.
@@ -1121,9 +1111,12 @@
if 'l' in flags:
self.wvfs.symlink(data, filename)
else:
- self.wvfs.write(filename, data, backgroundclose=backgroundclose)
+ self.wvfs.write(filename, data, backgroundclose=backgroundclose,
+ **kwargs)
if 'x' in flags:
self.wvfs.setflags(filename, False, True)
+ else:
+ self.wvfs.setflags(filename, False, False)
return len(data)
def wwritedata(self, filename, data):
@@ -1147,7 +1140,6 @@
raise error.ProgrammingError('transaction requires locking')
tr = self.currenttransaction()
if tr is not None:
- scmutil.registersummarycallback(self, tr, desc)
return tr.nest()
# abort here if the journal already exists
@@ -1244,6 +1236,8 @@
# gating.
tracktags(tr2)
repo = reporef()
+ if repo.ui.configbool('experimental', 'single-head-per-branch'):
+ scmutil.enforcesinglehead(repo, tr2, desc)
if hook.hashook(repo.ui, 'pretxnclose-bookmark'):
for name, (old, new) in sorted(tr.changes['bookmarks'].items()):
args = tr.hookargs.copy()
@@ -1286,7 +1280,7 @@
validator=validate,
releasefn=releasefn,
checkambigfiles=_cachedfiles)
- tr.changes['revs'] = set()
+ tr.changes['revs'] = xrange(0, 0)
tr.changes['obsmarkers'] = set()
tr.changes['phases'] = {}
tr.changes['bookmarks'] = {}
@@ -1329,7 +1323,11 @@
**pycompat.strkwargs(hookargs))
reporef()._afterlock(hookfunc)
tr.addfinalize('txnclose-hook', txnclosehook)
- tr.addpostclose('warms-cache', self._buildcacheupdater(tr))
+ # Include a leading "-" to make it happen before the transaction summary
+ # reports registered via scmutil.registersummarycallback() whose names
+ # are 00-txnreport etc. That way, the caches will be warm when the
+ # callbacks run.
+ tr.addpostclose('-warm-cache', self._buildcacheupdater(tr))
def txnaborthook(tr2):
"""To be run if transaction is aborted
"""
@@ -1587,29 +1585,18 @@
# determine whether it can be inherited
if parentenvvar is not None:
parentlock = encoding.environ.get(parentenvvar)
- try:
- l = lockmod.lock(vfs, lockname, 0, releasefn=releasefn,
- acquirefn=acquirefn, desc=desc,
- inheritchecker=inheritchecker,
- parentlock=parentlock)
- except error.LockHeld as inst:
- if not wait:
- raise
- # show more details for new-style locks
- if ':' in inst.locker:
- host, pid = inst.locker.split(":", 1)
- self.ui.warn(
- _("waiting for lock on %s held by process %r "
- "on host %r\n") % (desc, pid, host))
- else:
- self.ui.warn(_("waiting for lock on %s held by %r\n") %
- (desc, inst.locker))
- # default to 600 seconds timeout
- l = lockmod.lock(vfs, lockname,
- int(self.ui.config("ui", "timeout")),
- releasefn=releasefn, acquirefn=acquirefn,
- desc=desc)
- self.ui.warn(_("got lock after %s seconds\n") % l.delay)
+
+ timeout = 0
+ warntimeout = 0
+ if wait:
+ timeout = self.ui.configint("ui", "timeout")
+ warntimeout = self.ui.configint("ui", "timeout.warn")
+
+ l = lockmod.trylock(self.ui, vfs, lockname, timeout, warntimeout,
+ releasefn=releasefn,
+ acquirefn=acquirefn, desc=desc,
+ inheritchecker=inheritchecker,
+ parentlock=parentlock)
return l
def _afterlock(self, callback):
diff -r 87676e8ee056 -r 27b6df1b5adb mercurial/lock.py
--- a/mercurial/lock.py Mon Jan 08 16:07:51 2018 -0800
+++ b/mercurial/lock.py Mon Jan 22 17:53:02 2018 -0500
@@ -14,6 +14,8 @@
import time
import warnings
+from .i18n import _
+
from . import (
encoding,
error,
@@ -39,6 +41,58 @@
raise
return result
+def trylock(ui, vfs, lockname, timeout, warntimeout, *args, **kwargs):
+ """return an acquired lock or raise an a LockHeld exception
+
+ This function is responsible to issue warnings and or debug messages about
+ the held lock while trying to acquires it."""
+
+ def printwarning(printer, locker):
+ """issue the usual "waiting on lock" message through any channel"""
+ # show more details for new-style locks
+ if ':' in locker:
+ host, pid = locker.split(":", 1)
+ msg = _("waiting for lock on %s held by process %r "
+ "on host %r\n") % (l.desc, pid, host)
+ else:
+ msg = _("waiting for lock on %s held by %r\n") % (l.desc, locker)
+ printer(msg)
+
+ l = lock(vfs, lockname, 0, *args, dolock=False, **kwargs)
+
+ debugidx = 0 if (warntimeout and timeout) else -1
+ warningidx = 0
+ if not timeout:
+ warningidx = -1
+ elif warntimeout:
+ warningidx = warntimeout
+
+ delay = 0
+ while True:
+ try:
+ l._trylock()
+ break
+ except error.LockHeld as inst:
+ if delay == debugidx:
+ printwarning(ui.debug, inst.locker)
+ if delay == warningidx:
+ printwarning(ui.warn, inst.locker)
+ if timeout <= delay:
+ raise error.LockHeld(errno.ETIMEDOUT, inst.filename,
+ l.desc, inst.locker)
+ time.sleep(1)
+ delay += 1
+
+ l.delay = delay
+ if l.delay:
+ if 0 <= warningidx <= l.delay:
+ ui.warn(_("got lock after %s seconds\n") % l.delay)
+ else:
+ ui.debug("got lock after %s seconds\n" % l.delay)
+ if l.acquirefn:
+ l.acquirefn()
+ return l
+
class lock(object):
'''An advisory lock held by one process to control access to a set
of files. Non-cooperating processes or incorrectly written scripts
@@ -60,7 +114,8 @@
_host = None
def __init__(self, vfs, file, timeout=-1, releasefn=None, acquirefn=None,
- desc=None, inheritchecker=None, parentlock=None):
+ desc=None, inheritchecker=None, parentlock=None,
+ dolock=True):
self.vfs = vfs
self.f = file
self.held = 0
@@ -74,9 +129,10 @@
self._inherited = False
self.postrelease = []
self.pid = self._getpid()
- self.delay = self.lock()
- if self.acquirefn:
- self.acquirefn()
+ if dolock:
+ self.delay = self.lock()
+ if self.acquirefn:
+ self.acquirefn()
def __enter__(self):
return self
diff -r 87676e8ee056 -r 27b6df1b5adb mercurial/logexchange.py
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/mercurial/logexchange.py Mon Jan 22 17:53:02 2018 -0500
@@ -0,0 +1,118 @@
+# logexchange.py
+#
+# Copyright 2017 Augie Fackler
+# Copyright 2017 Sean Farley
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2 or any later version.
+
+from __future__ import absolute_import
+
+from .node import hex
+
+from . import (
+ vfs as vfsmod,
+)
+
+# directory name in .hg/ in which remotenames files will be present
+remotenamedir = 'logexchange'
+
+def readremotenamefile(repo, filename):
+ """
+ reads a file from .hg/logexchange/ directory and yields its content
+ filename: the file to be read
+ yield a tuple (node, remotepath, name)
+ """
+
+ vfs = vfsmod.vfs(repo.vfs.join(remotenamedir))
+ if not vfs.exists(filename):
+ return
+ f = vfs(filename)
+ lineno = 0
+ for line in f:
+ line = line.strip()
+ if not line:
+ continue
+ # contains the version number
+ if lineno == 0:
+ lineno += 1
+ try:
+ node, remote, rname = line.split('\0')
+ yield node, remote, rname
+ except ValueError:
+ pass
+
+ f.close()
+
+def readremotenames(repo):
+ """
+ read the details about the remotenames stored in .hg/logexchange/ and
+ yields a tuple (node, remotepath, name). It does not yield information
+ about whether an entry yielded is a branch or a bookmark. To get that
+ information, call the respective functions.
+ """
+
+ for bmentry in readremotenamefile(repo, 'bookmarks'):
+ yield bmentry
+ for branchentry in readremotenamefile(repo, 'branches'):
+ yield branchentry
+
+def writeremotenamefile(repo, remotepath, names, nametype):
+ vfs = vfsmod.vfs(repo.vfs.join(remotenamedir))
+ f = vfs(nametype, 'w', atomictemp=True)
+ # write the storage version info on top of file
+ # version '0' represents the very initial version of the storage format
+ f.write('0\n\n')
+
+ olddata = set(readremotenamefile(repo, nametype))
+ # re-save the data from a different remote than this one.
+ for node, oldpath, rname in sorted(olddata):
+ if oldpath != remotepath:
+ f.write('%s\0%s\0%s\n' % (node, oldpath, rname))
+
+ for name, node in sorted(names.iteritems()):
+ if nametype == "branches":
+ for n in node:
+ f.write('%s\0%s\0%s\n' % (n, remotepath, name))
+ elif nametype == "bookmarks":
+ if node:
+ f.write('%s\0%s\0%s\n' % (node, remotepath, name))
+
+ f.close()
+
+def saveremotenames(repo, remotepath, branches=None, bookmarks=None):
+ """
+ save remotenames i.e. remotebookmarks and remotebranches in their
+ respective files under ".hg/logexchange/" directory.
+ """
+ wlock = repo.wlock()
+ try:
+ if bookmarks:
+ writeremotenamefile(repo, remotepath, bookmarks, 'bookmarks')
+ if branches:
+ writeremotenamefile(repo, remotepath, branches, 'branches')
+ finally:
+ wlock.release()
+
+def pullremotenames(localrepo, remoterepo):
+ """
+ pulls bookmarks and branches information of the remote repo during a
+ pull or clone operation.
+ localrepo is our local repository
+ remoterepo is the peer instance
+ """
+ remotepath = remoterepo.url()
+ bookmarks = remoterepo.listkeys('bookmarks')
+ # on a push, we don't want to keep obsolete heads since
+ # they won't show up as heads on the next pull, so we
+ # remove them here otherwise we would require the user
+ # to issue a pull to refresh the storage
+ bmap = {}
+ repo = localrepo.unfiltered()
+ for branch, nodes in remoterepo.branchmap().iteritems():
+ bmap[branch] = []
+ for node in nodes:
+ if node in repo and not repo[node].obsolete():
+ bmap[branch].append(hex(node))
+
+ saveremotenames(localrepo, remotepath, bmap, bookmarks)
diff -r 87676e8ee056 -r 27b6df1b5adb mercurial/mail.py
--- a/mercurial/mail.py Mon Jan 08 16:07:51 2018 -0800
+++ b/mercurial/mail.py Mon Jan 22 17:53:02 2018 -0500
@@ -152,7 +152,7 @@
fp = open(mbox, 'ab+')
# Should be time.asctime(), but Windows prints 2-characters day
# of month instead of one. Make them print the same thing.
- date = time.strftime('%a %b %d %H:%M:%S %Y', time.localtime())
+ date = time.strftime(r'%a %b %d %H:%M:%S %Y', time.localtime())
fp.write('From %s %s\n' % (sender, date))
fp.write(msg)
fp.write('\n\n')
diff -r 87676e8ee056 -r 27b6df1b5adb mercurial/manifest.py
--- a/mercurial/manifest.py Mon Jan 08 16:07:51 2018 -0800
+++ b/mercurial/manifest.py Mon Jan 22 17:53:02 2018 -0500
@@ -810,7 +810,7 @@
if p in self._files:
yield self._subpath(p)
else:
- for f in self._dirs[p].iterkeys():
+ for f in self._dirs[p]:
yield f
def keys(self):
diff -r 87676e8ee056 -r 27b6df1b5adb mercurial/match.py
--- a/mercurial/match.py Mon Jan 08 16:07:51 2018 -0800
+++ b/mercurial/match.py Mon Jan 22 17:53:02 2018 -0500
@@ -305,9 +305,6 @@
Returns the string 'all' if the given directory and all subdirectories
should be visited. Otherwise returns True or False indicating whether
the given directory should be visited.
-
- This function's behavior is undefined if it has returned False for
- one of the dir's parent directories.
'''
return True
@@ -460,17 +457,10 @@
class differencematcher(basematcher):
'''Composes two matchers by matching if the first matches and the second
- does not. Well, almost... If the user provides a pattern like "-X foo foo",
- Mercurial actually does match "foo" against that. That's because exact
- matches are treated specially. So, since this differencematcher is used for
- excludes, it needs to special-case exact matching.
+ does not.
The second matcher's non-matching-attributes (root, cwd, bad, explicitdir,
traversedir) are ignored.
-
- TODO: If we want to keep the behavior described above for exact matches, we
- should consider instead treating the above case something like this:
- union(exact(foo), difference(pattern(foo), include(foo)))
'''
def __init__(self, m1, m2):
super(differencematcher, self).__init__(m1._root, m1._cwd)
@@ -481,7 +471,7 @@
self.traversedir = m1.traversedir
def matchfn(self, f):
- return self._m1(f) and (not self._m2(f) or self._m1.exact(f))
+ return self._m1(f) and not self._m2(f)
@propertycache
def _files(self):
@@ -496,9 +486,6 @@
def visitdir(self, dir):
if self._m2.visitdir(dir) == 'all':
- # There's a bug here: If m1 matches file 'dir/file' and m2 excludes
- # 'dir' (recursively), we should still visit 'dir' due to the
- # exception we have for exact matches.
return False
return bool(self._m1.visitdir(dir))
diff -r 87676e8ee056 -r 27b6df1b5adb mercurial/mdiff.py
--- a/mercurial/mdiff.py Mon Jan 08 16:07:51 2018 -0800
+++ b/mercurial/mdiff.py Mon Jan 22 17:53:02 2018 -0500
@@ -67,6 +67,7 @@
'ignoreblanklines': False,
'upgrade': False,
'showsimilarity': False,
+ 'worddiff': False,
}
def __init__(self, **opts):
@@ -99,7 +100,7 @@
if blank and opts.ignoreblanklines:
text = re.sub('\n+', '\n', text).strip('\n')
if opts.ignorewseol:
- text = re.sub(r'[ \t\r\f]+\n', r'\n', text)
+ text = re.sub(br'[ \t\r\f]+\n', r'\n', text)
return text
def splitblock(base1, lines1, base2, lines2, opts):
@@ -355,7 +356,7 @@
# the previous hunk context until we find a line starting with an
# alphanumeric char.
for i in xrange(astart - 1, lastpos - 1, -1):
- if l1[i][0].isalnum():
+ if l1[i][0:1].isalnum():
func = ' ' + l1[i].rstrip()[:40]
lastfunc[1] = func
break
diff -r 87676e8ee056 -r 27b6df1b5adb mercurial/merge.py
--- a/mercurial/merge.py Mon Jan 08 16:07:51 2018 -0800
+++ b/mercurial/merge.py Mon Jan 22 17:53:02 2018 -0500
@@ -646,6 +646,14 @@
return config
def _checkunknownfile(repo, wctx, mctx, f, f2=None):
+ if wctx.isinmemory():
+ # Nothing to do in IMM because nothing in the "working copy" can be an
+ # unknown file.
+ #
+ # Note that we should bail out here, not in ``_checkunknownfiles()``,
+ # because that function does other useful work.
+ return False
+
if f2 is None:
f2 = f
return (repo.wvfs.audit.check(f)
@@ -674,7 +682,11 @@
# updated with any new dirs that are checked and found to be absent.
self._missingdircache = set()
- def __call__(self, repo, f):
+ def __call__(self, repo, wctx, f):
+ if wctx.isinmemory():
+ # Nothing to do in IMM for the same reason as ``_checkunknownfile``.
+ return False
+
# Check for path prefixes that exist as unknown files.
for p in reversed(list(util.finddirs(f))):
if p in self._missingdircache:
@@ -726,7 +738,7 @@
if _checkunknownfile(repo, wctx, mctx, f):
fileconflicts.add(f)
elif pathconfig and f not in wctx:
- path = checkunknowndirs(repo, f)
+ path = checkunknowndirs(repo, wctx, f)
if path is not None:
pathconflicts.add(path)
elif m == 'dg':
@@ -1333,10 +1345,6 @@
repo.ui.warn(_("current directory was removed\n"
"(consider changing to repo root: %s)\n") % repo.root)
- # It's necessary to flush here in case we're inside a worker fork and will
- # quit after this function.
- wctx.flushall()
-
def batchget(repo, mctx, wctx, actions):
"""apply gets to the working directory
@@ -1368,7 +1376,9 @@
if repo.wvfs.lexists(absf):
util.rename(absf, orig)
wctx[f].clearunknown()
- wctx[f].write(fctx(f).data(), flags, backgroundclose=True)
+ atomictemp = ui.configbool("experimental", "update.atomic-file")
+ wctx[f].write(fctx(f).data(), flags, backgroundclose=True,
+ atomictemp=atomictemp)
if i == 100:
yield i, f
i = 0
@@ -1376,9 +1386,6 @@
if i > 0:
yield i, f
- # It's necessary to flush here in case we're inside a worker fork and will
- # quit after this function.
- wctx.flushall()
def applyupdates(repo, actions, wctx, mctx, overwrite, labels=None):
"""apply the merge action list to the working directory
@@ -1479,10 +1486,6 @@
z += 1
progress(_updating, z, item=f, total=numupdates, unit=_files)
- # We should flush before forking into worker processes, since those workers
- # flush when they complete, and we don't want to duplicate work.
- wctx.flushall()
-
# get in parallel
prog = worker.worker(repo.ui, cost, batchget, (repo, mctx, wctx),
actions['g'])
@@ -1555,6 +1558,9 @@
usemergedriver = not overwrite and mergeactions and ms.mergedriver
if usemergedriver:
+ if wctx.isinmemory():
+ raise error.InMemoryMergeConflictsError("in-memory merge does not "
+ "support mergedriver")
ms.commit()
proceed = driverpreprocess(repo, ms, wctx, labels=labels)
# the driver might leave some files unresolved
@@ -1850,8 +1856,9 @@
if not force and (wc.files() or wc.deleted()):
raise error.Abort(_("uncommitted changes"),
hint=_("use 'hg status' to list changes"))
- for s in sorted(wc.substate):
- wc.sub(s).bailifchanged()
+ if not wc.isinmemory():
+ for s in sorted(wc.substate):
+ wc.sub(s).bailifchanged()
elif not overwrite:
if p1 == p2: # no-op update
@@ -1966,7 +1973,7 @@
### apply phase
if not branchmerge: # just jump to the new rev
fp1, fp2, xp1, xp2 = fp2, nullid, xp2, ''
- if not partial:
+ if not partial and not wc.isinmemory():
repo.hook('preupdate', throw=True, parent1=xp1, parent2=xp2)
# note that we're in the middle of an update
repo.vfs.write('updatestate', p2.hex())
@@ -2004,9 +2011,8 @@
'see "hg help -e fsmonitor")\n'))
stats = applyupdates(repo, actions, wc, p2, overwrite, labels=labels)
- wc.flushall()
- if not partial:
+ if not partial and not wc.isinmemory():
with repo.dirstate.parentchange():
repo.setparents(fp1, fp2)
recordupdates(repo, actions, branchmerge)
diff -r 87676e8ee056 -r 27b6df1b5adb mercurial/minifileset.py
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/mercurial/minifileset.py Mon Jan 22 17:53:02 2018 -0500
@@ -0,0 +1,85 @@
+# minifileset.py - a simple language to select files
+#
+# Copyright 2017 Facebook, Inc.
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2 or any later version.
+
+from __future__ import absolute_import
+
+from .i18n import _
+from . import (
+ error,
+ fileset,
+)
+
+def _compile(tree):
+ if not tree:
+ raise error.ParseError(_("missing argument"))
+ op = tree[0]
+ if op in {'symbol', 'string', 'kindpat'}:
+ name = fileset.getpattern(tree, {'path'}, _('invalid file pattern'))
+ if name.startswith('**'): # file extension test, ex. "**.tar.gz"
+ ext = name[2:]
+ for c in ext:
+ if c in '*{}[]?/\\':
+ raise error.ParseError(_('reserved character: %s') % c)
+ return lambda n, s: n.endswith(ext)
+ elif name.startswith('path:'): # directory or full path test
+ p = name[5:] # prefix
+ pl = len(p)
+ f = lambda n, s: n.startswith(p) and (len(n) == pl or n[pl] == '/')
+ return f
+ raise error.ParseError(_("unsupported file pattern"),
+ hint=_('paths must be prefixed with "path:"'))
+ elif op == 'or':
+ func1 = _compile(tree[1])
+ func2 = _compile(tree[2])
+ return lambda n, s: func1(n, s) or func2(n, s)
+ elif op == 'and':
+ func1 = _compile(tree[1])
+ func2 = _compile(tree[2])
+ return lambda n, s: func1(n, s) and func2(n, s)
+ elif op == 'not':
+ return lambda n, s: not _compile(tree[1])(n, s)
+ elif op == 'group':
+ return _compile(tree[1])
+ elif op == 'func':
+ symbols = {
+ 'all': lambda n, s: True,
+ 'none': lambda n, s: False,
+ 'size': lambda n, s: fileset.sizematcher(tree[2])(s),
+ }
+
+ name = fileset.getsymbol(tree[1])
+ if name in symbols:
+ return symbols[name]
+
+ raise error.UnknownIdentifier(name, symbols.keys())
+ elif op == 'minus': # equivalent to 'x and not y'
+ func1 = _compile(tree[1])
+ func2 = _compile(tree[2])
+ return lambda n, s: func1(n, s) and not func2(n, s)
+ elif op == 'negate':
+ raise error.ParseError(_("can't use negate operator in this context"))
+ elif op == 'list':
+ raise error.ParseError(_("can't use a list in this context"),
+ hint=_('see hg help "filesets.x or y"'))
+ raise error.ProgrammingError('illegal tree: %r' % (tree,))
+
+def compile(text):
+ """generate a function (path, size) -> bool from filter specification.
+
+ "text" could contain the operators defined by the fileset language for
+ common logic operations, and parenthesis for grouping. The supported path
+ tests are '**.extname' for file extension test, and '"path:dir/subdir"'
+ for prefix test. The ``size()`` predicate is borrowed from filesets to test
+ file size. The predicates ``all()`` and ``none()`` are also supported.
+
+ '(**.php & size(">10MB")) | **.zip | (path:bin & !path:bin/README)' for
+ example, will catch all php files whose size is greater than 10 MB, all
+ files whose name ends with ".zip", and all files under "bin" in the repo
+ root except for "bin/README".
+ """
+ tree = fileset.parse(text)
+ return _compile(tree)
diff -r 87676e8ee056 -r 27b6df1b5adb mercurial/namespaces.py
--- a/mercurial/namespaces.py Mon Jan 08 16:07:51 2018 -0800
+++ b/mercurial/namespaces.py Mon Jan 22 17:53:02 2018 -0500
@@ -25,6 +25,7 @@
def __init__(self):
self._names = util.sortdict()
+ columns = templatekw.getlogcolumns()
# we need current mercurial named objects (bookmarks, tags, and
# branches) to be initialized somewhere, so that place is here
@@ -32,8 +33,7 @@
bmknamemap = lambda repo, name: tolist(repo._bookmarks.get(name))
bmknodemap = lambda repo, node: repo.nodebookmarks(node)
n = namespace("bookmarks", templatename="bookmark",
- # i18n: column positioning for "hg log"
- logfmt=_("bookmark: %s\n"),
+ logfmt=columns['bookmark'],
listnames=bmknames,
namemap=bmknamemap, nodemap=bmknodemap,
builtin=True)
@@ -43,8 +43,7 @@
tagnamemap = lambda repo, name: tolist(repo._tagscache.tags.get(name))
tagnodemap = lambda repo, node: repo.nodetags(node)
n = namespace("tags", templatename="tag",
- # i18n: column positioning for "hg log"
- logfmt=_("tag: %s\n"),
+ logfmt=columns['tag'],
listnames=tagnames,
namemap=tagnamemap, nodemap=tagnodemap,
deprecated={'tip'},
@@ -55,8 +54,7 @@
bnamemap = lambda repo, name: tolist(repo.branchtip(name, True))
bnodemap = lambda repo, node: [repo[node].branch()]
n = namespace("branches", templatename="branch",
- # i18n: column positioning for "hg log"
- logfmt=_("branch: %s\n"),
+ logfmt=columns['branch'],
listnames=bnames,
namemap=bnamemap, nodemap=bnodemap,
builtin=True)
diff -r 87676e8ee056 -r 27b6df1b5adb mercurial/obsolete.py
--- a/mercurial/obsolete.py Mon Jan 08 16:07:51 2018 -0800
+++ b/mercurial/obsolete.py Mon Jan 22 17:53:02 2018 -0500
@@ -776,7 +776,7 @@
# rely on obsstore class default when possible.
kwargs = {}
if defaultformat is not None:
- kwargs['defaultformat'] = defaultformat
+ kwargs[r'defaultformat'] = defaultformat
readonly = not isenabled(repo, createmarkersopt)
store = obsstore(repo.svfs, readonly=readonly, **kwargs)
if store and readonly:
@@ -838,18 +838,10 @@
repo.ui.warn(_('unexpected old value for %r') % key)
return False
data = util.b85decode(new)
- lock = repo.lock()
- try:
- tr = repo.transaction('pushkey: obsolete markers')
- try:
- repo.obsstore.mergemarkers(tr, data)
- repo.invalidatevolatilesets()
- tr.close()
- return True
- finally:
- tr.release()
- finally:
- lock.release()
+ with repo.lock(), repo.transaction('pushkey: obsolete markers') as tr:
+ repo.obsstore.mergemarkers(tr, data)
+ repo.invalidatevolatilesets()
+ return True
# keep compatibility for the 4.3 cycle
def allprecursors(obsstore, nodes, ignoreflags=0):
@@ -994,10 +986,10 @@
public = phases.public
cl = repo.changelog
torev = cl.nodemap.get
- for ctx in repo.set('(not public()) and (not obsolete())'):
- rev = ctx.rev()
+ tonode = cl.node
+ for rev in repo.revs('(not public()) and (not obsolete())'):
# We only evaluate mutable, non-obsolete revision
- node = ctx.node()
+ node = tonode(rev)
# (future) A cache of predecessors may worth if split is very common
for pnode in obsutil.allpredecessors(repo.obsstore, [node],
ignoreflags=bumpedfix):
@@ -1023,8 +1015,10 @@
divergent = set()
obsstore = repo.obsstore
newermap = {}
- for ctx in repo.set('(not public()) - obsolete()'):
- mark = obsstore.predecessors.get(ctx.node(), ())
+ tonode = repo.changelog.node
+ for rev in repo.revs('(not public()) - obsolete()'):
+ node = tonode(rev)
+ mark = obsstore.predecessors.get(node, ())
toprocess = set(mark)
seen = set()
while toprocess:
@@ -1036,7 +1030,7 @@
obsutil.successorssets(repo, prec, cache=newermap)
newer = [n for n in newermap[prec] if n]
if len(newer) > 1:
- divergent.add(ctx.rev())
+ divergent.add(rev)
break
toprocess.update(obsstore.predecessors.get(prec, ()))
return divergent
@@ -1079,8 +1073,7 @@
saveeffectflag = repo.ui.configbool('experimental',
'evolution.effect-flags')
- tr = repo.transaction('add-obsolescence-marker')
- try:
+ with repo.transaction('add-obsolescence-marker') as tr:
markerargs = []
for rel in relations:
prec = rel[0]
@@ -1121,6 +1114,3 @@
date=date, metadata=localmetadata,
ui=repo.ui)
repo.filteredrevcache.clear()
- tr.close()
- finally:
- tr.release()
diff -r 87676e8ee056 -r 27b6df1b5adb mercurial/obsutil.py
--- a/mercurial/obsutil.py Mon Jan 08 16:07:51 2018 -0800
+++ b/mercurial/obsutil.py Mon Jan 22 17:53:02 2018 -0500
@@ -9,9 +9,11 @@
import re
+from .i18n import _
from . import (
+ node as nodemod,
phases,
- util
+ util,
)
class marker(object):
@@ -441,12 +443,12 @@
public = phases.public
addedmarkers = tr.changes.get('obsmarkers')
addedrevs = tr.changes.get('revs')
- seenrevs = set(addedrevs)
+ seenrevs = set()
obsoleted = set()
for mark in addedmarkers:
node = mark[0]
rev = torev(node)
- if rev is None or rev in seenrevs:
+ if rev is None or rev in seenrevs or rev in addedrevs:
continue
seenrevs.add(rev)
if phase(repo, rev) == public:
@@ -751,8 +753,35 @@
return values
-def successorsetverb(successorset):
- """ Return the verb summarizing the successorset
+def _getobsfate(successorssets):
+ """ Compute a changeset obsolescence fate based on its successorssets.
+ Successors can be the tipmost ones or the immediate ones. This function's
+ return values are not meant to be shown directly to users; it is meant to
+ be used by internal functions only.
+ Returns one fate from the following values:
+ - pruned
+ - diverged
+ - superseded
+ - superseded_split
+ """
+
+ if len(successorssets) == 0:
+ # The commit has been pruned
+ return 'pruned'
+ elif len(successorssets) > 1:
+ return 'diverged'
+ else:
+ # No divergence, only one set of successors
+ successors = successorssets[0]
+
+ if len(successors) == 1:
+ return 'superseded'
+ else:
+ return 'superseded_split'
+
+def obsfateverb(successorset, markers):
+ """ Return the verb summarizing the successorset and potentially using
+ information from the markers
"""
if not successorset:
verb = 'pruned'
@@ -795,7 +824,7 @@
line = []
# Verb
- line.append(successorsetverb(successors))
+ line.append(obsfateverb(successors, markers))
# Operations
operations = markersoperations(markers)
@@ -835,3 +864,43 @@
line.append(" (between %s and %s)" % (fmtmin_date, fmtmax_date))
return "".join(line)
+
+
+filteredmsgtable = {
+ "pruned": _("hidden revision '%s' is pruned"),
+ "diverged": _("hidden revision '%s' has diverged"),
+ "superseded": _("hidden revision '%s' was rewritten as: %s"),
+ "superseded_split": _("hidden revision '%s' was split as: %s"),
+ "superseded_split_several": _("hidden revision '%s' was split as: %s and "
+ "%d more"),
+}
+
+def _getfilteredreason(repo, changeid, ctx):
+ """return a human-friendly string on why a obsolete changeset is hidden
+ """
+ successors = successorssets(repo, ctx.node())
+ fate = _getobsfate(successors)
+
+ # Be more precise in case the revision is superseded
+ if fate == 'pruned':
+ return filteredmsgtable['pruned'] % changeid
+ elif fate == 'diverged':
+ return filteredmsgtable['diverged'] % changeid
+ elif fate == 'superseded':
+ single_successor = nodemod.short(successors[0][0])
+ return filteredmsgtable['superseded'] % (changeid, single_successor)
+ elif fate == 'superseded_split':
+
+ succs = []
+ for node_id in successors[0]:
+ succs.append(nodemod.short(node_id))
+
+ if len(succs) <= 2:
+ fmtsuccs = ', '.join(succs)
+ return filteredmsgtable['superseded_split'] % (changeid, fmtsuccs)
+ else:
+ firstsuccessors = ', '.join(succs[:2])
+ remainingnumber = len(succs) - 2
+
+ args = (changeid, firstsuccessors, remainingnumber)
+ return filteredmsgtable['superseded_split_several'] % args
diff -r 87676e8ee056 -r 27b6df1b5adb mercurial/patch.py
--- a/mercurial/patch.py Mon Jan 08 16:07:51 2018 -0800
+++ b/mercurial/patch.py Mon Jan 22 17:53:02 2018 -0500
@@ -10,7 +10,9 @@
import collections
import copy
+import difflib
import email
+import email.parser as emailparser
import errno
import hashlib
import os
@@ -45,6 +47,7 @@
gitre = re.compile(br'diff --git a/(.*) b/(.*)')
tabsplitter = re.compile(br'(\t+|[^\t]+)')
+_nonwordre = re.compile(br'([^a-zA-Z0-9_\x80-\xff])')
PatchError = error.PatchError
@@ -106,7 +109,7 @@
cur.append(line)
c = chunk(cur)
- m = email.Parser.Parser().parse(c)
+ m = emailparser.Parser().parse(c)
if not m.is_multipart():
yield msgfp(m)
else:
@@ -149,6 +152,8 @@
raise StopIteration
return l
+ __next__ = next
+
inheader = False
cur = []
@@ -203,7 +208,7 @@
# attempt to detect the start of a patch
# (this heuristic is borrowed from quilt)
- diffre = re.compile(br'^(?:Index:[ \t]|diff[ \t]|RCS file: |'
+ diffre = re.compile(br'^(?:Index:[ \t]|diff[ \t]-|RCS file: |'
br'retrieving revision [0-9]+(\.[0-9]+)*$|'
br'---[ \t].*?^\+\+\+[ \t]|'
br'\*\*\*[ \t].*?^---[ \t])',
@@ -213,7 +218,7 @@
fd, tmpname = tempfile.mkstemp(prefix='hg-patch-')
tmpfp = os.fdopen(fd, pycompat.sysstr('w'))
try:
- msg = email.Parser.Parser().parse(fileobj)
+ msg = emailparser.Parser().parse(fileobj)
subject = msg['Subject'] and mail.headdecode(msg['Subject'])
data['user'] = msg['From'] and mail.headdecode(msg['From'])
@@ -997,16 +1002,26 @@
def getmessages():
return {
'multiple': {
+ 'apply': _("apply change %d/%d to '%s'?"),
'discard': _("discard change %d/%d to '%s'?"),
'record': _("record change %d/%d to '%s'?"),
- 'revert': _("revert change %d/%d to '%s'?"),
},
'single': {
+ 'apply': _("apply this change to '%s'?"),
'discard': _("discard this change to '%s'?"),
'record': _("record this change to '%s'?"),
- 'revert': _("revert this change to '%s'?"),
},
'help': {
+ 'apply': _('[Ynesfdaq?]'
+ '$$ &Yes, apply this change'
+ '$$ &No, skip this change'
+ '$$ &Edit this change manually'
+ '$$ &Skip remaining changes to this file'
+ '$$ Apply remaining changes to this &file'
+ '$$ &Done, skip remaining changes and files'
+ '$$ Apply &all changes to all remaining files'
+ '$$ &Quit, applying no changes'
+ '$$ &? (display help)'),
'discard': _('[Ynesfdaq?]'
'$$ &Yes, discard this change'
'$$ &No, skip this change'
@@ -1027,16 +1042,6 @@
'$$ Record &all changes to all remaining files'
'$$ &Quit, recording no changes'
'$$ &? (display help)'),
- 'revert': _('[Ynesfdaq?]'
- '$$ &Yes, revert this change'
- '$$ &No, skip this change'
- '$$ &Edit this change manually'
- '$$ &Skip remaining changes to this file'
- '$$ Revert remaining changes to this &file'
- '$$ &Done, skip remaining changes and files'
- '$$ Revert &all changes to all remaining files'
- '$$ &Quit, reverting no changes'
- '$$ &? (display help)')
}
}
@@ -1990,14 +1995,16 @@
return _applydiff(ui, fp, patchfile, backend, store, strip=strip,
prefix=prefix, eolmode=eolmode)
+def _canonprefix(repo, prefix):
+ if prefix:
+ prefix = pathutil.canonpath(repo.root, repo.getcwd(), prefix)
+ if prefix != '':
+ prefix += '/'
+ return prefix
+
def _applydiff(ui, fp, patcher, backend, store, strip=1, prefix='',
eolmode='strict'):
-
- if prefix:
- prefix = pathutil.canonpath(backend.repo.root, backend.repo.getcwd(),
- prefix)
- if prefix != '':
- prefix += '/'
+ prefix = _canonprefix(backend.repo, prefix)
def pstrip(p):
return pathtransform(p, strip - 1, prefix)[1]
@@ -2183,20 +2190,22 @@
return internalpatch(ui, repo, patchname, strip, prefix, files, eolmode,
similarity)
-def changedfiles(ui, repo, patchpath, strip=1):
+def changedfiles(ui, repo, patchpath, strip=1, prefix=''):
backend = fsbackend(ui, repo.root)
+ prefix = _canonprefix(repo, prefix)
with open(patchpath, 'rb') as fp:
changed = set()
for state, values in iterhunks(fp):
if state == 'file':
afile, bfile, first_hunk, gp = values
if gp:
- gp.path = pathtransform(gp.path, strip - 1, '')[1]
+ gp.path = pathtransform(gp.path, strip - 1, prefix)[1]
if gp.oldpath:
- gp.oldpath = pathtransform(gp.oldpath, strip - 1, '')[1]
+ gp.oldpath = pathtransform(gp.oldpath, strip - 1,
+ prefix)[1]
else:
gp = makepatchmeta(backend, afile, bfile, first_hunk, strip,
- '')
+ prefix)
changed.add(gp.path)
if gp.op == 'RENAME':
changed.add(gp.oldpath)
@@ -2246,6 +2255,7 @@
'showfunc': get('show_function', 'showfunc'),
'context': get('unified', getter=ui.config),
}
+ buildopts['worddiff'] = ui.configbool('experimental', 'worddiff')
if git:
buildopts['git'] = get('git')
@@ -2434,7 +2444,7 @@
modified = sorted(modifiedset)
added = sorted(addedset)
removed = sorted(removedset)
- for dst, src in copy.items():
+ for dst, src in list(copy.items()):
if src not in ctx1:
# Files merged in during a merge and then copied/renamed are
# reported as copies. We want to show them in the diff as additions.
@@ -2457,6 +2467,9 @@
def difflabel(func, *args, **kw):
'''yields 2-tuples of (output, label) based on the output of func()'''
+ inlinecolor = False
+ if kw.get(r'opts'):
+ inlinecolor = kw[r'opts'].worddiff
headprefixes = [('diff', 'diff.diffline'),
('copy', 'diff.extended'),
('rename', 'diff.extended'),
@@ -2473,6 +2486,9 @@
head = False
for chunk in func(*args, **kw):
lines = chunk.split('\n')
+ matches = {}
+ if inlinecolor:
+ matches = _findmatches(lines)
for i, line in enumerate(lines):
if i != 0:
yield ('\n', '')
@@ -2496,11 +2512,17 @@
for prefix, label in prefixes:
if stripline.startswith(prefix):
if diffline:
- for token in tabsplitter.findall(stripline):
- if '\t' == token[0]:
- yield (token, 'diff.tab')
- else:
- yield (token, label)
+ if i in matches:
+ for t, l in _inlinediff(lines[i].rstrip(),
+ lines[matches[i]].rstrip(),
+ label):
+ yield (t, l)
+ else:
+ for token in tabsplitter.findall(stripline):
+ if '\t' == token[0]:
+ yield (token, 'diff.tab')
+ else:
+ yield (token, label)
else:
yield (stripline, label)
break
@@ -2509,6 +2531,75 @@
if line != stripline:
yield (line[len(stripline):], 'diff.trailingwhitespace')
+def _findmatches(slist):
+ '''Look for insertion matches to deletions and return a dict of
+ correspondences.
+ '''
+ lastmatch = 0
+ matches = {}
+ for i, line in enumerate(slist):
+ if line == '':
+ continue
+ if line[0] == '-':
+ lastmatch = max(lastmatch, i)
+ newgroup = False
+ for j, newline in enumerate(slist[lastmatch + 1:]):
+ if newline == '':
+ continue
+ if newline[0] == '-' and newgroup: # too far, no match
+ break
+ if newline[0] == '+': # potential match
+ newgroup = True
+ sim = difflib.SequenceMatcher(None, line, newline).ratio()
+ if sim > 0.7:
+ lastmatch = lastmatch + 1 + j
+ matches[i] = lastmatch
+ matches[lastmatch] = i
+ break
+ return matches
+
+def _inlinediff(s1, s2, operation):
+ '''Perform string diff to highlight specific changes.'''
+ operation_skip = '+?' if operation == 'diff.deleted' else '-?'
+ if operation == 'diff.deleted':
+ s2, s1 = s1, s2
+
+ buff = []
+ # we never want to highlight the leading +-
+ if operation == 'diff.deleted' and s2.startswith('-'):
+ label = operation
+ token = '-'
+ s2 = s2[1:]
+ s1 = s1[1:]
+ elif operation == 'diff.inserted' and s1.startswith('+'):
+ label = operation
+ token = '+'
+ s2 = s2[1:]
+ s1 = s1[1:]
+ else:
+ raise error.ProgrammingError("Case not expected, operation = %s" %
+ operation)
+
+ s = difflib.ndiff(_nonwordre.split(s2), _nonwordre.split(s1))
+ for part in s:
+ if part[0] in operation_skip or len(part) == 2:
+ continue
+ l = operation + '.highlight'
+ if part[0] in ' ':
+ l = operation
+ if part[2:] == '\t':
+ l = 'diff.tab'
+ if l == label: # contiguous token with same label
+ token += part[2:]
+ continue
+ else:
+ buff.append((token, label))
+ label = l
+ token = part[2:]
+ buff.append((token, label))
+
+ return buff
+
def diffui(*args, **kw):
'''like diff(), but yields 2-tuples of (output, label) for ui.write()'''
return difflabel(diff, *args, **kw)
@@ -2564,7 +2655,7 @@
l = len(text)
s = hashlib.sha1('blob %d\0' % l)
s.update(text)
- return s.hexdigest()
+ return hex(s.digest())
if opts.noprefix:
aprefix = bprefix = ''
diff -r 87676e8ee056 -r 27b6df1b5adb mercurial/phases.py
--- a/mercurial/phases.py Mon Jan 08 16:07:51 2018 -0800
+++ b/mercurial/phases.py Mon Jan 22 17:53:02 2018 -0500
@@ -115,6 +115,7 @@
)
from . import (
error,
+ pycompat,
smartset,
txnutil,
util,
@@ -202,31 +203,43 @@
if _load:
# Cheap trick to allow shallow-copy without copy module
self.phaseroots, self.dirty = _readroots(repo, phasedefaults)
- self._phaserevs = None
+ self._loadedrevslen = 0
self._phasesets = None
self.filterunknown(repo)
self.opener = repo.svfs
- def getrevset(self, repo, phases):
+ def getrevset(self, repo, phases, subset=None):
"""return a smartset for the given phases"""
self.loadphaserevs(repo) # ensure phase's sets are loaded
-
- if self._phasesets and all(self._phasesets[p] is not None
- for p in phases):
- # fast path - use _phasesets
- revs = self._phasesets[phases[0]]
- if len(phases) > 1:
- revs = revs.copy() # only copy when needed
- for p in phases[1:]:
- revs.update(self._phasesets[p])
+ phases = set(phases)
+ if public not in phases:
+ # fast path: _phasesets contains the interesting sets,
+ # might only need a union and post-filtering.
+ if len(phases) == 1:
+ [p] = phases
+ revs = self._phasesets[p]
+ else:
+ revs = set.union(*[self._phasesets[p] for p in phases])
if repo.changelog.filteredrevs:
revs = revs - repo.changelog.filteredrevs
- return smartset.baseset(revs)
+ if subset is None:
+ return smartset.baseset(revs)
+ else:
+ return subset & smartset.baseset(revs)
else:
- # slow path - enumerate all revisions
- phase = self.phase
- revs = (r for r in repo if phase(repo, r) in phases)
- return smartset.generatorset(revs, iterasc=True)
+ phases = set(allphases).difference(phases)
+ if not phases:
+ return smartset.fullreposet(repo)
+ if len(phases) == 1:
+ [p] = phases
+ revs = self._phasesets[p]
+ else:
+ revs = set.union(*[self._phasesets[p] for p in phases])
+ if subset is None:
+ subset = smartset.fullreposet(repo)
+ if not revs:
+ return subset
+ return subset.filter(lambda r: r not in revs)
def copy(self):
# Shallow copy meant to ensure isolation in
@@ -235,13 +248,14 @@
ph.phaseroots = self.phaseroots[:]
ph.dirty = self.dirty
ph.opener = self.opener
- ph._phaserevs = self._phaserevs
+ ph._loadedrevslen = self._loadedrevslen
ph._phasesets = self._phasesets
return ph
def replace(self, phcache):
"""replace all values in 'self' with content of phcache"""
- for a in ('phaseroots', 'dirty', 'opener', '_phaserevs', '_phasesets'):
+ for a in ('phaseroots', 'dirty', 'opener', '_loadedrevslen',
+ '_phasesets'):
setattr(self, a, getattr(phcache, a))
def _getphaserevsnative(self, repo):
@@ -253,42 +267,38 @@
def _computephaserevspure(self, repo):
repo = repo.unfiltered()
- revs = [public] * len(repo.changelog)
- self._phaserevs = revs
- self._populatephaseroots(repo)
- for phase in trackedphases:
- roots = list(map(repo.changelog.rev, self.phaseroots[phase]))
- if roots:
- for rev in roots:
- revs[rev] = phase
- for rev in repo.changelog.descendants(roots):
- revs[rev] = phase
+ cl = repo.changelog
+ self._phasesets = [set() for phase in allphases]
+ roots = pycompat.maplist(cl.rev, self.phaseroots[secret])
+ if roots:
+ ps = set(cl.descendants(roots))
+ for root in roots:
+ ps.add(root)
+ self._phasesets[secret] = ps
+ roots = pycompat.maplist(cl.rev, self.phaseroots[draft])
+ if roots:
+ ps = set(cl.descendants(roots))
+ for root in roots:
+ ps.add(root)
+ ps.difference_update(self._phasesets[secret])
+ self._phasesets[draft] = ps
+ self._loadedrevslen = len(cl)
def loadphaserevs(self, repo):
"""ensure phase information is loaded in the object"""
- if self._phaserevs is None:
+ if self._phasesets is None:
try:
res = self._getphaserevsnative(repo)
- self._phaserevs, self._phasesets = res
+ self._loadedrevslen, self._phasesets = res
except AttributeError:
self._computephaserevspure(repo)
def invalidate(self):
- self._phaserevs = None
+ self._loadedrevslen = 0
self._phasesets = None
- def _populatephaseroots(self, repo):
- """Fills the _phaserevs cache with phases for the roots.
- """
- cl = repo.changelog
- phaserevs = self._phaserevs
- for phase in trackedphases:
- roots = map(cl.rev, self.phaseroots[phase])
- for root in roots:
- phaserevs[root] = phase
-
def phase(self, repo, rev):
- # We need a repo argument here to be able to build _phaserevs
+ # We need a repo argument here to be able to build _phasesets
# if necessary. The repository instance is not stored in
# phasecache to avoid reference cycles. The changelog instance
# is not stored because it is a filecache() property and can
@@ -297,10 +307,13 @@
return public
if rev < nullrev:
raise ValueError(_('cannot lookup negative revision'))
- if self._phaserevs is None or rev >= len(self._phaserevs):
+ if rev >= self._loadedrevslen:
self.invalidate()
self.loadphaserevs(repo)
- return self._phaserevs[rev]
+ for phase in trackedphases:
+ if rev in self._phasesets[phase]:
+ return phase
+ return public
def write(self):
if not self.dirty:
@@ -455,10 +468,10 @@
if filtered:
self.dirty = True
# filterunknown is called by repo.destroyed, we may have no changes in
- # root but phaserevs contents is certainly invalid (or at least we
+ # root but _phasesets contents is certainly invalid (or at least we
# have not proper way to check that). related to issue 3858.
#
- # The other caller is __init__ that have no _phaserevs initialized
+ # The other caller is __init__ that have no _phasesets initialized
# anyway. If this change we should consider adding a dedicated
# "destroyed" function to phasecache or a proper cache key mechanism
# (see branchmap one)
diff -r 87676e8ee056 -r 27b6df1b5adb mercurial/policy.py
--- a/mercurial/policy.py Mon Jan 08 16:07:51 2018 -0800
+++ b/mercurial/policy.py Mon Jan 22 17:53:02 2018 -0500
@@ -74,8 +74,8 @@
(r'cext', r'bdiff'): 1,
(r'cext', r'diffhelpers'): 1,
(r'cext', r'mpatch'): 1,
- (r'cext', r'osutil'): 1,
- (r'cext', r'parsers'): 3,
+ (r'cext', r'osutil'): 3,
+ (r'cext', r'parsers'): 4,
}
# map import request to other package or module
diff -r 87676e8ee056 -r 27b6df1b5adb mercurial/posix.py
--- a/mercurial/posix.py Mon Jan 08 16:07:51 2018 -0800
+++ b/mercurial/posix.py Mon Jan 22 17:53:02 2018 -0500
@@ -24,9 +24,12 @@
from . import (
encoding,
error,
+ policy,
pycompat,
)
+osutil = policy.importmod(r'osutil')
+
posixfile = open
normpath = os.path.normpath
samestat = os.path.samestat
@@ -302,6 +305,20 @@
Returns None if the path is ok, or a UI string describing the problem.'''
return None # on posix platforms, every path is ok
+def getfsmountpoint(dirpath):
+ '''Get the filesystem mount point from a directory (best-effort)
+
+ Returns None if we are unsure. Raises OSError on ENOENT, EPERM, etc.
+ '''
+ return getattr(osutil, 'getfsmountpoint', lambda x: None)(dirpath)
+
+def getfstype(dirpath):
+ '''Get the filesystem type name from a directory (best-effort)
+
+ Returns None if we are unsure. Raises OSError on ENOENT, EPERM, etc.
+ '''
+ return getattr(osutil, 'getfstype', lambda x: None)(dirpath)
+
def setbinary(fd):
pass
diff -r 87676e8ee056 -r 27b6df1b5adb mercurial/pycompat.py
--- a/mercurial/pycompat.py Mon Jan 08 16:07:51 2018 -0800
+++ b/mercurial/pycompat.py Mon Jan 22 17:53:02 2018 -0500
@@ -63,6 +63,7 @@
sysexecutable = os.fsencode(sysexecutable)
stringio = io.BytesIO
maplist = lambda *args: list(map(*args))
+ ziplist = lambda *args: list(zip(*args))
rawinput = input
# TODO: .buffer might not exist if std streams were replaced; we'll need
@@ -214,7 +215,7 @@
def open(name, mode='r', buffering=-1):
return builtins.open(name, sysstr(mode), buffering)
- def getoptb(args, shortlist, namelist):
+ def _getoptbwrapper(orig, args, shortlist, namelist):
"""
Takes bytes arguments, converts them to unicode, pass them to
getopt.getopt(), convert the returned values back to bytes and then
@@ -224,7 +225,7 @@
args = [a.decode('latin-1') for a in args]
shortlist = shortlist.decode('latin-1')
namelist = [a.decode('latin-1') for a in namelist]
- opts, args = getopt.getopt(args, shortlist, namelist)
+ opts, args = orig(args, shortlist, namelist)
opts = [(a[0].encode('latin-1'), a[1].encode('latin-1'))
for a in opts]
args = [a.encode('latin-1') for a in args]
@@ -291,8 +292,8 @@
def getdoc(obj):
return getattr(obj, '__doc__', None)
- def getoptb(args, shortlist, namelist):
- return getopt.getopt(args, shortlist, namelist)
+ def _getoptbwrapper(orig, args, shortlist, namelist):
+ return orig(args, shortlist, namelist)
strkwargs = identity
byteskwargs = identity
@@ -313,6 +314,7 @@
shlexsplit = shlex.split
stringio = cStringIO.StringIO
maplist = map
+ ziplist = zip
rawinput = raw_input
isjython = sysplatform.startswith('java')
@@ -320,3 +322,9 @@
isdarwin = sysplatform == 'darwin'
isposix = osname == 'posix'
iswindows = osname == 'nt'
+
+def getoptb(args, shortlist, namelist):
+ return _getoptbwrapper(getopt.getopt, args, shortlist, namelist)
+
+def gnugetoptb(args, shortlist, namelist):
+ return _getoptbwrapper(getopt.gnu_getopt, args, shortlist, namelist)
diff -r 87676e8ee056 -r 27b6df1b5adb mercurial/registrar.py
--- a/mercurial/registrar.py Mon Jan 08 16:07:51 2018 -0800
+++ b/mercurial/registrar.py Mon Jan 22 17:53:02 2018 -0500
@@ -112,35 +112,53 @@
The created object can be used as a decorator for adding commands to
that command table. This accepts multiple arguments to define a command.
- The first argument is the command name.
+ The first argument is the command name (as bytes).
- The options argument is an iterable of tuples defining command arguments.
- See ``mercurial.fancyopts.fancyopts()`` for the format of each tuple.
+ The `options` keyword argument is an iterable of tuples defining command
+ arguments. See ``mercurial.fancyopts.fancyopts()`` for the format of each
+ tuple.
- The synopsis argument defines a short, one line summary of how to use the
+ The `synopsis` argument defines a short, one line summary of how to use the
command. This shows up in the help output.
- The norepo argument defines whether the command does not require a
+ There are three arguments that control what repository (if any) is found
+ and passed to the decorated function: `norepo`, `optionalrepo`, and
+ `inferrepo`.
+
+ The `norepo` argument defines whether the command does not require a
local repository. Most commands operate against a repository, thus the
- default is False.
+ default is False. When True, no repository will be passed.
- The optionalrepo argument defines whether the command optionally requires
- a local repository.
+ The `optionalrepo` argument defines whether the command optionally requires
+ a local repository. If no repository can be found, None will be passed
+ to the decorated function.
- The inferrepo argument defines whether to try to find a repository from the
- command line arguments. If True, arguments will be examined for potential
- repository locations. See ``findrepo()``. If a repository is found, it
- will be used.
+ The `inferrepo` argument defines whether to try to find a repository from
+ the command line arguments. If True, arguments will be examined for
+ potential repository locations. See ``findrepo()``. If a repository is
+ found, it will be used and passed to the decorated function.
There are three constants in the class which tells what type of the command
that is. That information will be helpful at various places. It will be also
be used to decide what level of access the command has on hidden commits.
The constants are:
- unrecoverablewrite is for those write commands which can't be recovered like
- push.
- recoverablewrite is for write commands which can be recovered like commit.
- readonly is for commands which are read only.
+ `unrecoverablewrite` is for those write commands which can't be recovered
+ like push.
+ `recoverablewrite` is for write commands which can be recovered like commit.
+ `readonly` is for commands which are read only.
+
+ The signature of the decorated function looks like this:
+ def cmd(ui[, repo] [, <args>] [, <options>])
+
+ `repo` is required if `norepo` is False.
+ `<args>` are positional args (or `*args`) arguments, of non-option
+ arguments from the command line.
+ `<options>` are keyword arguments (or `**options`) of option arguments
+ from the command line.
+
+ See the WritingExtensions and MercurialApi documentation for more exhaustive
+ descriptions and examples.
"""
unrecoverablewrite = "unrecoverable"
diff -r 87676e8ee056 -r 27b6df1b5adb mercurial/repair.py
--- a/mercurial/repair.py Mon Jan 08 16:07:51 2018 -0800
+++ b/mercurial/repair.py Mon Jan 22 17:53:02 2018 -0500
@@ -203,8 +203,9 @@
deleteobsmarkers(repo.obsstore, stripobsidx)
del repo.obsstore
+ repo.invalidatevolatilesets()
+ repo._phasecache.filterunknown(repo)
- repo._phasecache.filterunknown(repo)
if tmpbundlefile:
ui.note(_("adding branch\n"))
f = vfs.open(tmpbundlefile, "rb")
@@ -222,8 +223,6 @@
if not repo.ui.verbose:
repo.ui.popbuffer()
f.close()
- repo._phasecache.invalidate()
-
with repo.transaction('repair') as tr:
bmchanges = [(m, repo[newbmtarget].node()) for m in updatebm]
diff -r 87676e8ee056 -r 27b6df1b5adb mercurial/repoview.py
--- a/mercurial/repoview.py Mon Jan 08 16:07:51 2018 -0800
+++ b/mercurial/repoview.py Mon Jan 22 17:53:02 2018 -0500
@@ -9,11 +9,13 @@
from __future__ import absolute_import
import copy
+import weakref
from .node import nullrev
from . import (
obsolete,
phases,
+ pycompat,
tags as tagsmod,
)
@@ -63,7 +65,7 @@
hidden.remove(p)
stack.append(p)
-def computehidden(repo):
+def computehidden(repo, visibilityexceptions=None):
"""compute the set of hidden revision to filter
During most operation hidden should be filtered."""
@@ -72,6 +74,8 @@
hidden = hideablerevs(repo)
if hidden:
hidden = set(hidden - pinnedrevs(repo))
+ if visibilityexceptions:
+ hidden -= visibilityexceptions
pfunc = repo.changelog.parentrevs
mutablephases = (phases.draft, phases.secret)
mutable = repo._phasecache.getrevset(repo, mutablephases)
@@ -80,7 +84,7 @@
_revealancestors(pfunc, hidden, visible)
return frozenset(hidden)
-def computeunserved(repo):
+def computeunserved(repo, visibilityexceptions=None):
"""compute the set of revision that should be filtered when used a server
Secret and hidden changeset should not pretend to be here."""
@@ -98,7 +102,7 @@
else:
return hiddens
-def computemutable(repo):
+def computemutable(repo, visibilityexceptions=None):
assert not repo.changelog.filteredrevs
# fast check to avoid revset call on huge repo
if any(repo._phasecache.phaseroots[1:]):
@@ -107,7 +111,7 @@
return frozenset(r for r in maymutable if getphase(repo, r))
return frozenset()
-def computeimpactable(repo):
+def computeimpactable(repo, visibilityexceptions=None):
"""Everything impactable by mutable revision
The immutable filter still have some chance to get invalidated. This will
@@ -139,14 +143,21 @@
# Otherwise your filter will have to recompute all its branches cache
# from scratch (very slow).
filtertable = {'visible': computehidden,
+ 'visible-hidden': computehidden,
'served': computeunserved,
'immutable': computemutable,
'base': computeimpactable}
-def filterrevs(repo, filtername):
- """returns set of filtered revision for this filter name"""
+def filterrevs(repo, filtername, visibilityexceptions=None):
+ """returns set of filtered revision for this filter name
+
+ visibilityexceptions is a set of revs which are exceptions for
+ hidden-state and must be visible. They are dynamic and hence we should not
+ cache their result"""
if filtername not in repo.filteredrevcache:
func = filtertable[filtername]
+ if visibilityexceptions:
+ return func(repo.unfiltered(), visibilityexceptions)
repo.filteredrevcache[filtername] = func(repo.unfiltered())
return repo.filteredrevcache[filtername]
@@ -185,11 +196,14 @@
subclasses of `localrepo`. Eg: `bundlerepo` or `statichttprepo`.
"""
- def __init__(self, repo, filtername):
+ def __init__(self, repo, filtername, visibilityexceptions=None):
object.__setattr__(self, r'_unfilteredrepo', repo)
object.__setattr__(self, r'filtername', filtername)
object.__setattr__(self, r'_clcachekey', None)
object.__setattr__(self, r'_clcache', None)
+ # revs which are exceptions and must not be hidden
+ object.__setattr__(self, r'_visibilityexceptions',
+ visibilityexceptions)
# not a propertycache on purpose we shall implement a proper cache later
@property
@@ -205,7 +219,7 @@
unfilen = len(unfiindex) - 1
unfinode = unfiindex[unfilen - 1][7]
- revs = filterrevs(unfi, self.filtername)
+ revs = filterrevs(unfi, self.filtername, self._visibilityexceptions)
cl = self._clcache
newkey = (unfilen, unfinode, hash(revs), unfichangelog._delayed)
# if cl.index is not unfiindex, unfi.changelog would be
@@ -225,11 +239,16 @@
"""Return an unfiltered version of a repo"""
return self._unfilteredrepo
- def filtered(self, name):
+ def filtered(self, name, visibilityexceptions=None):
"""Return a filtered version of a repository"""
- if name == self.filtername:
+ if name == self.filtername and not visibilityexceptions:
return self
- return self.unfiltered().filtered(name)
+ return self.unfiltered().filtered(name, visibilityexceptions)
+
+ def __repr__(self):
+ return r'<%s:%s %r>' % (self.__class__.__name__,
+ pycompat.sysstr(self.filtername),
+ self.unfiltered())
# everything access are forwarded to the proxied repo
def __getattr__(self, attr):
@@ -240,3 +259,16 @@
def __delattr__(self, attr):
return delattr(self._unfilteredrepo, attr)
+
+# Python <3.4 easily leaks types via __mro__. See
+# https://bugs.python.org/issue17950. We cache dynamically created types
+# so they won't be leaked on every invocation of repo.filtered().
+_filteredrepotypes = weakref.WeakKeyDictionary()
+
+def newtype(base):
+ """Create a new type with the repoview mixin and the given base class"""
+ if base not in _filteredrepotypes:
+ class filteredrepo(repoview, base):
+ pass
+ _filteredrepotypes[base] = filteredrepo
+ return _filteredrepotypes[base]
diff -r 87676e8ee056 -r 27b6df1b5adb mercurial/revlog.py
--- a/mercurial/revlog.py Mon Jan 08 16:07:51 2018 -0800
+++ b/mercurial/revlog.py Mon Jan 22 17:53:02 2018 -0500
@@ -33,6 +33,9 @@
wdirrev,
)
from .i18n import _
+from .thirdparty import (
+ attr,
+)
from . import (
ancestor,
error,
@@ -251,6 +254,184 @@
if chunk:
yield chunk
+@attr.s(slots=True, frozen=True)
+class _deltainfo(object):
+ distance = attr.ib()
+ deltalen = attr.ib()
+ data = attr.ib()
+ base = attr.ib()
+ chainbase = attr.ib()
+ chainlen = attr.ib()
+ compresseddeltalen = attr.ib()
+
+class _deltacomputer(object):
+ def __init__(self, revlog):
+ self.revlog = revlog
+
+ def _getcandidaterevs(self, p1, p2, cachedelta):
+ """
+ Provides revisions that present an interest to be diffed against,
+ grouped by level of easiness.
+ """
+ revlog = self.revlog
+ curr = len(revlog)
+ prev = curr - 1
+ p1r, p2r = revlog.rev(p1), revlog.rev(p2)
+
+ # should we try to build a delta?
+ if prev != nullrev and revlog.storedeltachains:
+ tested = set()
+ # This condition is true most of the time when processing
+ # changegroup data into a generaldelta repo. The only time it
+ # isn't true is if this is the first revision in a delta chain
+ # or if ``format.generaldelta=true`` disabled ``lazydeltabase``.
+ if cachedelta and revlog._generaldelta and revlog._lazydeltabase:
+ # Assume what we received from the server is a good choice
+ # build delta will reuse the cache
+ yield (cachedelta[0],)
+ tested.add(cachedelta[0])
+
+ if revlog._generaldelta:
+ # exclude already lazy tested base if any
+ parents = [p for p in (p1r, p2r)
+ if p != nullrev and p not in tested]
+ if parents and not revlog._aggressivemergedeltas:
+ # Pick whichever parent is closer to us (to minimize the
+ # chance of having to build a fulltext).
+ parents = [max(parents)]
+ tested.update(parents)
+ yield parents
+
+ if prev not in tested:
+ # other approach failed try against prev to hopefully save us a
+ # fulltext.
+ yield (prev,)
+
+ def buildtext(self, revinfo, fh):
+ """Builds a fulltext version of a revision
+
+ revinfo: _revisioninfo instance that contains all needed info
+ fh: file handle to either the .i or the .d revlog file,
+ depending on whether it is inlined or not
+ """
+ btext = revinfo.btext
+ if btext[0] is not None:
+ return btext[0]
+
+ revlog = self.revlog
+ cachedelta = revinfo.cachedelta
+ flags = revinfo.flags
+ node = revinfo.node
+
+ baserev = cachedelta[0]
+ delta = cachedelta[1]
+ # special case deltas which replace entire base; no need to decode
+ # base revision. this neatly avoids censored bases, which throw when
+ # they're decoded.
+ hlen = struct.calcsize(">lll")
+ if delta[:hlen] == mdiff.replacediffheader(revlog.rawsize(baserev),
+ len(delta) - hlen):
+ btext[0] = delta[hlen:]
+ else:
+ basetext = revlog.revision(baserev, _df=fh, raw=True)
+ btext[0] = mdiff.patch(basetext, delta)
+
+ try:
+ res = revlog._processflags(btext[0], flags, 'read', raw=True)
+ btext[0], validatehash = res
+ if validatehash:
+ revlog.checkhash(btext[0], node, p1=revinfo.p1, p2=revinfo.p2)
+ if flags & REVIDX_ISCENSORED:
+ raise RevlogError(_('node %s is not censored') % node)
+ except CensoredNodeError:
+ # must pass the censored index flag to add censored revisions
+ if not flags & REVIDX_ISCENSORED:
+ raise
+ return btext[0]
+
+ def _builddeltadiff(self, base, revinfo, fh):
+ revlog = self.revlog
+ t = self.buildtext(revinfo, fh)
+ if revlog.iscensored(base):
+ # deltas based on a censored revision must replace the
+ # full content in one patch, so delta works everywhere
+ header = mdiff.replacediffheader(revlog.rawsize(base), len(t))
+ delta = header + t
+ else:
+ ptext = revlog.revision(base, _df=fh, raw=True)
+ delta = mdiff.textdiff(ptext, t)
+
+ return delta
+
+ def _builddeltainfo(self, revinfo, base, fh):
+ # can we use the cached delta?
+ if revinfo.cachedelta and revinfo.cachedelta[0] == base:
+ delta = revinfo.cachedelta[1]
+ else:
+ delta = self._builddeltadiff(base, revinfo, fh)
+ revlog = self.revlog
+ header, data = revlog.compress(delta)
+ deltalen = len(header) + len(data)
+ chainbase = revlog.chainbase(base)
+ offset = revlog.end(len(revlog) - 1)
+ dist = deltalen + offset - revlog.start(chainbase)
+ if revlog._generaldelta:
+ deltabase = base
+ else:
+ deltabase = chainbase
+ chainlen, compresseddeltalen = revlog._chaininfo(base)
+ chainlen += 1
+ compresseddeltalen += deltalen
+ return _deltainfo(dist, deltalen, (header, data), deltabase,
+ chainbase, chainlen, compresseddeltalen)
+
+ def finddeltainfo(self, revinfo, fh):
+ """Find an acceptable delta against a candidate revision
+
+ revinfo: information about the revision (instance of _revisioninfo)
+ fh: file handle to either the .i or the .d revlog file,
+ depending on whether it is inlined or not
+
+ Returns the first acceptable candidate revision, as ordered by
+ _getcandidaterevs
+ """
+ cachedelta = revinfo.cachedelta
+ p1 = revinfo.p1
+ p2 = revinfo.p2
+ revlog = self.revlog
+
+ deltainfo = None
+ for candidaterevs in self._getcandidaterevs(p1, p2, cachedelta):
+ nominateddeltas = []
+ for candidaterev in candidaterevs:
+ candidatedelta = self._builddeltainfo(revinfo, candidaterev, fh)
+ if revlog._isgooddeltainfo(candidatedelta, revinfo.textlen):
+ nominateddeltas.append(candidatedelta)
+ if nominateddeltas:
+ deltainfo = min(nominateddeltas, key=lambda x: x.deltalen)
+ break
+
+ return deltainfo
+
+@attr.s(slots=True, frozen=True)
+class _revisioninfo(object):
+ """Information about a revision that allows building its fulltext
+ node: expected hash of the revision
+ p1, p2: parent revs of the revision
+ btext: built text cache consisting of a one-element list
+ cachedelta: (baserev, uncompressed_delta) or None
+ flags: flags associated to the revision storage
+
+ One of btext[0] or cachedelta must be set.
+ """
+ node = attr.ib()
+ p1 = attr.ib()
+ p2 = attr.ib()
+ btext = attr.ib()
+ textlen = attr.ib()
+ cachedelta = attr.ib()
+ flags = attr.ib()
+
# index v0:
# 4 bytes: offset
# 4 bytes: compressed length
@@ -622,12 +803,14 @@
def parentrevs(self, rev):
try:
- return self.index[rev][5:7]
+ entry = self.index[rev]
except IndexError:
if rev == wdirrev:
raise error.WdirUnsupported
raise
+ return entry[5], entry[6]
+
def node(self, rev):
try:
return self.index[rev][7]
@@ -1687,7 +1870,7 @@
self._chunkclear()
def addrevision(self, text, transaction, link, p1, p2, cachedelta=None,
- node=None, flags=REVIDX_DEFAULT_FLAGS):
+ node=None, flags=REVIDX_DEFAULT_FLAGS, deltacomputer=None):
"""add a revision to the log
text - the revision data to add
@@ -1699,6 +1882,8 @@
computed by default as hash(text, p1, p2), however subclasses might
use different hashing method (and override checkhash() in such case)
flags - the known flags to set on the revision
+ deltacomputer - an optional _deltacomputer instance shared between
+ multiple calls
"""
if link == nullrev:
raise RevlogError(_("attempted to add linkrev -1 to %s")
@@ -1727,10 +1912,11 @@
self.checkhash(rawtext, node, p1=p1, p2=p2)
return self.addrawrevision(rawtext, transaction, link, p1, p2, node,
- flags, cachedelta=cachedelta)
+ flags, cachedelta=cachedelta,
+ deltacomputer=deltacomputer)
def addrawrevision(self, rawtext, transaction, link, p1, p2, node, flags,
- cachedelta=None):
+ cachedelta=None, deltacomputer=None):
"""add a raw revision with known flags, node and parents
useful when reusing a revision not stored in this revlog (ex: received
over wire, or read from an external bundle).
@@ -1741,7 +1927,8 @@
ifh = self.opener(self.indexfile, "a+", checkambig=self._checkambig)
try:
return self._addrevision(node, rawtext, transaction, link, p1, p2,
- flags, cachedelta, ifh, dfh)
+ flags, cachedelta, ifh, dfh,
+ deltacomputer=deltacomputer)
finally:
if dfh:
dfh.close()
@@ -1817,39 +2004,42 @@
return compressor.decompress(data)
- def _isgooddelta(self, d, textlen):
+ def _isgooddeltainfo(self, d, textlen):
"""Returns True if the given delta is good. Good means that it is within
the disk span, disk size, and chain length bounds that we know to be
performant."""
if d is None:
return False
- # - 'dist' is the distance from the base revision -- bounding it limits
- # the amount of I/O we need to do.
- # - 'compresseddeltalen' is the sum of the total size of deltas we need
- # to apply -- bounding it limits the amount of CPU we consume.
- dist, l, data, base, chainbase, chainlen, compresseddeltalen = d
+ # - 'd.distance' is the distance from the base revision -- bounding it
+ # limits the amount of I/O we need to do.
+ # - 'd.compresseddeltalen' is the sum of the total size of deltas we
+ # need to apply -- bounding it limits the amount of CPU we consume.
defaultmax = textlen * 4
maxdist = self._maxdeltachainspan
if not maxdist:
- maxdist = dist # ensure the conditional pass
+ maxdist = d.distance # ensure the conditional pass
maxdist = max(maxdist, defaultmax)
- if (dist > maxdist or l > textlen or
- compresseddeltalen > textlen * 2 or
- (self._maxchainlen and chainlen > self._maxchainlen)):
+ if (d.distance > maxdist or d.deltalen > textlen or
+ d.compresseddeltalen > textlen * 2 or
+ (self._maxchainlen and d.chainlen > self._maxchainlen)):
return False
return True
def _addrevision(self, node, rawtext, transaction, link, p1, p2, flags,
- cachedelta, ifh, dfh, alwayscache=False):
+ cachedelta, ifh, dfh, alwayscache=False,
+ deltacomputer=None):
"""internal function to add revisions to the log
see addrevision for argument descriptions.
note: "addrevision" takes non-raw text, "_addrevision" takes raw text.
+ if "deltacomputer" is not provided or None, a defaultdeltacomputer will
+ be used.
+
invariants:
- rawtext is optional (can be None); if not set, cachedelta must be set.
if both are set, they must correspond to each other.
@@ -1861,76 +2051,16 @@
raise RevlogError(_("%s: attempt to add wdir revision") %
(self.indexfile))
- btext = [rawtext]
- def buildtext():
- if btext[0] is not None:
- return btext[0]
- baserev = cachedelta[0]
- delta = cachedelta[1]
- # special case deltas which replace entire base; no need to decode
- # base revision. this neatly avoids censored bases, which throw when
- # they're decoded.
- hlen = struct.calcsize(">lll")
- if delta[:hlen] == mdiff.replacediffheader(self.rawsize(baserev),
- len(delta) - hlen):
- btext[0] = delta[hlen:]
- else:
- if self._inline:
- fh = ifh
- else:
- fh = dfh
- basetext = self.revision(baserev, _df=fh, raw=True)
- btext[0] = mdiff.patch(basetext, delta)
+ if self._inline:
+ fh = ifh
+ else:
+ fh = dfh
- try:
- res = self._processflags(btext[0], flags, 'read', raw=True)
- btext[0], validatehash = res
- if validatehash:
- self.checkhash(btext[0], node, p1=p1, p2=p2)
- if flags & REVIDX_ISCENSORED:
- raise RevlogError(_('node %s is not censored') % node)
- except CensoredNodeError:
- # must pass the censored index flag to add censored revisions
- if not flags & REVIDX_ISCENSORED:
- raise
- return btext[0]
-
- def builddelta(rev):
- # can we use the cached delta?
- if cachedelta and cachedelta[0] == rev:
- delta = cachedelta[1]
- else:
- t = buildtext()
- if self.iscensored(rev):
- # deltas based on a censored revision must replace the
- # full content in one patch, so delta works everywhere
- header = mdiff.replacediffheader(self.rawsize(rev), len(t))
- delta = header + t
- else:
- if self._inline:
- fh = ifh
- else:
- fh = dfh
- ptext = self.revision(rev, _df=fh, raw=True)
- delta = mdiff.textdiff(ptext, t)
- header, data = self.compress(delta)
- deltalen = len(header) + len(data)
- chainbase = self.chainbase(rev)
- dist = deltalen + offset - self.start(chainbase)
- if self._generaldelta:
- base = rev
- else:
- base = chainbase
- chainlen, compresseddeltalen = self._chaininfo(rev)
- chainlen += 1
- compresseddeltalen += deltalen
- return (dist, deltalen, (header, data), base,
- chainbase, chainlen, compresseddeltalen)
+ btext = [rawtext]
curr = len(self)
prev = curr - 1
offset = self.end(prev)
- delta = None
p1r, p2r = self.rev(p1), self.rev(p2)
# full versions are inserted when the needed deltas
@@ -1941,46 +2071,19 @@
else:
textlen = len(rawtext)
- # should we try to build a delta?
- if prev != nullrev and self.storedeltachains:
- tested = set()
- # This condition is true most of the time when processing
- # changegroup data into a generaldelta repo. The only time it
- # isn't true is if this is the first revision in a delta chain
- # or if ``format.generaldelta=true`` disabled ``lazydeltabase``.
- if cachedelta and self._generaldelta and self._lazydeltabase:
- # Assume what we received from the server is a good choice
- # build delta will reuse the cache
- candidatedelta = builddelta(cachedelta[0])
- tested.add(cachedelta[0])
- if self._isgooddelta(candidatedelta, textlen):
- delta = candidatedelta
- if delta is None and self._generaldelta:
- # exclude already lazy tested base if any
- parents = [p for p in (p1r, p2r)
- if p != nullrev and p not in tested]
- if parents and not self._aggressivemergedeltas:
- # Pick whichever parent is closer to us (to minimize the
- # chance of having to build a fulltext).
- parents = [max(parents)]
- tested.update(parents)
- pdeltas = []
- for p in parents:
- pd = builddelta(p)
- if self._isgooddelta(pd, textlen):
- pdeltas.append(pd)
- if pdeltas:
- delta = min(pdeltas, key=lambda x: x[1])
- if delta is None and prev not in tested:
- # other approach failed try against prev to hopefully save us a
- # fulltext.
- candidatedelta = builddelta(prev)
- if self._isgooddelta(candidatedelta, textlen):
- delta = candidatedelta
- if delta is not None:
- dist, l, data, base, chainbase, chainlen, compresseddeltalen = delta
+ if deltacomputer is None:
+ deltacomputer = _deltacomputer(self)
+
+ revinfo = _revisioninfo(node, p1, p2, btext, textlen, cachedelta, flags)
+ deltainfo = deltacomputer.finddeltainfo(revinfo, fh)
+
+ if deltainfo is not None:
+ base = deltainfo.base
+ chainbase = deltainfo.chainbase
+ data = deltainfo.data
+ l = deltainfo.deltalen
else:
- rawtext = buildtext()
+ rawtext = deltacomputer.buildtext(revinfo, fh)
data = self.compress(rawtext)
l = len(data[1]) + len(data[0])
base = chainbase = curr
@@ -1994,7 +2097,7 @@
self._writeentry(transaction, ifh, dfh, entry, data, link, offset)
if alwayscache and rawtext is None:
- rawtext = buildtext()
+ rawtext = deltacomputer._buildtext(revinfo, fh)
if type(rawtext) == str: # only accept immutable objects
self._cache = (node, curr, rawtext)
@@ -2064,6 +2167,7 @@
dfh.flush()
ifh.flush()
try:
+ deltacomputer = _deltacomputer(self)
# loop through our set of deltas
for data in deltas:
node, p1, p2, linknode, deltabase, delta, flags = data
@@ -2110,7 +2214,8 @@
self._addrevision(node, None, transaction, link,
p1, p2, flags, (baserev, delta),
ifh, dfh,
- alwayscache=bool(addrevisioncb))
+ alwayscache=bool(addrevisioncb),
+ deltacomputer=deltacomputer)
if addrevisioncb:
addrevisioncb(self, node)
@@ -2264,7 +2369,9 @@
DELTAREUSESAMEREVS = 'samerevs'
DELTAREUSENEVER = 'never'
- DELTAREUSEALL = {'always', 'samerevs', 'never'}
+ DELTAREUSEFULLADD = 'fulladd'
+
+ DELTAREUSEALL = {'always', 'samerevs', 'never', 'fulladd'}
def clone(self, tr, destrevlog, addrevisioncb=None,
deltareuse=DELTAREUSESAMEREVS, aggressivemergedeltas=None):
@@ -2331,6 +2438,7 @@
populatecachedelta = deltareuse in (self.DELTAREUSEALWAYS,
self.DELTAREUSESAMEREVS)
+ deltacomputer = _deltacomputer(destrevlog)
index = self.index
for rev in self:
entry = index[rev]
@@ -2355,18 +2463,26 @@
if not cachedelta:
rawtext = self.revision(rev, raw=True)
- ifh = destrevlog.opener(destrevlog.indexfile, 'a+',
- checkambig=False)
- dfh = None
- if not destrevlog._inline:
- dfh = destrevlog.opener(destrevlog.datafile, 'a+')
- try:
- destrevlog._addrevision(node, rawtext, tr, linkrev, p1, p2,
- flags, cachedelta, ifh, dfh)
- finally:
- if dfh:
- dfh.close()
- ifh.close()
+
+ if deltareuse == self.DELTAREUSEFULLADD:
+ destrevlog.addrevision(rawtext, tr, linkrev, p1, p2,
+ cachedelta=cachedelta,
+ node=node, flags=flags,
+ deltacomputer=deltacomputer)
+ else:
+ ifh = destrevlog.opener(destrevlog.indexfile, 'a+',
+ checkambig=False)
+ dfh = None
+ if not destrevlog._inline:
+ dfh = destrevlog.opener(destrevlog.datafile, 'a+')
+ try:
+ destrevlog._addrevision(node, rawtext, tr, linkrev, p1,
+ p2, flags, cachedelta, ifh, dfh,
+ deltacomputer=deltacomputer)
+ finally:
+ if dfh:
+ dfh.close()
+ ifh.close()
if addrevisioncb:
addrevisioncb(self, rev, node)
diff -r 87676e8ee056 -r 27b6df1b5adb mercurial/revset.py
--- a/mercurial/revset.py Mon Jan 08 16:07:51 2018 -0800
+++ b/mercurial/revset.py Mon Jan 22 17:53:02 2018 -0500
@@ -22,6 +22,7 @@
obsutil,
pathutil,
phases,
+ pycompat,
registrar,
repoview,
revsetlang,
@@ -123,7 +124,7 @@
def rangeall(repo, subset, x, order):
assert x is None
- return _makerangeset(repo, subset, 0, len(repo) - 1, order)
+ return _makerangeset(repo, subset, 0, repo.changelog.tiprev(), order)
def rangepre(repo, subset, y, order):
# ':y' can't be rewritten to '0:y' since '0' may be hidden
@@ -136,7 +137,8 @@
m = getset(repo, fullreposet(repo), x)
if not m:
return baseset()
- return _makerangeset(repo, subset, m.first(), len(repo) - 1, order)
+ return _makerangeset(repo, subset, m.first(), repo.changelog.tiprev(),
+ order)
def _makerangeset(repo, subset, m, n, order):
if m == n:
@@ -144,7 +146,7 @@
elif n == node.wdirrev:
r = spanset(repo, m, len(repo)) + baseset([n])
elif m == node.wdirrev:
- r = baseset([m]) + spanset(repo, len(repo) - 1, n - 1)
+ r = baseset([m]) + spanset(repo, repo.changelog.tiprev(), n - 1)
elif m < n:
r = spanset(repo, m, n + 1)
else:
@@ -266,7 +268,8 @@
def _destupdate(repo, subset, x):
# experimental revset for update destination
args = getargsdict(x, 'limit', 'clean')
- return subset & baseset([destutil.destupdate(repo, **args)[0]])
+ return subset & baseset([destutil.destupdate(repo,
+ **pycompat.strkwargs(args))[0]])
@predicate('_destmerge')
def _destmerge(repo, subset, x):
@@ -909,48 +912,43 @@
return limit(repo, subset, x, order)
def _follow(repo, subset, x, name, followfirst=False):
- l = getargs(x, 0, 2, _("%s takes no arguments or a pattern "
- "and an optional revset") % name)
- c = repo['.']
- if l:
- x = getstring(l[0], _("%s expected a pattern") % name)
- rev = None
- if len(l) >= 2:
- revs = getset(repo, fullreposet(repo), l[1])
- if len(revs) != 1:
- raise error.RepoLookupError(
- _("%s expected one starting revision") % name)
- rev = revs.last()
- c = repo[rev]
- matcher = matchmod.match(repo.root, repo.getcwd(), [x],
- ctx=repo[rev], default='path')
-
- files = c.manifest().walk(matcher)
-
- s = set()
- for fname in files:
- fctx = c[fname]
- s = s.union(set(c.rev() for c in fctx.ancestors(followfirst)))
- # include the revision responsible for the most recent version
- s.add(fctx.introrev())
+ args = getargsdict(x, name, 'file startrev')
+ revs = None
+ if 'startrev' in args:
+ revs = getset(repo, fullreposet(repo), args['startrev'])
+ if 'file' in args:
+ x = getstring(args['file'], _("%s expected a pattern") % name)
+ if revs is None:
+ revs = [None]
+ fctxs = []
+ for r in revs:
+ ctx = mctx = repo[r]
+ if r is None:
+ ctx = repo['.']
+ m = matchmod.match(repo.root, repo.getcwd(), [x],
+ ctx=mctx, default='path')
+ fctxs.extend(ctx[f].introfilectx() for f in ctx.manifest().walk(m))
+ s = dagop.filerevancestors(fctxs, followfirst)
else:
- s = dagop.revancestors(repo, baseset([c.rev()]), followfirst)
+ if revs is None:
+ revs = baseset([repo['.'].rev()])
+ s = dagop.revancestors(repo, revs, followfirst)
return subset & s
-@predicate('follow([pattern[, startrev]])', safe=True)
+@predicate('follow([file[, startrev]])', safe=True)
def follow(repo, subset, x):
"""
An alias for ``::.`` (ancestors of the working directory's first parent).
- If pattern is specified, the histories of files matching given
+ If file pattern is specified, the histories of files matching given
pattern in the revision given by startrev are followed, including copies.
"""
return _follow(repo, subset, x, 'follow')
@predicate('_followfirst', safe=True)
def _followfirst(repo, subset, x):
- # ``followfirst([pattern[, startrev]])``
- # Like ``follow([pattern[, startrev]])`` but follows only the first parent
+ # ``followfirst([file[, startrev]])``
+ # Like ``follow([file[, startrev]])`` but follows only the first parent
# of every revisions or files revisions.
return _follow(repo, subset, x, '_followfirst', followfirst=True)
@@ -1421,8 +1419,16 @@
l = getargs(x, 0, 1, _("outgoing takes one or no arguments"))
# i18n: "outgoing" is a keyword
dest = l and getstring(l[0], _("outgoing requires a repository path")) or ''
- dest = repo.ui.expandpath(dest or 'default-push', dest or 'default')
- dest, branches = hg.parseurl(dest)
+ if not dest:
+ # ui.paths.getpath() explicitly tests for None, not just a boolean
+ dest = None
+ path = repo.ui.paths.getpath(dest, default=('default-push', 'default'))
+ if not path:
+ raise error.Abort(_('default repository not configured!'),
+ hint=_("see 'hg help config.paths'"))
+ dest = path.pushloc or path.loc
+ branches = path.branch, []
+
revs, checkout = hg.addbranchrevs(repo, repo, branches, [])
if revs:
revs = [repo.lookup(rev) for rev in revs]
@@ -1509,8 +1515,7 @@
def _phase(repo, subset, *targets):
"""helper to select all rev in phases"""
- s = repo._phasecache.getrevset(repo, targets)
- return subset & s
+ return repo._phasecache.getrevset(repo, targets, subset)
@predicate('draft()', safe=True)
def draft(repo, subset, x):
@@ -1617,11 +1622,7 @@
"""Changeset in public phase."""
# i18n: "public" is a keyword
getargs(x, 0, 0, _("public takes no arguments"))
- phase = repo._phasecache.phase
- target = phases.public
- condition = lambda r: phase(repo, r) == target
- return subset.filter(condition, condrepr=('', target),
- cache=False)
+ return _phase(repo, subset, phases.public)
@predicate('remote([id [,path]])', safe=False)
def remote(repo, subset, x):
diff -r 87676e8ee056 -r 27b6df1b5adb mercurial/revsetlang.py
--- a/mercurial/revsetlang.py Mon Jan 08 16:07:51 2018 -0800
+++ b/mercurial/revsetlang.py Mon Jan 22 17:53:02 2018 -0500
@@ -27,8 +27,10 @@
"~": (18, None, None, ("ancestor", 18), None),
"^": (18, None, None, ("parent", 18), "parentpost"),
"-": (5, None, ("negate", 19), ("minus", 5), None),
- "::": (17, None, ("dagrangepre", 17), ("dagrange", 17), "dagrangepost"),
- "..": (17, None, ("dagrangepre", 17), ("dagrange", 17), "dagrangepost"),
+ "::": (17, "dagrangeall", ("dagrangepre", 17), ("dagrange", 17),
+ "dagrangepost"),
+ "..": (17, "dagrangeall", ("dagrangepre", 17), ("dagrange", 17),
+ "dagrangepost"),
":": (15, "rangeall", ("rangepre", 15), ("range", 15), "rangepost"),
"not": (10, None, ("not", 10), None, None),
"!": (10, None, ("not", 10), None, None),
@@ -288,6 +290,8 @@
post = ('parentpost', x[1])
if x[2][0] == 'dagrangepre':
return _fixops(('dagrange', post, x[2][1]))
+ elif x[2][0] == 'dagrangeall':
+ return _fixops(('dagrangepost', post))
elif x[2][0] == 'rangepre':
return _fixops(('range', post, x[2][1]))
elif x[2][0] == 'rangeall':
@@ -313,6 +317,8 @@
return _analyze(_build('only(_, _)', *x[1:]))
elif op == 'onlypost':
return _analyze(_build('only(_)', x[1]))
+ elif op == 'dagrangeall':
+ raise error.ParseError(_("can't use '::' in this context"))
elif op == 'dagrangepre':
return _analyze(_build('ancestors(_)', x[1]))
elif op == 'dagrangepost':
@@ -549,6 +555,52 @@
"""
return "'%s'" % util.escapestr(pycompat.bytestr(s))
+def _formatargtype(c, arg):
+ if c == 'd':
+ return '%d' % int(arg)
+ elif c == 's':
+ return _quote(arg)
+ elif c == 'r':
+ parse(arg) # make sure syntax errors are confined
+ return '(%s)' % arg
+ elif c == 'n':
+ return _quote(node.hex(arg))
+ elif c == 'b':
+ try:
+ return _quote(arg.branch())
+ except AttributeError:
+ raise TypeError
+ raise error.ParseError(_('unexpected revspec format character %s') % c)
+
+def _formatlistexp(s, t):
+ l = len(s)
+ if l == 0:
+ return "_list('')"
+ elif l == 1:
+ return _formatargtype(t, s[0])
+ elif t == 'd':
+ return "_intlist('%s')" % "\0".join('%d' % int(a) for a in s)
+ elif t == 's':
+ return "_list(%s)" % _quote("\0".join(s))
+ elif t == 'n':
+ return "_hexlist('%s')" % "\0".join(node.hex(a) for a in s)
+ elif t == 'b':
+ try:
+ return "_list('%s')" % "\0".join(a.branch() for a in s)
+ except AttributeError:
+ raise TypeError
+
+ m = l // 2
+ return '(%s or %s)' % (_formatlistexp(s[:m], t), _formatlistexp(s[m:], t))
+
+def _formatparamexp(args, t):
+ return ', '.join(_formatargtype(t, a) for a in args)
+
+_formatlistfuncs = {
+ 'l': _formatlistexp,
+ 'p': _formatparamexp,
+}
+
def formatspec(expr, *args):
'''
This is a convenience function for using revsets internally, and
@@ -564,7 +616,8 @@
%n = hex(arg), single-quoted
%% = a literal '%'
- Prefixing the type with 'l' specifies a parenthesized list of that type.
+ Prefixing the type with 'l' specifies a parenthesized list of that type,
+ and 'p' specifies a list of function parameters of that type.
>>> formatspec(b'%r:: and %lr', b'10 or 11', (b"this()", b"that()"))
'(10 or 11):: and ((this()) or (that()))'
@@ -579,68 +632,61 @@
>>> formatspec(b'branch(%b)', b)
"branch('default')"
>>> formatspec(b'root(%ls)', [b'a', b'b', b'c', b'd'])
- "root(_list('a\\x00b\\x00c\\x00d'))"
+ "root(_list('a\\\\x00b\\\\x00c\\\\x00d'))"
+ >>> formatspec(b'sort(%r, %ps)', b':', [b'desc', b'user'])
+ "sort((:), 'desc', 'user')"
+ >>> formatspec('%ls', ['a', "'"])
+ "_list('a\\\\x00\\\\'')"
'''
-
- def argtype(c, arg):
- if c == 'd':
- return '%d' % int(arg)
- elif c == 's':
- return _quote(arg)
- elif c == 'r':
- parse(arg) # make sure syntax errors are confined
- return '(%s)' % arg
- elif c == 'n':
- return _quote(node.hex(arg))
- elif c == 'b':
- return _quote(arg.branch())
+ expr = pycompat.bytestr(expr)
+ argiter = iter(args)
+ ret = []
+ pos = 0
+ while pos < len(expr):
+ q = expr.find('%', pos)
+ if q < 0:
+ ret.append(expr[pos:])
+ break
+ ret.append(expr[pos:q])
+ pos = q + 1
+ try:
+ d = expr[pos]
+ except IndexError:
+ raise error.ParseError(_('incomplete revspec format character'))
+ if d == '%':
+ ret.append(d)
+ pos += 1
+ continue
- def listexp(s, t):
- l = len(s)
- if l == 0:
- return "_list('')"
- elif l == 1:
- return argtype(t, s[0])
- elif t == 'd':
- return "_intlist('%s')" % "\0".join('%d' % int(a) for a in s)
- elif t == 's':
- return "_list('%s')" % "\0".join(s)
- elif t == 'n':
- return "_hexlist('%s')" % "\0".join(node.hex(a) for a in s)
- elif t == 'b':
- return "_list('%s')" % "\0".join(a.branch() for a in s)
-
- m = l // 2
- return '(%s or %s)' % (listexp(s[:m], t), listexp(s[m:], t))
-
- expr = pycompat.bytestr(expr)
- ret = ''
- pos = 0
- arg = 0
- while pos < len(expr):
- c = expr[pos]
- if c == '%':
+ try:
+ arg = next(argiter)
+ except StopIteration:
+ raise error.ParseError(_('missing argument for revspec'))
+ f = _formatlistfuncs.get(d)
+ if f:
+ # a list of some type
pos += 1
- d = expr[pos]
- if d == '%':
- ret += d
- elif d in 'dsnbr':
- ret += argtype(d, args[arg])
- arg += 1
- elif d == 'l':
- # a list of some type
- pos += 1
+ try:
d = expr[pos]
- ret += listexp(list(args[arg]), d)
- arg += 1
- else:
- raise error.Abort(_('unexpected revspec format character %s')
- % d)
+ except IndexError:
+ raise error.ParseError(_('incomplete revspec format character'))
+ try:
+ ret.append(f(list(arg), d))
+ except (TypeError, ValueError):
+ raise error.ParseError(_('invalid argument for revspec'))
else:
- ret += c
+ try:
+ ret.append(_formatargtype(d, arg))
+ except (TypeError, ValueError):
+ raise error.ParseError(_('invalid argument for revspec'))
pos += 1
- return ret
+ try:
+ next(argiter)
+ raise error.ParseError(_('too many revspec arguments specified'))
+ except StopIteration:
+ pass
+ return ''.join(ret)
def prettyformat(tree):
return parser.prettyformat(tree, ('string', 'symbol'))
@@ -661,3 +707,34 @@
if tree[0] == 'func':
funcs.add(tree[1][1])
return funcs
+
+_hashre = util.re.compile('[0-9a-fA-F]{1,40}$')
+
+def _ishashlikesymbol(symbol):
+ """returns true if the symbol looks like a hash"""
+ return _hashre.match(symbol)
+
+def gethashlikesymbols(tree):
+ """returns the list of symbols of the tree that look like hashes
+
+ >>> gethashlikesymbols(('dagrange', ('symbol', '3'), ('symbol', 'abe3ff')))
+ ['3', 'abe3ff']
+ >>> gethashlikesymbols(('func', ('symbol', 'precursors'), ('symbol', '.')))
+ []
+ >>> gethashlikesymbols(('func', ('symbol', 'precursors'), ('symbol', '34')))
+ ['34']
+ >>> gethashlikesymbols(('symbol', 'abe3ffZ'))
+ []
+ """
+ if not tree:
+ return []
+
+ if tree[0] == "symbol":
+ if _ishashlikesymbol(tree[1]):
+ return [tree[1]]
+ elif len(tree) >= 3:
+ results = []
+ for subtree in tree[1:]:
+ results += gethashlikesymbols(subtree)
+ return results
+ return []
diff -r 87676e8ee056 -r 27b6df1b5adb mercurial/rewriteutil.py
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/mercurial/rewriteutil.py Mon Jan 22 17:53:02 2018 -0500
@@ -0,0 +1,53 @@
+# rewriteutil.py - utility functions for rewriting changesets
+#
+# Copyright 2017 Octobus
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2 or any later version.
+
+from __future__ import absolute_import
+
+from .i18n import _
+
+from . import (
+ error,
+ node,
+ obsolete,
+ revset,
+)
+
+def precheck(repo, revs, action='rewrite'):
+ """check if revs can be rewritten
+ action is used to control the error message.
+
+ Make sure this function is called after taking the lock.
+ """
+ if node.nullrev in revs:
+ msg = _("cannot %s null changeset") % (action)
+ hint = _("no changeset checked out")
+ raise error.Abort(msg, hint=hint)
+
+ publicrevs = repo.revs('%ld and public()', revs)
+ if len(repo[None].parents()) > 1:
+ raise error.Abort(_("cannot %s while merging") % action)
+
+ if publicrevs:
+ msg = _("cannot %s public changesets") % (action)
+ hint = _("see 'hg help phases' for details")
+ raise error.Abort(msg, hint=hint)
+
+ newunstable = disallowednewunstable(repo, revs)
+ if newunstable:
+ raise error.Abort(_("cannot %s changeset with children") % action)
+
+def disallowednewunstable(repo, revs):
+ """Checks whether editing the revs will create new unstable changesets and
+ are we allowed to create them.
+
+ To allow new unstable changesets, set the config:
+ `experimental.evolution.allowunstable=True`
+ """
+ allowunstable = obsolete.isenabled(repo, obsolete.allowunstableopt)
+ if allowunstable:
+ return revset.baseset()
+ return repo.revs("(%ld::) - %ld", revs, revs)
diff -r 87676e8ee056 -r 27b6df1b5adb mercurial/scmutil.py
--- a/mercurial/scmutil.py Mon Jan 08 16:07:51 2018 -0800
+++ b/mercurial/scmutil.py Mon Jan 22 17:53:02 2018 -0500
@@ -1100,12 +1100,11 @@
finally:
if proc:
proc.communicate()
- if proc.returncode != 0:
- # not an error so 'cmd | grep' can be empty
- repo.ui.debug("extdata command '%s' %s\n"
- % (cmd, util.explainexit(proc.returncode)[0]))
if src:
src.close()
+ if proc and proc.returncode != 0:
+ raise error.Abort(_("extdata command '%s' failed: %s")
+ % (cmd, util.explainexit(proc.returncode)[0]))
return data
@@ -1223,6 +1222,9 @@
'unbundle',
]
+# A marker that tells the evolve extension to suppress its own reporting
+_reportstroubledchangesets = True
+
def registersummarycallback(repo, otr, txnname=''):
"""register a callback to issue a summary after the transaction is closed
"""
@@ -1245,7 +1247,7 @@
if filtername:
repo = repo.filtered(filtername)
func(repo, tr)
- newcat = '%2i-txnreport' % len(categories)
+ newcat = '%02i-txnreport' % len(categories)
otr.addpostclose(newcat, wrapped)
categories.append(newcat)
return wrapped
@@ -1258,11 +1260,38 @@
repo.ui.status(_('obsoleted %i changesets\n')
% len(obsoleted))
+ if (obsolete.isenabled(repo, obsolete.createmarkersopt) and
+ repo.ui.configbool('experimental', 'evolution.report-instabilities')):
+ instabilitytypes = [
+ ('orphan', 'orphan'),
+ ('phase-divergent', 'phasedivergent'),
+ ('content-divergent', 'contentdivergent'),
+ ]
+
+ def getinstabilitycounts(repo):
+ filtered = repo.changelog.filteredrevs
+ counts = {}
+ for instability, revset in instabilitytypes:
+ counts[instability] = len(set(obsolete.getrevs(repo, revset)) -
+ filtered)
+ return counts
+
+ oldinstabilitycounts = getinstabilitycounts(repo)
+ @reportsummary
+ def reportnewinstabilities(repo, tr):
+ newinstabilitycounts = getinstabilitycounts(repo)
+ for instability, revset in instabilitytypes:
+ delta = (newinstabilitycounts[instability] -
+ oldinstabilitycounts[instability])
+ if delta > 0:
+ repo.ui.warn(_('%i new %s changesets\n') %
+ (delta, instability))
+
if txmatch(_reportnewcssource):
@reportsummary
def reportnewcs(repo, tr):
"""Report the range of new revisions pulled/unbundled."""
- newrevs = list(tr.changes.get('revs', set()))
+ newrevs = tr.changes.get('revs', xrange(0, 0))
if not newrevs:
return
@@ -1279,3 +1308,108 @@
else:
revrange = '%s:%s' % (minrev, maxrev)
repo.ui.status(_('new changesets %s\n') % revrange)
+
+def nodesummaries(repo, nodes, maxnumnodes=4):
+ if len(nodes) <= maxnumnodes or repo.ui.verbose:
+ return ' '.join(short(h) for h in nodes)
+ first = ' '.join(short(h) for h in nodes[:maxnumnodes])
+ return _("%s and %d others") % (first, len(nodes) - maxnumnodes)
+
+def enforcesinglehead(repo, tr, desc):
+ """check that no named branch has multiple heads"""
+ if desc in ('strip', 'repair'):
+ # skip the logic during strip
+ return
+ visible = repo.filtered('visible')
+ # possible improvement: we could restrict the check to affected branch
+ for name, heads in visible.branchmap().iteritems():
+ if len(heads) > 1:
+ msg = _('rejecting multiple heads on branch "%s"')
+ msg %= name
+ hint = _('%d heads: %s')
+ hint %= (len(heads), nodesummaries(repo, heads))
+ raise error.Abort(msg, hint=hint)
+
+def wrapconvertsink(sink):
+ """Allow extensions to wrap the sink returned by convcmd.convertsink()
+ before it is used, whether or not the convert extension was formally loaded.
+ """
+ return sink
+
+def unhidehashlikerevs(repo, specs, hiddentype):
+ """parse the user specs and unhide changesets whose hash or revision number
+ is passed.
+
+ hiddentype can be: 1) 'warn': warn while unhiding changesets
+ 2) 'nowarn': don't warn while unhiding changesets
+
+ returns a repo object with the required changesets unhidden
+ """
+ if not repo.filtername or not repo.ui.configbool('experimental',
+ 'directaccess'):
+ return repo
+
+ if repo.filtername not in ('visible', 'visible-hidden'):
+ return repo
+
+ symbols = set()
+ for spec in specs:
+ try:
+ tree = revsetlang.parse(spec)
+ except error.ParseError: # will be reported by scmutil.revrange()
+ continue
+
+ symbols.update(revsetlang.gethashlikesymbols(tree))
+
+ if not symbols:
+ return repo
+
+ revs = _getrevsfromsymbols(repo, symbols)
+
+ if not revs:
+ return repo
+
+ if hiddentype == 'warn':
+ unfi = repo.unfiltered()
+ revstr = ", ".join([pycompat.bytestr(unfi[l]) for l in revs])
+ repo.ui.warn(_("warning: accessing hidden changesets for write "
+ "operation: %s\n") % revstr)
+
+ # we have to use new filtername to separate branch/tags cache until we can
+ # disbale these cache when revisions are dynamically pinned.
+ return repo.filtered('visible-hidden', revs)
+
+def _getrevsfromsymbols(repo, symbols):
+ """parse the list of symbols and returns a set of revision numbers of hidden
+ changesets present in symbols"""
+ revs = set()
+ unfi = repo.unfiltered()
+ unficl = unfi.changelog
+ cl = repo.changelog
+ tiprev = len(unficl)
+ pmatch = unficl._partialmatch
+ allowrevnums = repo.ui.configbool('experimental', 'directaccess.revnums')
+ for s in symbols:
+ try:
+ n = int(s)
+ if n <= tiprev:
+ if not allowrevnums:
+ continue
+ else:
+ if n not in cl:
+ revs.add(n)
+ continue
+ except ValueError:
+ pass
+
+ try:
+ s = pmatch(s)
+ except error.LookupError:
+ s = None
+
+ if s is not None:
+ rev = unficl.rev(s)
+ if rev not in cl:
+ revs.add(rev)
+
+ return revs
diff -r 87676e8ee056 -r 27b6df1b5adb mercurial/selectors2.py
--- a/mercurial/selectors2.py Mon Jan 08 16:07:51 2018 -0800
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,745 +0,0 @@
-""" Back-ported, durable, and portable selectors """
-
-# MIT License
-#
-# Copyright (c) 2017 Seth Michael Larson
-#
-# Permission is hereby granted, free of charge, to any person obtaining a copy
-# of this software and associated documentation files (the "Software"), to deal
-# in the Software without restriction, including without limitation the rights
-# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-# copies of the Software, and to permit persons to whom the Software is
-# furnished to do so, subject to the following conditions:
-#
-# The above copyright notice and this permission notice shall be included in all
-# copies or substantial portions of the Software.
-#
-# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-# SOFTWARE.
-
-# no-check-code
-
-from __future__ import absolute_import
-
-import collections
-import errno
-import math
-import select
-import socket
-import sys
-import time
-
-from . import pycompat
-
-namedtuple = collections.namedtuple
-Mapping = collections.Mapping
-
-try:
- monotonic = time.monotonic
-except AttributeError:
- monotonic = time.time
-
-__author__ = 'Seth Michael Larson'
-__email__ = 'sethmichaellarson@protonmail.com'
-__version__ = '2.0.0'
-__license__ = 'MIT'
-__url__ = 'https://www.github.com/SethMichaelLarson/selectors2'
-
-__all__ = ['EVENT_READ',
- 'EVENT_WRITE',
- 'SelectorKey',
- 'DefaultSelector',
- 'BaseSelector']
-
-EVENT_READ = (1 << 0)
-EVENT_WRITE = (1 << 1)
-_DEFAULT_SELECTOR = None
-_SYSCALL_SENTINEL = object() # Sentinel in case a system call returns None.
-_ERROR_TYPES = (OSError, IOError, socket.error)
-
-
-SelectorKey = namedtuple('SelectorKey', ['fileobj', 'fd', 'events', 'data'])
-
-
-class _SelectorMapping(Mapping):
- """ Mapping of file objects to selector keys """
-
- def __init__(self, selector):
- self._selector = selector
-
- def __len__(self):
- return len(self._selector._fd_to_key)
-
- def __getitem__(self, fileobj):
- try:
- fd = self._selector._fileobj_lookup(fileobj)
- return self._selector._fd_to_key[fd]
- except KeyError:
- raise KeyError("{0!r} is not registered.".format(fileobj))
-
- def __iter__(self):
- return iter(self._selector._fd_to_key)
-
-
-def _fileobj_to_fd(fileobj):
- """ Return a file descriptor from a file object. If
- given an integer will simply return that integer back. """
- if isinstance(fileobj, int):
- fd = fileobj
- else:
- try:
- fd = int(fileobj.fileno())
- except (AttributeError, TypeError, ValueError):
- raise ValueError("Invalid file object: {0!r}".format(fileobj))
- if fd < 0:
- raise ValueError("Invalid file descriptor: {0}".format(fd))
- return fd
-
-
-class BaseSelector(object):
- """ Abstract Selector class
-
- A selector supports registering file objects to be monitored
- for specific I/O events.
-
- A file object is a file descriptor or any object with a
- `fileno()` method. An arbitrary object can be attached to the
- file object which can be used for example to store context info,
- a callback, etc.
-
- A selector can use various implementations (select(), poll(), epoll(),
- and kqueue()) depending on the platform. The 'DefaultSelector' class uses
- the most efficient implementation for the current platform.
- """
- def __init__(self):
- # Maps file descriptors to keys.
- self._fd_to_key = {}
-
- # Read-only mapping returned by get_map()
- self._map = _SelectorMapping(self)
-
- def _fileobj_lookup(self, fileobj):
- """ Return a file descriptor from a file object.
- This wraps _fileobj_to_fd() to do an exhaustive
- search in case the object is invalid but we still
- have it in our map. Used by unregister() so we can
- unregister an object that was previously registered
- even if it is closed. It is also used by _SelectorMapping
- """
- try:
- return _fileobj_to_fd(fileobj)
- except ValueError:
-
- # Search through all our mapped keys.
- for key in self._fd_to_key.values():
- if key.fileobj is fileobj:
- return key.fd
-
- # Raise ValueError after all.
- raise
-
- def register(self, fileobj, events, data=None):
- """ Register a file object for a set of events to monitor. """
- if (not events) or (events & ~(EVENT_READ | EVENT_WRITE)):
- raise ValueError("Invalid events: {0!r}".format(events))
-
- key = SelectorKey(fileobj, self._fileobj_lookup(fileobj), events, data)
-
- if key.fd in self._fd_to_key:
- raise KeyError("{0!r} (FD {1}) is already registered"
- .format(fileobj, key.fd))
-
- self._fd_to_key[key.fd] = key
- return key
-
- def unregister(self, fileobj):
- """ Unregister a file object from being monitored. """
- try:
- key = self._fd_to_key.pop(self._fileobj_lookup(fileobj))
- except KeyError:
- raise KeyError("{0!r} is not registered".format(fileobj))
-
- # Getting the fileno of a closed socket on Windows errors with EBADF.
- except socket.error as err:
- if err.errno != errno.EBADF:
- raise
- else:
- for key in self._fd_to_key.values():
- if key.fileobj is fileobj:
- self._fd_to_key.pop(key.fd)
- break
- else:
- raise KeyError("{0!r} is not registered".format(fileobj))
- return key
-
- def modify(self, fileobj, events, data=None):
- """ Change a registered file object monitored events and data. """
- # NOTE: Some subclasses optimize this operation even further.
- try:
- key = self._fd_to_key[self._fileobj_lookup(fileobj)]
- except KeyError:
- raise KeyError("{0!r} is not registered".format(fileobj))
-
- if events != key.events:
- self.unregister(fileobj)
- key = self.register(fileobj, events, data)
-
- elif data != key.data:
- # Use a shortcut to update the data.
- key = key._replace(data=data)
- self._fd_to_key[key.fd] = key
-
- return key
-
- def select(self, timeout=None):
- """ Perform the actual selection until some monitored file objects
- are ready or the timeout expires. """
- raise NotImplementedError()
-
- def close(self):
- """ Close the selector. This must be called to ensure that all
- underlying resources are freed. """
- self._fd_to_key.clear()
- self._map = None
-
- def get_key(self, fileobj):
- """ Return the key associated with a registered file object. """
- mapping = self.get_map()
- if mapping is None:
- raise RuntimeError("Selector is closed")
- try:
- return mapping[fileobj]
- except KeyError:
- raise KeyError("{0!r} is not registered".format(fileobj))
-
- def get_map(self):
- """ Return a mapping of file objects to selector keys """
- return self._map
-
- def _key_from_fd(self, fd):
- """ Return the key associated to a given file descriptor
- Return None if it is not found. """
- try:
- return self._fd_to_key[fd]
- except KeyError:
- return None
-
- def __enter__(self):
- return self
-
- def __exit__(self, *_):
- self.close()
-
-
-# Almost all platforms have select.select()
-if hasattr(select, "select"):
- class SelectSelector(BaseSelector):
- """ Select-based selector. """
- def __init__(self):
- super(SelectSelector, self).__init__()
- self._readers = set()
- self._writers = set()
-
- def register(self, fileobj, events, data=None):
- key = super(SelectSelector, self).register(fileobj, events, data)
- if events & EVENT_READ:
- self._readers.add(key.fd)
- if events & EVENT_WRITE:
- self._writers.add(key.fd)
- return key
-
- def unregister(self, fileobj):
- key = super(SelectSelector, self).unregister(fileobj)
- self._readers.discard(key.fd)
- self._writers.discard(key.fd)
- return key
-
- def select(self, timeout=None):
- # Selecting on empty lists on Windows errors out.
- if not len(self._readers) and not len(self._writers):
- return []
-
- timeout = None if timeout is None else max(timeout, 0.0)
- ready = []
- r, w, _ = _syscall_wrapper(self._wrap_select, True, self._readers,
- self._writers, timeout)
- r = set(r)
- w = set(w)
- for fd in r | w:
- events = 0
- if fd in r:
- events |= EVENT_READ
- if fd in w:
- events |= EVENT_WRITE
-
- key = self._key_from_fd(fd)
- if key:
- ready.append((key, events & key.events))
- return ready
-
- def _wrap_select(self, r, w, timeout=None):
- """ Wrapper for select.select because timeout is a positional arg """
- return select.select(r, w, [], timeout)
-
- __all__.append('SelectSelector')
-
- # Jython has a different implementation of .fileno() for socket objects.
- if pycompat.isjython:
- class _JythonSelectorMapping(object):
- """ This is an implementation of _SelectorMapping that is built
- for use specifically with Jython, which does not provide a hashable
- value from socket.socket.fileno(). """
-
- def __init__(self, selector):
- assert isinstance(selector, JythonSelectSelector)
- self._selector = selector
-
- def __len__(self):
- return len(self._selector._sockets)
-
- def __getitem__(self, fileobj):
- for sock, key in self._selector._sockets:
- if sock is fileobj:
- return key
- else:
- raise KeyError("{0!r} is not registered.".format(fileobj))
-
- class JythonSelectSelector(SelectSelector):
- """ This is an implementation of SelectSelector that is for Jython
- which works around that Jython's socket.socket.fileno() does not
- return an integer fd value. All SelectorKey.fd will be equal to -1
- and should not be used. This instead uses object id to compare fileobj
- and will only use select.select as it's the only selector that allows
- directly passing in socket objects rather than registering fds.
- See: http://bugs.jython.org/issue1678
- https://wiki.python.org/jython/NewSocketModule#socket.fileno.28.29_does_not_return_an_integer
- """
-
- def __init__(self):
- super(JythonSelectSelector, self).__init__()
-
- self._sockets = [] # Uses a list of tuples instead of dictionary.
- self._map = _JythonSelectorMapping(self)
- self._readers = []
- self._writers = []
-
- # Jython has a select.cpython_compatible_select function in older versions.
- self._select_func = getattr(select, 'cpython_compatible_select', select.select)
-
- def register(self, fileobj, events, data=None):
- for sock, _ in self._sockets:
- if sock is fileobj:
- raise KeyError("{0!r} is already registered"
- .format(fileobj, sock))
-
- key = SelectorKey(fileobj, -1, events, data)
- self._sockets.append((fileobj, key))
-
- if events & EVENT_READ:
- self._readers.append(fileobj)
- if events & EVENT_WRITE:
- self._writers.append(fileobj)
- return key
-
- def unregister(self, fileobj):
- for i, (sock, key) in enumerate(self._sockets):
- if sock is fileobj:
- break
- else:
- raise KeyError("{0!r} is not registered.".format(fileobj))
-
- if key.events & EVENT_READ:
- self._readers.remove(fileobj)
- if key.events & EVENT_WRITE:
- self._writers.remove(fileobj)
-
- del self._sockets[i]
- return key
-
- def _wrap_select(self, r, w, timeout=None):
- """ Wrapper for select.select because timeout is a positional arg """
- return self._select_func(r, w, [], timeout)
-
- __all__.append('JythonSelectSelector')
- SelectSelector = JythonSelectSelector # Override so the wrong selector isn't used.
-
-
-if hasattr(select, "poll"):
- class PollSelector(BaseSelector):
- """ Poll-based selector """
- def __init__(self):
- super(PollSelector, self).__init__()
- self._poll = select.poll()
-
- def register(self, fileobj, events, data=None):
- key = super(PollSelector, self).register(fileobj, events, data)
- event_mask = 0
- if events & EVENT_READ:
- event_mask |= select.POLLIN
- if events & EVENT_WRITE:
- event_mask |= select.POLLOUT
- self._poll.register(key.fd, event_mask)
- return key
-
- def unregister(self, fileobj):
- key = super(PollSelector, self).unregister(fileobj)
- self._poll.unregister(key.fd)
- return key
-
- def _wrap_poll(self, timeout=None):
- """ Wrapper function for select.poll.poll() so that
- _syscall_wrapper can work with only seconds. """
- if timeout is not None:
- if timeout <= 0:
- timeout = 0
- else:
- # select.poll.poll() has a resolution of 1 millisecond,
- # round away from zero to wait *at least* timeout seconds.
- timeout = math.ceil(timeout * 1000)
-
- result = self._poll.poll(timeout)
- return result
-
- def select(self, timeout=None):
- ready = []
- fd_events = _syscall_wrapper(self._wrap_poll, True, timeout=timeout)
- for fd, event_mask in fd_events:
- events = 0
- if event_mask & ~select.POLLIN:
- events |= EVENT_WRITE
- if event_mask & ~select.POLLOUT:
- events |= EVENT_READ
-
- key = self._key_from_fd(fd)
- if key:
- ready.append((key, events & key.events))
-
- return ready
-
- __all__.append('PollSelector')
-
-if hasattr(select, "epoll"):
- class EpollSelector(BaseSelector):
- """ Epoll-based selector """
- def __init__(self):
- super(EpollSelector, self).__init__()
- self._epoll = select.epoll()
-
- def fileno(self):
- return self._epoll.fileno()
-
- def register(self, fileobj, events, data=None):
- key = super(EpollSelector, self).register(fileobj, events, data)
- events_mask = 0
- if events & EVENT_READ:
- events_mask |= select.EPOLLIN
- if events & EVENT_WRITE:
- events_mask |= select.EPOLLOUT
- _syscall_wrapper(self._epoll.register, False, key.fd, events_mask)
- return key
-
- def unregister(self, fileobj):
- key = super(EpollSelector, self).unregister(fileobj)
- try:
- _syscall_wrapper(self._epoll.unregister, False, key.fd)
- except _ERROR_TYPES:
- # This can occur when the fd was closed since registry.
- pass
- return key
-
- def select(self, timeout=None):
- if timeout is not None:
- if timeout <= 0:
- timeout = 0.0
- else:
- # select.epoll.poll() has a resolution of 1 millisecond
- # but luckily takes seconds so we don't need a wrapper
- # like PollSelector. Just for better rounding.
- timeout = math.ceil(timeout * 1000) * 0.001
- timeout = float(timeout)
- else:
- timeout = -1.0 # epoll.poll() must have a float.
-
- # We always want at least 1 to ensure that select can be called
- # with no file descriptors registered. Otherwise will fail.
- max_events = max(len(self._fd_to_key), 1)
-
- ready = []
- fd_events = _syscall_wrapper(self._epoll.poll, True,
- timeout=timeout,
- maxevents=max_events)
- for fd, event_mask in fd_events:
- events = 0
- if event_mask & ~select.EPOLLIN:
- events |= EVENT_WRITE
- if event_mask & ~select.EPOLLOUT:
- events |= EVENT_READ
-
- key = self._key_from_fd(fd)
- if key:
- ready.append((key, events & key.events))
- return ready
-
- def close(self):
- self._epoll.close()
- super(EpollSelector, self).close()
-
- __all__.append('EpollSelector')
-
-
-if hasattr(select, "devpoll"):
- class DevpollSelector(BaseSelector):
- """Solaris /dev/poll selector."""
-
- def __init__(self):
- super(DevpollSelector, self).__init__()
- self._devpoll = select.devpoll()
-
- def fileno(self):
- return self._devpoll.fileno()
-
- def register(self, fileobj, events, data=None):
- key = super(DevpollSelector, self).register(fileobj, events, data)
- poll_events = 0
- if events & EVENT_READ:
- poll_events |= select.POLLIN
- if events & EVENT_WRITE:
- poll_events |= select.POLLOUT
- self._devpoll.register(key.fd, poll_events)
- return key
-
- def unregister(self, fileobj):
- key = super(DevpollSelector, self).unregister(fileobj)
- self._devpoll.unregister(key.fd)
- return key
-
- def _wrap_poll(self, timeout=None):
- """ Wrapper function for select.poll.poll() so that
- _syscall_wrapper can work with only seconds. """
- if timeout is not None:
- if timeout <= 0:
- timeout = 0
- else:
- # select.devpoll.poll() has a resolution of 1 millisecond,
- # round away from zero to wait *at least* timeout seconds.
- timeout = math.ceil(timeout * 1000)
-
- result = self._devpoll.poll(timeout)
- return result
-
- def select(self, timeout=None):
- ready = []
- fd_events = _syscall_wrapper(self._wrap_poll, True, timeout=timeout)
- for fd, event_mask in fd_events:
- events = 0
- if event_mask & ~select.POLLIN:
- events |= EVENT_WRITE
- if event_mask & ~select.POLLOUT:
- events |= EVENT_READ
-
- key = self._key_from_fd(fd)
- if key:
- ready.append((key, events & key.events))
-
- return ready
-
- def close(self):
- self._devpoll.close()
- super(DevpollSelector, self).close()
-
- __all__.append('DevpollSelector')
-
-
-if hasattr(select, "kqueue"):
- class KqueueSelector(BaseSelector):
- """ Kqueue / Kevent-based selector """
- def __init__(self):
- super(KqueueSelector, self).__init__()
- self._kqueue = select.kqueue()
-
- def fileno(self):
- return self._kqueue.fileno()
-
- def register(self, fileobj, events, data=None):
- key = super(KqueueSelector, self).register(fileobj, events, data)
- if events & EVENT_READ:
- kevent = select.kevent(key.fd,
- select.KQ_FILTER_READ,
- select.KQ_EV_ADD)
-
- _syscall_wrapper(self._kqueue.control, False, [kevent], 0, 0)
-
- if events & EVENT_WRITE:
- kevent = select.kevent(key.fd,
- select.KQ_FILTER_WRITE,
- select.KQ_EV_ADD)
-
- _syscall_wrapper(self._kqueue.control, False, [kevent], 0, 0)
-
- return key
-
- def unregister(self, fileobj):
- key = super(KqueueSelector, self).unregister(fileobj)
- if key.events & EVENT_READ:
- kevent = select.kevent(key.fd,
- select.KQ_FILTER_READ,
- select.KQ_EV_DELETE)
- try:
- _syscall_wrapper(self._kqueue.control, False, [kevent], 0, 0)
- except _ERROR_TYPES:
- pass
- if key.events & EVENT_WRITE:
- kevent = select.kevent(key.fd,
- select.KQ_FILTER_WRITE,
- select.KQ_EV_DELETE)
- try:
- _syscall_wrapper(self._kqueue.control, False, [kevent], 0, 0)
- except _ERROR_TYPES:
- pass
-
- return key
-
- def select(self, timeout=None):
- if timeout is not None:
- timeout = max(timeout, 0)
-
- max_events = len(self._fd_to_key) * 2
- ready_fds = {}
-
- kevent_list = _syscall_wrapper(self._kqueue.control, True,
- None, max_events, timeout)
-
- for kevent in kevent_list:
- fd = kevent.ident
- event_mask = kevent.filter
- events = 0
- if event_mask == select.KQ_FILTER_READ:
- events |= EVENT_READ
- if event_mask == select.KQ_FILTER_WRITE:
- events |= EVENT_WRITE
-
- key = self._key_from_fd(fd)
- if key:
- if key.fd not in ready_fds:
- ready_fds[key.fd] = (key, events & key.events)
- else:
- old_events = ready_fds[key.fd][1]
- ready_fds[key.fd] = (key, (events | old_events) & key.events)
-
- return list(ready_fds.values())
-
- def close(self):
- self._kqueue.close()
- super(KqueueSelector, self).close()
-
- __all__.append('KqueueSelector')
-
-
-def _can_allocate(struct):
- """ Checks that select structs can be allocated by the underlying
- operating system, not just advertised by the select module. We don't
- check select() because we'll be hopeful that most platforms that
- don't have it available will not advertise it. (ie: GAE) """
- try:
- # select.poll() objects won't fail until used.
- if struct == 'poll':
- p = select.poll()
- p.poll(0)
-
- # All others will fail on allocation.
- else:
- getattr(select, struct)().close()
- return True
- except (OSError, AttributeError):
- return False
-
-
-# Python 3.5 uses a more direct route to wrap system calls to increase speed.
-if sys.version_info >= (3, 5):
- def _syscall_wrapper(func, _, *args, **kwargs):
- """ This is the short-circuit version of the below logic
- because in Python 3.5+ all selectors restart system calls. """
- return func(*args, **kwargs)
-else:
- def _syscall_wrapper(func, recalc_timeout, *args, **kwargs):
- """ Wrapper function for syscalls that could fail due to EINTR.
- All functions should be retried if there is time left in the timeout
- in accordance with PEP 475. """
- timeout = kwargs.get("timeout", None)
- if timeout is None:
- expires = None
- recalc_timeout = False
- else:
- timeout = float(timeout)
- if timeout < 0.0: # Timeout less than 0 treated as no timeout.
- expires = None
- else:
- expires = monotonic() + timeout
-
- args = list(args)
- if recalc_timeout and "timeout" not in kwargs:
- raise ValueError(
- "Timeout must be in args or kwargs to be recalculated")
-
- result = _SYSCALL_SENTINEL
- while result is _SYSCALL_SENTINEL:
- try:
- result = func(*args, **kwargs)
- # OSError is thrown by select.select
- # IOError is thrown by select.epoll.poll
- # select.error is thrown by select.poll.poll
- # Aren't we thankful for Python 3.x rework for exceptions?
- except (OSError, IOError, select.error) as e:
- # select.error wasn't a subclass of OSError in the past.
- errcode = None
- if hasattr(e, "errno"):
- errcode = e.errno
- elif hasattr(e, "args"):
- errcode = e.args[0]
-
- # Also test for the Windows equivalent of EINTR.
- is_interrupt = (errcode == errno.EINTR or (hasattr(errno, "WSAEINTR") and
- errcode == errno.WSAEINTR))
-
- if is_interrupt:
- if expires is not None:
- current_time = monotonic()
- if current_time > expires:
- raise OSError(errno=errno.ETIMEDOUT)
- if recalc_timeout:
- if "timeout" in kwargs:
- kwargs["timeout"] = expires - current_time
- continue
- raise
- return result
-
-
-# Choose the best implementation, roughly:
-# kqueue == devpoll == epoll > poll > select
-# select() also can't accept a FD > FD_SETSIZE (usually around 1024)
-def DefaultSelector():
- """ This function serves as a first call for DefaultSelector to
- detect if the select module is being monkey-patched incorrectly
- by eventlet, greenlet, and preserve proper behavior. """
- global _DEFAULT_SELECTOR
- if _DEFAULT_SELECTOR is None:
- if pycompat.isjython:
- _DEFAULT_SELECTOR = JythonSelectSelector
- elif _can_allocate('kqueue'):
- _DEFAULT_SELECTOR = KqueueSelector
- elif _can_allocate('devpoll'):
- _DEFAULT_SELECTOR = DevpollSelector
- elif _can_allocate('epoll'):
- _DEFAULT_SELECTOR = EpollSelector
- elif _can_allocate('poll'):
- _DEFAULT_SELECTOR = PollSelector
- elif hasattr(select, 'select'):
- _DEFAULT_SELECTOR = SelectSelector
- else: # Platform-specific: AppEngine
- raise RuntimeError('Platform does not have a selector.')
- return _DEFAULT_SELECTOR()
diff -r 87676e8ee056 -r 27b6df1b5adb mercurial/setdiscovery.py
--- a/mercurial/setdiscovery.py Mon Jan 08 16:07:51 2018 -0800
+++ b/mercurial/setdiscovery.py Mon Jan 22 17:53:02 2018 -0500
@@ -133,7 +133,8 @@
def findcommonheads(ui, local, remote,
initialsamplesize=100,
fullsamplesize=200,
- abortwhenunrelated=True):
+ abortwhenunrelated=True,
+ ancestorsof=None):
'''Return a tuple (common, anyincoming, remoteheads) used to identify
missing nodes from or in remote.
'''
@@ -141,7 +142,11 @@
roundtrips = 0
cl = local.changelog
- dag = dagutil.revlogdag(cl)
+ localsubset = None
+ if ancestorsof is not None:
+ rev = local.changelog.rev
+ localsubset = [rev(n) for n in ancestorsof]
+ dag = dagutil.revlogdag(cl, localsubset=localsubset)
# early exit if we know all the specified remote heads already
ui.debug("query 1; heads\n")
diff -r 87676e8ee056 -r 27b6df1b5adb mercurial/simplemerge.py
--- a/mercurial/simplemerge.py Mon Jan 08 16:07:51 2018 -0800
+++ b/mercurial/simplemerge.py Mon Jan 22 17:53:02 2018 -0500
@@ -418,6 +418,8 @@
The merged result is written into `localctx`.
"""
+ opts = pycompat.byteskwargs(opts)
+
def readctx(ctx):
# Merges were always run in the working copy before, which means
# they used decoded data, if the user defined any repository
diff -r 87676e8ee056 -r 27b6df1b5adb mercurial/smartset.py
--- a/mercurial/smartset.py Mon Jan 08 16:07:51 2018 -0800
+++ b/mercurial/smartset.py Mon Jan 22 17:53:02 2018 -0500
@@ -772,6 +772,16 @@
>>> xs.last() # cached
4
"""
+ def __new__(cls, gen, iterasc=None):
+ if iterasc is None:
+ typ = cls
+ elif iterasc:
+ typ = _generatorsetasc
+ else:
+ typ = _generatorsetdesc
+
+ return super(generatorset, cls).__new__(typ)
+
def __init__(self, gen, iterasc=None):
"""
gen: a generator producing the values for the generatorset.
@@ -782,13 +792,6 @@
self._genlist = []
self._finished = False
self._ascending = True
- if iterasc is not None:
- if iterasc:
- self.fastasc = self._iterator
- self.__contains__ = self._asccontains
- else:
- self.fastdesc = self._iterator
- self.__contains__ = self._desccontains
def __nonzero__(self):
# Do not use 'for r in self' because it will enforce the iteration
@@ -814,36 +817,6 @@
self._cache[x] = False
return False
- def _asccontains(self, x):
- """version of contains optimised for ascending generator"""
- if x in self._cache:
- return self._cache[x]
-
- # Use new values only, as existing values would be cached.
- for l in self._consumegen():
- if l == x:
- return True
- if l > x:
- break
-
- self._cache[x] = False
- return False
-
- def _desccontains(self, x):
- """version of contains optimised for descending generator"""
- if x in self._cache:
- return self._cache[x]
-
- # Use new values only, as existing values would be cached.
- for l in self._consumegen():
- if l == x:
- return True
- if l < x:
- break
-
- self._cache[x] = False
- return False
-
def __iter__(self):
if self._ascending:
it = self.fastasc
@@ -947,7 +920,45 @@
def __repr__(self):
d = {False: '-', True: '+'}[self._ascending]
- return '<%s%s>' % (type(self).__name__, d)
+ return '<%s%s>' % (type(self).__name__.lstrip('_'), d)
+
+class _generatorsetasc(generatorset):
+ """Special case of generatorset optimized for ascending generators."""
+
+ fastasc = generatorset._iterator
+
+ def __contains__(self, x):
+ if x in self._cache:
+ return self._cache[x]
+
+ # Use new values only, as existing values would be cached.
+ for l in self._consumegen():
+ if l == x:
+ return True
+ if l > x:
+ break
+
+ self._cache[x] = False
+ return False
+
+class _generatorsetdesc(generatorset):
+ """Special case of generatorset optimized for descending generators."""
+
+ fastdesc = generatorset._iterator
+
+ def __contains__(self, x):
+ if x in self._cache:
+ return self._cache[x]
+
+ # Use new values only, as existing values would be cached.
+ for l in self._consumegen():
+ if l == x:
+ return True
+ if l < x:
+ break
+
+ self._cache[x] = False
+ return False
def spanset(repo, start=0, end=None):
"""Create a spanset that represents a range of repository revisions
diff -r 87676e8ee056 -r 27b6df1b5adb mercurial/sparse.py
--- a/mercurial/sparse.py Mon Jan 08 16:07:51 2018 -0800
+++ b/mercurial/sparse.py Mon Jan 22 17:53:02 2018 -0500
@@ -12,7 +12,10 @@
import os
from .i18n import _
-from .node import nullid
+from .node import (
+ hex,
+ nullid,
+)
from . import (
error,
match as matchmod,
@@ -173,12 +176,12 @@
tempsignature = '0'
if signature is None or (includetemp and tempsignature is None):
- signature = hashlib.sha1(repo.vfs.tryread('sparse')).hexdigest()
+ signature = hex(hashlib.sha1(repo.vfs.tryread('sparse')).digest())
cache['signature'] = signature
if includetemp:
raw = repo.vfs.tryread('tempsparse')
- tempsignature = hashlib.sha1(raw).hexdigest()
+ tempsignature = hex(hashlib.sha1(raw).digest())
cache['tempsignature'] = tempsignature
return '%s %s' % (signature, tempsignature)
@@ -291,24 +294,9 @@
includes, excludes, profiles = patternsforrev(repo, rev)
if includes or excludes:
- # Explicitly include subdirectories of includes so
- # status will walk them down to the actual include.
- subdirs = set()
- for include in includes:
- # TODO consider using posix path functions here so Windows
- # \ directory separators don't come into play.
- dirname = os.path.dirname(include)
- # basename is used to avoid issues with absolute
- # paths (which on Windows can include the drive).
- while os.path.basename(dirname):
- subdirs.add(dirname)
- dirname = os.path.dirname(dirname)
-
matcher = matchmod.match(repo.root, '', [],
include=includes, exclude=excludes,
default='relpath')
- if subdirs:
- matcher = forceincludematcher(matcher, subdirs)
matchers.append(matcher)
except IOError:
pass
diff -r 87676e8ee056 -r 27b6df1b5adb mercurial/sshpeer.py
--- a/mercurial/sshpeer.py Mon Jan 08 16:07:51 2018 -0800
+++ b/mercurial/sshpeer.py Mon Jan 22 17:53:02 2018 -0500
@@ -18,9 +18,9 @@
)
def _serverquote(s):
+ """quote a string for the remote shell ... which we assume is sh"""
if not s:
return s
- '''quote a string for the remote shell ... which we assume is sh'''
if re.match('[a-zA-Z0-9@%_+=:,./-]*$', s):
return s
return "'%s'" % s.replace("'", "'\\''")
@@ -136,6 +136,8 @@
sshcmd = self.ui.config("ui", "ssh")
remotecmd = self.ui.config("ui", "remotecmd")
+ sshaddenv = dict(self.ui.configitems("sshenv"))
+ sshenv = util.shellenviron(sshaddenv)
args = util.sshargs(sshcmd, self._host, self._user, self._port)
@@ -144,11 +146,11 @@
util.shellquote("%s init %s" %
(_serverquote(remotecmd), _serverquote(self._path))))
ui.debug('running %s\n' % cmd)
- res = ui.system(cmd, blockedtag='sshpeer')
+ res = ui.system(cmd, blockedtag='sshpeer', environ=sshenv)
if res != 0:
self._abort(error.RepoError(_("could not create remote repo")))
- self._validaterepo(sshcmd, args, remotecmd)
+ self._validaterepo(sshcmd, args, remotecmd, sshenv)
# Begin of _basepeer interface.
@@ -180,7 +182,7 @@
# End of _basewirecommands interface.
- def _validaterepo(self, sshcmd, args, remotecmd):
+ def _validaterepo(self, sshcmd, args, remotecmd, sshenv=None):
# cleanup up previous run
self._cleanup()
@@ -196,7 +198,7 @@
# no buffer allow the use of 'select'
# feel free to remove buffering and select usage when we ultimately
# move to threading.
- sub = util.popen4(cmd, bufsize=0)
+ sub = util.popen4(cmd, bufsize=0, env=sshenv)
self._pipeo, self._pipei, self._pipee, self._subprocess = sub
self._pipei = util.bufferedinputpipe(self._pipei)
@@ -204,8 +206,9 @@
self._pipeo = doublepipe(self.ui, self._pipeo, self._pipee)
def badresponse():
- self._abort(error.RepoError(_('no suitable response from '
- 'remote hg')))
+ msg = _("no suitable response from remote hg")
+ hint = self.ui.config("ui", "ssherrorhint")
+ self._abort(error.RepoError(msg, hint=hint))
try:
# skip any noise generated by remote shell
@@ -280,6 +283,17 @@
def _callstream(self, cmd, **args):
args = pycompat.byteskwargs(args)
+ if (self.ui.debugflag
+ and self.ui.configbool('devel', 'debug.peer-request')):
+ dbg = self.ui.debug
+ line = 'devel-peer-request: %s\n'
+ dbg(line % cmd)
+ for key, value in sorted(args.items()):
+ if not isinstance(value, dict):
+ dbg(line % ' %s: %d bytes' % (key, len(value)))
+ else:
+ for dk, dv in sorted(value.items()):
+ dbg(line % ' %s-%s: %d' % (key, dk, len(dv)))
self.ui.debug("sending %s command\n" % cmd)
self._pipeo.write("%s\n" % cmd)
_func, names = wireproto.commands[cmd]
diff -r 87676e8ee056 -r 27b6df1b5adb mercurial/sshserver.py
--- a/mercurial/sshserver.py Mon Jan 08 16:07:51 2018 -0800
+++ b/mercurial/sshserver.py Mon Jan 22 17:53:02 2018 -0500
@@ -76,13 +76,7 @@
def sendstream(self, source):
write = self.fout.write
-
- if source.reader:
- gen = iter(lambda: source.reader.read(4096), '')
- else:
- gen = source.gen
-
- for chunk in gen:
+ for chunk in source.gen:
write(chunk)
self.fout.flush()
@@ -111,6 +105,7 @@
handlers = {
str: sendresponse,
wireproto.streamres: sendstream,
+ wireproto.streamres_legacy: sendstream,
wireproto.pushres: sendpushresponse,
wireproto.pusherr: sendpusherror,
wireproto.ooberror: sendooberror,
diff -r 87676e8ee056 -r 27b6df1b5adb mercurial/sslutil.py
--- a/mercurial/sslutil.py Mon Jan 08 16:07:51 2018 -0800
+++ b/mercurial/sslutil.py Mon Jan 22 17:53:02 2018 -0500
@@ -17,6 +17,7 @@
from .i18n import _
from . import (
error,
+ node,
pycompat,
util,
)
@@ -96,13 +97,13 @@
# in this legacy code since we don't support SNI.
args = {
- 'keyfile': self._keyfile,
- 'certfile': self._certfile,
- 'server_side': server_side,
- 'cert_reqs': self.verify_mode,
- 'ssl_version': self.protocol,
- 'ca_certs': self._cacerts,
- 'ciphers': self._ciphers,
+ r'keyfile': self._keyfile,
+ r'certfile': self._certfile,
+ r'server_side': server_side,
+ r'cert_reqs': self.verify_mode,
+ r'ssl_version': self.protocol,
+ r'ca_certs': self._cacerts,
+ r'ciphers': self._ciphers,
}
return ssl.wrap_socket(socket, **args)
@@ -808,9 +809,9 @@
# If a certificate fingerprint is pinned, use it and only it to
# validate the remote cert.
peerfingerprints = {
- 'sha1': hashlib.sha1(peercert).hexdigest(),
- 'sha256': hashlib.sha256(peercert).hexdigest(),
- 'sha512': hashlib.sha512(peercert).hexdigest(),
+ 'sha1': node.hex(hashlib.sha1(peercert).digest()),
+ 'sha256': node.hex(hashlib.sha256(peercert).digest()),
+ 'sha512': node.hex(hashlib.sha512(peercert).digest()),
}
def fmtfingerprint(s):
diff -r 87676e8ee056 -r 27b6df1b5adb mercurial/statichttprepo.py
--- a/mercurial/statichttprepo.py Mon Jan 08 16:07:51 2018 -0800
+++ b/mercurial/statichttprepo.py Mon Jan 22 17:53:02 2018 -0500
@@ -166,8 +166,6 @@
self.encodepats = None
self.decodepats = None
self._transref = None
- # Cache of types representing filtered repos.
- self._filteredrepotypes = {}
def _restrictcapabilities(self, caps):
caps = super(statichttprepository, self)._restrictcapabilities(caps)
diff -r 87676e8ee056 -r 27b6df1b5adb mercurial/statprof.py
--- a/mercurial/statprof.py Mon Jan 08 16:07:51 2018 -0800
+++ b/mercurial/statprof.py Mon Jan 22 17:53:02 2018 -0500
@@ -815,7 +815,6 @@
tos = sample.stack[0]
name = tos.function
path = simplifypath(tos.path)
- category = '%s:%d' % (path, tos.lineno)
stack = tuple((('%s:%d' % (simplifypath(frame.path), frame.lineno),
frame.function) for frame in sample.stack))
qstack = collections.deque(stack)
@@ -922,7 +921,7 @@
load_data(path=path)
- display(**displayargs)
+ display(**pycompat.strkwargs(displayargs))
return 0
diff -r 87676e8ee056 -r 27b6df1b5adb mercurial/store.py
--- a/mercurial/store.py Mon Jan 08 16:07:51 2018 -0800
+++ b/mercurial/store.py Mon Jan 22 17:53:02 2018 -0500
@@ -15,6 +15,7 @@
from .i18n import _
from . import (
error,
+ node,
policy,
pycompat,
util,
@@ -221,7 +222,7 @@
_maxshortdirslen = 8 * (_dirprefixlen + 1) - 4
def _hashencode(path, dotencode):
- digest = hashlib.sha1(path).hexdigest()
+ digest = node.hex(hashlib.sha1(path).digest())
le = lowerencode(path[5:]).split('/') # skips prefix 'data/' or 'meta/'
parts = _auxencode(le, dotencode)
basename = parts[-1]
diff -r 87676e8ee056 -r 27b6df1b5adb mercurial/streamclone.py
--- a/mercurial/streamclone.py Mon Jan 08 16:07:51 2018 -0800
+++ b/mercurial/streamclone.py Mon Jan 22 17:53:02 2018 -0500
@@ -7,23 +7,27 @@
from __future__ import absolute_import
+import contextlib
+import os
import struct
+import tempfile
+import warnings
from .i18n import _
from . import (
branchmap,
+ cacheutil,
error,
phases,
store,
util,
)
-def canperformstreamclone(pullop, bailifbundle2supported=False):
+def canperformstreamclone(pullop, bundle2=False):
"""Whether it is possible to perform a streaming clone as part of pull.
- ``bailifbundle2supported`` will cause the function to return False if
- bundle2 stream clones are supported. It should only be called by the
- legacy stream clone code path.
+ ``bundle2`` will cause the function to consider stream clone through
+ bundle2 and only through bundle2.
Returns a tuple of (supported, requirements). ``supported`` is True if
streaming clone is supported and False otherwise. ``requirements`` is
@@ -35,18 +39,18 @@
bundle2supported = False
if pullop.canusebundle2:
- if 'v1' in pullop.remotebundle2caps.get('stream', []):
+ if 'v2' in pullop.remotebundle2caps.get('stream', []):
bundle2supported = True
# else
# Server doesn't support bundle2 stream clone or doesn't support
# the versions we support. Fall back and possibly allow legacy.
# Ensures legacy code path uses available bundle2.
- if bailifbundle2supported and bundle2supported:
+ if bundle2supported and not bundle2:
return False, None
# Ensures bundle2 doesn't try to do a stream clone if it isn't supported.
- #elif not bailifbundle2supported and not bundle2supported:
- # return False, None
+ elif bundle2 and not bundle2supported:
+ return False, None
# Streaming clone only works on empty repositories.
if len(repo):
@@ -235,10 +239,26 @@
def generatev1wireproto(repo):
"""Emit content for version 1 of streaming clone suitable for the wire.
- This is the data output from ``generatev1()`` with a header line
- indicating file count and byte size.
+ This is the data output from ``generatev1()`` with 2 header lines. The
+ first line indicates overall success. The 2nd contains the file count and
+ byte size of payload.
+
+ The success line contains "0" for success, "1" for stream generation not
+ allowed, and "2" for error locking the repository (possibly indicating
+ a permissions error for the server process).
"""
- filecount, bytecount, it = generatev1(repo)
+ if not allowservergeneration(repo):
+ yield '1\n'
+ return
+
+ try:
+ filecount, bytecount, it = generatev1(repo)
+ except error.LockError:
+ yield '2\n'
+ return
+
+ # Indicates successful response.
+ yield '0\n'
yield '%d %d\n' % (filecount, bytecount)
for chunk in it:
yield chunk
@@ -412,3 +432,203 @@
def apply(self, repo):
return applybundlev1(repo, self._fh)
+
+# type of file to stream
+_fileappend = 0 # append only file
+_filefull = 1 # full snapshot file
+
+# Source of the file
+_srcstore = 's' # store (svfs)
+_srccache = 'c' # cache (cache)
+
+# This is its own function so extensions can override it.
+def _walkstreamfullstorefiles(repo):
+ """list snapshot file from the store"""
+ fnames = []
+ if not repo.publishing():
+ fnames.append('phaseroots')
+ return fnames
+
+def _filterfull(entry, copy, vfsmap):
+ """actually copy the snapshot files"""
+ src, name, ftype, data = entry
+ if ftype != _filefull:
+ return entry
+ return (src, name, ftype, copy(vfsmap[src].join(name)))
+
+@contextlib.contextmanager
+def maketempcopies():
+ """return a function to temporary copy file"""
+ files = []
+ try:
+ def copy(src):
+ fd, dst = tempfile.mkstemp()
+ os.close(fd)
+ files.append(dst)
+ util.copyfiles(src, dst, hardlink=True)
+ return dst
+ yield copy
+ finally:
+ for tmp in files:
+ util.tryunlink(tmp)
+
+def _makemap(repo):
+ """make a (src -> vfs) map for the repo"""
+ vfsmap = {
+ _srcstore: repo.svfs,
+ _srccache: repo.cachevfs,
+ }
+ # we keep repo.vfs out of the map on purpose, there are too many dangers there
+ # (eg: .hg/hgrc)
+ assert repo.vfs not in vfsmap.values()
+
+ return vfsmap
+
+def _emit(repo, entries, totalfilesize):
+ """actually emit the stream bundle"""
+ vfsmap = _makemap(repo)
+ progress = repo.ui.progress
+ progress(_('bundle'), 0, total=totalfilesize, unit=_('bytes'))
+ with maketempcopies() as copy:
+ try:
+ # copy is delayed until we are in the try
+ entries = [_filterfull(e, copy, vfsmap) for e in entries]
+ yield None # this releases the lock on the repository
+ seen = 0
+
+ for src, name, ftype, data in entries:
+ vfs = vfsmap[src]
+ yield src
+ yield util.uvarintencode(len(name))
+ if ftype == _fileappend:
+ fp = vfs(name)
+ size = data
+ elif ftype == _filefull:
+ fp = open(data, 'rb')
+ size = util.fstat(fp).st_size
+ try:
+ yield util.uvarintencode(size)
+ yield name
+ if size <= 65536:
+ chunks = (fp.read(size),)
+ else:
+ chunks = util.filechunkiter(fp, limit=size)
+ for chunk in chunks:
+ seen += len(chunk)
+ progress(_('bundle'), seen, total=totalfilesize,
+ unit=_('bytes'))
+ yield chunk
+ finally:
+ fp.close()
+ finally:
+ progress(_('bundle'), None)
+
+def generatev2(repo):
+ """Emit content for version 2 of a streaming clone.
+
+ the data stream consists of the following entries:
+ 1) A char representing the file destination (eg: store or cache)
+ 2) A varint containing the length of the filename
+ 3) A varint containing the length of file data
+ 4) N bytes containing the filename (the internal, store-agnostic form)
+ 5) N bytes containing the file data
+
+ Returns a 3-tuple of (file count, file size, data iterator).
+ """
+
+ with repo.lock():
+
+ entries = []
+ totalfilesize = 0
+
+ repo.ui.debug('scanning\n')
+ for name, ename, size in _walkstreamfiles(repo):
+ if size:
+ entries.append((_srcstore, name, _fileappend, size))
+ totalfilesize += size
+ for name in _walkstreamfullstorefiles(repo):
+ if repo.svfs.exists(name):
+ totalfilesize += repo.svfs.lstat(name).st_size
+ entries.append((_srcstore, name, _filefull, None))
+ for name in cacheutil.cachetocopy(repo):
+ if repo.cachevfs.exists(name):
+ totalfilesize += repo.cachevfs.lstat(name).st_size
+ entries.append((_srccache, name, _filefull, None))
+
+ chunks = _emit(repo, entries, totalfilesize)
+ first = next(chunks)
+ assert first is None
+
+ return len(entries), totalfilesize, chunks
+
+@contextlib.contextmanager
+def nested(*ctxs):
+ with warnings.catch_warnings():
+ # For some reason, Python decided 'nested' was deprecated without
+ # replacement. They officially recommended filtering the deprecation
+ # warning for people who actually need the feature.
+ warnings.filterwarnings("ignore",category=DeprecationWarning)
+ with contextlib.nested(*ctxs):
+ yield
+
+def consumev2(repo, fp, filecount, filesize):
+ """Apply the contents from a version 2 streaming clone.
+
+ Data is read from an object that only needs to provide a ``read(size)``
+ method.
+ """
+ with repo.lock():
+ repo.ui.status(_('%d files to transfer, %s of data\n') %
+ (filecount, util.bytecount(filesize)))
+
+ start = util.timer()
+ handledbytes = 0
+ progress = repo.ui.progress
+
+ progress(_('clone'), handledbytes, total=filesize, unit=_('bytes'))
+
+ vfsmap = _makemap(repo)
+
+ with repo.transaction('clone'):
+ ctxs = (vfs.backgroundclosing(repo.ui)
+ for vfs in vfsmap.values())
+ with nested(*ctxs):
+ for i in range(filecount):
+ src = fp.read(1)
+ vfs = vfsmap[src]
+ namelen = util.uvarintdecodestream(fp)
+ datalen = util.uvarintdecodestream(fp)
+
+ name = fp.read(namelen)
+
+ if repo.ui.debugflag:
+ repo.ui.debug('adding [%s] %s (%s)\n' %
+ (src, name, util.bytecount(datalen)))
+
+ with vfs(name, 'w') as ofp:
+ for chunk in util.filechunkiter(fp, limit=datalen):
+ handledbytes += len(chunk)
+ progress(_('clone'), handledbytes, total=filesize,
+ unit=_('bytes'))
+ ofp.write(chunk)
+
+ # force @filecache properties to be reloaded from
+ # streamclone-ed file at next access
+ repo.invalidate(clearfilecache=True)
+
+ elapsed = util.timer() - start
+ if elapsed <= 0:
+ elapsed = 0.001
+ progress(_('clone'), None)
+ repo.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
+ (util.bytecount(handledbytes), elapsed,
+ util.bytecount(handledbytes / elapsed)))
+
+def applybundlev2(repo, fp, filecount, filesize, requirements):
+ missingreqs = [r for r in requirements if r not in repo.supported]
+ if missingreqs:
+ raise error.Abort(_('unable to apply stream clone: '
+ 'unsupported format: %s') %
+ ', '.join(sorted(missingreqs)))
+
+ consumev2(repo, fp, filecount, filesize)
diff -r 87676e8ee056 -r 27b6df1b5adb mercurial/subrepo.py
--- a/mercurial/subrepo.py Mon Jan 08 16:07:51 2018 -0800
+++ b/mercurial/subrepo.py Mon Jan 22 17:53:02 2018 -0500
@@ -55,13 +55,13 @@
def _getstorehashcachename(remotepath):
'''get a unique filename for the store hash cache of a remote repository'''
- return hashlib.sha1(_expandedabspath(remotepath)).hexdigest()[0:12]
+ return node.hex(hashlib.sha1(_expandedabspath(remotepath)).digest())[0:12]
class SubrepoAbort(error.Abort):
"""Exception class used to avoid handling a subrepo error more than once"""
def __init__(self, *args, **kw):
- self.subrepo = kw.pop('subrepo', None)
- self.cause = kw.pop('cause', None)
+ self.subrepo = kw.pop(r'subrepo', None)
+ self.cause = kw.pop(r'cause', None)
error.Abort.__init__(self, *args, **kw)
def annotatesubrepoerror(func):
@@ -389,24 +389,44 @@
if util.safehasattr(repo, '_subparent'):
source = util.url(repo._subsource)
if source.isabs():
- return str(source)
+ return bytes(source)
source.path = posixpath.normpath(source.path)
parent = _abssource(repo._subparent, push, abort=False)
if parent:
parent = util.url(util.pconvert(parent))
parent.path = posixpath.join(parent.path or '', source.path)
parent.path = posixpath.normpath(parent.path)
- return str(parent)
+ return bytes(parent)
else: # recursion reached top repo
+ path = None
if util.safehasattr(repo, '_subtoppath'):
- return repo._subtoppath
- if push and repo.ui.config('paths', 'default-push'):
- return repo.ui.config('paths', 'default-push')
- if repo.ui.config('paths', 'default'):
- return repo.ui.config('paths', 'default')
- if repo.shared():
- # chop off the .hg component to get the default path form
+ path = repo._subtoppath
+ elif push and repo.ui.config('paths', 'default-push'):
+ path = repo.ui.config('paths', 'default-push')
+ elif repo.ui.config('paths', 'default'):
+ path = repo.ui.config('paths', 'default')
+ elif repo.shared():
+ # chop off the .hg component to get the default path form. This has
+ # already run through vfsmod.vfs(..., realpath=True), so it doesn't
+ # have problems with 'C:'
return os.path.dirname(repo.sharedpath)
+ if path:
+ # issue5770: 'C:\' and 'C:' are not equivalent paths. The former is
+ # as expected: an absolute path to the root of the C: drive. The
+ # latter is a relative path, and works like so:
+ #
+ # C:\>cd C:\some\path
+ # C:\>D:
+ # D:\>python -c "import os; print os.path.abspath('C:')"
+ # C:\some\path
+ #
+ # D:\>python -c "import os; print os.path.abspath('C:relative')"
+ # C:\some\path\relative
+ if util.hasdriveletter(path):
+ if len(path) == 2 or path[2:3] not in br'\/':
+ path = os.path.abspath(path)
+ return path
+
if abort:
raise error.Abort(_("default path for subrepository not found"))
@@ -789,7 +809,7 @@
yield '# %s\n' % _expandedabspath(remotepath)
vfs = self._repo.vfs
for relname in filelist:
- filehash = hashlib.sha1(vfs.tryread(relname)).hexdigest()
+ filehash = node.hex(hashlib.sha1(vfs.tryread(relname)).digest())
yield '%s = %s\n' % (relname, filehash)
@propertycache
@@ -811,7 +831,7 @@
with self._repo.lock():
storehash = list(self._calcstorehash(remotepath))
vfs = self._cachestorehashvfs
- vfs.writelines(cachefile, storehash, mode='w', notindexed=True)
+ vfs.writelines(cachefile, storehash, mode='wb', notindexed=True)
def _getctx(self):
'''fetch the context for this subrepo revision, possibly a workingctx
@@ -841,11 +861,7 @@
if defpath != defpushpath:
addpathconfig('default-push', defpushpath)
- fp = self._repo.vfs("hgrc", "w", text=True)
- try:
- fp.write(''.join(lines))
- finally:
- fp.close()
+ self._repo.vfs.write('hgrc', util.tonativeeol(''.join(lines)))
@annotatesubrepoerror
def add(self, ui, match, prefix, explicitonly, **opts):
@@ -1154,24 +1170,24 @@
# 2. update the subrepo to the revision specified in
# the corresponding substate dictionary
self.ui.status(_('reverting subrepo %s\n') % substate[0])
- if not opts.get('no_backup'):
+ if not opts.get(r'no_backup'):
# Revert all files on the subrepo, creating backups
# Note that this will not recursively revert subrepos
# We could do it if there was a set:subrepos() predicate
opts = opts.copy()
- opts['date'] = None
- opts['rev'] = substate[1]
+ opts[r'date'] = None
+ opts[r'rev'] = substate[1]
self.filerevert(*pats, **opts)
# Update the repo to the revision specified in the given substate
- if not opts.get('dry_run'):
+ if not opts.get(r'dry_run'):
self.get(substate, overwrite=True)
def filerevert(self, *pats, **opts):
- ctx = self._repo[opts['rev']]
+ ctx = self._repo[opts[r'rev']]
parents = self._repo.dirstate.parents()
- if opts.get('all'):
+ if opts.get(r'all'):
pats = ['set:modified()']
else:
pats = []
@@ -1244,7 +1260,7 @@
if not self.ui.interactive():
# Making stdin be a pipe should prevent svn from behaving
# interactively even if we can't pass --non-interactive.
- extrakw['stdin'] = subprocess.PIPE
+ extrakw[r'stdin'] = subprocess.PIPE
# Starting in svn 1.5 --non-interactive is a global flag
# instead of being per-command, but we need to support 1.4 so
# we have to be intelligent about what commands take
@@ -1284,6 +1300,9 @@
raise error.Abort(_('cannot retrieve svn tool version'))
return (int(m.group(1)), int(m.group(2)))
+ def _svnmissing(self):
+ return not self.wvfs.exists('.svn')
+
def _wcrevs(self):
# Get the working directory revision as well as the last
# commit revision so we can compare the subrepo state with
@@ -1331,7 +1350,10 @@
return True, True, bool(missing)
return bool(changes), False, bool(missing)
+ @annotatesubrepoerror
def dirty(self, ignoreupdate=False, missing=False):
+ if self._svnmissing():
+ return self._state[1] != ''
wcchanged = self._wcchanged()
changed = wcchanged[0] or (missing and wcchanged[2])
if not changed:
diff -r 87676e8ee056 -r 27b6df1b5adb mercurial/templatefilters.py
--- a/mercurial/templatefilters.py Mon Jan 08 16:07:51 2018 -0800
+++ b/mercurial/templatefilters.py Mon Jan 22 17:53:02 2018 -0500
@@ -348,6 +348,11 @@
"""Date. Returns a date like "2006-09-18"."""
return util.shortdate(text)
+@templatefilter('slashpath')
+def slashpath(path):
+ """Any text. Replaces the native path separator with slash."""
+ return util.pconvert(path)
+
@templatefilter('splitlines')
def splitlines(text):
"""Any text. Split text into a list of lines."""
diff -r 87676e8ee056 -r 27b6df1b5adb mercurial/templatekw.py
--- a/mercurial/templatekw.py Mon Jan 08 16:07:51 2018 -0800
+++ b/mercurial/templatekw.py Mon Jan 22 17:53:02 2018 -0500
@@ -17,6 +17,7 @@
encoding,
error,
hbisect,
+ i18n,
obsutil,
patch,
pycompat,
@@ -301,6 +302,30 @@
return getrenamed
+def getlogcolumns():
+ """Return a dict of log column labels"""
+ _ = pycompat.identity # temporarily disable gettext
+ # i18n: column positioning for "hg log"
+ columns = _('bookmark: %s\n'
+ 'branch: %s\n'
+ 'changeset: %s\n'
+ 'copies: %s\n'
+ 'date: %s\n'
+ 'extra: %s=%s\n'
+ 'files+: %s\n'
+ 'files-: %s\n'
+ 'files: %s\n'
+ 'instability: %s\n'
+ 'manifest: %s\n'
+ 'obsolete: %s\n'
+ 'parent: %s\n'
+ 'phase: %s\n'
+ 'summary: %s\n'
+ 'tag: %s\n'
+ 'user: %s\n')
+ return dict(zip([s.split(':', 1)[0] for s in columns.splitlines()],
+ i18n._(columns).splitlines(True)))
+
# default templates internally used for rendering of lists
defaulttempl = {
'parent': '{rev}:{node|formatnode} ',
@@ -513,6 +538,8 @@
return '@'
elif ctx.obsolete():
return 'x'
+ elif ctx.isunstable():
+ return '*'
elif ctx.closesbranch():
return '_'
else:
@@ -608,6 +635,7 @@
# the verbosity templatekw available.
succsandmarkers = showsuccsandmarkers(**args)
+ args = pycompat.byteskwargs(args)
ui = args['ui']
values = []
@@ -816,7 +844,7 @@
@templatekeyword('phaseidx')
def showphaseidx(repo, ctx, templ, **args):
- """Integer. The changeset phase index."""
+ """Integer. The changeset phase index. (ADVANCED)"""
return ctx.phase()
@templatekeyword('rev')
@@ -860,12 +888,6 @@
"""List of strings. Any tags associated with the changeset."""
return shownames('tags', **args)
-def loadkeyword(ui, extname, registrarobj):
- """Load template keyword from specified registrarobj
- """
- for name, func in registrarobj._table.iteritems():
- keywords[name] = func
-
@templatekeyword('termwidth')
def showtermwidth(repo, ctx, templ, **args):
"""Integer. The width of the current terminal."""
@@ -891,5 +913,24 @@
return showlist('instability', args['ctx'].instabilities(), args,
plural='instabilities')
+@templatekeyword('verbosity')
+def showverbosity(ui, **args):
+ """String. The current output verbosity in 'debug', 'quiet', 'verbose',
+ or ''."""
+ # see cmdutil.changeset_templater for priority of these flags
+ if ui.debugflag:
+ return 'debug'
+ elif ui.quiet:
+ return 'quiet'
+ elif ui.verbose:
+ return 'verbose'
+ return ''
+
+def loadkeyword(ui, extname, registrarobj):
+ """Load template keyword from specified registrarobj
+ """
+ for name, func in registrarobj._table.iteritems():
+ keywords[name] = func
+
# tell hggettext to extract docstrings from these functions:
i18nfunctions = keywords.values()
diff -r 87676e8ee056 -r 27b6df1b5adb mercurial/templater.py
--- a/mercurial/templater.py Mon Jan 08 16:07:51 2018 -0800
+++ b/mercurial/templater.py Mon Jan 22 17:53:02 2018 -0500
@@ -184,6 +184,8 @@
return parsed, n + 1
parseres, pos = p.parse(tokenize(tmpl, n + 1, stop, '}'))
+ if not tmpl.endswith('}', n + 1, pos):
+ raise error.ParseError(_("invalid token"), pos)
parsed.append(parseres)
if quote:
@@ -257,6 +259,8 @@
def compileexp(exp, context, curmethods):
"""Compile parsed template tree to (func, data) pair"""
+ if not exp:
+ raise error.ParseError(_("missing argument"))
t = exp[0]
if t in curmethods:
return curmethods[t](exp, context)
@@ -382,9 +386,7 @@
raise error.Abort(_("recursive reference '%s' in template") % key)
def runsymbol(context, mapping, key, default=''):
- v = mapping.get(key)
- if v is None:
- v = context._defaults.get(key)
+ v = context.symbol(mapping, key)
if v is None:
# put poison to cut recursion. we can't move this to parsing phase
# because "x = {x}" is allowed if "x" is a keyword. (issue4758)
@@ -395,7 +397,11 @@
except TemplateNotFound:
v = default
if callable(v):
- return v(**pycompat.strkwargs(mapping))
+ # TODO: templatekw functions will be updated to take (context, mapping)
+ # pair instead of **props
+ props = context._resources.copy()
+ props.update(mapping)
+ return v(**pycompat.strkwargs(props))
return v
def buildtemplate(exp, context):
@@ -626,7 +632,7 @@
return [s]
return []
- ctx = mapping['ctx']
+ ctx = context.resource(mapping, 'ctx')
chunks = ctx.diff(match=ctx.match([], getpatterns(0), getpatterns(1)))
return ''.join(chunks)
@@ -639,8 +645,8 @@
raise error.ParseError(_('extdata expects one argument'))
source = evalstring(context, mapping, args['source'])
- cache = mapping['cache'].setdefault('extdata', {})
- ctx = mapping['ctx']
+ cache = context.resource(mapping, 'cache').setdefault('extdata', {})
+ ctx = context.resource(mapping, 'ctx')
if source in cache:
data = cache[source]
else:
@@ -656,10 +662,13 @@
raise error.ParseError(_("files expects one argument"))
raw = evalstring(context, mapping, args[0])
- ctx = mapping['ctx']
+ ctx = context.resource(mapping, 'ctx')
m = ctx.match([raw])
files = list(ctx.matches(m))
- return templatekw.showlist("file", files, mapping)
+ # TODO: pass (context, mapping) pair to keyword function
+ props = context._resources.copy()
+ props.update(mapping)
+ return templatekw.showlist("file", files, props)
@templatefunc('fill(text[, width[, initialident[, hangindent]]])')
def fill(context, mapping, args):
@@ -692,7 +701,7 @@
# i18n: "formatnode" is a keyword
raise error.ParseError(_("formatnode expects one argument"))
- ui = mapping['ui']
+ ui = context.resource(mapping, 'ui')
node = evalstring(context, mapping, args[0])
if ui.debugflag:
return node
@@ -858,7 +867,7 @@
# i18n: "label" is a keyword
raise error.ParseError(_("label expects two arguments"))
- ui = mapping['ui']
+ ui = context.resource(mapping, 'ui')
thing = evalstring(context, mapping, args[1])
# preserve unknown symbol as literal so effects like 'red', 'bold',
# etc. don't need to be quoted
@@ -880,7 +889,10 @@
if len(args) == 1:
pattern = evalstring(context, mapping, args[0])
- return templatekw.showlatesttags(pattern, **mapping)
+ # TODO: pass (context, mapping) pair to keyword function
+ props = context._resources.copy()
+ props.update(mapping)
+ return templatekw.showlatesttags(pattern, **pycompat.strkwargs(props))
@templatefunc('localdate(date[, tz])')
def localdate(context, mapping, args):
@@ -1005,17 +1017,18 @@
"obsmakers")
raise error.ParseError(msg)
-@templatefunc('obsfateverb(successors)')
+@templatefunc('obsfateverb(successors, markers)')
def obsfateverb(context, mapping, args):
"""Compute obsfate related information based on successors (EXPERIMENTAL)"""
- if len(args) != 1:
+ if len(args) != 2:
# i18n: "obsfateverb" is a keyword
- raise error.ParseError(_("obsfateverb expects one arguments"))
+ raise error.ParseError(_("obsfateverb expects two arguments"))
successors = evalfuncarg(context, mapping, args[0])
+ markers = evalfuncarg(context, mapping, args[1])
try:
- return obsutil.successorsetverb(successors)
+ return obsutil.obsfateverb(successors, markers)
except TypeError:
# i18n: "obsfateverb" is a keyword
errmsg = _("obsfateverb first argument should be countable")
@@ -1029,7 +1042,7 @@
# i18n: "relpath" is a keyword
raise error.ParseError(_("relpath expects one argument"))
- repo = mapping['ctx'].repo()
+ repo = context.resource(mapping, 'ctx').repo()
path = evalstring(context, mapping, args[0])
return repo.pathto(path)
@@ -1042,7 +1055,7 @@
raise error.ParseError(_("revset expects one or more arguments"))
raw = evalstring(context, mapping, args[0])
- ctx = mapping['ctx']
+ ctx = context.resource(mapping, 'ctx')
repo = ctx.repo()
def query(expr):
@@ -1054,7 +1067,8 @@
revs = query(revsetlang.formatspec(raw, *formatargs))
revs = list(revs)
else:
- revsetcache = mapping['cache'].setdefault("revsetcache", {})
+ cache = context.resource(mapping, 'cache')
+ revsetcache = cache.setdefault("revsetcache", {})
if raw in revsetcache:
revs = revsetcache[raw]
else:
@@ -1062,7 +1076,11 @@
revs = list(revs)
revsetcache[raw] = revs
- return templatekw.showrevslist("revision", revs, **mapping)
+ # TODO: pass (context, mapping) pair to keyword function
+ props = context._resources.copy()
+ props.update(mapping)
+ return templatekw.showrevslist("revision", revs,
+ **pycompat.strkwargs(props))
@templatefunc('rstdoc(text, style)')
def rstdoc(context, mapping, args):
@@ -1114,7 +1132,7 @@
# _partialmatch() of filtered changelog could take O(len(repo)) time,
# which would be unacceptably slow. so we look for hash collision in
# unfiltered space, which means some hashes may be slightly longer.
- cl = mapping['ctx']._repo.unfiltered().changelog
+ cl = context.resource(mapping, 'ctx')._repo.unfiltered().changelog
return cl.shortest(node, minlength)
@templatefunc('strip(text[, chars])')
@@ -1289,17 +1307,42 @@
filter uses function to transform value. syntax is
{key|filter1|filter2|...}.'''
- def __init__(self, loader, filters=None, defaults=None, aliases=()):
+ def __init__(self, loader, filters=None, defaults=None, resources=None,
+ aliases=()):
self._loader = loader
if filters is None:
filters = {}
self._filters = filters
if defaults is None:
defaults = {}
+ if resources is None:
+ resources = {}
self._defaults = defaults
+ self._resources = resources
self._aliasmap = _aliasrules.buildmap(aliases)
self._cache = {} # key: (func, data)
+ def symbol(self, mapping, key):
+ """Resolve symbol to value or function; None if nothing found"""
+ v = None
+ if key not in self._resources:
+ v = mapping.get(key)
+ if v is None:
+ v = self._defaults.get(key)
+ return v
+
+ def resource(self, mapping, key):
+ """Return internal data (e.g. cache) used for keyword/function
+ evaluation"""
+ v = None
+ if key in self._resources:
+ v = mapping.get(key)
+ if v is None:
+ v = self._resources.get(key)
+ if v is None:
+ raise error.Abort(_('template resource not available: %s') % key)
+ return v
+
def _load(self, t):
'''load, parse, and cache a template'''
if t not in self._cache:
@@ -1393,17 +1436,27 @@
class templater(object):
- def __init__(self, filters=None, defaults=None, cache=None, aliases=(),
- minchunk=1024, maxchunk=65536):
- '''set up template engine.
- filters is dict of functions. each transforms a value into another.
- defaults is dict of default map definitions.
- aliases is list of alias (name, replacement) pairs.
- '''
+ def __init__(self, filters=None, defaults=None, resources=None,
+ cache=None, aliases=(), minchunk=1024, maxchunk=65536):
+ """Create template engine optionally with preloaded template fragments
+
+ - ``filters``: a dict of functions to transform a value into another.
+ - ``defaults``: a dict of symbol values/functions; may be overridden
+ by a ``mapping`` dict.
+ - ``resources``: a dict of internal data (e.g. cache), inaccessible
+ from user template; may be overridden by a ``mapping`` dict.
+ - ``cache``: a dict of preloaded template fragments.
+ - ``aliases``: a list of alias (name, replacement) pairs.
+
+ self.cache may be updated later to register additional template
+ fragments.
+ """
if filters is None:
filters = {}
if defaults is None:
defaults = {}
+ if resources is None:
+ resources = {}
if cache is None:
cache = {}
self.cache = cache.copy()
@@ -1411,15 +1464,17 @@
self.filters = templatefilters.filters.copy()
self.filters.update(filters)
self.defaults = defaults
+ self._resources = {'templ': self}
+ self._resources.update(resources)
self._aliases = aliases
self.minchunk, self.maxchunk = minchunk, maxchunk
self.ecache = {}
@classmethod
- def frommapfile(cls, mapfile, filters=None, defaults=None, cache=None,
- minchunk=1024, maxchunk=65536):
+ def frommapfile(cls, mapfile, filters=None, defaults=None, resources=None,
+ cache=None, minchunk=1024, maxchunk=65536):
"""Create templater from the specified map file"""
- t = cls(filters, defaults, cache, [], minchunk, maxchunk)
+ t = cls(filters, defaults, resources, cache, [], minchunk, maxchunk)
cache, tmap, aliases = _readmapfile(mapfile)
t.cache.update(cache)
t.map = tmap
@@ -1456,7 +1511,7 @@
except KeyError:
raise error.Abort(_('invalid template engine: %s') % ttype)
self.ecache[ttype] = ecls(self.load, self.filters, self.defaults,
- self._aliases)
+ self._resources, self._aliases)
proc = self.ecache[ttype]
stream = proc.process(t, mapping)
diff -r 87676e8ee056 -r 27b6df1b5adb mercurial/templates/gitweb/changelogentry.tmpl
--- a/mercurial/templates/gitweb/changelogentry.tmpl Mon Jan 08 16:07:51 2018 -0800
+++ b/mercurial/templates/gitweb/changelogentry.tmpl Mon Jan 22 17:53:02 2018 -0500
@@ -1,5 +1,9 @@
{file|urlescape}{if(linerange,
-' (following lines {linerange}{if(descend, ', descending')} back to filelog)')}
+' (following lines {linerange}{if(descend, ', descending')} all revisions for this file)')}