# HG changeset patch
# User Matt Mackall
# Date 1390336206 21600
# Node ID fe8e254c7ad639f03f27a24d857d2019105b56b2
# Parent 8c69c69dbcd2a6fef48a01e8e7c6c903b9ec5a42
# Parent e5a2177d97f04e851cede67712ec6ecbc72818d7
merge default into stable for 2.9 code freeze

diff -r 8c69c69dbcd2 -r fe8e254c7ad6 .hgignore
--- a/.hgignore	Wed Jan 01 21:46:45 2014 -0600
+++ b/.hgignore	Tue Jan 21 14:30:06 2014 -0600
@@ -1,6 +1,7 @@
 syntax: glob
 
 *.elc
+*.tmp
 *.orig
 *.rej
 *~
diff -r 8c69c69dbcd2 -r fe8e254c7ad6 Makefile
--- a/Makefile	Wed Jan 01 21:46:45 2014 -0600
+++ b/Makefile	Tue Jan 21 14:30:06 2014 -0600
@@ -53,7 +53,8 @@
 
 clean:
 	-$(PYTHON) setup.py clean --all # ignore errors from this command
-	find . \( -name '*.py[cdo]' -o -name '*.so' \) -exec rm -f '{}' ';'
+	find contrib doc hgext i18n mercurial tests \
+		\( -name '*.py[cdo]' -o -name '*.so' \) -exec rm -f '{}' ';'
 	rm -f $(addprefix mercurial/,$(notdir $(wildcard mercurial/pure/[a-z]*.py)))
 	rm -f MANIFEST MANIFEST.in mercurial/__version__.py tests/*.err
 	rm -rf build mercurial/locale
@@ -107,7 +108,7 @@
 	  mercurial/fileset.py mercurial/revset.py \
 	  mercurial/templatefilters.py mercurial/templatekw.py \
 	  mercurial/filemerge.py \
-	  $(DOCFILES) > i18n/hg.pot
+	  $(DOCFILES) > i18n/hg.pot.tmp
         # All strings marked for translation in Mercurial contain
         # ASCII characters only. But some files contain string
         # literals like this '\037\213'. xgettext thinks it has to
@@ -119,11 +120,17 @@
 	  --msgid-bugs-address "" \
 	  --copyright-holder "Matt Mackall and others" \
 	  --from-code ISO-8859-1 --join --sort-by-file --add-comments=i18n: \
-	  -d hg -p i18n -o hg.pot
-	$(PYTHON) i18n/posplit i18n/hg.pot
+	  -d hg -p i18n -o hg.pot.tmp
+	$(PYTHON) i18n/posplit i18n/hg.pot.tmp
+	# The target file is not created before the last step. So it never is in
+	# an intermediate state.
+	mv -f i18n/hg.pot.tmp i18n/hg.pot
 
 %.po: i18n/hg.pot
-	msgmerge --no-location --update $@ $^
+	# work on a temporary copy for never having a half completed target
+	cp $@ $@.tmp
+	msgmerge --no-location --update $@.tmp $^
+	mv -f $@.tmp $@
 
 .PHONY: help all local build doc clean install install-bin install-doc \
 	install-home install-home-bin install-home-doc dist dist-notests tests \
diff -r 8c69c69dbcd2 -r fe8e254c7ad6 contrib/bash_completion
--- a/contrib/bash_completion	Wed Jan 01 21:46:45 2014 -0600
+++ b/contrib/bash_completion	Tue Jan 21 14:30:06 2014 -0600
@@ -76,7 +76,7 @@
 {
     local i
     for i in $(compgen -d -- "$cur"); do
-	test !
-d "$i"/.hg || COMPREPLY=(${COMPREPLY[@]:-} "$i") done } @@ -84,7 +84,6 @@ { local files="$(_hg_cmd debugpathcomplete $1 "$cur")" local IFS=$'\n' - compopt -o filenames 2>/dev/null COMPREPLY=(${COMPREPLY[@]:-} $(compgen -W '$files' -- "$cur")) } @@ -92,10 +91,16 @@ { local files="$(_hg_cmd status -n$1 "glob:$cur**")" local IFS=$'\n' - compopt -o filenames 2>/dev/null COMPREPLY=(${COMPREPLY[@]:-} $(compgen -W '$files' -- "$cur")) } +_hg_branches() +{ + local branches="$(_hg_cmd branches -q)" + local IFS=$'\n' + COMPREPLY=(${COMPREPLY[@]:-} $(compgen -W '$branches' -- "$cur")) +} + _hg_bookmarks() { local bookmarks="$(_hg_cmd bookmarks -q)" @@ -117,25 +122,45 @@ local filters="$1" for ((i=1; $i<=$COMP_CWORD; i++)); do - if [[ "${COMP_WORDS[i]}" != -* ]]; then - if [[ ${COMP_WORDS[i-1]} == @($filters|$global_args) ]]; then - continue - fi - count=$(($count + 1)) - fi + if [[ "${COMP_WORDS[i]}" != -* ]]; then + if [[ ${COMP_WORDS[i-1]} == @($filters|$global_args) ]]; then + continue + fi + count=$(($count + 1)) + fi done echo $(($count - 1)) } +_hg_fix_wordlist() +{ + local LASTCHAR=' ' + if [ ${#COMPREPLY[@]} = 1 ]; then + [ -d "$COMPREPLY" ] && LASTCHAR=/ + COMPREPLY=$(printf %q%s "$COMPREPLY" "$LASTCHAR") + else + for ((i=0; i < ${#COMPREPLY[@]}; i++)); do + [ -d "${COMPREPLY[$i]}" ] && COMPREPLY[$i]=${COMPREPLY[$i]}/ + done + fi +} + _hg() { - local cur prev cmd cmd_index opts i + local cur prev cmd cmd_index opts i aliashg # global options that receive an argument local global_args='--cwd|-R|--repository' local hg="$1" local canonical=0 + aliashg=$(alias $hg 2>/dev/null) + if [[ -n "$aliashg" ]]; then + aliashg=${aliashg#"alias $hg='"} + aliashg=${aliashg%"'"} + hg=$aliashg + fi + COMPREPLY=() cur="$2" prev="$3" @@ -144,145 +169,169 @@ # (first non-option argument that doesn't follow a global option that # receives an argument) for ((i=1; $i<=$COMP_CWORD; i++)); do - if [[ ${COMP_WORDS[i]} != -* ]]; then - if [[ ${COMP_WORDS[i-1]} != @($global_args) ]]; then - cmd="${COMP_WORDS[i]}" - cmd_index=$i - break - fi - fi + if [[ ${COMP_WORDS[i]} != -* ]]; then + if [[ ${COMP_WORDS[i-1]} != @($global_args) ]]; then + cmd="${COMP_WORDS[i]}" + cmd_index=$i + break + fi + fi done if [[ "$cur" == -* ]]; then - if [ "$(type -t "_hg_opt_$cmd")" = function ] && "_hg_opt_$cmd"; then - return - fi + if [ "$(type -t "_hg_opt_$cmd")" = function ] && "_hg_opt_$cmd"; then + _hg_fix_wordlist + return + fi - opts=$(_hg_cmd debugcomplete --options "$cmd") + opts=$(_hg_cmd debugcomplete --options "$cmd") - COMPREPLY=(${COMPREPLY[@]:-} $(compgen -W '$opts' -- "$cur")) - return + COMPREPLY=(${COMPREPLY[@]:-} $(compgen -W '$opts' -- "$cur")) + _hg_fix_wordlist + return fi # global options case "$prev" in - -R|--repository) - _hg_paths - _hg_repos - return - ;; - --cwd) - # Stick with default bash completion - return - ;; + -R|--repository) + _hg_paths + _hg_repos + _hg_fix_wordlist + return + ;; + --cwd) + # Stick with default bash completion + _hg_fix_wordlist + return + ;; esac if [ -z "$cmd" ] || [ $COMP_CWORD -eq $i ]; then - _hg_commands - return + _hg_commands + _hg_fix_wordlist + return fi # try to generate completion candidates for whatever command the user typed local help if _hg_command_specific; then - return + _hg_fix_wordlist + return fi # canonicalize the command name and try again help=$(_hg_cmd help "$cmd") if [ $? 
-ne 0 ]; then - # Probably either the command doesn't exist or it's ambiguous - return + # Probably either the command doesn't exist or it's ambiguous + return fi cmd=${help#hg } cmd=${cmd%%[$' \n']*} canonical=1 _hg_command_specific + _hg_fix_wordlist } _hg_command_specific() { if [ "$(type -t "_hg_cmd_$cmd")" = function ]; then - "_hg_cmd_$cmd" - return 0 + "_hg_cmd_$cmd" + return 0 fi - if [ "$cmd" != status ] && [ "$prev" = -r ] || [ "$prev" == --rev ]; then - if [ $canonical = 1 ]; then - _hg_labels - return 0 - elif [[ status != "$cmd"* ]]; then - _hg_labels - return 0 - else - return 1 - fi + if [ "$cmd" != status ]; then + case "$prev" in + -r|--rev) + if [[ $canonical = 1 || status != "$cmd"* ]]; then + _hg_labels + return 0 + fi + return 1 + ;; + -B|--bookmark) + if [[ $canonical = 1 || status != "$cmd"* ]]; then + _hg_bookmarks + return 0 + fi + return 1 + ;; + -b|--branch) + if [[ $canonical = 1 || status != "$cmd"* ]]; then + _hg_branches + return 0 + fi + return 1 + ;; + esac fi + local aliascmd=$(_hg_cmd showconfig alias.$cmd | awk '{print $1}') + [ -n "$aliascmd" ] && cmd=$aliascmd + case "$cmd" in - help) - _hg_commands - ;; - export) - if _hg_ext_mq_patchlist qapplied && [ "${COMPREPLY[*]}" ]; then - return 0 - fi - _hg_labels - ;; - manifest|update|up|checkout|co) - _hg_labels - ;; - pull|push|outgoing|incoming) - _hg_paths - _hg_repos - ;; - paths) - _hg_paths - ;; - add) - _hg_status "u" - ;; - merge) - _hg_labels - ;; - commit|ci|record) - _hg_status "mar" - ;; - remove|rm) - _hg_debugpathcomplete -n - ;; - forget) - _hg_debugpathcomplete -fa - ;; - diff) - _hg_status "mar" - ;; - revert) - _hg_debugpathcomplete - ;; - clone) - local count=$(_hg_count_non_option) - if [ $count = 1 ]; then - _hg_paths - fi - _hg_repos - ;; - debugindex|debugindexdot) - COMPREPLY=(${COMPREPLY[@]:-} $(compgen -f -X "!*.i" -- "$cur")) - ;; - debugdata) - COMPREPLY=(${COMPREPLY[@]:-} $(compgen -f -X "!*.d" -- "$cur")) - ;; - *) - return 1 - ;; + help) + _hg_commands + ;; + export) + if _hg_ext_mq_patchlist qapplied && [ "${COMPREPLY[*]}" ]; then + return 0 + fi + _hg_labels + ;; + manifest|update|up|checkout|co) + _hg_labels + ;; + pull|push|outgoing|incoming) + _hg_paths + _hg_repos + ;; + paths) + _hg_paths + ;; + add) + _hg_status "u" + ;; + merge) + _hg_labels + ;; + commit|ci|record) + _hg_status "mar" + ;; + remove|rm) + _hg_debugpathcomplete -n + ;; + forget) + _hg_debugpathcomplete -fa + ;; + diff) + _hg_status "mar" + ;; + revert) + _hg_debugpathcomplete + ;; + clone) + local count=$(_hg_count_non_option) + if [ $count = 1 ]; then + _hg_paths + fi + _hg_repos + ;; + debugindex|debugindexdot) + COMPREPLY=(${COMPREPLY[@]:-} $(compgen -f -X "!*.i" -- "$cur")) + ;; + debugdata) + COMPREPLY=(${COMPREPLY[@]:-} $(compgen -f -X "!*.d" -- "$cur")) + ;; + *) + return 1 + ;; esac return 0 } -complete -o bashdefault -o default -F _hg hg \ - || complete -o default -F _hg hg +complete -o bashdefault -o default -o nospace -F _hg hg \ + || complete -o default -o nospace -F _hg hg # Completion for commands provided by extensions @@ -290,10 +339,8 @@ # bookmarks _hg_cmd_bookmarks() { - if [[ "$prev" = @(-d|--delete|-m|--rename) ]]; then - _hg_bookmarks - return - fi + _hg_bookmarks + return } # mq @@ -302,8 +349,8 @@ local patches patches=$(_hg_cmd $1) if [ $? 
-eq 0 ] && [ "$patches" ]; then - COMPREPLY=(${COMPREPLY[@]:-} $(compgen -W '$patches' -- "$cur")) - return 0 + COMPREPLY=(${COMPREPLY[@]:-} $(compgen -W '$patches' -- "$cur")) + return 0 fi return 1 } @@ -313,19 +360,19 @@ local root=$(_hg_cmd root) local n for n in $(cd "$root"/.hg && compgen -d -- "$cur"); do - # I think we're usually not interested in the regular "patches" queue - # so just filter it. - if [ "$n" != patches ] && [ -e "$root/.hg/$n/series" ]; then - COMPREPLY=(${COMPREPLY[@]:-} "$n") - fi + # I think we're usually not interested in the regular "patches" queue + # so just filter it. + if [ "$n" != patches ] && [ -e "$root/.hg/$n/series" ]; then + COMPREPLY=(${COMPREPLY[@]:-} "$n") + fi done } _hg_cmd_qpop() { if [[ "$prev" = @(-n|--name) ]]; then - _hg_ext_mq_queues - return + _hg_ext_mq_queues + return fi _hg_ext_mq_patchlist qapplied } @@ -333,8 +380,8 @@ _hg_cmd_qpush() { if [[ "$prev" = @(-n|--name) ]]; then - _hg_ext_mq_queues - return + _hg_ext_mq_queues + return fi _hg_ext_mq_patchlist qunapplied } @@ -342,8 +389,8 @@ _hg_cmd_qgoto() { if [[ "$prev" = @(-n|--name) ]]; then - _hg_ext_mq_queues - return + _hg_ext_mq_queues + return fi _hg_ext_mq_patchlist qseries } @@ -352,7 +399,7 @@ { local qcmd=qunapplied if [[ "$prev" = @(-r|--rev) ]]; then - qcmd=qapplied + qcmd=qapplied fi _hg_ext_mq_patchlist $qcmd } @@ -360,7 +407,7 @@ _hg_cmd_qfinish() { if [[ "$prev" = @(-a|--applied) ]]; then - return + return fi _hg_ext_mq_patchlist qapplied } @@ -368,8 +415,8 @@ _hg_cmd_qsave() { if [[ "$prev" = @(-n|--name) ]]; then - _hg_ext_mq_queues - return + _hg_ext_mq_queues + return fi } @@ -382,6 +429,10 @@ _hg_cmd_strip() { + if [[ "$prev" = @(-B|--bookmark) ]]; then + _hg_bookmarks + return + fi _hg_labels } @@ -412,7 +463,7 @@ { local count=$(_hg_count_non_option) if [ $count = 1 ]; then - _hg_paths + _hg_paths fi _hg_repos } @@ -433,15 +484,15 @@ local prefix='' if [[ "$cur" == +* ]]; then - prefix=+ + prefix=+ elif [[ "$cur" == -* ]]; then - prefix=- + prefix=- fi local ncur=${cur#[-+]} if ! [ "$prefix" ]; then - _hg_ext_mq_patchlist qseries - return + _hg_ext_mq_patchlist qseries + return fi local guards=$(_hg_ext_mq_guards) @@ -452,15 +503,15 @@ { local i for ((i=cmd_index+1; i<=COMP_CWORD; i++)); do - if [[ ${COMP_WORDS[i]} != -* ]]; then - if [[ ${COMP_WORDS[i-1]} != @($global_args) ]]; then - _hg_cmd_qguard - return 0 - fi - elif [ "${COMP_WORDS[i]}" = -- ]; then - _hg_cmd_qguard - return 0 - fi + if [[ ${COMP_WORDS[i]} != -* ]]; then + if [[ ${COMP_WORDS[i-1]} != @($global_args) ]]; then + _hg_cmd_qguard + return 0 + fi + elif [ "${COMP_WORDS[i]}" = -- ]; then + _hg_cmd_qguard + return 0 + fi done return 1 } @@ -484,24 +535,24 @@ # find the sub-command for ((i=cmd_index+1; i<=COMP_CWORD; i++)); do - if [[ ${COMP_WORDS[i]} != -* ]]; then - if [[ ${COMP_WORDS[i-1]} != @($global_args) ]]; then - subcmd="${COMP_WORDS[i]}" - break - fi - fi + if [[ ${COMP_WORDS[i]} != -* ]]; then + if [[ ${COMP_WORDS[i-1]} != @($global_args) ]]; then + subcmd="${COMP_WORDS[i]}" + break + fi + fi done if [ -z "$subcmd" ] || [ $COMP_CWORD -eq $i ] || [ "$subcmd" = help ]; then - COMPREPLY=(${COMPREPLY[@]:-} - $(compgen -W 'bad good help init next reset' -- "$cur")) - return + COMPREPLY=(${COMPREPLY[@]:-} + $(compgen -W 'bad good help init next reset' -- "$cur")) + return fi case "$subcmd" in - good|bad) - _hg_labels - ;; + good|bad) + _hg_labels + ;; esac return @@ -512,28 +563,28 @@ _hg_cmd_email() { case "$prev" in - -c|--cc|-t|--to|-f|--from|--bcc) - # we need an e-mail address. 
let the user provide a function - # to get them - if [ "$(type -t _hg_emails)" = function ]; then - local arg=to - if [[ "$prev" == @(-f|--from) ]]; then - arg=from - fi - local addresses=$(_hg_emails $arg) - COMPREPLY=(${COMPREPLY[@]:-} - $(compgen -W '$addresses' -- "$cur")) - fi - return - ;; - -m|--mbox) - # fallback to standard filename completion - return - ;; - -s|--subject) - # free form string - return - ;; + -c|--cc|-t|--to|-f|--from|--bcc) + # we need an e-mail address. let the user provide a function + # to get them + if [ "$(type -t _hg_emails)" = function ]; then + local arg=to + if [[ "$prev" == @(-f|--from) ]]; then + arg=from + fi + local addresses=$(_hg_emails $arg) + COMPREPLY=(${COMPREPLY[@]:-} + $(compgen -W '$addresses' -- "$cur")) + fi + return + ;; + -m|--mbox) + # fallback to standard filename completion + return + ;; + -s|--subject) + # free form string + return + ;; esac _hg_labels @@ -552,15 +603,15 @@ _hg_cmd_transplant() { case "$prev" in - -s|--source) - _hg_paths - _hg_repos - return - ;; - --filter) - # standard filename completion - return - ;; + -s|--source) + _hg_paths + _hg_repos + return + ;; + --filter) + # standard filename completion + return + ;; esac # all other transplant options values and command parameters are revisions @@ -571,14 +622,18 @@ # shelve _hg_shelves() { - local shelves="$(_hg_cmd unshelve -l .)" + local shelves="$(_hg_cmd shelve -ql)" local IFS=$'\n' COMPREPLY=(${COMPREPLY[@]:-} $(compgen -W '$shelves' -- "$cur")) } _hg_cmd_shelve() { - _hg_status "mard" + if [[ "$prev" = @(-d|--delete) ]]; then + _hg_shelves + else + _hg_status "mard" + fi } _hg_cmd_unshelve() diff -r 8c69c69dbcd2 -r fe8e254c7ad6 contrib/check-code.py --- a/contrib/check-code.py Wed Jan 01 21:46:45 2014 -0600 +++ b/contrib/check-code.py Tue Jan 21 14:30:06 2014 -0600 @@ -7,6 +7,18 @@ # This software may be used and distributed according to the terms of the # GNU General Public License version 2 or any later version. 
+"""style and portability checker for Mercurial + +when a rule triggers wrong, do one of the following (prefer one from top): + * do the work-around the rule suggests + * doublecheck that it is a false match + * improve the rule pattern + * add an ignore pattern to the rule (3rd arg) which matches your good line + (you can append a short comment and match this, like: #re-raises, # no-py24) + * change the pattern to a warning and list the exception in test-check-code-hg + * ONLY use no--check-code for skipping entire files from external sources +""" + import re, glob, os, sys import keyword import optparse @@ -141,17 +153,15 @@ (r'^ saved backup bundle to \$TESTTMP.*\.hg$', winglobmsg), (r'^ changeset .* references (corrupted|missing) \$TESTTMP/.*[^)]$', winglobmsg), - (r'^ pulling from \$TESTTMP/.*[^)]$', winglobmsg, '\$TESTTMP/unix-repo$'), - (r'^ reverting .*/.*[^)]$', winglobmsg, '\$TESTTMP/unix-repo$'), - (r'^ cloning subrepo \S+/.*[^)]$', winglobmsg, '\$TESTTMP/unix-repo$'), - (r'^ pushing to \$TESTTMP/.*[^)]$', winglobmsg, '\$TESTTMP/unix-repo$'), - (r'^ pushing subrepo \S+/\S+ to.*[^)]$', winglobmsg, - '\$TESTTMP/unix-repo$'), + (r'^ pulling from \$TESTTMP/.*[^)]$', winglobmsg, + '\$TESTTMP/unix-repo$'), # in test-issue1802.t which skipped on windows + (r'^ reverting .*/.*[^)]$', winglobmsg), + (r'^ cloning subrepo \S+/.*[^)]$', winglobmsg), + (r'^ pushing to \$TESTTMP/.*[^)]$', winglobmsg), + (r'^ pushing subrepo \S+/\S+ to.*[^)]$', winglobmsg), (r'^ moving \S+/.*[^)]$', winglobmsg), - (r'^ no changes made to subrepo since.*/.*[^)]$', - winglobmsg, '\$TESTTMP/unix-repo$'), - (r'^ .*: largefile \S+ not available from file:.*/.*[^)]$', - winglobmsg, '\$TESTTMP/unix-repo$'), + (r'^ no changes made to subrepo since.*/.*[^)]$', winglobmsg), + (r'^ .*: largefile \S+ not available from file:.*/.*[^)]$', winglobmsg), ], # warnings [ @@ -227,7 +237,7 @@ (r'^\s*except.* as .*:', "except as not available in Python 2.4"), (r'^\s*os\.path\.relpath', "relpath not available in Python 2.4"), (r'(?>> dotted_name_of_path('mercurial/error.py') + 'mercurial.error' + """ + parts = path.split('/') + parts[-1] = parts[-1][:-3] # remove .py + return '.'.join(parts) + + +def list_stdlib_modules(): + """List the modules present in the stdlib. + + >>> mods = set(list_stdlib_modules()) + >>> 'BaseHTTPServer' in mods + True + + os.path isn't really a module, so it's missing: + + >>> 'os.path' in mods + False + + sys requires special treatment, because it's baked into the + interpreter, but it should still appear: + + >>> 'sys' in mods + True + + >>> 'collections' in mods + True + + >>> 'cStringIO' in mods + True + """ + for m in sys.builtin_module_names: + yield m + # These modules only exist on windows, but we should always + # consider them stdlib. + for m in ['msvcrt', '_winreg']: + yield m + # These get missed too + for m in 'ctypes', 'email': + yield m + yield 'builtins' # python3 only + stdlib_prefixes = set([sys.prefix, sys.exec_prefix]) + # We need to supplement the list of prefixes for the search to work + # when run from within a virtualenv. + for mod in (BaseHTTPServer, zlib): + try: + # Not all module objects have a __file__ attribute. + filename = mod.__file__ + except AttributeError: + continue + dirname = os.path.dirname(filename) + for prefix in stdlib_prefixes: + if dirname.startswith(prefix): + # Then this directory is redundant. 
+ break + else: + stdlib_prefixes.add(dirname) + for libpath in sys.path: + # We want to walk everything in sys.path that starts with + # something in stdlib_prefixes. check-code suppressed because + # the ast module used by this script implies the availability + # of any(). + if not any(libpath.startswith(p) for p in stdlib_prefixes): # no-py24 + continue + if 'site-packages' in libpath: + continue + for top, dirs, files in os.walk(libpath): + for name in files: + if name == '__init__.py': + continue + if not (name.endswith('.py') or name.endswith('.so')): + continue + full_path = os.path.join(top, name) + if 'site-packages' in full_path: + continue + rel_path = full_path[len(libpath) + 1:] + mod = dotted_name_of_path(rel_path) + yield mod + +stdlib_modules = set(list_stdlib_modules()) + +def imported_modules(source, ignore_nested=False): + """Given the source of a file as a string, yield the names + imported by that file. + + Args: + source: The python source to examine as a string. + ignore_nested: If true, import statements that do not start in + column zero will be ignored. + + Returns: + A list of module names imported by the given source. + + >>> sorted(imported_modules( + ... 'import foo ; from baz import bar; import foo.qux')) + ['baz.bar', 'foo', 'foo.qux'] + >>> sorted(imported_modules( + ... '''import foo + ... def wat(): + ... import bar + ... ''', ignore_nested=True)) + ['foo'] + """ + for node in ast.walk(ast.parse(source)): + if ignore_nested and getattr(node, 'col_offset', 0) > 0: + continue + if isinstance(node, ast.Import): + for n in node.names: + yield n.name + elif isinstance(node, ast.ImportFrom): + prefix = node.module + '.' + for n in node.names: + yield prefix + n.name + +def verify_stdlib_on_own_line(source): + """Given some python source, verify that stdlib imports are done + in separate statements from relative local module imports. + + Observing this limitation is important as it works around an + annoying lib2to3 bug in relative import rewrites: + http://bugs.python.org/issue19510. + + >>> list(verify_stdlib_on_own_line('import sys, foo')) + ['mixed stdlib and relative imports:\\n foo, sys'] + >>> list(verify_stdlib_on_own_line('import sys, os')) + [] + >>> list(verify_stdlib_on_own_line('import foo, bar')) + [] + """ + for node in ast.walk(ast.parse(source)): + if isinstance(node, ast.Import): + from_stdlib = {} + for n in node.names: + from_stdlib[n.name] = n.name in stdlib_modules + num_std = len([x for x in from_stdlib.values() if x]) + if num_std not in (len(from_stdlib.values()), 0): + yield ('mixed stdlib and relative imports:\n %s' % + ', '.join(sorted(from_stdlib.iterkeys()))) + +class CircularImport(Exception): + pass + + +def cyclekey(names): + return tuple(sorted(set(names))) + +def check_one_mod(mod, imports, path=None, ignore=None): + if path is None: + path = [] + if ignore is None: + ignore = [] + path = path + [mod] + for i in sorted(imports.get(mod, [])): + if i not in stdlib_modules: + i = mod.rsplit('.', 1)[0] + '.' 
+ i + if i in path: + firstspot = path.index(i) + cycle = path[firstspot:] + [i] + if cyclekey(cycle) not in ignore: + raise CircularImport(cycle) + continue + check_one_mod(i, imports, path=path, ignore=ignore) + +def rotatecycle(cycle): + """arrange a cycle so that the lexicographically first module listed first + + >>> rotatecycle(['foo', 'bar', 'foo']) + ['bar', 'foo', 'bar'] + """ + lowest = min(cycle) + idx = cycle.index(lowest) + return cycle[idx:] + cycle[1:idx] + [lowest] + +def find_cycles(imports): + """Find cycles in an already-loaded import graph. + + >>> imports = {'top.foo': ['bar', 'os.path', 'qux'], + ... 'top.bar': ['baz', 'sys'], + ... 'top.baz': ['foo'], + ... 'top.qux': ['foo']} + >>> print '\\n'.join(sorted(find_cycles(imports))) + top.bar -> top.baz -> top.foo -> top.bar -> top.bar + top.foo -> top.qux -> top.foo -> top.foo + """ + cycles = {} + for mod in sorted(imports.iterkeys()): + try: + check_one_mod(mod, imports, ignore=cycles) + except CircularImport, e: + cycle = e.args[0] + cycles[cyclekey(cycle)] = ' -> '.join(rotatecycle(cycle)) + return cycles.values() + +def _cycle_sortkey(c): + return len(c), c + +def main(argv): + if len(argv) < 2: + print 'Usage: %s file [file] [file] ...' + return 1 + used_imports = {} + any_errors = False + for source_path in argv[1:]: + f = open(source_path) + modname = dotted_name_of_path(source_path) + src = f.read() + used_imports[modname] = sorted( + imported_modules(src, ignore_nested=True)) + for error in verify_stdlib_on_own_line(src): + any_errors = True + print source_path, error + f.close() + cycles = find_cycles(used_imports) + if cycles: + firstmods = set() + for c in sorted(cycles, key=_cycle_sortkey): + first = c.split()[0] + # As a rough cut, ignore any cycle that starts with the + # same module as some other cycle. Otherwise we see lots + # of cycles that are effectively duplicates. + if first in firstmods: + continue + print 'Import cycle:', c + firstmods.add(first) + any_errors = True + return not any_errors + +if __name__ == '__main__': + sys.exit(int(main(sys.argv))) diff -r 8c69c69dbcd2 -r fe8e254c7ad6 contrib/lock-checker.py --- a/contrib/lock-checker.py Wed Jan 01 21:46:45 2014 -0600 +++ b/contrib/lock-checker.py Tue Jan 21 14:30:06 2014 -0600 @@ -7,21 +7,12 @@ This currently only checks store locks, not working copy locks. """ import os -import traceback - -def _warnstack(ui, msg, skip=1): - '''issue warning with the message and the current stack, skipping the - skip last entries''' - ui.warn('%s at:\n' % msg) - entries = traceback.extract_stack()[:-skip] - fnmax = max(len(entry[0]) for entry in entries) - for fn, ln, func, _text in entries: - ui.warn(' %*s:%-4s in %s\n' % (fnmax, fn, ln, func)) +from mercurial import util def _checklock(repo): l = repo._lockref and repo._lockref() if l is None or not l.held: - _warnstack(repo.ui, 'missing lock', skip=2) + util.debugstacktrace('missing lock', skip=1) def reposetup(ui, repo): orig = repo.__class__ diff -r 8c69c69dbcd2 -r fe8e254c7ad6 contrib/perf.py --- a/contrib/perf.py Wed Jan 01 21:46:45 2014 -0600 +++ b/contrib/perf.py Tue Jan 21 14:30:06 2014 -0600 @@ -243,6 +243,18 @@ copies=opts.get('rename'))) ui.popbuffer() +@command('perfmoonwalk') +def perfmoonwalk(ui, repo): + """benchmark walking the changelog backwards + + This also loads the changelog data for each revision in the changelog. 
+ """ + def moonwalk(): + for i in xrange(len(repo), -1, -1): + ctx = repo[i] + ctx.branch() # read changelog data (in addition to the index) + timer(moonwalk) + @command('perftemplating') def perftemplating(ui, repo): ui.pushbuffer() @@ -349,7 +361,7 @@ def getfiltered(name): def d(): repo.invalidatevolatilesets() - repoview.filteredrevs(repo, name) + repoview.filterrevs(repo, name) return d allfilter = sorted(repoview.filtertable) @@ -386,7 +398,7 @@ allfilters = [] while possiblefilters: for name in possiblefilters: - subset = repoview.subsettable.get(name) + subset = branchmap.subsettable.get(name) if subset not in possiblefilters: break else: diff -r 8c69c69dbcd2 -r fe8e254c7ad6 contrib/win32/ReadMe.html --- a/contrib/win32/ReadMe.html Wed Jan 01 21:46:45 2014 -0600 +++ b/contrib/win32/ReadMe.html Tue Jan 21 14:30:06 2014 -0600 @@ -140,7 +140,7 @@

-    Mercurial is Copyright 2005-2013 Matt Mackall and others. See
+    Mercurial is Copyright 2005-2014 Matt Mackall and others. See
     the Contributors.txt file for a list of contributors.

diff -r 8c69c69dbcd2 -r fe8e254c7ad6 doc/gendoc.py --- a/doc/gendoc.py Wed Jan 01 21:46:45 2014 -0600 +++ b/doc/gendoc.py Tue Jan 21 14:30:06 2014 -0600 @@ -40,11 +40,16 @@ shortopt, longopt, default, desc, optlabel = opt else: shortopt, longopt, default, desc = opt + optlabel = _("VALUE") allopts = [] if shortopt: allopts.append("-%s" % shortopt) if longopt: allopts.append("--%s" % longopt) + if isinstance(default, list): + allopts[-1] += " <%s[+]>" % optlabel + elif (default is not None) and not isinstance(default, bool): + allopts[-1] += " <%s>" % optlabel desc += default and _(" (default: %s)") % default or "" yield (", ".join(allopts), desc) @@ -71,8 +76,14 @@ def showdoc(ui): # print options ui.write(minirst.section(_("Options"))) + multioccur = False for optstr, desc in get_opts(globalopts): ui.write("%s\n %s\n\n" % (optstr, desc)) + if optstr.endswith("[+]>"): + multioccur = True + if multioccur: + ui.write(_("\n[+] marked option can be specified multiple times\n")) + ui.write("\n") # print cmds ui.write(minirst.section(_("Commands"))) @@ -157,12 +168,18 @@ if opt_output: opts_len = max([len(line[0]) for line in opt_output]) ui.write(_("Options:\n\n")) + multioccur = False for optstr, desc in opt_output: if desc: s = "%-*s %s" % (opts_len, optstr, desc) else: s = optstr ui.write("%s\n" % s) + if optstr.endswith("[+]>"): + multioccur = True + if multioccur: + ui.write(_("\n[+] marked option can be specified" + " multiple times\n")) ui.write("\n") # aliases if d['aliases']: diff -r 8c69c69dbcd2 -r fe8e254c7ad6 hgext/convert/filemap.py --- a/hgext/convert/filemap.py Wed Jan 01 21:46:45 2014 -0600 +++ b/hgext/convert/filemap.py Tue Jan 21 14:30:06 2014 -0600 @@ -10,12 +10,20 @@ from mercurial import util, error from common import SKIPREV, converter_source -def rpairs(name): - e = len(name) - while e != -1: - yield name[:e], name[e + 1:] - e = name.rfind('/', 0, e) - yield '.', name +def rpairs(path): + '''Yield tuples with path split at '/', starting with the full path. + No leading, trailing or double '/', please. + >>> for x in rpairs('foo/bar/baz'): print x + ('foo/bar/baz', '') + ('foo/bar', 'baz') + ('foo', 'bar/baz') + ('.', 'foo/bar/baz') + ''' + i = len(path) + while i != -1: + yield path[:i], path[i + 1:] + i = path.rfind('/', 0, i) + yield '.', path def normalize(path): ''' We use posixpath.normpath to support cross-platform path format. diff -r 8c69c69dbcd2 -r fe8e254c7ad6 hgext/graphlog.py --- a/hgext/graphlog.py Wed Jan 01 21:46:45 2014 -0600 +++ b/hgext/graphlog.py Tue Jan 21 14:30:06 2014 -0600 @@ -5,7 +5,10 @@ # This software may be used and distributed according to the terms of the # GNU General Public License version 2 or any later version. -'''command to view revision graphs from a shell +'''command to view revision graphs from a shell (DEPRECATED) + +The functionality of this extension has been include in core Mercurial +since version 2.3. This extension adds a --graph option to the incoming, outgoing and log commands. 
When this options is given, an ASCII representation of the diff -r 8c69c69dbcd2 -r fe8e254c7ad6 hgext/keyword.py --- a/hgext/keyword.py Wed Jan 01 21:46:45 2014 -0600 +++ b/hgext/keyword.py Tue Jan 21 14:30:06 2014 -0600 @@ -84,7 +84,7 @@ from mercurial import commands, context, cmdutil, dispatch, filelog, extensions from mercurial import localrepo, match, patch, templatefilters, templater, util -from mercurial import scmutil +from mercurial import scmutil, pathutil from mercurial.hgweb import webcommands from mercurial.i18n import _ import os, re, shutil, tempfile @@ -439,12 +439,16 @@ repo[None].add([fn]) ui.note(_('\nkeywords written to %s:\n') % fn) ui.note(keywords) - repo.dirstate.setbranch('demobranch') + wlock = repo.wlock() + try: + repo.dirstate.setbranch('demobranch') + finally: + wlock.release() for name, cmd in ui.configitems('hooks'): if name.split('.', 1)[0].find('commit') > -1: repo.ui.setconfig('hooks', name, '') msg = _('hg keyword configuration and expansion example') - ui.note("hg ci -m '%s'\n" % msg) # check-code-ignore + ui.note(("hg ci -m '%s'\n" % msg)) repo.commit(text=msg) ui.status(_('\n\tkeywords expanded\n')) ui.write(repo.wread(fn)) @@ -673,7 +677,7 @@ expansion. ''' source = repo.dirstate.copied(dest) if 'l' in wctx.flags(source): - source = scmutil.canonpath(repo.root, cwd, + source = pathutil.canonpath(repo.root, cwd, os.path.realpath(source)) return kwt.match(source) diff -r 8c69c69dbcd2 -r fe8e254c7ad6 hgext/largefiles/overrides.py --- a/hgext/largefiles/overrides.py Wed Jan 01 21:46:45 2014 -0600 +++ b/hgext/largefiles/overrides.py Tue Jan 21 14:30:06 2014 -0600 @@ -12,7 +12,7 @@ import copy from mercurial import hg, commands, util, cmdutil, scmutil, match as match_, \ - node, archival, error, merge, discovery + node, archival, error, merge, discovery, pathutil from mercurial.i18n import _ from mercurial.node import hex from hgext import rebase @@ -415,47 +415,23 @@ return processed # Override filemerge to prompt the user about how they wish to merge -# largefiles. This will handle identical edits, and copy/rename + -# edit without prompting the user. +# largefiles. This will handle identical edits without prompting the user. def overridefilemerge(origfn, repo, mynode, orig, fcd, fco, fca): - # Use better variable names here. Because this is a wrapper we cannot - # change the variable names in the function declaration. - fcdest, fcother, fcancestor = fcd, fco, fca if not lfutil.isstandin(orig): - return origfn(repo, mynode, orig, fcdest, fcother, fcancestor) - else: - if not fcother.cmp(fcdest): # files identical? - return None + return origfn(repo, mynode, orig, fcd, fco, fca) - # backwards, use working dir parent as ancestor - if fcancestor == fcother: - fcancestor = fcdest.parents()[0] + if not fco.cmp(fcd): # files identical? + return None - if orig != fcother.path(): - repo.ui.status(_('merging %s and %s to %s\n') - % (lfutil.splitstandin(orig), - lfutil.splitstandin(fcother.path()), - lfutil.splitstandin(fcdest.path()))) - else: - repo.ui.status(_('merging %s\n') - % lfutil.splitstandin(fcdest.path())) - - if fcancestor.path() != fcother.path() and fcother.data() == \ - fcancestor.data(): - return 0 - if fcancestor.path() != fcdest.path() and fcdest.data() == \ - fcancestor.data(): - repo.wwrite(fcdest.path(), fcother.data(), fcother.flags()) - return 0 - - if repo.ui.promptchoice(_('largefile %s has a merge conflict\n' - 'keep (l)ocal or take (o)ther?' 
- '$$ &Local $$ &Other') % - lfutil.splitstandin(orig), 0) == 0: - return 0 - else: - repo.wwrite(fcdest.path(), fcother.data(), fcother.flags()) - return 0 + if repo.ui.promptchoice( + _('largefile %s has a merge conflict\nancestor was %s\n' + 'keep (l)ocal %s or\ntake (o)ther %s?' + '$$ &Local $$ &Other') % + (lfutil.splitstandin(orig), + fca.data().strip(), fcd.data().strip(), fco.data().strip()), + 0) == 1: + repo.wwrite(fcd.path(), fco.data(), fco.flags()) + return 0 # Copy first changes the matchers to match standins instead of # largefiles. Then it overrides util.copyfile in that function it @@ -469,7 +445,7 @@ return orig(ui, repo, pats, opts, rename) def makestandin(relpath): - path = scmutil.canonpath(repo.root, repo.getcwd(), relpath) + path = pathutil.canonpath(repo.root, repo.getcwd(), relpath) return os.path.join(repo.wjoin(lfutil.standin(path))) fullpats = scmutil.expandpats(pats) diff -r 8c69c69dbcd2 -r fe8e254c7ad6 hgext/largefiles/reposetup.py --- a/hgext/largefiles/reposetup.py Wed Jan 01 21:46:45 2014 -0600 +++ b/hgext/largefiles/reposetup.py Tue Jan 21 14:30:06 2014 -0600 @@ -445,8 +445,8 @@ for f in files if lfutil.isstandin(f) and f in ctx])) lfcommands.uploadlfiles(ui, self, remote, toupload) - return super(lfilesrepo, self).push(remote, force, revs, - newbranch) + return super(lfilesrepo, self).push(remote, force=force, revs=revs, + newbranch=newbranch) def _subdirlfs(self, files, lfiles): ''' diff -r 8c69c69dbcd2 -r fe8e254c7ad6 hgext/mq.py --- a/hgext/mq.py Wed Jan 01 21:46:45 2014 -0600 +++ b/hgext/mq.py Tue Jan 21 14:30:06 2014 -0600 @@ -1204,7 +1204,9 @@ diffopts = self.diffopts() wlock = repo.wlock() try: - heads = [h for hs in repo.branchmap().itervalues() for h in hs] + heads = [] + for hs in repo.branchmap().itervalues(): + heads.extend(hs) if not heads: heads = [nullid] if repo.dirstate.p1() not in heads and not exact: @@ -2565,8 +2567,10 @@ ph = patchheader(q.join(parent), q.plainmode) message, user = ph.message, ph.user for msg in messages: - message.append('* * *') - message.extend(msg) + if msg: + if message: + message.append('* * *') + message.extend(msg) message = '\n'.join(message) if opts.get('edit'): diff -r 8c69c69dbcd2 -r fe8e254c7ad6 hgext/rebase.py --- a/hgext/rebase.py Wed Jan 01 21:46:45 2014 -0600 +++ b/hgext/rebase.py Tue Jan 21 14:30:06 2014 -0600 @@ -221,46 +221,73 @@ if revf: rebaseset = scmutil.revrange(repo, revf) + if not rebaseset: + raise util.Abort(_('empty "rev" revision set - ' + 'nothing to rebase')) elif srcf: src = scmutil.revrange(repo, [srcf]) + if not src: + raise util.Abort(_('empty "source" revision set - ' + 'nothing to rebase')) rebaseset = repo.revs('(%ld)::', src) + assert rebaseset else: base = scmutil.revrange(repo, [basef or '.']) + if not base: + raise util.Abort(_('empty "base" revision set - ' + "can't compute rebase set")) rebaseset = repo.revs( '(children(ancestor(%ld, %d)) and ::(%ld))::', base, dest, base) - if rebaseset: - root = min(rebaseset) - else: - root = None + if not rebaseset: + if base == [dest.rev()]: + if basef: + ui.status(_('nothing to rebase - %s is both "base"' + ' and destination\n') % dest) + else: + ui.status(_('nothing to rebase - working directory ' + 'parent is also destination\n')) + elif not repo.revs('%ld - ::%d', base, dest): + if basef: + ui.status(_('nothing to rebase - "base" %s is ' + 'already an ancestor of destination ' + '%s\n') % + ('+'.join(str(repo[r]) for r in base), + dest)) + else: + ui.status(_('nothing to rebase - working ' + 'directory parent is already an ' + 
'ancestor of destination %s\n') % dest) + else: # can it happen? + ui.status(_('nothing to rebase from %s to %s\n') % + ('+'.join(str(repo[r]) for r in base), dest)) + return 1 - if not rebaseset: - repo.ui.debug('base is ancestor of destination\n') - result = None - elif (not (keepf or obsolete._enabled) + if (not (keepf or obsolete._enabled) and repo.revs('first(children(%ld) - %ld)', rebaseset, rebaseset)): raise util.Abort( _("can't remove original changesets with" " unrebased descendants"), hint=_('use --keep to keep original changesets')) - else: - result = buildstate(repo, dest, rebaseset, collapsef) + result = buildstate(repo, dest, rebaseset, collapsef) if not result: # Empty state built, nothing to rebase ui.status(_('nothing to rebase\n')) return 1 - elif not keepf and not repo[root].mutable(): + + root = min(rebaseset) + if not keepf and not repo[root].mutable(): raise util.Abort(_("can't rebase immutable changeset %s") % repo[root], hint=_('see hg help phases for details')) - else: - originalwd, target, state = result - if collapsef: - targetancestors = repo.changelog.ancestors([target], - inclusive=True) - external = externalparent(repo, state, targetancestors) + + originalwd, target, state = result + if collapsef: + targetancestors = repo.changelog.ancestors([target], + inclusive=True) + external = externalparent(repo, state, targetancestors) if keepbranchesf: # insert _savebranch at the start of extrafns so if @@ -275,7 +302,6 @@ raise util.Abort(_('cannot collapse multiple named ' 'branches')) - # Rebase if not targetancestors: targetancestors = repo.changelog.ancestors([target], inclusive=True) diff -r 8c69c69dbcd2 -r fe8e254c7ad6 hgext/record.py --- a/hgext/record.py Wed Jan 01 21:46:45 2014 -0600 +++ b/hgext/record.py Tue Jan 21 14:30:06 2014 -0600 @@ -7,7 +7,7 @@ '''commands to interactively select changes for commit/qrefresh''' -from mercurial.i18n import gettext, _ +from mercurial.i18n import _ from mercurial import cmdutil, commands, extensions, hg, patch from mercurial import util import copy, cStringIO, errno, os, re, shutil, tempfile @@ -18,15 +18,6 @@ lines_re = re.compile(r'@@ -(\d+),(\d+) \+(\d+),(\d+) @@\s*(.*)') -diffopts = [ - ('w', 'ignore-all-space', False, - _('ignore white space when comparing lines')), - ('b', 'ignore-space-change', None, - _('ignore changes in the amount of white space')), - ('B', 'ignore-blank-lines', None, - _('ignore changes whose lines are all blank')), -] - def scanpatch(fp): """like patch.iterhunks, but yield different events @@ -286,21 +277,18 @@ resps = _('[Ynesfdaq?]' '$$ &Yes, record this change' '$$ &No, skip this change' - '$$ &Edit the change manually' + '$$ &Edit this change manually' '$$ &Skip remaining changes to this file' '$$ Record remaining changes to this &file' '$$ &Done, skip remaining changes and files' '$$ Record &all changes to all remaining files' '$$ &Quit, recording no changes' - '$$ &?') + '$$ &? (display help)') r = ui.promptchoice("%s %s" % (query, resps)) ui.write("\n") if r == 8: # ? - doc = gettext(record.__doc__) - c = doc.find('::') + 2 - for l in doc[c:].splitlines(): - if l.startswith(' '): - ui.write(l.strip(), '\n') + for c, t in ui.extractchoices(resps)[1]: + ui.write('%s - %s\n' % (c, t.lower())) continue elif r == 0: # yes ret = True @@ -423,7 +411,7 @@ @command("record", # same options as commit + white space diff options - commands.table['^commit|ci'][1][:] + diffopts, + commands.table['^commit|ci'][1][:] + commands.diffwsopts, _('hg record [OPTION]... 
[FILE]...')) def record(ui, repo, *pats, **opts): '''interactively select changes to commit @@ -502,7 +490,8 @@ cmdsuggest) # make sure username is set before going interactive - ui.username() + if not opts.get('user'): + ui.username() # raise exception, username not provided def recordfunc(ui, repo, message, match, opts): """This is generic record driver. @@ -526,11 +515,10 @@ '(use "hg commit" instead)')) changes = repo.status(match=match)[:3] - diffopts = patch.diffopts(ui, opts=dict( - git=True, nodates=True, - ignorews=opts.get('ignore_all_space'), - ignorewsamount=opts.get('ignore_space_change'), - ignoreblanklines=opts.get('ignore_blank_lines'))) + diffopts = opts.copy() + diffopts['nodates'] = True + diffopts['git'] = True + diffopts = patch.diffopts(ui, opts=diffopts) chunks = patch.diff(repo, changes=changes, opts=diffopts) fp = cStringIO.StringIO() fp.write(''.join(chunks)) @@ -667,7 +655,7 @@ (qrecord, # same options as qnew, but copy them so we don't get # -i/--interactive for qrecord and add white space diff options - mq.cmdtable['^qnew'][1][:] + diffopts, + mq.cmdtable['^qnew'][1][:] + commands.diffwsopts, _('hg qrecord [OPTION]... PATCH [FILE]...')) _wrapcmd('qnew', mq.cmdtable, qnew, _("interactively record a new patch")) diff -r 8c69c69dbcd2 -r fe8e254c7ad6 hgext/relink.py --- a/hgext/relink.py Wed Jan 01 21:46:45 2014 -0600 +++ b/hgext/relink.py Tue Jan 21 14:30:06 2014 -0600 @@ -48,6 +48,10 @@ ui.status(_('there is nothing to relink\n')) return + if not util.samedevice(src.store.path, repo.store.path): + # No point in continuing + raise util.Abort(_('source and destination are on different devices')) + locallock = repo.lock() try: remotelock = src.lock() diff -r 8c69c69dbcd2 -r fe8e254c7ad6 hgext/strip.py --- a/hgext/strip.py Wed Jan 01 21:46:45 2014 -0600 +++ b/hgext/strip.py Tue Jan 21 14:30:06 2014 -0600 @@ -182,37 +182,32 @@ revs = sorted(rootnodes) if update and opts.get('keep'): - wlock = repo.wlock() - try: - urev, p2 = repo.changelog.parents(revs[0]) - if (util.safehasattr(repo, 'mq') and p2 != nullid - and p2 in [x.node for x in repo.mq.applied]): - urev = p2 - uctx = repo[urev] + urev, p2 = repo.changelog.parents(revs[0]) + if (util.safehasattr(repo, 'mq') and p2 != nullid + and p2 in [x.node for x in repo.mq.applied]): + urev = p2 + uctx = repo[urev] - # only reset the dirstate for files that would actually change - # between the working context and uctx - descendantrevs = repo.revs("%s::." % uctx.rev()) - changedfiles = [] - for rev in descendantrevs: - # blindly reset the files, regardless of what actually - # changed - changedfiles.extend(repo[rev].files()) + # only reset the dirstate for files that would actually change + # between the working context and uctx + descendantrevs = repo.revs("%s::." 
% uctx.rev()) + changedfiles = [] + for rev in descendantrevs: + # blindly reset the files, regardless of what actually changed + changedfiles.extend(repo[rev].files()) - # reset files that only changed in the dirstate too - dirstate = repo.dirstate - dirchanges = [f for f in dirstate if dirstate[f] != 'n'] - changedfiles.extend(dirchanges) + # reset files that only changed in the dirstate too + dirstate = repo.dirstate + dirchanges = [f for f in dirstate if dirstate[f] != 'n'] + changedfiles.extend(dirchanges) - repo.dirstate.rebuild(urev, uctx.manifest(), changedfiles) - repo.dirstate.write() - update = False - finally: - wlock.release() + repo.dirstate.rebuild(urev, uctx.manifest(), changedfiles) + repo.dirstate.write() + update = False if opts.get('bookmark'): if mark == repo._bookmarkcurrent: - bookmarks.setcurrent(repo, None) + bookmarks.unsetcurrent(repo) del marks[mark] marks.write() ui.write(_("bookmark '%s' deleted\n") % mark) diff -r 8c69c69dbcd2 -r fe8e254c7ad6 hgext/transplant.py --- a/hgext/transplant.py Wed Jan 01 21:46:45 2014 -0600 +++ b/hgext/transplant.py Tue Jan 21 14:30:06 2014 -0600 @@ -154,7 +154,7 @@ # transplants before them fail. domerge = True if not hasnode(repo, node): - repo.pull(source, heads=[node]) + repo.pull(source.peer(), heads=[node]) skipmerge = False if parents[1] != revlog.nullid: @@ -451,34 +451,31 @@ def browserevs(ui, repo, nodes, opts): '''interactively transplant changesets''' - def browsehelp(ui): - ui.write(_('y: transplant this changeset\n' - 'n: skip this changeset\n' - 'm: merge at this changeset\n' - 'p: show patch\n' - 'c: commit selected changesets\n' - 'q: cancel transplant\n' - '?: show this help\n')) - displayer = cmdutil.show_changeset(ui, repo, opts) transplants = [] merges = [] + prompt = _('apply changeset? [ynmpcq?]:' + '$$ &yes, transplant this changeset' + '$$ &no, skip this changeset' + '$$ &merge at this changeset' + '$$ show &patch' + '$$ &commit selected changesets' + '$$ &quit and cancel transplant' + '$$ &? (show this help)') for node in nodes: displayer.show(repo[node]) action = None while not action: - action = ui.prompt(_('apply changeset? 
[ynmpcq?]:')) + action = 'ynmpcq?'[ui.promptchoice(prompt)] if action == '?': - browsehelp(ui) + for c, t in ui.extractchoices(prompt)[1]: + ui.write('%s: %s\n' % (c, t)) action = None elif action == 'p': parent = repo.changelog.parents(node)[0] for chunk in patch.diff(repo, parent, node): ui.write(chunk) action = None - elif action not in ('y', 'n', 'm', 'c', 'q'): - ui.write(_('no such option\n')) - action = None if action == 'y': transplants.append(node) elif action == 'm': diff -r 8c69c69dbcd2 -r fe8e254c7ad6 i18n/de.po --- a/i18n/de.po Wed Jan 01 21:46:45 2014 -0600 +++ b/i18n/de.po Tue Jan 21 14:30:06 2014 -0600 @@ -6362,7 +6362,7 @@ #: mercurial/sshpeer.py:128 mercurial/wireproto.py:251 #: mercurial/wireproto.py:319 msgid "remote: " -msgstr "Entfernt: " +msgstr "Gegenseite: " #: hgext/largefiles/proto.py:98 #, python-format @@ -11710,7 +11710,7 @@ #: mercurial/commands.py:62 msgid "specify hg command to run on the remote side" -msgstr "Spezifiziert den hg-Befehl, der entfernt ausgeführt wird" +msgstr "Spezifiziert den hg-Befehl, der auf der Gegenseite ausgeführt wird" #: mercurial/commands.py:64 msgid "do not verify server certificate (ignoring web.cacerts config)" @@ -13000,7 +13000,7 @@ msgstr "" " Nur lokale Pfade und ``ssh://``-URLs werden als Ziele unterstützt.\n" " Für ``ssh://``-Ziele wird kein Arbeitsverzeichnis und keine\n" -" ``.hg/hgrc``-Datei auf der entfernten Seite erstellt." +" ``.hg/hgrc``-Datei auf der Gegenseite erstellt." #: mercurial/commands.py:1192 msgid "" @@ -15128,7 +15128,7 @@ #: mercurial/commands.py:3865 mercurial/commands.py:4335 msgid "remote doesn't support bookmarks\n" -msgstr "Quellarchiv unterstützt keine Lesezeichen\n" +msgstr "Gegenseite unterstützt keine Lesezeichen\n" #: mercurial/commands.py:3877 msgid "[-e CMD] [--remotecmd CMD] [DEST]" @@ -15856,7 +15856,7 @@ #: mercurial/commands.py:4587 #, python-format msgid "remote bookmark %s not found!" -msgstr "Entferntes Lesezeichen %s wurde nicht gefunden!" +msgstr "Lesezeichen %s existiert auf der Gegenseite nicht!" #: mercurial/commands.py:4614 #, python-format @@ -16952,12 +16952,12 @@ #: mercurial/commands.py:5569 #, python-format msgid "remote: %s\n" -msgstr "Entfernt: %s\n" +msgstr "Gegenseite: %s\n" #. i18n: column positioning for "hg summary" #: mercurial/commands.py:5572 msgid "remote: (synced)\n" -msgstr "Entfernt: (synchonisiert)\n" +msgstr "Gegenseite: (synchonisiert)\n" #: mercurial/commands.py:5575 msgid "force tag" @@ -17607,7 +17607,7 @@ #: mercurial/dispatch.py:137 msgid "abort: remote error:\n" -msgstr "Abbruch: Entfernter Fehler:\n" +msgstr "Abbruch: Fehler auf der Gegenseite:\n" #: mercurial/dispatch.py:140 mercurial/dispatch.py:152 #: mercurial/dispatch.py:171 @@ -24422,7 +24422,7 @@ "revisions`." msgstr "" "Ein optionaler Bezeichner nach # verweist auf einen bestimmten Zweig,\n" -"Tag oder Änderungssatz des anderen Projektarchivs. Siehe auch :hg:\n" +"Tag oder Änderungssatz des entfernten Projektarchivs. Siehe auch :hg:\n" "`help revisions`." 
#: mercurial/help/urls.txt:17 @@ -25568,7 +25568,7 @@ #: mercurial/phases.py:373 #, python-format msgid "ignoring unexpected root from remote: %i %s\n" -msgstr "Ignorieren einer unerwarteten Wurzel von entfernt: %i %s\n" +msgstr "Ignorieren einer unerwarteten Wurzel von der Gegenseite: %i %s\n" #: mercurial/phases.py:403 #, python-format diff -r 8c69c69dbcd2 -r fe8e254c7ad6 mercurial/ancestor.py --- a/mercurial/ancestor.py Wed Jan 01 21:46:45 2014 -0600 +++ b/mercurial/ancestor.py Tue Jan 21 14:30:06 2014 -0600 @@ -5,7 +5,8 @@ # This software may be used and distributed according to the terms of the # GNU General Public License version 2 or any later version. -import heapq, util +import heapq +import util from node import nullrev def ancestors(pfunc, *orignodes): diff -r 8c69c69dbcd2 -r fe8e254c7ad6 mercurial/bookmarks.py --- a/mercurial/bookmarks.py Wed Jan 01 21:46:45 2014 -0600 +++ b/mercurial/bookmarks.py Tue Jan 21 14:30:06 2014 -0600 @@ -6,7 +6,7 @@ # GNU General Public License version 2 or any later version. from mercurial.i18n import _ -from mercurial.node import hex +from mercurial.node import hex, bin from mercurial import encoding, error, util, obsolete import errno @@ -58,7 +58,7 @@ ''' repo = self._repo if repo._bookmarkcurrent not in self: - setcurrent(repo, None) + unsetcurrent(repo) wlock = repo.wlock() try: @@ -106,13 +106,13 @@ Set the name of the bookmark that we are on (hg update ). The name is recorded in .hg/bookmarks.current ''' + if mark not in repo._bookmarks: + raise AssertionError('bookmark %s does not exist!' % mark) + current = repo._bookmarkcurrent if current == mark: return - if mark not in repo._bookmarks: - mark = '' - wlock = repo.wlock() try: file = repo.opener('bookmarks.current', 'w', atomictemp=True) @@ -192,13 +192,12 @@ return False if marks[cur] in parents: - old = repo[marks[cur]] new = repo[node] divs = [repo[b] for b in marks if b.split('@', 1)[0] == cur.split('@', 1)[0]] anc = repo.changelog.ancestors([new.rev()]) deletefrom = [b.node() for b in divs if b.rev() in anc or b == new] - if old.descendant(new): + if validdest(repo, repo[marks[cur]], new): marks[cur] = new.node() update = True @@ -239,49 +238,176 @@ finally: w.release() +def compare(repo, srcmarks, dstmarks, + srchex=None, dsthex=None, targets=None): + '''Compare bookmarks between srcmarks and dstmarks + + This returns tuple "(addsrc, adddst, advsrc, advdst, diverge, + differ, invalid)", each are list of bookmarks below: + + :addsrc: added on src side (removed on dst side, perhaps) + :adddst: added on dst side (removed on src side, perhaps) + :advsrc: advanced on src side + :advdst: advanced on dst side + :diverge: diverge + :differ: changed, but changeset referred on src is unknown on dst + :invalid: unknown on both side + + Each elements of lists in result tuple is tuple "(bookmark name, + changeset ID on source side, changeset ID on destination + side)". Each changeset IDs are 40 hexadecimal digit string or + None. + + Changeset IDs of tuples in "addsrc", "adddst", "differ" or + "invalid" list may be unknown for repo. + + This function expects that "srcmarks" and "dstmarks" return + changeset ID in 40 hexadecimal digit string for specified + bookmark. If not so (e.g. bmstore "repo._bookmarks" returning + binary value), "srchex" or "dsthex" should be specified to convert + into such form. + + If "targets" is specified, only bookmarks listed in it are + examined. 
+ ''' + if not srchex: + srchex = lambda x: x + if not dsthex: + dsthex = lambda x: x + + if targets: + bset = set(targets) + else: + srcmarkset = set(srcmarks) + dstmarkset = set(dstmarks) + bset = srcmarkset ^ dstmarkset + for b in srcmarkset & dstmarkset: + if srchex(srcmarks[b]) != dsthex(dstmarks[b]): + bset.add(b) + + results = ([], [], [], [], [], [], []) + addsrc = results[0].append + adddst = results[1].append + advsrc = results[2].append + advdst = results[3].append + diverge = results[4].append + differ = results[5].append + invalid = results[6].append + + for b in sorted(bset): + if b not in srcmarks: + if b in dstmarks: + adddst((b, None, dsthex(dstmarks[b]))) + else: + invalid((b, None, None)) + elif b not in dstmarks: + addsrc((b, srchex(srcmarks[b]), None)) + else: + scid = srchex(srcmarks[b]) + dcid = dsthex(dstmarks[b]) + if scid in repo and dcid in repo: + sctx = repo[scid] + dctx = repo[dcid] + if sctx.rev() < dctx.rev(): + if validdest(repo, sctx, dctx): + advdst((b, scid, dcid)) + else: + diverge((b, scid, dcid)) + else: + if validdest(repo, dctx, sctx): + advsrc((b, scid, dcid)) + else: + diverge((b, scid, dcid)) + else: + # it is too expensive to examine in detail, in this case + differ((b, scid, dcid)) + + return results + +def _diverge(ui, b, path, localmarks): + if b == '@': + b = '' + # find a unique @ suffix + for x in range(1, 100): + n = '%s@%d' % (b, x) + if n not in localmarks: + break + # try to use an @pathalias suffix + # if an @pathalias already exists, we overwrite (update) it + for p, u in ui.configitems("paths"): + if path == u: + n = '%s@%s' % (b, p) + return n + def updatefromremote(ui, repo, remotemarks, path): ui.debug("checking for updated bookmarks\n") - changed = False localmarks = repo._bookmarks - for k in sorted(remotemarks): - if k in localmarks: - nr, nl = remotemarks[k], localmarks[k] - if nr in repo: - cr = repo[nr] - cl = repo[nl] - if cl.rev() >= cr.rev(): - continue - if validdest(repo, cl, cr): - localmarks[k] = cr.node() - changed = True - ui.status(_("updating bookmark %s\n") % k) - else: - if k == '@': - kd = '' - else: - kd = k - # find a unique @ suffix - for x in range(1, 100): - n = '%s@%d' % (kd, x) - if n not in localmarks: - break - # try to use an @pathalias suffix - # if an @pathalias already exists, we overwrite (update) it - for p, u in ui.configitems("paths"): - if path == u: - n = '%s@%s' % (kd, p) + (addsrc, adddst, advsrc, advdst, diverge, differ, invalid + ) = compare(repo, remotemarks, localmarks, dsthex=hex) + + changed = [] + for b, scid, dcid in addsrc: + if scid in repo: # add remote bookmarks for changes we already have + changed.append((b, bin(scid), ui.status, + _("adding remote bookmark %s\n") % (b))) + for b, scid, dcid in advsrc: + changed.append((b, bin(scid), ui.status, + _("updating bookmark %s\n") % (b))) + for b, scid, dcid in diverge: + db = _diverge(ui, b, path, localmarks) + changed.append((db, bin(scid), ui.warn, + _("divergent bookmark %s stored as %s\n") % (b, db))) + if changed: + for b, node, writer, msg in sorted(changed): + localmarks[b] = node + writer(msg) + localmarks.write() + +def updateremote(ui, repo, remote, revs): + ui.debug("checking for updated bookmarks\n") + revnums = map(repo.changelog.rev, revs or []) + ancestors = [a for a in repo.changelog.ancestors(revnums, inclusive=True)] + (addsrc, adddst, advsrc, advdst, diverge, differ, invalid + ) = compare(repo, repo._bookmarks, remote.listkeys('bookmarks'), + srchex=hex) - localmarks[n] = cr.node() - changed = True - 
ui.warn(_("divergent bookmark %s stored as %s\n") % (k, n)) - elif remotemarks[k] in repo: - # add remote bookmarks for changes we already have - localmarks[k] = repo[remotemarks[k]].node() - changed = True - ui.status(_("adding remote bookmark %s\n") % k) + for b, scid, dcid in advsrc: + if ancestors and repo[scid].rev() not in ancestors: + continue + if remote.pushkey('bookmarks', b, dcid, scid): + ui.status(_("updating bookmark %s\n") % b) + else: + ui.warn(_('updating bookmark %s failed!\n') % b) + +def pushtoremote(ui, repo, remote, targets): + (addsrc, adddst, advsrc, advdst, diverge, differ, invalid + ) = compare(repo, repo._bookmarks, remote.listkeys('bookmarks'), + srchex=hex, targets=targets) + if invalid: + b, scid, dcid = invalid[0] + ui.warn(_('bookmark %s does not exist on the local ' + 'or remote repository!\n') % b) + return 2 - if changed: - localmarks.write() + def push(b, old, new): + r = remote.pushkey('bookmarks', b, old, new) + if not r: + ui.warn(_('updating bookmark %s failed!\n') % b) + return 1 + return 0 + failed = 0 + for b, scid, dcid in sorted(addsrc + advsrc + advdst + diverge + differ): + ui.status(_("exporting bookmark %s\n") % b) + if dcid is None: + dcid = '' + failed += push(b, dcid, scid) + for b, scid, dcid in adddst: + # treat as "deleted locally" + ui.status(_("deleting remote bookmark %s\n") % b) + failed += push(b, dcid, '') + + if failed: + return 1 def diff(ui, dst, src): ui.status(_("searching for changed bookmarks\n")) diff -r 8c69c69dbcd2 -r fe8e254c7ad6 mercurial/branchmap.py --- a/mercurial/branchmap.py Wed Jan 01 21:46:45 2014 -0600 +++ b/mercurial/branchmap.py Tue Jan 21 14:30:06 2014 -0600 @@ -7,11 +7,11 @@ from node import bin, hex, nullid, nullrev import encoding -import util, repoview +import util def _filename(repo): """name of a branchcache file for a given repo or repoview""" - filename = "cache/branchheads" + filename = "cache/branch2" if repo.filtername: filename = '%s-%s' % (filename, repo.filtername) return filename @@ -39,11 +39,16 @@ for l in lines: if not l: continue - node, label = l.split(" ", 1) + node, state, label = l.split(" ", 2) + if state not in 'oc': + raise ValueError('invalid branch state') label = encoding.tolocal(label.strip()) if not node in repo: raise ValueError('node %s does not exist' % node) - partial.setdefault(label, []).append(bin(node)) + node = bin(node) + partial.setdefault(label, []).append(node) + if state == 'c': + partial._closednodes.add(node) except KeyboardInterrupt: raise except Exception, inst: @@ -58,6 +63,17 @@ +### Nearest subset relation +# Nearest subset of filter X is a filter Y so that: +# * Y is included in X, +# * X - Y is as small as possible. +# This create and ordering used for branchmap purpose. +# the ordering may be partial +subsettable = {None: 'visible', + 'visible': 'served', + 'served': 'immutable', + 'immutable': 'base'} + def updatecache(repo): cl = repo.changelog filtername = repo.filtername @@ -67,7 +83,7 @@ if partial is None or not partial.validfor(repo): partial = read(repo) if partial is None: - subsetname = repoview.subsettable.get(filtername) + subsetname = subsettable.get(filtername) if subsetname is None: partial = branchcache() else: @@ -83,14 +99,40 @@ repo._branchcaches[repo.filtername] = partial class branchcache(dict): - """A dict like object that hold branches heads cache""" + """A dict like object that hold branches heads cache. + + This cache is used to avoid costly computations to determine all the + branch heads of a repo. 
+ + The cache is serialized on disk in the following format: + + [optional filtered repo hex hash] + + + ... + + The first line is used to check if the cache is still valid. If the + branch cache is for a filtered repo view, an optional third hash is + included that hashes the hashes of all filtered revisions. + + The open/closed state is represented by a single letter 'o' or 'c'. + This field can be used to avoid changelog reads when determining if a + branch head closes a branch or not. + """ def __init__(self, entries=(), tipnode=nullid, tiprev=nullrev, - filteredhash=None): + filteredhash=None, closednodes=None): super(branchcache, self).__init__(entries) self.tipnode = tipnode self.tiprev = tiprev self.filteredhash = filteredhash + # closednodes is a set of nodes that close their branch. If the branch + # cache has been updated, it may contain nodes that are no longer + # heads. + if closednodes is None: + self._closednodes = set() + else: + self._closednodes = closednodes def _hashfiltered(self, repo): """build hash of revision filtered in the current cache @@ -124,9 +166,38 @@ except IndexError: return False + def _branchtip(self, heads): + '''Return tuple with last open head in heads and false, + otherwise return last closed head and true.''' + tip = heads[-1] + closed = True + for h in reversed(heads): + if h not in self._closednodes: + tip = h + closed = False + break + return tip, closed + + def branchtip(self, branch): + '''Return the tipmost open head on branch head, otherwise return the + tipmost closed head on branch. + Raise KeyError for unknown branch.''' + return self._branchtip(self[branch])[0] + + def branchheads(self, branch, closed=False): + heads = self[branch] + if not closed: + heads = [h for h in heads if h not in self._closednodes] + return heads + + def iterbranches(self): + for bn, heads in self.iteritems(): + yield (bn, heads) + self._branchtip(heads) + def copy(self): """return an deep copy of the branchcache object""" - return branchcache(self, self.tipnode, self.tiprev, self.filteredhash) + return branchcache(self, self.tipnode, self.tiprev, self.filteredhash, + self._closednodes) def write(self, repo): try: @@ -137,7 +208,12 @@ f.write(" ".join(cachekey) + '\n') for label, nodes in sorted(self.iteritems()): for node in nodes: - f.write("%s %s\n" % (hex(node), encoding.fromlocal(label))) + if node in self._closednodes: + state = 'c' + else: + state = 'o' + f.write("%s %s %s\n" % (hex(node), state, + encoding.fromlocal(label))) f.close() except (IOError, OSError, util.Abort): # Abort may be raise by read only opener @@ -145,55 +221,43 @@ def update(self, repo, revgen): """Given a branchhead cache, self, that may have extra nodes or be - missing heads, and a generator of nodes that are at least a superset of + missing heads, and a generator of nodes that are strictly a superset of heads missing, this function updates self to be correct. """ cl = repo.changelog # collect new branch entries newbranches = {} - getbranch = cl.branch + getbranchinfo = cl.branchinfo for r in revgen: - newbranches.setdefault(getbranch(r), []).append(cl.node(r)) + branch, closesbranch = getbranchinfo(r) + newbranches.setdefault(branch, []).append(r) + if closesbranch: + self._closednodes.add(cl.node(r)) # if older branchheads are reachable from new ones, they aren't # really branchheads. 
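The branch2 serialization format documented in the branchcache docstring above is line oriented and easy to inspect by hand; a minimal sketch of decoding one head line the same way read() does (a hypothetical helper, standard library only)::

    def parseheadline(line):
        # "<40 hex digits> <'o' or 'c'> <branch name>" -> (hexnode, closed, branch)
        node, state, label = line.split(" ", 2)
        if state not in "oc":
            raise ValueError("invalid branch state: %r" % state)
        return node, state == "c", label.strip()

Branch names may themselves contain spaces, which is why the label is taken as the remainder of the line after the second split.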
Note checking parents is insufficient: # 1 (branch a) -> 2 (branch b) -> 3 (branch a) - for branch, newnodes in newbranches.iteritems(): + for branch, newheadrevs in newbranches.iteritems(): bheads = self.setdefault(branch, []) - # Remove candidate heads that no longer are in the repo (e.g., as - # the result of a strip that just happened). Avoid using 'node in - # self' here because that dives down into branchcache code somewhat - # recursively. - bheadrevs = [cl.rev(node) for node in bheads - if cl.hasnode(node)] - newheadrevs = [cl.rev(node) for node in newnodes - if cl.hasnode(node)] - ctxisnew = bheadrevs and min(newheadrevs) > max(bheadrevs) - # Remove duplicates - nodes that are in newheadrevs and are already - # in bheadrevs. This can happen if you strip a node whose parent - # was already a head (because they're on different branches). - bheadrevs = sorted(set(bheadrevs).union(newheadrevs)) + bheadset = set(cl.rev(node) for node in bheads) - # Starting from tip means fewer passes over reachable. If we know - # the new candidates are not ancestors of existing heads, we don't - # have to examine ancestors of existing heads - if ctxisnew: - iterrevs = sorted(newheadrevs) - else: - iterrevs = list(bheadrevs) + # This have been tested True on all internal usage of this function. + # run it again in case of doubt + # assert not (set(bheadrevs) & set(newheadrevs)) + newheadrevs.sort() + bheadset.update(newheadrevs) # This loop prunes out two kinds of heads - heads that are # superseded by a head in newheadrevs, and newheadrevs that are not # heads because an existing head is their descendant. - while iterrevs: - latest = iterrevs.pop() - if latest not in bheadrevs: + while newheadrevs: + latest = newheadrevs.pop() + if latest not in bheadset: continue - ancestors = set(cl.ancestors([latest], - bheadrevs[0])) - if ancestors: - bheadrevs = [b for b in bheadrevs if b not in ancestors] + ancestors = set(cl.ancestors([latest], min(bheadset))) + bheadset -= ancestors + bheadrevs = sorted(bheadset) self[branch] = [cl.node(rev) for rev in bheadrevs] - tiprev = max(bheadrevs) + tiprev = bheadrevs[-1] if tiprev > self.tiprev: self.tipnode = cl.node(tiprev) self.tiprev = tiprev diff -r 8c69c69dbcd2 -r fe8e254c7ad6 mercurial/changelog.py --- a/mercurial/changelog.py Wed Jan 01 21:46:45 2014 -0600 +++ b/mercurial/changelog.py Tue Jan 21 14:30:06 2014 -0600 @@ -342,9 +342,10 @@ text = "\n".join(l) return self.addrevision(text, transaction, len(self), p1, p2) - def branch(self, rev): - """return the branch of a revision + def branchinfo(self, rev): + """return the branch name and open/close state of a revision This function exists because creating a changectx object just to access this is costly.""" - return encoding.tolocal(self.read(rev)[5].get("branch")) + extra = self.read(rev)[5] + return encoding.tolocal(extra.get("branch")), 'close' in extra diff -r 8c69c69dbcd2 -r fe8e254c7ad6 mercurial/cmdutil.py --- a/mercurial/cmdutil.py Wed Jan 01 21:46:45 2014 -0600 +++ b/mercurial/cmdutil.py Tue Jan 21 14:30:06 2014 -0600 @@ -10,7 +10,7 @@ import os, sys, errno, re, tempfile import util, scmutil, templater, patch, error, templatekw, revlog, copies import match as matchmod -import subrepo, context, repair, graphmod, revset, phases, obsolete +import subrepo, context, repair, graphmod, revset, phases, obsolete, pathutil import changelog import bookmarks import lock as lockmod @@ -274,7 +274,7 @@ # relsrc: ossep # otarget: ossep def copyfile(abssrc, relsrc, otarget, exact): - abstarget = 
scmutil.canonpath(repo.root, cwd, otarget) + abstarget = pathutil.canonpath(repo.root, cwd, otarget) if '/' in abstarget: # We cannot normalize abstarget itself, this would prevent # case only renames, like a => A. @@ -367,7 +367,7 @@ # return: function that takes hgsep and returns ossep def targetpathfn(pat, dest, srcs): if os.path.isdir(pat): - abspfx = scmutil.canonpath(repo.root, cwd, pat) + abspfx = pathutil.canonpath(repo.root, cwd, pat) abspfx = util.localpath(abspfx) if destdirexists: striplen = len(os.path.split(abspfx)[0]) @@ -393,7 +393,7 @@ res = lambda p: os.path.join(dest, os.path.basename(util.localpath(p))) else: - abspfx = scmutil.canonpath(repo.root, cwd, pat) + abspfx = pathutil.canonpath(repo.root, cwd, pat) if len(abspfx) < len(srcs[0][0]): # A directory. Either the target path contains the last # component of the source path or it does not. @@ -2063,7 +2063,7 @@ fc = ctx[f] repo.wwrite(f, fc.data(), fc.flags()) - audit_path = scmutil.pathauditor(repo.root) + audit_path = pathutil.pathauditor(repo.root) for f in remove[0]: if repo.dirstate[f] == 'a': repo.dirstate.drop(f) diff -r 8c69c69dbcd2 -r fe8e254c7ad6 mercurial/commands.py --- a/mercurial/commands.py Wed Jan 01 21:46:45 2014 -0600 +++ b/mercurial/commands.py Tue Jan 21 14:30:06 2014 -0600 @@ -17,7 +17,8 @@ import merge as mergemod import minirst, revset, fileset import dagparser, context, simplemerge, graphmod -import random, setdiscovery, treediscovery, dagutil, pvec, localrepo +import random +import setdiscovery, treediscovery, dagutil, pvec, localrepo import phases, obsolete table = {} @@ -460,16 +461,29 @@ try: branch = repo.dirstate.branch() bheads = repo.branchheads(branch) - hg.clean(repo, node, show_stats=False) - repo.dirstate.setbranch(branch) rctx = scmutil.revsingle(repo, hex(parent)) - cmdutil.revert(ui, repo, rctx, repo.dirstate.parents()) if not opts.get('merge') and op1 != node: try: ui.setconfig('ui', 'forcemerge', opts.get('tool', '')) - return hg.update(repo, op1) + stats = mergemod.update(repo, parent, True, True, False, + node, False) + repo.setparents(op1, op2) + hg._showstats(repo, stats) + if stats[3]: + repo.ui.status(_("use 'hg resolve' to retry unresolved " + "file merges\n")) + else: + msg = _("changeset %s backed out, " + "don't forget to commit.\n") + ui.status(msg % short(node)) + return stats[3] > 0 finally: ui.setconfig('ui', 'forcemerge', '') + else: + hg.clean(repo, node, show_stats=False) + repo.dirstate.setbranch(branch) + cmdutil.revert(ui, repo, rctx, repo.dirstate.parents()) + e = cmdutil.commiteditor if not opts['message'] and not opts['logfile']: @@ -666,12 +680,13 @@ if command: changesets = 1 - try: - node = state['current'][0] - except LookupError: - if noupdate: + if noupdate: + try: + node = state['current'][0] + except LookupError: raise util.Abort(_('current bisect revision is unknown - ' 'start a new bisect to fix')) + else: node, p2 = repo.dirstate.parents() if p2 != nullid: raise util.Abort(_('current bisect revision is a merge')) @@ -700,7 +715,7 @@ ui.status(_('changeset %d:%s: %s\n') % (ctx, ctx, transition)) check_state(state, interactive=False) # bisect - nodes, changesets, good = hbisect.bisect(repo.changelog, state) + nodes, changesets, bgood = hbisect.bisect(repo.changelog, state) # update to next check node = nodes[0] if not noupdate: @@ -709,7 +724,7 @@ finally: state['current'] = [node] hbisect.save_state(repo, state) - print_result(nodes, good) + print_result(nodes, bgood) return # update state @@ -806,10 +821,6 @@ rename = opts.get('rename') 
inactive = opts.get('inactive') - hexfn = ui.debugflag and hex or short - marks = repo._bookmarks - cur = repo.changectx('.').node() - def checkformat(mark): mark = mark.strip() if not mark: @@ -818,7 +829,7 @@ scmutil.checknewlabel(repo, mark, 'bookmark') return mark - def checkconflict(repo, mark, force=False, target=None): + def checkconflict(repo, mark, cur, force=False, target=None): if mark in marks and not force: if target: if marks[mark] == target and target == cur: @@ -836,12 +847,10 @@ bookmarks.deletedivergent(repo, [target], mark) return - # consider successor changesets as well - foreground = obsolete.foreground(repo, [marks[mark]]) deletefrom = [b for b in divs if repo[b].rev() in anc or b == target] bookmarks.deletedivergent(repo, deletefrom, mark) - if bmctx.rev() in anc or target in foreground: + if bookmarks.validdest(repo, bmctx, repo[target]): ui.status(_("moving bookmark '%s' forward from %s\n") % (mark, short(bmctx.node()))) return @@ -861,75 +870,84 @@ if not names and (delete or rev): raise util.Abort(_("bookmark name required")) - if delete: - for mark in names: - if mark not in marks: - raise util.Abort(_("bookmark '%s' does not exist") % mark) - if mark == repo._bookmarkcurrent: - bookmarks.setcurrent(repo, None) - del marks[mark] - marks.write() - - elif rename: - if not names: - raise util.Abort(_("new bookmark name required")) - elif len(names) > 1: - raise util.Abort(_("only one new bookmark name allowed")) - mark = checkformat(names[0]) - if rename not in marks: - raise util.Abort(_("bookmark '%s' does not exist") % rename) - checkconflict(repo, mark, force) - marks[mark] = marks[rename] - if repo._bookmarkcurrent == rename and not inactive: - bookmarks.setcurrent(repo, mark) - del marks[rename] - marks.write() - - elif names: - newact = None - for mark in names: - mark = checkformat(mark) - if newact is None: - newact = mark - if inactive and mark == repo._bookmarkcurrent: - bookmarks.setcurrent(repo, None) - return - tgt = cur - if rev: - tgt = scmutil.revsingle(repo, rev).node() - checkconflict(repo, mark, force, tgt) - marks[mark] = tgt - if not inactive and cur == marks[newact] and not rev: - bookmarks.setcurrent(repo, newact) - elif cur != tgt and newact == repo._bookmarkcurrent: - bookmarks.setcurrent(repo, None) - marks.write() - - # Same message whether trying to deactivate the current bookmark (-i - # with no NAME) or listing bookmarks - elif len(marks) == 0: - ui.status(_("no bookmarks set\n")) - - elif inactive: - if not repo._bookmarkcurrent: - ui.status(_("no active bookmark\n")) + if delete or rename or names or inactive: + wlock = repo.wlock() + try: + cur = repo.changectx('.').node() + marks = repo._bookmarks + if delete: + for mark in names: + if mark not in marks: + raise util.Abort(_("bookmark '%s' does not exist") % + mark) + if mark == repo._bookmarkcurrent: + bookmarks.unsetcurrent(repo) + del marks[mark] + marks.write() + + elif rename: + if not names: + raise util.Abort(_("new bookmark name required")) + elif len(names) > 1: + raise util.Abort(_("only one new bookmark name allowed")) + mark = checkformat(names[0]) + if rename not in marks: + raise util.Abort(_("bookmark '%s' does not exist") % rename) + checkconflict(repo, mark, cur, force) + marks[mark] = marks[rename] + if repo._bookmarkcurrent == rename and not inactive: + bookmarks.setcurrent(repo, mark) + del marks[rename] + marks.write() + + elif names: + newact = None + for mark in names: + mark = checkformat(mark) + if newact is None: + newact = mark + if inactive and mark == 
repo._bookmarkcurrent: + bookmarks.unsetcurrent(repo) + return + tgt = cur + if rev: + tgt = scmutil.revsingle(repo, rev).node() + checkconflict(repo, mark, cur, force, tgt) + marks[mark] = tgt + if not inactive and cur == marks[newact] and not rev: + bookmarks.setcurrent(repo, newact) + elif cur != tgt and newact == repo._bookmarkcurrent: + bookmarks.unsetcurrent(repo) + marks.write() + + elif inactive: + if len(marks) == 0: + ui.status(_("no bookmarks set\n")) + elif not repo._bookmarkcurrent: + ui.status(_("no active bookmark\n")) + else: + bookmarks.unsetcurrent(repo) + finally: + wlock.release() + else: # show bookmarks + hexfn = ui.debugflag and hex or short + marks = repo._bookmarks + if len(marks) == 0: + ui.status(_("no bookmarks set\n")) else: - bookmarks.setcurrent(repo, None) - - else: # show bookmarks - for bmark, n in sorted(marks.iteritems()): - current = repo._bookmarkcurrent - if bmark == current: - prefix, label = '*', 'bookmarks.current' - else: - prefix, label = ' ', '' - - if ui.quiet: - ui.write("%s\n" % bmark, label=label) - else: - ui.write(" %s %-25s %d:%s\n" % ( - prefix, bmark, repo.changelog.rev(n), hexfn(n)), - label=label) + for bmark, n in sorted(marks.iteritems()): + current = repo._bookmarkcurrent + if bmark == current: + prefix, label = '*', 'bookmarks.current' + else: + prefix, label = ' ', '' + + if ui.quiet: + ui.write("%s\n" % bmark, label=label) + else: + ui.write(" %s %-25s %d:%s\n" % ( + prefix, bmark, repo.changelog.rev(n), hexfn(n)), + label=label) @command('branch', [('f', 'force', None, @@ -1012,23 +1030,15 @@ hexfunc = ui.debugflag and hex or short - activebranches = set([repo[n].branch() for n in repo.heads()]) + allheads = set(repo.heads()) branches = [] - for tag, heads in repo.branchmap().iteritems(): - for h in reversed(heads): - ctx = repo[h] - isopen = not ctx.closesbranch() - if isopen: - tip = ctx - break - else: - tip = repo[heads[-1]] - isactive = tag in activebranches and isopen - branches.append((tip, isactive, isopen)) - branches.sort(key=lambda i: (i[1], i[0].rev(), i[0].branch(), i[2]), + for tag, heads, tip, isclosed in repo.branchmap().iterbranches(): + isactive = not isclosed and bool(set(heads) & allheads) + branches.append((tag, repo[tip], isactive, not isclosed)) + branches.sort(key=lambda i: (i[2], i[1].rev(), i[0], i[3]), reverse=True) - for ctx, isactive, isopen in branches: + for tag, ctx, isactive, isopen in branches: if (not active) or isactive: if isactive: label = 'branches.active' @@ -1041,16 +1051,16 @@ else: label = 'branches.inactive' notice = _(' (inactive)') - if ctx.branch() == repo.dirstate.branch(): + if tag == repo.dirstate.branch(): label = 'branches.current' - rev = str(ctx.rev()).rjust(31 - encoding.colwidth(ctx.branch())) + rev = str(ctx.rev()).rjust(31 - encoding.colwidth(tag)) rev = ui.label('%s:%s' % (rev, hexfunc(ctx.node())), 'log.changeset changeset.%s' % ctx.phasestr()) - tag = ui.label(ctx.branch(), label) + labeledtag = ui.label(tag, label) if ui.quiet: - ui.write("%s\n" % tag) + ui.write("%s\n" % labeledtag) else: - ui.write("%s %s%s\n" % (tag, rev, notice)) + ui.write("%s %s%s\n" % (labeledtag, rev, notice)) @command('bundle', [('f', 'force', None, _('run even when the destination is unrelated')), @@ -1158,14 +1168,28 @@ ctx = scmutil.revsingle(repo, opts.get('rev')) err = 1 m = scmutil.match(ctx, (file1,) + pats, opts) - for abs in ctx.walk(m): + + def write(path): fp = cmdutil.makefileobj(repo, opts.get('output'), ctx.node(), - pathname=abs) - data = ctx[abs].data() + pathname=path) + 
data = ctx[path].data() if opts.get('decode'): - data = repo.wwritedata(abs, data) + data = repo.wwritedata(path, data) fp.write(data) fp.close() + + # Automation often uses hg cat on single files, so special case it + # for performance to avoid the cost of parsing the manifest. + if len(m.files()) == 1 and not m.anypats(): + file = m.files()[0] + mf = repo.manifest + mfnode = ctx._changeset[0] + if mf.find(mfnode, file)[0]: + write(file) + return 0 + + for abs in ctx.walk(m): + write(abs) err = 0 return err @@ -2142,11 +2166,8 @@ labels = set() labels.update(t[0] for t in repo.tagslist()) labels.update(repo._bookmarks.keys()) - for heads in repo.branchmap().itervalues(): - for h in heads: - ctx = repo[h] - if not ctx.closesbranch(): - labels.add(ctx.branch()) + labels.update(tag for (tag, heads, tip, closed) + in repo.branchmap().iterbranches() if not closed) completions = set() if not args: args = [''] @@ -2244,7 +2265,7 @@ continue s = f.find(os.sep, speclen) if s >= 0: - adddir(f[:s + 1]) + adddir(f[:s]) else: addfile(f) return files, dirs @@ -2265,10 +2286,6 @@ f, d = complete(spec, acceptable or 'nmar') files.update(f) dirs.update(d) - if not files and len(dirs) == 1: - # force the shell to consider a completion that matches one - # directory and zero files to be ambiguous - dirs.add(iter(dirs).next() + '.') files.update(dirs) ui.write('\n'.join(repo.pathto(p, cwd) for p in sorted(files))) ui.write('\n') @@ -3762,12 +3779,12 @@ files, eolmode=None) except patch.PatchError, e: raise util.Abort(str(e)) - memctx = patch.makememctx(repo, (p1.node(), p2.node()), - message, - opts.get('user') or user, - opts.get('date') or date, - branch, files, store, - editor=cmdutil.commiteditor) + memctx = context.makememctx(repo, (p1.node(), p2.node()), + message, + opts.get('user') or user, + opts.get('date') or date, + branch, files, store, + editor=cmdutil.commiteditor) repo.savecommitmessage(memctx.description()) n = memctx.commit() finally: @@ -4680,6 +4697,7 @@ """ if opts.get('bookmark'): + ui.setconfig('bookmarks', 'pushing', opts['bookmark']) for b in opts['bookmark']: # translate -B options to -r so changesets get pushed if b in repo._bookmarks: @@ -4713,25 +4731,11 @@ result = not result if opts.get('bookmark'): - rb = other.listkeys('bookmarks') - for b in opts['bookmark']: - # explicit push overrides remote bookmark if any - if b in repo._bookmarks: - ui.status(_("exporting bookmark %s\n") % b) - new = repo[b].hex() - elif b in rb: - ui.status(_("deleting remote bookmark %s\n") % b) - new = '' # delete - else: - ui.warn(_('bookmark %s does not exist on the local ' - 'or remote repository!\n') % b) - return 2 - old = rb.get(b, '') - r = other.pushkey('bookmarks', b, old, new) - if not r: - ui.warn(_('updating bookmark %s failed!\n') % b) - if not result: - result = 2 + bresult = bookmarks.pushtoremote(ui, repo, other, opts['bookmark']) + if bresult == 2: + return 2 + if not result and bresult: + result = 2 return result @@ -5915,7 +5919,7 @@ % util.version()) ui.status(_( "(see http://mercurial.selenic.com for more information)\n" - "\nCopyright (C) 2005-2013 Matt Mackall and others\n" + "\nCopyright (C) 2005-2014 Matt Mackall and others\n" "This is free software; see the source for copying conditions. 
" "There is NO\nwarranty; " "not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.\n" diff -r 8c69c69dbcd2 -r fe8e254c7ad6 mercurial/commandserver.py --- a/mercurial/commandserver.py Wed Jan 01 21:46:45 2014 -0600 +++ b/mercurial/commandserver.py Tue Jan 21 14:30:06 2014 -0600 @@ -184,7 +184,10 @@ # persist between requests copiedui = self.ui.copy() self.repo.baseui = copiedui - self.repo.ui = self.repo.dirstate._ui = self.repoui.copy() + # clone ui without using ui.copy because this is protected + repoui = self.repoui.__class__(self.repoui) + repoui.copy = copiedui.copy # redo copy protection + self.repo.ui = self.repo.dirstate._ui = repoui self.repo.invalidate() self.repo.invalidatedirstate() diff -r 8c69c69dbcd2 -r fe8e254c7ad6 mercurial/context.py --- a/mercurial/context.py Wed Jan 01 21:46:45 2014 -0600 +++ b/mercurial/context.py Tue Jan 21 14:30:06 2014 -0600 @@ -8,7 +8,6 @@ from node import nullid, nullrev, short, hex, bin from i18n import _ import ancestor, mdiff, error, util, scmutil, subrepo, patch, encoding, phases -import copies import match as matchmod import os, errno, stat import obsolete as obsmod @@ -195,6 +194,21 @@ def dirty(self): return False +def makememctx(repo, parents, text, user, date, branch, files, store, + editor=None): + def getfilectx(repo, memctx, path): + data, (islink, isexec), copied = store.getfile(path) + return memfilectx(path, data, islink=islink, isexec=isexec, + copied=copied) + extra = {} + if branch: + extra['branch'] = encoding.fromlocal(branch) + ctx = memctx(repo, parents, text, files, getfilectx, user, + date, extra) + if editor: + ctx._text = editor(repo, ctx, []) + return ctx + class changectx(basectx): """A changecontext object makes access to data related to a particular changeset convenient. It represents a read-only context already present in @@ -396,6 +410,15 @@ # for dirstate.walk, files=['.'] means "walk the whole tree". 
# follow that here, too fset.discard('.') + + # avoid the entire walk if we're only looking for specific files + if fset and not match.anypats(): + if util.all([fn in self for fn in fset]): + for fn in sorted(fset): + if match(fn): + yield fn + raise StopIteration + for fn in self: if fn in fset: # specified pattern is the exact name @@ -722,14 +745,6 @@ c = visit.pop(max(visit)) yield c - def copies(self, c2): - if not util.safehasattr(self, "_copycache"): - self._copycache = {} - sc2 = str(c2) - if sc2 not in self._copycache: - self._copycache[sc2] = copies.pathcopies(c2) - return self._copycache[sc2] - class filectx(basefilectx): """A filecontext object makes access to data related to a particular filerevision convenient.""" diff -r 8c69c69dbcd2 -r fe8e254c7ad6 mercurial/copies.py --- a/mercurial/copies.py Wed Jan 01 21:46:45 2014 -0600 +++ b/mercurial/copies.py Tue Jan 21 14:30:06 2014 -0600 @@ -98,15 +98,14 @@ return t -def _tracefile(fctx, actx): - '''return file context that is the ancestor of fctx present in actx''' - stop = actx.rev() - am = actx.manifest() +def _tracefile(fctx, am, limit=-1): + '''return file context that is the ancestor of fctx present in ancestor + manifest am, stopping after the first ancestor lower than limit''' for f in fctx.ancestors(): if am.get(f.path(), None) == f.filenode(): return f - if f.rev() < stop: + if f.rev() < limit: return None def _dirstatecopies(d): @@ -129,6 +128,13 @@ # short-circuit to avoid issues with merge states return _dirstatecopies(w) + # files might have to be traced back to the fctx parent of the last + # one-side-only changeset, but not further back than that + limit = _findlimit(a._repo, a.rev(), b.rev()) + if limit is None: + limit = -1 + am = a.manifest() + # find where new files came from # we currently don't try to find where old files went, too expensive # this means we can miss a case like 'hg rm b; hg cp a b' @@ -137,7 +143,7 @@ missing.difference_update(a.manifest().iterkeys()) for f in missing: - ofctx = _tracefile(b[f], a) + ofctx = _tracefile(b[f], am, limit) if ofctx: cm[f] = ofctx.path() diff -r 8c69c69dbcd2 -r fe8e254c7ad6 mercurial/dagutil.py --- a/mercurial/dagutil.py Wed Jan 01 21:46:45 2014 -0600 +++ b/mercurial/dagutil.py Tue Jan 21 14:30:06 2014 -0600 @@ -141,7 +141,9 @@ rl = self._revlog if filterunknown: return [r for r in map(rl.nodemap.get, ids) - if r is not None and r != nullrev] + if (r is not None + and r != nullrev + and r not in rl.filteredrevs)] return map(self._internalize, ids) @@ -149,7 +151,7 @@ '''dag interface to a revlog''' def __init__(self, revlog): - revlogbaseddag.__init__(self, revlog, set(xrange(len(revlog)))) + revlogbaseddag.__init__(self, revlog, set(revlog)) def _getheads(self): return [r for r in self._revlog.headrevs() if r != nullrev] diff -r 8c69c69dbcd2 -r fe8e254c7ad6 mercurial/dirstate.py --- a/mercurial/dirstate.py Wed Jan 01 21:46:45 2014 -0600 +++ b/mercurial/dirstate.py Tue Jan 21 14:30:06 2014 -0600 @@ -8,7 +8,7 @@ from node import nullid from i18n import _ -import scmutil, util, ignore, osutil, parsers, encoding +import scmutil, util, ignore, osutil, parsers, encoding, pathutil import os, stat, errno, gc propertycache = util.propertycache @@ -736,7 +736,7 @@ # unknown == True means we walked the full directory tree above. # So if a file is not seen it was either a) not matching matchfn # b) ignored, c) missing, or d) under a symlink directory. 
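The fset fast path added to changectx.walk above skips the full manifest scan when the matcher names only concrete files and no patterns; a rough standalone sketch of the idea, where 'manifest' stands for any container of tracked file names and 'match' for a callable with an anypats() method (both assumptions, not Mercurial's real objects)::

    def walkfiles(manifest, match, fset):
        # fast path: every explicitly requested file exists and no patterns are used
        if fset and not match.anypats() and all(fn in manifest for fn in fset):
            for fn in sorted(fset):
                if match(fn):
                    yield fn
            return
        # otherwise fall back to scanning every tracked file
        for fn in manifest:
            if match(fn):
                yield fn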
- audit_path = scmutil.pathauditor(self._root) + audit_path = pathutil.pathauditor(self._root) for nf in iter(visit): # Report ignored items in the dmap as long as they are not diff -r 8c69c69dbcd2 -r fe8e254c7ad6 mercurial/discovery.py --- a/mercurial/discovery.py Wed Jan 01 21:46:45 2014 -0600 +++ b/mercurial/discovery.py Tue Jan 21 14:30:06 2014 -0600 @@ -34,9 +34,9 @@ if heads: allknown = True - nm = repo.changelog.nodemap + knownnode = repo.changelog.hasnode # no nodemap until it is filtered for h in heads: - if nm.get(h) is None: + if not knownnode(h): allknown = False break if allknown: @@ -172,8 +172,9 @@ remotebranches.add(branch) known = [] unsynced = [] + knownnode = cl.hasnode # do not use nodemap until it is filtered for h in heads: - if h in cl.nodemap: + if knownnode(h): known.append(h) else: unsynced.append(h) @@ -204,11 +205,11 @@ def _oldheadssummary(repo, remoteheads, outgoing, inc=False): """Compute branchmapsummary for repo without branchmap support""" - cl = repo.changelog # 1-4b. old servers: Check for new topological heads. # Construct {old,new}map with branch = None (topological branch). # (code based on update) - oldheads = set(h for h in remoteheads if h in cl.nodemap) + knownnode = repo.changelog.hasnode # no nodemap until it is filtered + oldheads = set(h for h in remoteheads if knownnode(h)) # all nodes in outgoing.missing are children of either: # - an element of oldheads # - another element of outgoing.missing @@ -219,7 +220,8 @@ unsynced = inc and set([None]) or set() return {None: (oldheads, newheads, unsynced)} -def checkheads(repo, remote, outgoing, remoteheads, newbranch=False, inc=False): +def checkheads(repo, remote, outgoing, remoteheads, newbranch=False, inc=False, + newbookmarks=[]): """Check that a push won't add any outgoing head raise Abort error and display ui message as needed. @@ -259,6 +261,9 @@ lctx, rctx = repo[bm], repo[rnode] if bookmarks.validdest(repo, rctx, lctx): bookmarkedheads.add(lctx.node()) + else: + if bm in newbookmarks: + bookmarkedheads.add(repo[bm].node()) # 3. Check for new heads. 
# If there are more heads after the push than before, a suitable @@ -313,8 +318,8 @@ if 1 < len(newhs): dhs = list(newhs) if error is None: - error = (_("push creates multiple headed new branch '%s'") - % (branch)) + error = (_("push creates new branch '%s' " + "with multiple heads") % (branch)) hint = _("merge or" " see \"hg help push\" for details about" " pushing new heads") @@ -337,10 +342,12 @@ hint = _("merge or" " see \"hg help push\" for details about" " pushing new heads") - if branch is not None: - repo.ui.note(_("new remote heads on branch '%s'\n") % branch) + if branch is None: + repo.ui.note(_("new remote heads:\n")) + else: + repo.ui.note(_("new remote heads on branch '%s':\n") % branch) for h in dhs: - repo.ui.note(_("new remote head %s\n") % short(h)) + repo.ui.note((" %s\n") % short(h)) if error: raise util.Abort(error, hint=hint) diff -r 8c69c69dbcd2 -r fe8e254c7ad6 mercurial/dispatch.py --- a/mercurial/dispatch.py Wed Jan 01 21:46:45 2014 -0600 +++ b/mercurial/dispatch.py Tue Jan 21 14:30:06 2014 -0600 @@ -106,8 +106,9 @@ for cfg in cfgs: req.repo.ui.setconfig(*cfg) + # if we are in HGPLAIN mode, then disable custom debugging debugger = ui.config("ui", "debugger") - if not debugger: + if not debugger or ui.plain(): debugger = 'pdb' try: diff -r 8c69c69dbcd2 -r fe8e254c7ad6 mercurial/fancyopts.py --- a/mercurial/fancyopts.py Wed Jan 01 21:46:45 2014 -0600 +++ b/mercurial/fancyopts.py Tue Jan 21 14:30:06 2014 -0600 @@ -5,7 +5,8 @@ # This software may be used and distributed according to the terms of the # GNU General Public License version 2 or any later version. -import getopt, util +import getopt +import util from i18n import _ def gnugetopt(args, options, longoptions): diff -r 8c69c69dbcd2 -r fe8e254c7ad6 mercurial/fileset.py --- a/mercurial/fileset.py Wed Jan 01 21:46:45 2014 -0600 +++ b/mercurial/fileset.py Tue Jan 21 14:30:06 2014 -0600 @@ -5,7 +5,8 @@ # This software may be used and distributed according to the terms of the # GNU General Public License version 2 or any later version. -import parser, error, util, merge, re +import re +import parser, error, util, merge from i18n import _ elements = { diff -r 8c69c69dbcd2 -r fe8e254c7ad6 mercurial/hbisect.py --- a/mercurial/hbisect.py Wed Jan 01 21:46:45 2014 -0600 +++ b/mercurial/hbisect.py Tue Jan 21 14:30:06 2014 -0600 @@ -8,7 +8,8 @@ # This software may be used and distributed according to the terms of the # GNU General Public License version 2 or any later version. -import os, error +import os +import error from i18n import _ from node import short, hex import util diff -r 8c69c69dbcd2 -r fe8e254c7ad6 mercurial/help.py --- a/mercurial/help.py Wed Jan 01 21:46:45 2014 -0600 +++ b/mercurial/help.py Tue Jan 21 14:30:06 2014 -0600 @@ -6,7 +6,8 @@ # GNU General Public License version 2 or any later version. from i18n import gettext, _ -import itertools, sys, os, error +import itertools, sys, os +import error import extensions, revset, fileset, templatekw, templatefilters, filemerge import encoding, util, minirst import cmdutil diff -r 8c69c69dbcd2 -r fe8e254c7ad6 mercurial/help/config.txt --- a/mercurial/help/config.txt Wed Jan 01 21:46:45 2014 -0600 +++ b/mercurial/help/config.txt Tue Jan 21 14:30:06 2014 -0600 @@ -945,6 +945,15 @@ Phase of newly-created commits. Default: draft +``checksubrepos`` + + Check phase of state in each subrepositories, allowed values are + "ignore", "follow" or "abort". 
For settings other than "ignore", + the phase of each subrepository commit is checked before committing + in the parent repository. If there is any greater phase than the parent + ("secret" vs "draft", for example), the commit is either aborted + with "abort" or the higher phase is used with "follow". Default: "follow". + ``profiling`` ------------- diff -r 8c69c69dbcd2 -r fe8e254c7ad6 mercurial/help/hg.1.txt --- a/mercurial/help/hg.1.txt Wed Jan 01 21:46:45 2014 -0600 +++ b/mercurial/help/hg.1.txt Tue Jan 21 14:30:06 2014 -0600 @@ -112,7 +112,7 @@ Copying """"""" -Copyright (C) 2005-2013 Matt Mackall. +Copyright (C) 2005-2014 Matt Mackall. Free use of this software is granted under the terms of the GNU General Public License version 2 or any later version. diff -r 8c69c69dbcd2 -r fe8e254c7ad6 mercurial/help/hgignore.5.txt --- a/mercurial/help/hgignore.5.txt Wed Jan 01 21:46:45 2014 -0600 +++ b/mercurial/help/hgignore.5.txt Tue Jan 21 14:30:06 2014 -0600 @@ -26,7 +26,7 @@ Copying ======= This manual page is copyright 2006 Vadim Gelfer. -Mercurial is copyright 2005-2013 Matt Mackall. +Mercurial is copyright 2005-2014 Matt Mackall. Free use of this software is granted under the terms of the GNU General Public License version 2 or any later version. diff -r 8c69c69dbcd2 -r fe8e254c7ad6 mercurial/help/hgrc.5.txt --- a/mercurial/help/hgrc.5.txt Wed Jan 01 21:46:45 2014 -0600 +++ b/mercurial/help/hgrc.5.txt Tue Jan 21 14:30:06 2014 -0600 @@ -34,7 +34,7 @@ Copying ======= This manual page is copyright 2005 Bryan O'Sullivan. -Mercurial is copyright 2005-2013 Matt Mackall. +Mercurial is copyright 2005-2014 Matt Mackall. Free use of this software is granted under the terms of the GNU General Public License version 2 or any later version. diff -r 8c69c69dbcd2 -r fe8e254c7ad6 mercurial/help/patterns.txt --- a/mercurial/help/patterns.txt Wed Jan 01 21:46:45 2014 -0600 +++ b/mercurial/help/patterns.txt Tue Jan 21 14:30:06 2014 -0600 @@ -29,6 +29,11 @@ feeds. Each string read from the file is itself treated as a file pattern. +All patterns, except for ``glob:`` specified in command line (not for +``-I`` or ``-X`` options) or ``.hgignore`` file, can match also +against directories: files under matched directories are treated as +matched. + Plain examples:: path:foo/bar a name bar in a directory named foo in the root diff -r 8c69c69dbcd2 -r fe8e254c7ad6 mercurial/help/phases.txt --- a/mercurial/help/phases.txt Wed Jan 01 21:46:45 2014 -0600 +++ b/mercurial/help/phases.txt Tue Jan 21 14:30:06 2014 -0600 @@ -58,6 +58,13 @@ Servers running older versions of Mercurial are treated as publishing. +.. note:: + Changesets in secret phase are not exchanged with the server. This + applies to their content: file names, file contents, and changeset + metadata. For technical reasons, the identifier (e.g. d825e4025e39) + of the secret changeset may be communicated to the server. + + Examples ======== diff -r 8c69c69dbcd2 -r fe8e254c7ad6 mercurial/help/revisions.txt --- a/mercurial/help/revisions.txt Wed Jan 01 21:46:45 2014 -0600 +++ b/mercurial/help/revisions.txt Tue Jan 21 14:30:06 2014 -0600 @@ -14,9 +14,9 @@ Any other string is treated as a bookmark, tag, or branch name. A bookmark is a movable pointer to a revision. A tag is a permanent name -associated with a revision. A branch name denotes the tipmost revision -of that branch. Bookmark, tag, and branch names must not contain the ":" -character. +associated with a revision. 
A branch name denotes the tipmost open branch head +of that branch - or if they are all closed, the tipmost closed head of the +branch. Bookmark, tag, and branch names must not contain the ":" character. The reserved name "tip" always identifies the most recent revision. diff -r 8c69c69dbcd2 -r fe8e254c7ad6 mercurial/help/templates.txt --- a/mercurial/help/templates.txt Wed Jan 01 21:46:45 2014 -0600 +++ b/mercurial/help/templates.txt Tue Jan 21 14:30:06 2014 -0600 @@ -102,3 +102,7 @@ - Invert the firstline filter, i.e. everything but the first line:: $ hg log -r 0 --template "{sub(r'^.*\n?\n?', '', desc)}\n" + +- Display the contents of the 'extra' field, one per line:: + + $ hg log -r 0 --template "{join(extras, '\n')}\n" diff -r 8c69c69dbcd2 -r fe8e254c7ad6 mercurial/hg.py --- a/mercurial/hg.py Wed Jan 01 21:46:45 2014 -0600 +++ b/mercurial/hg.py Tue Jan 21 14:30:06 2014 -0600 @@ -202,19 +202,20 @@ hardlink = None num = 0 srcpublishing = srcrepo.ui.configbool('phases', 'publish', True) + srcvfs = scmutil.vfs(srcrepo.sharedpath) + dstvfs = scmutil.vfs(destpath) for f in srcrepo.store.copylist(): if srcpublishing and f.endswith('phaseroots'): continue - src = os.path.join(srcrepo.sharedpath, f) - dst = os.path.join(destpath, f) - dstbase = os.path.dirname(dst) - if dstbase and not os.path.exists(dstbase): - os.mkdir(dstbase) - if os.path.exists(src): - if dst.endswith('data'): + dstbase = os.path.dirname(f) + if dstbase and not dstvfs.exists(dstbase): + dstvfs.mkdir(dstbase) + if srcvfs.exists(f): + if f.endswith('data'): # lock to avoid premature writing to the target - destlock = lock.lock(os.path.join(dstbase, "lock")) - hardlink, n = util.copyfiles(src, dst, hardlink) + destlock = lock.lock(dstvfs, dstbase + "/lock") + hardlink, n = util.copyfiles(srcvfs.join(f), dstvfs.join(f), + hardlink) num += n if hardlink: ui.debug("linked %d files\n" % num) @@ -337,8 +338,8 @@ # Recomputing branch cache might be slow on big repos, # so just copy it dstcachedir = os.path.join(destpath, 'cache') - srcbranchcache = srcrepo.sjoin('cache/branchheads') - dstbranchcache = os.path.join(dstcachedir, 'branchheads') + srcbranchcache = srcrepo.sjoin('cache/branch2') + dstbranchcache = os.path.join(dstcachedir, 'branch2') if os.path.exists(srcbranchcache): if not os.path.exists(dstcachedir): os.mkdir(dstcachedir) @@ -511,11 +512,7 @@ return subreporecurse() displayer = cmdutil.show_changeset(ui, other, opts, buffered) - - # XXX once graphlog extension makes it into core, - # should be replaced by a if graph/else displaychlist(other, chlist, displayer) - displayer.close() finally: cleanupfn() diff -r 8c69c69dbcd2 -r fe8e254c7ad6 mercurial/hgweb/hgweb_mod.py --- a/mercurial/hgweb/hgweb_mod.py Wed Jan 01 21:46:45 2014 -0600 +++ b/mercurial/hgweb/hgweb_mod.py Tue Jan 21 14:30:06 2014 -0600 @@ -6,7 +6,7 @@ # This software may be used and distributed according to the terms of the # GNU General Public License version 2 or any later version. 
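The rule spelled out in revisions.txt above (a branch name resolves to the tipmost open head, falling back to the tipmost closed head when every head is closed) is what the new branchcache._branchtip implements; a minimal standalone sketch of the same selection, assuming 'heads' is ordered by increasing revision as in the branch cache::

    def branchtiphead(heads, closednodes):
        for h in reversed(heads):
            if h not in closednodes:
                return h, False          # tipmost open head
        return heads[-1], True           # every head on the branch is closed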
-import os +import os, re from mercurial import ui, hg, hook, error, encoding, templater, util, repoview from mercurial.templatefilters import websub from mercurial.i18n import _ @@ -14,7 +14,7 @@ from common import HTTP_OK, HTTP_NOT_MODIFIED, HTTP_BAD_REQUEST from common import HTTP_NOT_FOUND, HTTP_SERVER_ERROR from request import wsgirequest -import webcommands, protocol, webutil, re +import webcommands, protocol, webutil perms = { 'changegroup': 'pull', @@ -373,6 +373,7 @@ "motd": motd, "sessionvars": sessionvars, "pathdef": makebreadcrumb(req.url), + "style": style, }) return tmpl diff -r 8c69c69dbcd2 -r fe8e254c7ad6 mercurial/hgweb/hgwebdir_mod.py --- a/mercurial/hgweb/hgwebdir_mod.py Wed Jan 01 21:46:45 2014 -0600 +++ b/mercurial/hgweb/hgwebdir_mod.py Tue Jan 21 14:30:06 2014 -0600 @@ -448,7 +448,9 @@ "logourl": logourl, "logoimg": logoimg, "staticurl": staticurl, - "sessionvars": sessionvars}) + "sessionvars": sessionvars, + "style": style, + }) return tmpl def updatereqenv(self, env): diff -r 8c69c69dbcd2 -r fe8e254c7ad6 mercurial/hgweb/webcommands.py --- a/mercurial/hgweb/webcommands.py Wed Jan 01 21:46:45 2014 -0600 +++ b/mercurial/hgweb/webcommands.py Tue Jan 21 14:30:06 2014 -0600 @@ -537,18 +537,18 @@ tips = [] heads = web.repo.heads() parity = paritygen(web.stripecount) - sortkey = lambda ctx: (not ctx.closesbranch(), ctx.rev()) + sortkey = lambda item: (not item[1], item[0].rev()) def entries(limit, **map): count = 0 if not tips: - for t, n in web.repo.branchtags().iteritems(): - tips.append(web.repo[n]) - for ctx in sorted(tips, key=sortkey, reverse=True): + for tag, hs, tip, closed in web.repo.branchmap().iterbranches(): + tips.append((web.repo[tip], closed)) + for ctx, closed in sorted(tips, key=sortkey, reverse=True): if limit > 0 and count >= limit: return count += 1 - if not web.repo.branchheads(ctx.branch()): + if closed: status = 'closed' elif ctx.node() not in heads: status = 'inactive' @@ -596,8 +596,9 @@ def branches(**map): parity = paritygen(web.stripecount) - b = web.repo.branchtags() - l = [(-web.repo.changelog.rev(n), n, t) for t, n in b.iteritems()] + b = web.repo.branchmap() + l = [(-web.repo.changelog.rev(tip), tip, tag) + for tag, heads, tip, closed in b.iterbranches()] for r, n, t in sorted(l): yield {'parity': parity.next(), 'branch': t, @@ -845,15 +846,11 @@ end = min(count, start + revcount) # last rev on this page parity = paritygen(web.stripecount, offset=start - end) - def entries(latestonly, **map): + def entries(): l = [] repo = web.repo - revs = repo.changelog.revs(start, end - 1) - if latestonly: - for r in revs: - pass - revs = (r,) + revs = fctx.filelog().revs(start, end - 1) for i in revs: iterfctx = fctx.filectx(i) @@ -877,11 +874,14 @@ for e in reversed(l): yield e + entries = list(entries()) + latestentry = entries[:1] + revnav = webutil.filerevnav(web.repo, fctx.path()) nav = revnav.gen(end - 1, revcount, count) return tmpl("filelog", file=f, node=fctx.hex(), nav=nav, - entries=lambda **x: entries(latestonly=False, **x), - latestentry=lambda **x: entries(latestonly=True, **x), + entries=entries, + latestentry=latestentry, revcount=revcount, morevars=morevars, lessvars=lessvars) def archive(web, req, tmpl): diff -r 8c69c69dbcd2 -r fe8e254c7ad6 mercurial/hgweb/webutil.py --- a/mercurial/hgweb/webutil.py Wed Jan 01 21:46:45 2014 -0600 +++ b/mercurial/hgweb/webutil.py Tue Jan 21 14:30:06 2014 -0600 @@ -7,7 +7,7 @@ # GNU General Public License version 2 or any later version. 
import os, copy -from mercurial import match, patch, scmutil, error, ui, util +from mercurial import match, patch, error, ui, util, pathutil from mercurial.i18n import _ from mercurial.node import hex, nullid from common import ErrorResponse @@ -196,7 +196,7 @@ def cleanpath(repo, path): path = path.lstrip('/') - return scmutil.canonpath(repo.root, '', path) + return pathutil.canonpath(repo.root, '', path) def changeidctx (repo, changeid): try: diff -r 8c69c69dbcd2 -r fe8e254c7ad6 mercurial/localrepo.py --- a/mercurial/localrepo.py Wed Jan 01 21:46:45 2014 -0600 +++ b/mercurial/localrepo.py Tue Jan 21 14:30:06 2014 -0600 @@ -8,14 +8,15 @@ from i18n import _ import peer, changegroup, subrepo, discovery, pushkey, obsolete, repoview import changelog, dirstate, filelog, manifest, context, bookmarks, phases -import lock, transaction, store, encoding +import lock as lockmod +import transaction, store, encoding import scmutil, util, extensions, hook, error, revset import match as matchmod import merge as mergemod import tags as tagsmod from lock import release import weakref, errno, os, time, inspect -import branchmap +import branchmap, pathutil propertycache = util.propertycache filecache = scmutil.filecache @@ -166,11 +167,12 @@ self.root = self.wvfs.base self.path = self.wvfs.join(".hg") self.origroot = path - self.auditor = scmutil.pathauditor(self.root, self._checknested) + self.auditor = pathutil.pathauditor(self.root, self._checknested) self.vfs = scmutil.vfs(self.path) self.opener = self.vfs self.baseui = baseui self.ui = baseui.copy() + self.ui.copy = baseui.copy # prevent copying repo configuration # A list of callback to shape the phase if no data were found. # Callback are in the form: func(repo, roots) --> processed root. # This list it to be filled by extension during repo setup @@ -279,6 +281,9 @@ self.requirements = requirements self.sopener.options = dict((r, 1) for r in requirements if r in self.openerreqs) + chunkcachesize = self.ui.configint('format', 'chunkcachesize') + if chunkcachesize is not None: + self.sopener.options['chunkcachesize'] = chunkcachesize def _writerequirements(self): reqfile = self.opener("requires", "w") @@ -650,33 +655,17 @@ return sorted(marks) def branchmap(self): - '''returns a dictionary {branch: [branchheads]}''' + '''returns a dictionary {branch: [branchheads]} with branchheads + ordered by increasing revision number''' branchmap.updatecache(self) return self._branchcaches[self.filtername] - - def _branchtip(self, heads): - '''return the tipmost branch head in heads''' - tip = heads[-1] - for h in reversed(heads): - if not self[h].closesbranch(): - tip = h - break - return tip - def branchtip(self, branch): '''return the tip node for a given branch''' - if branch not in self.branchmap(): + try: + return self.branchmap().branchtip(branch) + except KeyError: raise error.RepoLookupError(_("unknown branch '%s'") % branch) - return self._branchtip(self.branchmap()[branch]) - - def branchtags(self): - '''return a dict where branch names map to the tipmost head of - the branch, open heads come before closed''' - bt = {} - for bn, heads in self.branchmap().iteritems(): - bt[bn] = self._branchtip(heads) - return bt def lookup(self, key): return self[key].node() @@ -832,7 +821,7 @@ renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()] rp = report and report or self.ui.warn tr = transaction.transaction(rp, self.sopener, - self.sjoin("journal"), + "journal", aftertrans(renames), self.store.createmode) self._transref = weakref.ref(tr) @@ -866,7 
+855,7 @@ try: if self.svfs.exists("journal"): self.ui.status(_("rolling back interrupted transaction\n")) - transaction.rollback(self.sopener, self.sjoin("journal"), + transaction.rollback(self.sopener, "journal", self.ui.warn) self.invalidate() return True @@ -922,7 +911,7 @@ parents = self.dirstate.parents() self.destroying() - transaction.rollback(self.sopener, self.sjoin('undo'), ui.warn) + transaction.rollback(self.sopener, 'undo', ui.warn) if self.vfs.exists('undo.bookmarks'): self.vfs.rename('undo.bookmarks', 'bookmarks') if self.svfs.exists('undo.phaseroots'): @@ -998,17 +987,18 @@ pass self.invalidatecaches() - def _lock(self, lockname, wait, releasefn, acquirefn, desc): + def _lock(self, vfs, lockname, wait, releasefn, acquirefn, desc): try: - l = lock.lock(lockname, 0, releasefn, desc=desc) + l = lockmod.lock(vfs, lockname, 0, releasefn, desc=desc) except error.LockHeld, inst: if not wait: raise self.ui.warn(_("waiting for lock on %s held by %r\n") % (desc, inst.locker)) # default to 600 seconds timeout - l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")), - releasefn, desc=desc) + l = lockmod.lock(vfs, lockname, + int(self.ui.config("ui", "timeout", "600")), + releasefn, desc=desc) if acquirefn: acquirefn() return l @@ -1041,7 +1031,7 @@ continue ce.refresh() - l = self._lock(self.sjoin("lock"), wait, unlock, + l = self._lock(self.svfs, "lock", wait, unlock, self.invalidate, _('repository %s') % self.origroot) self._lockref = weakref.ref(l) return l @@ -1059,7 +1049,7 @@ self.dirstate.write() self._filecache['dirstate'].refresh() - l = self._lock(self.join("wlock"), wait, unlock, + l = self._lock(self.vfs, "wlock", wait, unlock, self.invalidatedirstate, _('working directory of %s') % self.origroot) self._wlockref = weakref.ref(l) @@ -1379,7 +1369,7 @@ parent2=xp2, pending=p) self.changelog.finalize(trp) # set the new commit is proper phase - targetphase = phases.newcommitphase(self.ui) + targetphase = subrepo.newcommitphase(self.ui, ctx) if targetphase: # retract boundary do not alter parent changeset. # if a parent have higher the resulting phase will @@ -1621,13 +1611,11 @@ if branch not in branches: return [] # the cache returns heads ordered lowest to highest - bheads = list(reversed(branches[branch])) + bheads = list(reversed(branches.branchheads(branch, closed=closed))) if start is not None: # filter out the heads that cannot be reached from startrev fbheads = set(self.changelog.nodesbetween([start], bheads)[2]) bheads = [h for h in bheads if h in fbheads] - if not closed: - bheads = [h for h in bheads if not self[h].closesbranch()] return bheads def branches(self, nodes): @@ -1678,12 +1666,11 @@ trname = 'pull\n' + util.hidepassword(remote.url()) lock = self.lock() try: - tmp = discovery.findcommonincoming(self, remote, heads=heads, - force=force) + tmp = discovery.findcommonincoming(self.unfiltered(), remote, + heads=heads, force=force) common, fetch, rheads = tmp if not fetch: self.ui.status(_("no changes found\n")) - added = [] result = 0 else: tr = self.transaction(trname) @@ -1705,20 +1692,13 @@ "changegroupsubset.")) else: cg = remote.changegroupsubset(fetch, heads, 'pull') - # we use unfiltered changelog here because hidden revision must - # be taken in account for phase synchronization. They may - # becomes public and becomes visible again. 
- cl = self.unfiltered().changelog - clstart = len(cl) result = self.addchangegroup(cg, 'pull', remote.url()) - clend = len(cl) - added = [cl.node(r) for r in xrange(clstart, clend)] # compute target subset if heads is None: # We pulled every thing possible # sync on everything common - subset = common + added + subset = common + rheads else: # We pulled a specific subset # sync on this subset @@ -1861,14 +1841,16 @@ raise util.Abort(_(mst) % (ctx.troubles()[0], ctx)) + newbm = self.ui.configlist('bookmarks', 'pushing') discovery.checkheads(unfi, remote, outgoing, remoteheads, newbranch, - bool(inc)) + bool(inc), newbm) # TODO: get bundlecaps from remote bundlecaps = None # create a changegroup from local - if revs is None and not outgoing.excluded: + if revs is None and not (outgoing.excluded + or self.changelog.filteredrevs): # push everything, # use the fast path, no race possible on push bundler = changegroup.bundle10(self, bundlecaps) @@ -1976,27 +1958,7 @@ if locallock is not None: locallock.release() - self.ui.debug("checking for updated bookmarks\n") - rb = remote.listkeys('bookmarks') - revnums = map(unfi.changelog.rev, revs or []) - ancestors = [ - a for a in unfi.changelog.ancestors(revnums, inclusive=True)] - for k in rb.keys(): - if k in unfi._bookmarks: - nr, nl = rb[k], hex(self._bookmarks[k]) - if nr in unfi: - cr = unfi[nr] - cl = unfi[nl] - if bookmarks.validdest(unfi, cr, cl): - if ancestors and cl.rev() not in ancestors: - continue - r = remote.pushkey('bookmarks', k, nr, nl) - if r: - self.ui.status(_("updating bookmark %s\n") % k) - else: - self.ui.warn(_('updating bookmark %s' - ' failed!\n') % k) - + bookmarks.updateremote(self.ui, unfi, remote, revs) return ret def changegroupinfo(self, nodes, source): @@ -2025,8 +1987,10 @@ bases = [nullid] # TODO: remove call to nodesbetween. csets, bases, heads = cl.nodesbetween(bases, heads) - bases = [p for n in bases for p in cl.parents(n) if p != nullid] - outgoing = discovery.outgoing(cl, bases, heads) + discbases = [] + for n in bases: + discbases.extend([p for p in cl.parents(n) if p != nullid]) + outgoing = discovery.outgoing(cl, discbases, heads) bundler = changegroup.bundle10(self) return self._changegroupsubset(outgoing, bundler, source) diff -r 8c69c69dbcd2 -r fe8e254c7ad6 mercurial/lock.py --- a/mercurial/lock.py Wed Jan 01 21:46:45 2014 -0600 +++ b/mercurial/lock.py Tue Jan 21 14:30:06 2014 -0600 @@ -29,7 +29,8 @@ _host = None - def __init__(self, file, timeout=-1, releasefn=None, desc=None): + def __init__(self, vfs, file, timeout=-1, releasefn=None, desc=None): + self.vfs = vfs self.f = file self.held = 0 self.timeout = timeout @@ -75,13 +76,14 @@ lockname = '%s:%s' % (lock._host, self.pid) while not self.held: try: - util.makelock(lockname, self.f) + self.vfs.makelock(lockname, self.f) self.held = 1 except (OSError, IOError), why: if why.errno == errno.EEXIST: locker = self.testlock() if locker is not None: - raise error.LockHeld(errno.EAGAIN, self.f, self.desc, + raise error.LockHeld(errno.EAGAIN, + self.vfs.join(self.f), self.desc, locker) else: raise error.LockUnavailable(why.errno, why.strerror, @@ -99,7 +101,7 @@ """ try: - locker = util.readlock(self.f) + locker = self.vfs.readlock(self.f) except (OSError, IOError), why: if why.errno == errno.ENOENT: return None @@ -119,8 +121,8 @@ # if locker dead, break lock. must do this with another lock # held, or can race and break valid lock. 
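With the signature change above, lock names are resolved through a vfs instead of being pre-joined paths; a minimal usage sketch under the new API (the repository path is hypothetical, and real callers normally go through localrepo._lock as shown in the hunks above)::

    from mercurial import scmutil
    from mercurial import lock as lockmod

    vfs = scmutil.vfs('/path/to/repo/.hg')        # hypothetical location
    l = lockmod.lock(vfs, "wlock", timeout=600,
                     desc="working directory of /path/to/repo")
    try:
        pass  # work while the lock is held
    finally:
        l.release()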
try: - l = lock(self.f + '.break', timeout=0) - util.unlink(self.f) + l = lock(self.vfs, self.f + '.break', timeout=0) + self.vfs.unlink(self.f) l.release() except error.LockError: return locker @@ -140,7 +142,7 @@ if self.releasefn: self.releasefn() try: - util.unlink(self.f) + self.vfs.unlink(self.f) except OSError: pass for callback in self.postrelease: diff -r 8c69c69dbcd2 -r fe8e254c7ad6 mercurial/manifest.py --- a/mercurial/manifest.py Wed Jan 01 21:46:45 2014 -0600 +++ b/mercurial/manifest.py Tue Jan 21 14:30:06 2014 -0600 @@ -30,8 +30,9 @@ class manifest(revlog.revlog): def __init__(self, opener): - # we expect to deal with not more than three revs at a time in merge - self._mancache = util.lrucachedict(3) + # we expect to deal with not more than four revs at a time, + # during a commit --amend + self._mancache = util.lrucachedict(4) revlog.revlog.__init__(self, opener, "00manifest.i") def parse(self, lines): diff -r 8c69c69dbcd2 -r fe8e254c7ad6 mercurial/match.py --- a/mercurial/match.py Wed Jan 01 21:46:45 2014 -0600 +++ b/mercurial/match.py Tue Jan 21 14:30:06 2014 -0600 @@ -6,7 +6,7 @@ # GNU General Public License version 2 or any later version. import re -import scmutil, util, fileset +import util, fileset, pathutil from i18n import _ def _rematcher(pat): @@ -317,7 +317,7 @@ pats = [] for kind, name in [_patsplit(p, default) for p in names]: if kind in ('glob', 'relpath'): - name = scmutil.canonpath(root, cwd, name, auditor) + name = pathutil.canonpath(root, cwd, name, auditor) elif kind in ('relglob', 'path'): name = util.normpath(name) elif kind in ('listfile', 'listfile0'): diff -r 8c69c69dbcd2 -r fe8e254c7ad6 mercurial/mdiff.py --- a/mercurial/mdiff.py Wed Jan 01 21:46:45 2014 -0600 +++ b/mercurial/mdiff.py Tue Jan 21 14:30:06 2014 -0600 @@ -6,8 +6,8 @@ # GNU General Public License version 2 or any later version. from i18n import _ -import bdiff, mpatch, util -import re, struct, base85, zlib +import bdiff, mpatch, util, base85 +import re, struct, zlib def splitnewlines(text): '''like str.splitlines, but only split on newlines.''' diff -r 8c69c69dbcd2 -r fe8e254c7ad6 mercurial/merge.py --- a/mercurial/merge.py Wed Jan 01 21:46:45 2014 -0600 +++ b/mercurial/merge.py Tue Jan 21 14:30:06 2014 -0600 @@ -679,8 +679,16 @@ wlock = repo.wlock() try: wc = repo[None] + pl = wc.parents() + p1 = pl[0] + pa = None + if ancestor: + pa = repo[ancestor] + if node is None: - # tip of current branch + # Here is where we should consider bookmarks, divergent bookmarks, + # foreground changesets (successors), and tip of current branch; + # but currently we are only checking the branch tips. try: node = repo.branchtip(wc.branch()) except error.RepoLookupError: @@ -688,12 +696,38 @@ node = repo.lookup("tip") # update to tip else: raise util.Abort(_("branch %s not found") % wc.branch()) + + if p1.obsolete() and not p1.children(): + # allow updating to successors + successors = obsolete.successorssets(repo, p1.node()) + + # behavior of certain cases is as follows, + # + # divergent changesets: update to highest rev, similar to what + # is currently done when there are more than one head + # (i.e. 
'tip') + # + # replaced changesets: same as divergent except we know there + # is no conflict + # + # pruned changeset: no update is done; though, we could + # consider updating to the first non-obsolete parent, + # similar to what is current done for 'hg prune' + + if successors: + # flatten the list here handles both divergent (len > 1) + # and the usual case (len = 1) + successors = [n for sub in successors for n in sub] + + # get the max revision for the given successors set, + # i.e. the 'tip' of a set + node = repo.revs("max(%ln)", successors)[0] + pa = p1 + overwrite = force and not branchmerge - pl = wc.parents() - p1, p2 = pl[0], repo[node] - if ancestor: - pa = repo[ancestor] - else: + + p2 = repo[node] + if pa is None: pa = p1.ancestor(p2) fp1, fp2, xp1, xp2 = p1.node(), p2.node(), str(p1), str(p2) diff -r 8c69c69dbcd2 -r fe8e254c7ad6 mercurial/obsolete.py --- a/mercurial/obsolete.py Wed Jan 01 21:46:45 2014 -0600 +++ b/mercurial/obsolete.py Tue Jan 21 14:30:06 2014 -0600 @@ -84,6 +84,7 @@ """ import struct import util, base85, node +import phases from i18n import _ _pack = struct.pack @@ -196,6 +197,14 @@ self._data = data self._decodedmeta = None + def __hash__(self): + return hash(self._data) + + def __eq__(self, other): + if type(other) != type(self): + return False + return self._data == other._data + def precnode(self): """Precursor changeset node identifier""" return self._data[0] @@ -268,7 +277,11 @@ if not _enabled: raise util.Abort('obsolete feature is not enabled on this repo') known = set(self._all) - new = [m for m in markers if m not in known] + new = [] + for m in markers: + if m not in known: + known.add(m) + new.append(m) if new: f = self.sopener('obsstore', 'ab') try: @@ -428,14 +441,15 @@ Some successors may be unknown locally. - This is a linear yield unsuited to detecting split changesets.""" + This is a linear yield unsuited to detecting split changesets. It includes + initial nodes too.""" remaining = set(nodes) seen = set(remaining) while remaining: current = remaining.pop() yield current for mark in obsstore.successors.get(current, ()): - # ignore marker flagged with with specified flag + # ignore marker flagged with specified flag if mark[2] & ignoreflags: continue for suc in mark[1]: @@ -443,6 +457,28 @@ seen.add(suc) remaining.add(suc) +def allprecursors(obsstore, nodes, ignoreflags=0): + """Yield node for every precursors of . + + Some precursors may be unknown locally. + + This is a linear yield unsuited to detecting folded changesets. It includes + initial nodes too.""" + + remaining = set(nodes) + seen = set(remaining) + while remaining: + current = remaining.pop() + yield current + for mark in obsstore.precursors.get(current, ()): + # ignore marker flagged with specified flag + if mark[2] & ignoreflags: + continue + suc = mark[0] + if suc not in seen: + seen.add(suc) + remaining.add(suc) + def foreground(repo, nodes): """return all nodes in the "foreground" of other node @@ -473,29 +509,41 @@ def successorssets(repo, initialnode, cache=None): """Return all set of successors of initial nodes - Successors set of changeset A are a group of revision that succeed A. It - succeed A as a consistent whole, each revision being only partial - replacement. Successors set contains non-obsolete changeset only. + The successors set of a changeset A are a group of revisions that succeed + A. It succeeds A as a consistent whole, each revision being only a partial + replacement. The successors set contains non-obsolete changesets only. 
-    In most cases a changeset A have zero (changeset pruned) or a single
-    successors set that contains a single successor (changeset A replaced by
-    A')
+    This function returns the full list of successor sets which is why it
+    returns a list of tuples and not just a single tuple. Each tuple is a valid
+    successors set. Note that (A,) may be a valid successors set for changeset A
+    (see below).
 
-    When changeset is split, it results successors set containing more than
-    a single element. Divergent rewriting will result in multiple successors
-    sets.
+    In most cases, a changeset A will have a single element (e.g. the changeset
+    A is replaced by A') in its successors set. However, it is also common for a
+    changeset A to have no elements in its successor set (e.g. the changeset
+    has been pruned). Therefore, the returned list of successors sets will be
+    [(A',)] or [], respectively.
 
-    They are returned as a list of tuples containing all valid successors sets.
+    When a changeset A is split into A' and B', however, it will result in a
+    successors set containing more than a single element, i.e. [(A',B')].
+    Divergent changesets will result in multiple successors sets, i.e. [(A',),
+    (A'')].
 
-    Final successors unknown locally are considered plain prune (obsoleted
-    without successors).
+    If a changeset A is not obsolete, then it will conceptually have no
+    successors set. To distinguish this from a pruned changeset, the successor
+    set will only contain itself, i.e. [(A,)].
 
-    The optional `cache` parameter is a dictionary that may contains
-    precomputed successors sets. It is meant to reuse the computation of
-    previous call to `successorssets` when multiple calls are made at the same
-    time. The cache dictionary is updated in place. The caller is responsible
-    for its live spawn. Code that makes multiple calls to `successorssets`
-    *must* use this cache mechanism or suffer terrible performances."""
+    Finally, successors unknown locally are considered to be pruned (obsoleted
+    without any successors).
+
+    The optional `cache` parameter is a dictionary that may contain precomputed
+    successors sets. It is meant to reuse the computation of a previous call to
+    `successorssets` when multiple calls are made at the same time. The cache
+    dictionary is updated in place. The caller is responsible for its life
+    span. Code that makes multiple calls to `successorssets` *must* use this
+    cache mechanism or suffer terrible performance.
+ + """ succmarkers = repo.obsstore.successors @@ -751,14 +799,26 @@ @cachefor('bumped') def _computebumpedset(repo): """the set of revs trying to obsolete public revisions""" - # get all possible bumped changesets - tonode = repo.changelog.node - publicnodes = (tonode(r) for r in repo.revs('public()')) - successors = allsuccessors(repo.obsstore, publicnodes, - ignoreflags=bumpedfix) - # revision public or already obsolete don't count as bumped - query = '%ld - obsolete() - public()' - return set(repo.revs(query, _knownrevs(repo, successors))) + bumped = set() + # utils function (avoid attribut lookup in the loop) + phase = repo._phasecache.phase # would be faster to grab the full list + public = phases.public + cl = repo.changelog + torev = cl.nodemap.get + obs = getrevs(repo, 'obsolete') + for rev in repo: + # We only evaluate mutable, non-obsolete revision + if (public < phase(repo, rev)) and (rev not in obs): + node = cl.node(rev) + # (future) A cache of precursors may worth if split is very common + for pnode in allprecursors(repo.obsstore, [node], + ignoreflags=bumpedfix): + prev = torev(pnode) # unfiltered! but so is phasecache + if (prev is not None) and (phase(repo, prev) <= public): + # we have a public precursors + bumped.add(rev) + break # Next draft! + return bumped @cachefor('divergent') def _computedivergentset(repo): diff -r 8c69c69dbcd2 -r fe8e254c7ad6 mercurial/parsers.c --- a/mercurial/parsers.c Wed Jan 01 21:46:45 2014 -0600 +++ b/mercurial/parsers.c Tue Jan 21 14:30:06 2014 -0600 @@ -926,8 +926,13 @@ static int nt_init(indexObject *self) { if (self->nt == NULL) { + if (self->raw_length > INT_MAX) { + PyErr_SetString(PyExc_ValueError, "overflow in nt_init"); + return -1; + } self->ntcapacity = self->raw_length < 4 - ? 4 : self->raw_length / 2; + ? 4 : (int)self->raw_length / 2; + self->nt = calloc(self->ntcapacity, sizeof(nodetree)); if (self->nt == NULL) { PyErr_NoMemory(); diff -r 8c69c69dbcd2 -r fe8e254c7ad6 mercurial/patch.py --- a/mercurial/patch.py Wed Jan 01 21:46:45 2014 -0600 +++ b/mercurial/patch.py Tue Jan 21 14:30:06 2014 -0600 @@ -16,7 +16,6 @@ from i18n import _ from node import hex, short import base85, mdiff, scmutil, util, diffhelpers, copies, encoding, error -import context gitre = re.compile('diff --git a/(.*) b/(.*)') @@ -722,8 +721,9 @@ if self.remove: self.backend.unlink(self.fname) else: - self.lines[:] = h.new() - self.offset += len(h.new()) + l = h.new(self.lines) + self.lines[:] = l + self.offset += len(l) self.dirty = True return 0 @@ -1017,9 +1017,10 @@ return old, oldstart, new, newstart class binhunk(object): - 'A binary patch file. Only understands literals so far.' + 'A binary patch file.' 
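
As a concrete reading of the successorssets docstring above, the sketch below maps each possible return shape to the fate of the changeset. It operates on plain lists of tuples with made-up node names and never touches a real obsstore:

def describe_fate(node, succsets):
    """Interpret the return value of successorssets() for 'node'."""
    if not succsets:
        return 'pruned (obsoleted without successors)'
    if len(succsets) > 1:
        return 'divergent rewriting: %d competing successors sets' % len(succsets)
    only = succsets[0]
    if only == (node,):
        return 'not obsolete (the set contains only the changeset itself)'
    if len(only) == 1:
        return 'replaced by %s' % only[0]
    return 'split into %s' % ', '.join(only)

# illustration with hypothetical nodes
print(describe_fate('A', []))                  # pruned
print(describe_fate('A', [('A',)]))            # not obsolete
print(describe_fate('A', [('A1',)]))           # replaced
print(describe_fate('A', [('A1', 'B1')]))      # split
print(describe_fate('A', [('A1',), ('A2',)]))  # divergent
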
def __init__(self, lr, fname): self.text = None + self.delta = False self.hunk = ['GIT binary patch\n'] self._fname = fname self._read(lr) @@ -1027,7 +1028,9 @@ def complete(self): return self.text is not None - def new(self): + def new(self, lines): + if self.delta: + return [applybindelta(self.text, ''.join(lines))] return [self.text] def _read(self, lr): @@ -1036,14 +1039,19 @@ hunk.append(l) return l.rstrip('\r\n') + size = 0 while True: line = getline(lr, self.hunk) if not line: raise PatchError(_('could not extract "%s" binary data') % self._fname) if line.startswith('literal '): + size = int(line[8:].rstrip()) break - size = int(line[8:].rstrip()) + if line.startswith('delta '): + size = int(line[6:].rstrip()) + self.delta = True + break dec = [] line = getline(lr, self.hunk) while len(line) > 1: @@ -1266,6 +1274,62 @@ gp = gitpatches.pop() yield 'file', ('a/' + gp.path, 'b/' + gp.path, None, gp.copy()) +def applybindelta(binchunk, data): + """Apply a binary delta hunk + The algorithm used is the algorithm from git's patch-delta.c + """ + def deltahead(binchunk): + i = 0 + for c in binchunk: + i += 1 + if not (ord(c) & 0x80): + return i + return i + out = "" + s = deltahead(binchunk) + binchunk = binchunk[s:] + s = deltahead(binchunk) + binchunk = binchunk[s:] + i = 0 + while i < len(binchunk): + cmd = ord(binchunk[i]) + i += 1 + if (cmd & 0x80): + offset = 0 + size = 0 + if (cmd & 0x01): + offset = ord(binchunk[i]) + i += 1 + if (cmd & 0x02): + offset |= ord(binchunk[i]) << 8 + i += 1 + if (cmd & 0x04): + offset |= ord(binchunk[i]) << 16 + i += 1 + if (cmd & 0x08): + offset |= ord(binchunk[i]) << 24 + i += 1 + if (cmd & 0x10): + size = ord(binchunk[i]) + i += 1 + if (cmd & 0x20): + size |= ord(binchunk[i]) << 8 + i += 1 + if (cmd & 0x40): + size |= ord(binchunk[i]) << 16 + i += 1 + if size == 0: + size = 0x10000 + offset_end = offset + size + out += data[offset:offset_end] + elif cmd != 0: + offset_end = i + cmd + out += binchunk[i:offset_end] + i += cmd + else: + raise PatchError(_('unexpected delta opcode 0')) + return out + def applydiff(ui, fp, backend, store, strip=1, eolmode='strict'): """Reads a patch from fp and tries to apply it. @@ -1441,21 +1505,6 @@ backend = repobackend(ui, repo, ctx, store) return patchbackend(ui, backend, patchobj, strip, files, eolmode) -def makememctx(repo, parents, text, user, date, branch, files, store, - editor=None): - def getfilectx(repo, memctx, path): - data, (islink, isexec), copied = store.getfile(path) - return context.memfilectx(path, data, islink=islink, isexec=isexec, - copied=copied) - extra = {} - if branch: - extra['branch'] = encoding.fromlocal(branch) - ctx = context.memctx(repo, parents, text, files, getfilectx, user, - date, extra) - if editor: - ctx._text = editor(repo, ctx, []) - return ctx - def patch(ui, repo, patchname, strip=1, files=None, eolmode='strict', similarity=0): """Apply to the working directory. diff -r 8c69c69dbcd2 -r fe8e254c7ad6 mercurial/pathutil.py --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/mercurial/pathutil.py Tue Jan 21 14:30:06 2014 -0600 @@ -0,0 +1,144 @@ +import os, errno, stat + +import util +from i18n import _ + +class pathauditor(object): + '''ensure that a filesystem path contains no banned components. + the following properties of a path are checked: + + - ends with a directory separator + - under top-level .hg + - starts at the root of a windows drive + - contains ".." + - traverses a symlink (e.g. 
a/symlink_here/b) + - inside a nested repository (a callback can be used to approve + some nested repositories, e.g., subrepositories) + ''' + + def __init__(self, root, callback=None): + self.audited = set() + self.auditeddir = set() + self.root = root + self.callback = callback + if os.path.lexists(root) and not util.checkcase(root): + self.normcase = util.normcase + else: + self.normcase = lambda x: x + + def __call__(self, path): + '''Check the relative path. + path may contain a pattern (e.g. foodir/**.txt)''' + + path = util.localpath(path) + normpath = self.normcase(path) + if normpath in self.audited: + return + # AIX ignores "/" at end of path, others raise EISDIR. + if util.endswithsep(path): + raise util.Abort(_("path ends in directory separator: %s") % path) + parts = util.splitpath(path) + if (os.path.splitdrive(path)[0] + or parts[0].lower() in ('.hg', '.hg.', '') + or os.pardir in parts): + raise util.Abort(_("path contains illegal component: %s") % path) + if '.hg' in path.lower(): + lparts = [p.lower() for p in parts] + for p in '.hg', '.hg.': + if p in lparts[1:]: + pos = lparts.index(p) + base = os.path.join(*parts[:pos]) + raise util.Abort(_("path '%s' is inside nested repo %r") + % (path, base)) + + normparts = util.splitpath(normpath) + assert len(parts) == len(normparts) + + parts.pop() + normparts.pop() + prefixes = [] + while parts: + prefix = os.sep.join(parts) + normprefix = os.sep.join(normparts) + if normprefix in self.auditeddir: + break + curpath = os.path.join(self.root, prefix) + try: + st = os.lstat(curpath) + except OSError, err: + # EINVAL can be raised as invalid path syntax under win32. + # They must be ignored for patterns can be checked too. + if err.errno not in (errno.ENOENT, errno.ENOTDIR, errno.EINVAL): + raise + else: + if stat.S_ISLNK(st.st_mode): + raise util.Abort( + _('path %r traverses symbolic link %r') + % (path, prefix)) + elif (stat.S_ISDIR(st.st_mode) and + os.path.isdir(os.path.join(curpath, '.hg'))): + if not self.callback or not self.callback(curpath): + raise util.Abort(_("path '%s' is inside nested " + "repo %r") + % (path, prefix)) + prefixes.append(normprefix) + parts.pop() + normparts.pop() + + self.audited.add(normpath) + # only add prefixes to the cache after checking everything: we don't + # want to add "foo/bar/baz" before checking if there's a "foo/.hg" + self.auditeddir.update(prefixes) + + def check(self, path): + try: + self(path) + return True + except (OSError, util.Abort): + return False + +def canonpath(root, cwd, myname, auditor=None): + '''return the canonical path of myname, given cwd and root''' + if util.endswithsep(root): + rootsep = root + else: + rootsep = root + os.sep + name = myname + if not os.path.isabs(name): + name = os.path.join(root, cwd, name) + name = os.path.normpath(name) + if auditor is None: + auditor = pathauditor(root) + if name != rootsep and name.startswith(rootsep): + name = name[len(rootsep):] + auditor(name) + return util.pconvert(name) + elif name == root: + return '' + else: + # Determine whether `name' is in the hierarchy at or beneath `root', + # by iterating name=dirname(name) until that causes no change (can't + # check name == '/', because that doesn't work on windows). The list + # `rel' holds the reversed list of components making up the relative + # file name we want. 
+ rel = [] + while True: + try: + s = util.samefile(name, root) + except OSError: + s = False + if s: + if not rel: + # name was actually the same as root (maybe a symlink) + return '' + rel.reverse() + name = os.path.join(*rel) + auditor(name) + return util.pconvert(name) + dirname, basename = util.split(name) + rel.append(basename) + if dirname == name: + break + name = dirname + + raise util.Abort(_("%s not under root '%s'") % (myname, root)) diff -r 8c69c69dbcd2 -r fe8e254c7ad6 mercurial/posix.py --- a/mercurial/posix.py Wed Jan 01 21:46:45 2014 -0600 +++ b/mercurial/posix.py Tue Jan 21 14:30:06 2014 -0600 @@ -197,7 +197,6 @@ return path.lower() if sys.platform == 'darwin': - import fcntl # only needed on darwin, missing on jython def normcase(path): ''' @@ -265,51 +264,6 @@ # Decompose then lowercase (HFS+ technote specifies lower) return unicodedata.normalize('NFD', u).lower().encode('utf-8') - def realpath(path): - ''' - Returns the true, canonical file system path equivalent to the given - path. - - Equivalent means, in this case, resulting in the same, unique - file system link to the path. Every file system entry, whether a file, - directory, hard link or symbolic link or special, will have a single - path preferred by the system, but may allow multiple, differing path - lookups to point to it. - - Most regular UNIX file systems only allow a file system entry to be - looked up by its distinct path. Obviously, this does not apply to case - insensitive file systems, whether case preserving or not. The most - complex issue to deal with is file systems transparently reencoding the - path, such as the non-standard Unicode normalisation required for HFS+ - and HFSX. - ''' - # Constants copied from /usr/include/sys/fcntl.h - F_GETPATH = 50 - O_SYMLINK = 0x200000 - - try: - fd = os.open(path, O_SYMLINK) - except OSError, err: - if err.errno == errno.ENOENT: - return path - raise - - try: - return fcntl.fcntl(fd, F_GETPATH, '\0' * 1024).rstrip('\0') - finally: - os.close(fd) -elif sys.version_info < (2, 4, 2, 'final'): - # Workaround for http://bugs.python.org/issue1213894 (os.path.realpath - # didn't resolve symlinks that were the first component of the path.) - def realpath(path): - if os.path.isabs(path): - return os.path.realpath(path) - else: - return os.path.realpath('./' + path) -else: - # Fallback to the likely inadequate Python builtin function. 
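
The applybindelta hunk in mercurial/patch.py above decodes git binary deltas: two base-128 size varints, then a stream of copy opcodes (high bit set, flag bits selecting offset and size bytes) and literal opcodes (the low seven bits give the byte count). Below is a compact, standalone re-expression of that loop applied to a hand-built delta; apply_delta and the sample bytes are illustrative only and are not part of Mercurial:

def apply_delta(binchunk, source):
    """Minimal re-expression of the git delta decoding in applybindelta."""
    def skip_varint(data, i):
        # the source and target sizes are little-endian base-128 varints
        while ord(data[i]) & 0x80:
            i += 1
        return i + 1
    i = skip_varint(binchunk, 0)   # skip the source size
    i = skip_varint(binchunk, i)   # skip the target size
    out = ""
    while i < len(binchunk):
        cmd = ord(binchunk[i])
        i += 1
        if cmd & 0x80:
            # copy opcode: bits 0-3 select offset bytes, bits 4-6 size bytes
            offset = size = 0
            for bit, shift in ((0x01, 0), (0x02, 8), (0x04, 16), (0x08, 24)):
                if cmd & bit:
                    offset |= ord(binchunk[i]) << shift
                    i += 1
            for bit, shift in ((0x10, 0), (0x20, 8), (0x40, 16)):
                if cmd & bit:
                    size |= ord(binchunk[i]) << shift
                    i += 1
            if size == 0:
                size = 0x10000
            out += source[offset:offset + size]
        elif cmd:
            # literal opcode: cmd bytes of new data follow
            out += binchunk[i:i + cmd]
            i += cmd
        else:
            raise ValueError('unexpected delta opcode 0')
    return out

# hand-built delta: copy 5 bytes from offset 0 of the source, then add "XYZ"
source = "hello world"
delta = "\x0b" + "\x08" + "\x90\x05" + "\x03XYZ"
print(apply_delta(delta, source))   # helloXYZ
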
- realpath = os.path.realpath - if sys.platform == 'cygwin': # workaround for cygwin, in which mount point part of path is # treated as case sensitive, even though underlying NTFS is case diff -r 8c69c69dbcd2 -r fe8e254c7ad6 mercurial/repair.py --- a/mercurial/repair.py Wed Jan 01 21:46:45 2014 -0600 +++ b/mercurial/repair.py Tue Jan 21 14:30:06 2014 -0600 @@ -38,16 +38,8 @@ """return the changesets which will be broken by the truncation""" s = set() def collectone(revlog): - linkgen = (revlog.linkrev(i) for i in revlog) - # find the truncation point of the revlog - for lrev in linkgen: - if lrev >= striprev: - break - # see if any revision after this point has a linkrev - # less than striprev (those will be broken by strip) - for lrev in linkgen: - if lrev < striprev: - s.add(lrev) + _, brokenset = revlog.getstrippoint(striprev) + s.update([revlog.linkrev(r) for r in brokenset]) collectone(repo.manifest) for fname in files: diff -r 8c69c69dbcd2 -r fe8e254c7ad6 mercurial/repoview.py --- a/mercurial/repoview.py Wed Jan 01 21:46:45 2014 -0600 +++ b/mercurial/repoview.py Tue Jan 21 14:30:06 2014 -0600 @@ -94,20 +94,15 @@ return frozenset(xrange(firstmutable, len(cl))) # function to compute filtered set +# +# When addding a new filter you MUST update the table at: +# mercurial.branchmap.subsettable +# Otherwise your filter will have to recompute all its branches cache +# from scratch (very slow). filtertable = {'visible': computehidden, 'served': computeunserved, 'immutable': computemutable, 'base': computeimpactable} -### Nearest subset relation -# Nearest subset of filter X is a filter Y so that: -# * Y is included in X, -# * X - Y is as small as possible. -# This create and ordering used for branchmap purpose. -# the ordering may be partial -subsettable = {None: 'visible', - 'visible': 'served', - 'served': 'immutable', - 'immutable': 'base'} def filterrevs(repo, filtername): """returns set of filtered revision for this filter name""" @@ -215,4 +210,3 @@ @property def requirements(self): return self._unfilteredrepo.requirements - diff -r 8c69c69dbcd2 -r fe8e254c7ad6 mercurial/revlog.py --- a/mercurial/revlog.py Wed Jan 01 21:46:45 2014 -0600 +++ b/mercurial/revlog.py Tue Jan 21 14:30:06 2014 -0600 @@ -202,6 +202,7 @@ self._cache = None self._basecache = None self._chunkcache = (0, '') + self._chunkcachesize = 65536 self.index = [] self._pcache = {} self._nodecache = {nullid: nullrev} @@ -215,6 +216,15 @@ v |= REVLOGGENERALDELTA else: v = 0 + if 'chunkcachesize' in opts: + self._chunkcachesize = opts['chunkcachesize'] + + if self._chunkcachesize <= 0: + raise RevlogError(_('revlog chunk cache size %r is not greater ' + 'than 0') % self._chunkcachesize) + elif self._chunkcachesize & (self._chunkcachesize - 1): + raise RevlogError(_('revlog chunk cache size %r is not a power ' + 'of 2') % self._chunkcachesize) i = '' self._initempty = True @@ -401,7 +411,29 @@ heads = [self.rev(n) for n in heads] # we want the ancestors, but inclusive - has = set(self.ancestors(common)) + class lazyset(object): + def __init__(self, lazyvalues): + self.addedvalues = set() + self.lazyvalues = lazyvalues + + def __contains__(self, value): + return value in self.addedvalues or value in self.lazyvalues + + def __iter__(self): + added = self.addedvalues + for r in added: + yield r + for r in self.lazyvalues: + if not r in added: + yield r + + def add(self, value): + self.addedvalues.add(value) + + def update(self, values): + self.addedvalues.update(values) + + has = lazyset(self.ancestors(common)) has.add(nullrev) 
has.update(common) @@ -820,13 +852,19 @@ else: df = self.opener(self.datafile) - readahead = max(65536, length) - df.seek(offset) - d = df.read(readahead) + # Cache data both forward and backward around the requested + # data, in a fixed size window. This helps speed up operations + # involving reading the revlog backwards. + cachesize = self._chunkcachesize + realoffset = offset & ~(cachesize - 1) + reallength = (((offset + length + cachesize) & ~(cachesize - 1)) + - realoffset) + df.seek(realoffset) + d = df.read(reallength) df.close() - self._addchunk(offset, d) - if readahead > length: - return util.buffer(d, 0, length) + self._addchunk(realoffset, d) + if offset != realoffset or reallength != length: + return util.buffer(d, offset - realoffset, length) return d def _getchunk(self, offset, length): @@ -1168,6 +1206,15 @@ self.nodemap[node] = curr entry = self._io.packentry(e, self.node, self.version, curr) + self._writeentry(transaction, ifh, dfh, entry, data, link, offset) + + if type(text) == str: # only accept immutable objects + self._cache = (node, curr, text) + self._basecache = (curr, chainbase) + return node + + def _writeentry(self, transaction, ifh, dfh, entry, data, link, offset): + curr = len(self) - 1 if not self._inline: transaction.add(self.datafile, offset) transaction.add(self.indexfile, curr * len(entry)) @@ -1184,11 +1231,6 @@ ifh.write(data[1]) self.checkinlinesize(transaction, ifh) - if type(text) == str: # only accept immutable objects - self._cache = (node, curr, text) - self._basecache = (curr, chainbase) - return node - def addgroup(self, bundle, linkmapper, transaction): """ add a delta group @@ -1263,6 +1305,46 @@ return content + def getstrippoint(self, minlink): + """find the minimum rev that must be stripped to strip the linkrev + + Returns a tuple containing the minimum rev and a set of all revs that + have linkrevs that will be broken by this strip. + """ + brokenrevs = set() + strippoint = len(self) + + heads = {} + futurelargelinkrevs = set() + for head in self.headrevs(): + headlinkrev = self.linkrev(head) + heads[head] = headlinkrev + if headlinkrev >= minlink: + futurelargelinkrevs.add(headlinkrev) + + # This algorithm involves walking down the rev graph, starting at the + # heads. Since the revs are topologically sorted according to linkrev, + # once all head linkrevs are below the minlink, we know there are + # no more revs that could have a linkrev greater than minlink. + # So we can stop walking. 
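
The _chunkraw change above replaces the old 64k read-ahead with a read window aligned to the configured chunk cache size, which is why reading the revlog backwards also hits the cache. A small sketch of the alignment arithmetic, assuming only that the cache size is a power of two (the RevlogError checks added earlier in this patch enforce exactly that); cache_window is an illustrative name, not a revlog method:

def cache_window(offset, length, cachesize=65536):
    """Return (realoffset, reallength, sliceoffset) for a block-aligned read.

    cachesize must be a power of two; the bit masks below rely on that,
    which is why the new revlog code rejects other cache sizes.
    """
    assert cachesize > 0 and not (cachesize & (cachesize - 1))
    # round the start down to a block boundary
    realoffset = offset & ~(cachesize - 1)
    # read whole blocks covering at least the end of the requested range
    realend = (offset + length + cachesize) & ~(cachesize - 1)
    reallength = realend - realoffset
    # the caller slices the requested bytes back out of the cached window
    return realoffset, reallength, offset - realoffset

# a 100-byte read at offset 70000 with the default 64k cache size
print(cache_window(70000, 100))   # (65536, 65536, 4464)
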
+ while futurelargelinkrevs: + strippoint -= 1 + linkrev = heads.pop(strippoint) + + if linkrev < minlink: + brokenrevs.add(strippoint) + else: + futurelargelinkrevs.remove(linkrev) + + for p in self.parentrevs(strippoint): + if p != nullrev: + plinkrev = self.linkrev(p) + heads[p] = plinkrev + if plinkrev >= minlink: + futurelargelinkrevs.add(plinkrev) + + return strippoint, brokenrevs + def strip(self, minlink, transaction): """truncate the revlog on the first revision with a linkrev >= minlink @@ -1280,10 +1362,8 @@ if len(self) == 0: return - for rev in self: - if self.index[rev][4] >= minlink: - break - else: + rev, _ = self.getstrippoint(minlink) + if rev == len(self): return # first truncate the files on disk diff -r 8c69c69dbcd2 -r fe8e254c7ad6 mercurial/revset.py --- a/mercurial/revset.py Wed Jan 01 21:46:45 2014 -0600 +++ b/mercurial/revset.py Tue Jan 21 14:30:06 2014 -0600 @@ -12,6 +12,7 @@ from i18n import _ import encoding import obsolete as obsmod +import pathutil import repoview def _revancestors(repo, revs, followfirst): @@ -269,6 +270,10 @@ def adds(repo, subset, x): """``adds(pattern)`` Changesets that add a file matching pattern. + + The pattern without explicit kind like ``glob:`` is expected to be + relative to the current directory and match against a file or a + directory. """ # i18n: "adds" is a keyword pat = getstring(x, _("adds requires a pattern")) @@ -525,16 +530,21 @@ """``contains(pattern)`` Revision contains a file matching pattern. See :hg:`help patterns` for information about file patterns. + + The pattern without explicit kind like ``glob:`` is expected to be + relative to the current directory and match against a file exactly + for efficiency. """ # i18n: "contains" is a keyword pat = getstring(x, _("contains requires a pattern")) - m = None s = [] if not matchmod.patkind(pat): + pat = pathutil.canonpath(repo.root, repo.getcwd(), pat) for r in subset: if pat in repo[r]: s.append(r) else: + m = None for r in subset: c = repo[r] if not m or matchmod.patkind(pat) == 'set': @@ -711,20 +721,23 @@ For performance reasons, ``filelog()`` does not show every changeset that affects the requested file(s). See :hg:`help log` for details. For a slower, more accurate result, use ``file()``. + + The pattern without explicit kind like ``glob:`` is expected to be + relative to the current directory and match against a file exactly + for efficiency. """ # i18n: "filelog" is a keyword pat = getstring(x, _("filelog requires a pattern")) - m = matchmod.match(repo.root, repo.getcwd(), [pat], default='relpath', - ctx=repo[None]) s = set() if not matchmod.patkind(pat): - for f in m.files(): - fl = repo.file(f) - for fr in fl: - s.add(fl.linkrev(fr)) + f = pathutil.canonpath(repo.root, repo.getcwd(), pat) + fl = repo.file(f) + for fr in fl: + s.add(fl.linkrev(fr)) else: + m = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=repo[None]) for f in repo[None]: if m(f): fl = repo.file(f) @@ -867,6 +880,8 @@ For a faster but less accurate result, consider using ``filelog()`` instead. + + This predicate uses ``glob:`` as the default kind of pattern. """ # i18n: "file" is a keyword pat = getstring(x, _("file requires a pattern")) @@ -1002,6 +1017,10 @@ def modifies(repo, subset, x): """``modifies(pattern)`` Changesets modifying files matched by pattern. + + The pattern without explicit kind like ``glob:`` is expected to be + relative to the current directory and match against a file or a + directory. 
""" # i18n: "modifies" is a keyword pat = getstring(x, _("modifies requires a pattern")) @@ -1215,6 +1234,10 @@ def removes(repo, subset, x): """``removes(pattern)`` Changesets which remove files matching pattern. + + The pattern without explicit kind like ``glob:`` is expected to be + relative to the current directory and match against a file or a + directory. """ # i18n: "removes" is a keyword pat = getstring(x, _("removes requires a pattern")) diff -r 8c69c69dbcd2 -r fe8e254c7ad6 mercurial/scmutil.py --- a/mercurial/scmutil.py Wed Jan 01 21:46:45 2014 -0600 +++ b/mercurial/scmutil.py Tue Jan 21 14:30:06 2014 -0600 @@ -8,8 +8,9 @@ from i18n import _ from mercurial.node import nullrev import util, error, osutil, revset, similar, encoding, phases, parsers +import pathutil import match as matchmod -import os, errno, re, stat, glob +import os, errno, re, glob if os.name == 'nt': import scmwindows as scmplatform @@ -97,9 +98,10 @@ self._newfiles = set() def __call__(self, f): + if f in self._newfiles: + return fl = encoding.lower(f) - if (fl in self._loweredfiles and f not in self._dirstate and - f not in self._newfiles): + if fl in self._loweredfiles and f not in self._dirstate: msg = _('possible case-folding collision for %s') % f if self._abort: raise util.Abort(msg) @@ -107,100 +109,6 @@ self._loweredfiles.add(fl) self._newfiles.add(f) -class pathauditor(object): - '''ensure that a filesystem path contains no banned components. - the following properties of a path are checked: - - - ends with a directory separator - - under top-level .hg - - starts at the root of a windows drive - - contains ".." - - traverses a symlink (e.g. a/symlink_here/b) - - inside a nested repository (a callback can be used to approve - some nested repositories, e.g., subrepositories) - ''' - - def __init__(self, root, callback=None): - self.audited = set() - self.auditeddir = set() - self.root = root - self.callback = callback - if os.path.lexists(root) and not util.checkcase(root): - self.normcase = util.normcase - else: - self.normcase = lambda x: x - - def __call__(self, path): - '''Check the relative path. - path may contain a pattern (e.g. foodir/**.txt)''' - - path = util.localpath(path) - normpath = self.normcase(path) - if normpath in self.audited: - return - # AIX ignores "/" at end of path, others raise EISDIR. - if util.endswithsep(path): - raise util.Abort(_("path ends in directory separator: %s") % path) - parts = util.splitpath(path) - if (os.path.splitdrive(path)[0] - or parts[0].lower() in ('.hg', '.hg.', '') - or os.pardir in parts): - raise util.Abort(_("path contains illegal component: %s") % path) - if '.hg' in path.lower(): - lparts = [p.lower() for p in parts] - for p in '.hg', '.hg.': - if p in lparts[1:]: - pos = lparts.index(p) - base = os.path.join(*parts[:pos]) - raise util.Abort(_("path '%s' is inside nested repo %r") - % (path, base)) - - normparts = util.splitpath(normpath) - assert len(parts) == len(normparts) - - parts.pop() - normparts.pop() - prefixes = [] - while parts: - prefix = os.sep.join(parts) - normprefix = os.sep.join(normparts) - if normprefix in self.auditeddir: - break - curpath = os.path.join(self.root, prefix) - try: - st = os.lstat(curpath) - except OSError, err: - # EINVAL can be raised as invalid path syntax under win32. - # They must be ignored for patterns can be checked too. 
- if err.errno not in (errno.ENOENT, errno.ENOTDIR, errno.EINVAL): - raise - else: - if stat.S_ISLNK(st.st_mode): - raise util.Abort( - _('path %r traverses symbolic link %r') - % (path, prefix)) - elif (stat.S_ISDIR(st.st_mode) and - os.path.isdir(os.path.join(curpath, '.hg'))): - if not self.callback or not self.callback(curpath): - raise util.Abort(_("path '%s' is inside nested " - "repo %r") - % (path, prefix)) - prefixes.append(normprefix) - parts.pop() - normparts.pop() - - self.audited.add(normpath) - # only add prefixes to the cache after checking everything: we don't - # want to add "foo/bar/baz" before checking if there's a "foo/.hg" - self.auditeddir.update(prefixes) - - def check(self, path): - try: - self(path) - return True - except (OSError, util.Abort): - return False - class abstractvfs(object): """Abstract base class; cannot be instantiated""" @@ -242,6 +150,9 @@ finally: fp.close() + def chmod(self, path, mode): + return os.chmod(self.join(path), mode) + def exists(self, path=None): return os.path.exists(self.join(path)) @@ -251,6 +162,9 @@ def isdir(self, path=None): return os.path.isdir(self.join(path)) + def isfile(self, path=None): + return os.path.isfile(self.join(path)) + def islink(self, path=None): return os.path.islink(self.join(path)) @@ -263,12 +177,18 @@ def makedirs(self, path=None, mode=None): return util.makedirs(self.join(path), mode) + def makelock(self, info, path): + return util.makelock(info, self.join(path)) + def mkdir(self, path=None): return os.mkdir(self.join(path)) def readdir(self, path=None, stat=None, skip=None): return osutil.listdir(self.join(path), stat, skip) + def readlock(self, path): + return util.readlock(self.join(path)) + def rename(self, src, dst): return util.rename(self.join(src), self.join(dst)) @@ -309,7 +229,7 @@ def _setmustaudit(self, onoff): self._audit = onoff if onoff: - self.audit = pathauditor(self.base) + self.audit = pathutil.pathauditor(self.base) else: self.audit = util.always @@ -444,52 +364,6 @@ return self.vfs(path, mode, *args, **kw) -def canonpath(root, cwd, myname, auditor=None): - '''return the canonical path of myname, given cwd and root''' - if util.endswithsep(root): - rootsep = root - else: - rootsep = root + os.sep - name = myname - if not os.path.isabs(name): - name = os.path.join(root, cwd, name) - name = os.path.normpath(name) - if auditor is None: - auditor = pathauditor(root) - if name != rootsep and name.startswith(rootsep): - name = name[len(rootsep):] - auditor(name) - return util.pconvert(name) - elif name == root: - return '' - else: - # Determine whether `name' is in the hierarchy at or beneath `root', - # by iterating name=dirname(name) until that causes no change (can't - # check name == '/', because that doesn't work on windows). The list - # `rel' holds the reversed list of components making up the relative - # file name we want. - rel = [] - while True: - try: - s = util.samefile(name, root) - except OSError: - s = False - if s: - if not rel: - # name was actually the same as root (maybe a symlink) - return '' - rel.reverse() - name = os.path.join(*rel) - auditor(name) - return util.pconvert(name) - dirname, basename = util.split(name) - rel.append(basename) - if dirname == name: - break - name = dirname - - raise util.Abort(_("%s not under root '%s'") % (myname, root)) - def walkrepos(path, followsym=False, seen_dirs=None, recurse=False): '''yield every hg repository under path, always recursively. 
The recurse flag will only control recursion into repo working dirs''' @@ -767,7 +641,7 @@ This is different from dirstate.status because it doesn't care about whether files are modified or clean.''' added, unknown, deleted, removed = [], [], [], [] - audit_path = pathauditor(repo.root) + audit_path = pathutil.pathauditor(repo.root) ctx = repo[None] dirstate = repo.dirstate @@ -851,14 +725,14 @@ "Mercurial)") % "', '".join(missings)) return requirements -class filecacheentry(object): - def __init__(self, path, stat=True): +class filecachesubentry(object): + def __init__(self, path, stat): self.path = path self.cachestat = None self._cacheable = None if stat: - self.cachestat = filecacheentry.stat(self.path) + self.cachestat = filecachesubentry.stat(self.path) if self.cachestat: self._cacheable = self.cachestat.cacheable() @@ -868,7 +742,7 @@ def refresh(self): if self.cacheable(): - self.cachestat = filecacheentry.stat(self.path) + self.cachestat = filecachesubentry.stat(self.path) def cacheable(self): if self._cacheable is not None: @@ -882,7 +756,7 @@ if not self.cacheable(): return True - newstat = filecacheentry.stat(self.path) + newstat = filecachesubentry.stat(self.path) # we may not know if it's cacheable yet, check again now if newstat and self._cacheable is None: @@ -906,24 +780,44 @@ if e.errno != errno.ENOENT: raise +class filecacheentry(object): + def __init__(self, paths, stat=True): + self._entries = [] + for path in paths: + self._entries.append(filecachesubentry(path, stat)) + + def changed(self): + '''true if any entry has changed''' + for entry in self._entries: + if entry.changed(): + return True + return False + + def refresh(self): + for entry in self._entries: + entry.refresh() + class filecache(object): - '''A property like decorator that tracks a file under .hg/ for updates. + '''A property like decorator that tracks files under .hg/ for updates. Records stat info when called in _filecache. - On subsequent calls, compares old stat info with new info, and recreates - the object when needed, updating the new stat info in _filecache. + On subsequent calls, compares old stat info with new info, and recreates the + object when any of the files changes, updating the new stat info in + _filecache. Mercurial either atomic renames or appends for files under .hg, so to ensure the cache is reliable we need the filesystem to be able to tell us if a file has been replaced. If it can't, we fallback to recreating the object on every call (essentially the same behaviour as - propertycache).''' - def __init__(self, path): - self.path = path + propertycache). + + ''' + def __init__(self, *paths): + self.paths = paths def join(self, obj, fname): - """Used to compute the runtime path of the cached file. + """Used to compute the runtime path of a cached file. 
Users should subclass filecache and provide their own version of this function to call the appropriate join function on 'obj' (an instance @@ -948,11 +842,11 @@ if entry.changed(): entry.obj = self.func(obj) else: - path = self.join(obj, self.path) + paths = [self.join(obj, path) for path in self.paths] # We stat -before- creating the object so our cache doesn't lie if # a writer modified between the time we read and stat - entry = filecacheentry(path) + entry = filecacheentry(paths, True) entry.obj = self.func(obj) obj._filecache[self.name] = entry @@ -964,7 +858,8 @@ if self.name not in obj._filecache: # we add an entry for the missing value because X in __dict__ # implies X in _filecache - ce = filecacheentry(self.join(obj, self.path), False) + paths = [self.join(obj, path) for path in self.paths] + ce = filecacheentry(paths, False) obj._filecache[self.name] = ce else: ce = obj._filecache[self.name] diff -r 8c69c69dbcd2 -r fe8e254c7ad6 mercurial/setdiscovery.py --- a/mercurial/setdiscovery.py Wed Jan 01 21:46:45 2014 -0600 +++ b/mercurial/setdiscovery.py Tue Jan 21 14:30:06 2014 -0600 @@ -8,7 +8,8 @@ from node import nullid from i18n import _ -import random, util, dagutil +import random +import util, dagutil def _updatesample(dag, nodes, sample, always, quicksamplesize=0): # if nodes is empty we scan the entire graph diff -r 8c69c69dbcd2 -r fe8e254c7ad6 mercurial/statichttprepo.py --- a/mercurial/statichttprepo.py Wed Jan 01 21:46:45 2014 -0600 +++ b/mercurial/statichttprepo.py Tue Jan 21 14:30:06 2014 -0600 @@ -54,8 +54,10 @@ data = data[:bytes] self.pos += len(data) return data + def readlines(self): + return self.read().splitlines(True) def __iter__(self): - return iter(self.read().splitlines(1)) + return iter(self.readlines()) def close(self): pass diff -r 8c69c69dbcd2 -r fe8e254c7ad6 mercurial/subrepo.py --- a/mercurial/subrepo.py Wed Jan 01 21:46:45 2014 -0600 +++ b/mercurial/subrepo.py Tue Jan 21 14:30:06 2014 -0600 @@ -9,7 +9,9 @@ import xml.dom.minidom import stat, subprocess, tarfile from i18n import _ -import config, scmutil, util, node, error, cmdutil, bookmarks, match as matchmod +import config, util, node, error, cmdutil, bookmarks, match as matchmod +import phases +import pathutil hg = None propertycache = util.propertycache @@ -344,12 +346,43 @@ import hg as h hg = h - scmutil.pathauditor(ctx._repo.root)(path) + pathutil.pathauditor(ctx._repo.root)(path) state = ctx.substate[path] if state[2] not in types: raise util.Abort(_('unknown subrepo type %s') % state[2]) return types[state[2]](ctx, path, state[:2]) +def newcommitphase(ui, ctx): + commitphase = phases.newcommitphase(ui) + substate = getattr(ctx, "substate", None) + if not substate: + return commitphase + check = ui.config('phases', 'checksubrepos', 'follow') + if check not in ('ignore', 'follow', 'abort'): + raise util.Abort(_('invalid phases.checksubrepos configuration: %s') + % (check)) + if check == 'ignore': + return commitphase + maxphase = phases.public + maxsub = None + for s in sorted(substate): + sub = ctx.sub(s) + subphase = sub.phase(substate[s][1]) + if maxphase < subphase: + maxphase = subphase + maxsub = s + if commitphase < maxphase: + if check == 'abort': + raise util.Abort(_("can't commit in %s phase" + " conflicting %s from subrepository %s") % + (phases.phasenames[commitphase], + phases.phasenames[maxphase], maxsub)) + ui.warn(_("warning: changes are committed in" + " %s phase from subrepository %s\n") % + (phases.phasenames[maxphase], maxsub)) + return maxphase + return commitphase + # subrepo 
classes need to implement the following abstract class: class abstractsubrepo(object): @@ -384,6 +417,11 @@ """ raise NotImplementedError + def phase(self, state): + """returns phase of specified state in the subrepository. + """ + return phases.public + def remove(self): """remove the subrepo @@ -651,6 +689,10 @@ return node.hex(n) @annotatesubrepoerror + def phase(self, state): + return self._repo[state].phase() + + @annotatesubrepoerror def remove(self): # we can't fully delete the repository as it may contain # local-only history diff -r 8c69c69dbcd2 -r fe8e254c7ad6 mercurial/templatekw.py --- a/mercurial/templatekw.py Wed Jan 01 21:46:45 2014 -0600 +++ b/mercurial/templatekw.py Tue Jan 21 14:30:06 2014 -0600 @@ -117,7 +117,8 @@ if rev in latesttags: continue ctx = repo[rev] - tags = [t for t in ctx.tags() if repo.tagtype(t) == 'global'] + tags = [t for t in ctx.tags() + if (repo.tagtype(t) and repo.tagtype(t) != 'local')] if tags: latesttags[rev] = ctx.date()[0], 0, ':'.join(sorted(tags)) continue @@ -220,11 +221,12 @@ return '%s: +%s/-%s' % (len(stats), adds, removes) def showextras(**args): - templ = args['templ'] - for key, value in sorted(args['ctx'].extra().items()): - args = args.copy() - args.update(dict(key=key, value=value)) - yield templ('extra', **args) + """:extras: List of dicts with key, value entries of the 'extras' + field of this changeset.""" + extras = args['ctx'].extra() + c = [{'key': x[0], 'value': x[1]} for x in sorted(extras.items())] + f = _showlist('extra', c, plural='extras', **args) + return _hybrid(f, c, lambda x: '%s=%s' % (x['key'], x['value'])) def showfileadds(**args): """:file_adds: List of strings. Files added by this changeset.""" @@ -392,6 +394,7 @@ 'parents': _showparents, } dockeywords.update(keywords) +del dockeywords['branches'] # tell hggettext to extract docstrings from these functions: i18nfunctions = dockeywords.values() diff -r 8c69c69dbcd2 -r fe8e254c7ad6 mercurial/templates/gitweb/graph.tmpl --- a/mercurial/templates/gitweb/graph.tmpl Wed Jan 01 21:46:45 2014 -0600 +++ b/mercurial/templates/gitweb/graph.tmpl Tue Jan 21 14:30:06 2014 -0600 @@ -108,4 +108,15 @@ | {changenav%navgraph} + + {footer} diff -r 8c69c69dbcd2 -r fe8e254c7ad6 mercurial/templates/gitweb/shortlog.tmpl --- a/mercurial/templates/gitweb/shortlog.tmpl Wed Jan 01 21:46:45 2014 -0600 +++ b/mercurial/templates/gitweb/shortlog.tmpl Tue Jan 21 14:30:06 2014 -0600 @@ -32,7 +32,7 @@
 
- +
{entries%shortlogentry}
@@ -40,4 +40,19 @@ {changenav%navshort} + + {footer} diff -r 8c69c69dbcd2 -r fe8e254c7ad6 mercurial/templates/monoblue/graph.tmpl --- a/mercurial/templates/monoblue/graph.tmpl Wed Jan 01 21:46:45 2014 -0600 +++ b/mercurial/templates/monoblue/graph.tmpl Tue Jan 21 14:30:06 2014 -0600 @@ -104,4 +104,15 @@ | {changenav%navgraph} + + {footer} diff -r 8c69c69dbcd2 -r fe8e254c7ad6 mercurial/templates/monoblue/shortlog.tmpl --- a/mercurial/templates/monoblue/shortlog.tmpl Wed Jan 01 21:46:45 2014 -0600 +++ b/mercurial/templates/monoblue/shortlog.tmpl Tue Jan 21 14:30:06 2014 -0600 @@ -33,7 +33,7 @@ - +
{entries%shortlogentry}
@@ -41,4 +41,19 @@ {changenav%navshort} + + {footer} diff -r 8c69c69dbcd2 -r fe8e254c7ad6 mercurial/templates/paper/graph.tmpl --- a/mercurial/templates/paper/graph.tmpl Wed Jan 01 21:46:45 2014 -0600 +++ b/mercurial/templates/paper/graph.tmpl Tue Jan 21 14:30:06 2014 -0600 @@ -124,7 +124,7 @@